diff --git a/CMakeLists.txt b/CMakeLists.txt index 6c0d466f5b5174e5be9b22c509a3fd92d7feb6a6..2939ceb5fbe4997638f2d53dd77e96b50eff4f21 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,17 +1,21 @@ cmake_minimum_required(VERSION 3.14) project(TFAdapter) -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_C_FLAGS "-O2 -DNDEBUG -Wall -fPIC -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -pipe ${CMAKE_C_FLAGS}") -set(CMAKE_CXX_FLAGS "-std=c++17 -O2 -DNDEBUG -Wall -fPIC -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -pipe ${CMAKE_CXX_FLAGS}") - -include(${CMAKE_CURRENT_LIST_DIR}/cmake/nlohmann_json.cmake) -include(${CMAKE_CURRENT_LIST_DIR}/cmake/secure_c.cmake) -include(${CMAKE_CURRENT_LIST_DIR}/cmake/tensorflow.cmake) -include_directories(${CMAKE_CURRENT_LIST_DIR}) -include_directories(${CMAKE_CURRENT_LIST_DIR}/inc) -include_directories(${CMAKE_CURRENT_LIST_DIR}/inc/external) -include_directories(${CMAKE_CURRENT_LIST_DIR}/inc/soft_dp) + set(CMAKE_CXX_STANDARD 17) + set(CMAKE_C_FLAGS "-O2 -Wall -fPIC -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -pipe ${CMAKE_C_FLAGS}") + set(CMAKE_CXX_FLAGS "-std=c++17 -O2 -Wall -fPIC -fstack-protector-all -Wl,-z,relro,-z,now,-z,noexecstack -pipe ${CMAKE_CXX_FLAGS}") + # build external projects + if(DEFINED ENV{D_PKG_SERVER}) + set(TF_PKG_SERVER $ENV{D_PKG_SERVER}) + message("Download packages from PKG server") + endif() + include(${CMAKE_CURRENT_LIST_DIR}/cmake/nlohmann_json.cmake) + include(${CMAKE_CURRENT_LIST_DIR}/cmake/secure_c.cmake) + include(${CMAKE_CURRENT_LIST_DIR}/cmake/tensorflow.cmake) + include_directories(${CMAKE_CURRENT_LIST_DIR}) + include_directories(${CMAKE_CURRENT_LIST_DIR}/inc) + include_directories(${CMAKE_CURRENT_LIST_DIR}/inc/external) + include_directories(${CMAKE_CURRENT_LIST_DIR}/inc/soft_dp) if (NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/tools/COMPILE_FLAGS OR NOT EXISTS ${CMAKE_CURRENT_LIST_DIR}/tools/LINK_FLAGS OR NOT EXISTS diff --git a/README.en.md b/README.en.md index 816b01ea62cbae044e7a4ec09ce76b983cb921c7..d95b704b88bf4a5472c5175940c6f1cecdd42ba9 100755 --- a/README.en.md +++ b/README.en.md @@ -5,7 +5,7 @@ TF_Adapter is committed to providing the outstanding computing power of Shengteng AI processor to developers who use the Tensorflow framework. Developers only need to install the TF_Adapter plug-in and add a small amount of configuration to the existing Tensorflow script to accelerate their training tasks on the Shengteng AI processor. -![framework](docs/framework.jpg) +![tfadapter](https://images.gitee.com/uploads/images/2020/1027/094640_8f305b88_8175427.jpeg "framework.jpg") You can read [TF_Adapter Interface](https://support.huaweicloud.com/mprtg-A800_9000_9010/atlasprtg_13_0013.html) for more details。 ## Installation Guide @@ -13,79 +13,32 @@ You can build the TF_Adapter software package from the source code and install it on the Shengteng AI processor environment. > The TF_Adapter plug-in has a strict matching relationship with Tensorflow. Before building from source code, you need to ensure that it has been installed correctly [Tensorflow v1.15.0 ->版本](https://www.tensorflow.org/install) 。 +>version](https://www.tensorflow.org/install/pip). -At the same time, the system meets the following requirements +TF_Adapter can also be built from source. Before building, please make sure that you have access to an Ascend 910 environment as the compiling environment, and that the following software requirements are fulfilled.
- Linux OS - GCC >= 7.3.0 - CMake >= 3.14.0 - [SWIG](http://www.swig.org/download.html) #### Download - ``` git clone https://gitee.com/ascend/tensorflow.git cd tensorflow ``` -#### Configure environment -```BASH -./configure +#### Execute script to generate installation package ``` - -By default, executing the above command will pop up the following interactive session window - -> Your session may be different. -```BASH -Please specify the location of python with available tensorflow v1.15.0 installed. [Default is /usr/bin/python] -(You can make this quiet by set env [ASCEND_TARGET_PYTHON_BIN_PATH]): +chmod +x build.sh +./build.sh ``` -At this point, you are required to enter the path of the python interpreter with Tensorflow v1.15.0 installed. If the default path is correct, press Enter, otherwise, please enter the correct python -Interpreter path. -> You can set ASCEND_TARGET_PYTHON_BIN_PATH Environment variable to suppress the pop-up of the interactive window, but still make sure that the path is valid, otherwise, you will still be asked to enter the correct path to the python interpreter. +After the script is executed successfully, a tfadapter.tar package will be generated in the output directory. -After typing, it will take a few seconds to make sure your input is valid, and then the following interactive window will pop up -``` -Please specify the location of ascend. [Default is /usr/local/Ascend] -(You can make this quiet by set env [ASCEND_INSTALL_PATH]): -``` - -At this time, you are required to enter the installation path of the Ascend processor development kit. If the default path is correct, press Enter, otherwise, please enter the correct path to the Ascend processor development kit installation. - -> You can set ASCEND_INSTALL_PATH Environment variables to suppress the pop-up of interactive windows, but still make sure that the path is valid, otherwise, you will still be asked to enter the correct installation path of the Ascend processor development kit. - - -After typing, it will follow the interaction window below -``` -Please specify the location of swig. [Default is /usr/local/bin/swig] -(You can make this quiet by set env [SWIG_INSTALL_PATH]): -``` -At this time, you are required to enter the path of the SWIG executable file. If the default path is correct, press Enter, otherwise, please enter the correct path of the SWIG executable file. - -> You can suppress the interactive window pop-up by setting the SWIG_INSTALL_PATH environment variable, but make sure that the path is valid, otherwise, you will still be asked to enter the correct SWIG executable file path. - -After typing, wait for the configuration to complete. -#### Configure cmake - -``` -mkdir build -cd build -cmake .. -``` -#### Build -```BASH -make -j8 -``` - -After compilation, the installation package will be generated in -``` -./dist/python/dist/npu_bridge-1.15.0-py3-none-any.whl -``` #### Install - -You can install the TF_Adapter plug-in using pip. +Unzip the tfadapter.tar file to obtain npu_bridge-1.15.0-py3-none-any.whl, +then you can install the TF_Adapter plug-in using pip.
``` pip install ./dist/python/dist/npu_bridge-1.15.0-py3-none-any.whl ``` diff --git a/README.md b/README.md index b5dbcd4b0827ce48cf9e6166f88307095c9e39d9..7b215fd04195132237b291c2682343a33fd4c0ba 100755 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Tensorflow Adapter For Ascend(简称TF_Adapter)致力于将昇腾AI处理器卓越的运算能力,便捷地提供给使用Tensorflow框架的开发者。 开发者只需安装TF_Adapter插件,并在现有Tensorflow脚本中添加少量配置,即可实现在昇腾AI处理器上加速自己的训练任务。 -![framework](docs/framework.jpg) +![tfadapter](https://images.gitee.com/uploads/images/2020/1027/094640_8f305b88_8175427.jpeg "framework.jpg") 您可以通过阅读 [TF_Adapter接口文档](https://support.huaweicloud.com/mprtg-A800_9000_9010/atlasprtg_13_0013.html) 获取更多使用细节。 ## 安装 @@ -13,14 +13,15 @@ Tensorflow Adapter For Ascend(简称TF_Adapter)致力于将昇腾AI处理器 您可以从源代码构建 TF_Adapter 软件包并将其安装在昇腾AI处理器环境上。 > TF_Adapter 插件与 Tensorflow 有严格的匹配关系,从源码构建前,您需要确保已经正确安装了 [Tensorflow v1.15.0 ->版本](https://www.tensorflow.org/install) 。 +>版本](https://www.tensorflow.org/install/pip) 。 -同时系统满足以下要求: + +tfadapter也支持由源码编译,进行源码编译前,首先确保你有昇腾910 AI处理器的环境,同时系统满足以下要求: - Linux OS - GCC >= 7.3.0 - CMake >= 3.14.0 -- [SWIG](http://www.swig.org/download.html) - +- [SWIG](http://www.swig.org/download.html) + #### 下载源码 ``` @@ -28,63 +29,19 @@ git clone https://gitee.com/ascend/tensorflow.git cd tensorflow ``` -#### 配置安装环境 -```BASH -./configure -``` -默认情况下,执行上述命会弹出如下的交互式会话窗口 -> 您的会话可能有所不同。 -```BASH -Please specify the location of python with available tensorflow v1.15.0 installed. [Default is /usr/bin/python] -(You can make this quiet by set env [ASCEND_TARGET_PYTHON_BIN_PATH]): -``` -此时,要求您输入安装了 Tensorflow v1.15.0 版本的python解释器路径,如果默认路径是正确的,直接回车,否则请输入正确的 python -解释器路径。 -> 你可以通过设置 ASCEND_TARGET_PYTHON_BIN_PATH ->的环境变量,来抑制交互式窗口弹出,但是要确保路径是有效的,否则,仍然会要求您输入正确的 python 解释器路径。 - -键入后,会耗费几秒钟以确保您的输入是有效的,接着,会弹出下面的交互式窗口 +#### 执行脚本生成安装包 ``` -Please specify the location of ascend. [Default is /usr/local/Ascend] -(You can make this quiet by set env [ASCEND_INSTALL_PATH]): +chmod +x build.sh +./build.sh ``` -此时,要求您输入昇腾处理器开发套件的安装路径,如果默认路径是正确的,直接回车,否则请输入正确的昇腾处理器开发套件安装路径。 - -> 你可以通过设置ASCEND_INSTALL_PATH ->的环境变量,来抑制交互式窗口弹出,但是要确保路径是有效的,否则,仍然会要求您输入正确的昇腾处理器开发套件安装路径。 -键入后,会接着弹出下面的交互式窗口 -``` -Please specify the location of swig. [Default is /usr/local/bin/swig] -(You can make this quiet by set env [SWIG_INSTALL_PATH]): -``` -此时,要求您输入SWIG可执行文件路径,如果默认路径是正确的,直接回车,否则请输入正确的SWIG可执行文件路径。 +脚本执行成功后,会在output目录生成tfadapter.tar压缩文件 -> 你可以通过设置SWIG_INSTALL_PATH ->的环境变量,来抑制交互式窗口弹出,但是要确保路径是有效的,否则,仍然会要求您输入正确的SWIG可执行文件路径。 - - -键入后,等待配置完成。 -#### 配置cmake -> 根据您的网络状况,可能需要数分钟来下载TF_Adapter的依赖项目以完成配置。 -``` -mkdir build -cd build -cmake .. -``` -#### 执行编译 -> 您应当根据实际编译环境,设置合适的并发编译数以提升编译速度。 -```BASH -make -j8 -``` -编译结束后,安装包会生成在 -``` -./dist/python/dist/npu_bridge-1.15.0-py3-none-any.whl -``` -#### 安装 -您可以使用 pip 安装 TF_Adapter 插件。 +#### 安装插件包 +解压tfadapter.tar文件,生成npu_bridge-1.15.0-py3-none-any.whl, +然后使用 pip 安装 TF_Adapter 插件。 ``` -pip install ./dist/python/dist/npu_bridge-1.15.0-py3-none-any.whl +pip install npu_bridge-1.15.0-py3-none-any.whl ``` 需要注意的是, 您应当保证安装路径与您编译时指定的 python 解释器搜索路径是一致的。 diff --git a/build.sh b/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..e08fce2983688898445e523bbc8d4456f3949906 --- /dev/null +++ b/build.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# Copyright 2019-2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ + +set -e +BASE_PATH=$(cd "$(dirname $0)"; pwd) +RELEASE_PATH="${BASE_PATH}/output" +export BUILD_PATH="${BASE_PATH}/build" +INSTALL_PATH="${BUILD_PATH}/install" +CMAKE_PATH="${BUILD_PATH}/tfadapter" + +# print usage message +usage() { + echo "Usage:" + echo " bash build.sh [-h] [-j[n]] [-v] [-g]" + echo "" + echo "Options:" + echo " -h Print usage" + echo " -j[n] Set the number of threads used to build tfadapter, default is 8" + echo " -v Verbose" + echo " -g GCC compiler prefix, used to specify the compiler toolchain" + echo "to be continued ..." +} + +logging() { + echo "[INFO] $@" +} + +# parse and set options +checkopts() { + VERBOSE="" + THREAD_NUM=8 + GCC_PREFIX="" + # Process the options + while getopts 'hj:vg:' opt + do + case "${opt}" in + h) usage + exit 0 ;; + j) THREAD_NUM=$OPTARG ;; + v) VERBOSE="VERBOSE=1" ;; + g) GCC_PREFIX=$OPTARG ;; + *) logging "Undefined option: ${opt}" + usage + exit 1 ;; + esac + done +} + +# mkdir directory +mk_dir() { + local create_dir="$1" + mkdir -pv "${create_dir}" + logging "Created ${create_dir}" +} + +# create build path +build_tfadapter() { + logging "Create build directory and build tfadapter" + cd "${BASE_PATH}" && ./configure + CMAKE_ARGS="-DENABLE_OPEN_SRC=True -DBUILD_PATH=$BUILD_PATH" + if [[ "$GCC_PREFIX" != "" ]]; then + CMAKE_ARGS="$CMAKE_ARGS -DGCC_PREFIX=$GCC_PREFIX" + fi + logging "CMake Args: ${CMAKE_ARGS}" + + mk_dir "${CMAKE_PATH}" + cd "${CMAKE_PATH}" && cmake ${CMAKE_ARGS} ../.. + make ${VERBOSE} -j${THREAD_NUM} + logging "tfadapter build success!"
+} + +release_tfadapter() { + logging "Create output directory" + mk_dir "${RELEASE_PATH}" + RELEASE_TARGET="tfadapter.tar" + cd ${CMAKE_PATH}/dist/python/dist && mkdir -p tfplugin/bin && mv npu_bridge-*.whl tfplugin/bin && tar cfz "${RELEASE_TARGET}" * && mv "${RELEASE_TARGET}" "${RELEASE_PATH}" +} + +main() { + checkopts "$@" + # tfadapter build start + logging "---------------- tfadapter build start ----------------" + ${GCC_PREFIX}g++ -v + build_tfadapter + release_tfadapter + logging "---------------- tfadapter build finished ----------------" +} + +main "$@" \ No newline at end of file diff --git a/cmake/nlohmann_json.cmake b/cmake/nlohmann_json.cmake index b3832c63f678fc338f40a4bc5c370e6efacd7223..b2ce92c70bb2633d9b536632179b1c9c0a46f83d 100755 --- a/cmake/nlohmann_json.cmake +++ b/cmake/nlohmann_json.cmake @@ -1,10 +1,19 @@ include(FetchContent) - -FetchContent_Declare( - nlohmann_json - URL https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip - URL_HASH MD5=0dc903888211db3a0f170304cd9f3a89 -) +set(_json_url "") +if(TF_PKG_SERVER) + set(_json_url "${TF_PKG_SERVER}/libs/json/v3.6.1/include.zip") + FetchContent_Declare( + nlohmann_json + URL ${_json_url} + URL_HASH MD5=0dc903888211db3a0f170304cd9f3a89 + ) +else() + FetchContent_Declare( + nlohmann_json + URL https://github.com/nlohmann/json/releases/download/v3.6.1/include.zip + URL_HASH MD5=0dc903888211db3a0f170304cd9f3a89 + ) +endif() FetchContent_GetProperties(nlohmann_json) if (NOT nlohmann_json_POPULATED) FetchContent_Populate(nlohmann_json) diff --git a/cmake/secure_c.cmake b/cmake/secure_c.cmake index ced768af1ac965781e1ca2818378215b0a3f146c..4e5ff552fe0e606147540ed9a49cc9eb82ef7467 100755 --- a/cmake/secure_c.cmake +++ b/cmake/secure_c.cmake @@ -1,10 +1,19 @@ include(FetchContent) - -FetchContent_Declare( - secure_c - URL https://gitee.com/openeuler/libboundscheck/repository/archive/v1.1.10.tar.gz - URL_HASH MD5=193f0ca5246c1dd84920db34d2d8249f -) +set(_json_url "") +if(TF_PKG_SERVER) + set(_json_url "${TF_PKG_SERVER}/libs/securec/v1.1.10.tar.gz") + FetchContent_Declare( + secure_c + URL ${_json_url} + URL_HASH MD5=193f0ca5246c1dd84920db34d2d8249f + ) +else() + FetchContent_Declare( + secure_c + URL https://gitee.com/openeuler/libboundscheck/repository/archive/v1.1.10.tar.gz + URL_HASH MD5=193f0ca5246c1dd84920db34d2d8249f + ) +endif() FetchContent_GetProperties(secure_c) if (NOT secure_c_POPULATED) FetchContent_Populate(secure_c) diff --git a/cmake/tensorflow.cmake b/cmake/tensorflow.cmake index 0353c2c3b9cde2889f2d6636b1cf77054eb908e0..d7b87279f1853b883df58d2b251c75a1783a000f 100755 --- a/cmake/tensorflow.cmake +++ b/cmake/tensorflow.cmake @@ -1,10 +1,19 @@ include(FetchContent) - -FetchContent_Declare( - tensorflow - URL https://github.com/tensorflow/tensorflow/archive/v1.15.0.zip - URL_HASH MD5=0ad811d8d59f56ecc1a6032af997ec1d -) +set(_json_url "") +if(TF_PKG_SERVER) + set(_json_url "${TF_PKG_SERVER}/libs/tensorflow/v1.15.0.zip") + FetchContent_Declare( + tensorflow + URL ${_json_url} + URL_HASH MD5=0ad811d8d59f56ecc1a6032af997ec1d + ) +else() + FetchContent_Declare( + tensorflow + URL https://github.com/tensorflow/tensorflow/archive/v1.15.0.zip + URL_HASH MD5=0ad811d8d59f56ecc1a6032af997ec1d + ) +endif() FetchContent_GetProperties(tensorflow) if (NOT tensorflow_POPULATED) FetchContent_Populate(tensorflow) diff --git a/configure.py b/configure.py index e268aee4299759d26f2dba9937af9f0e87eede67..73d6d98e43de1f479af0b4160a347e0df9ea0f3a 100755 --- a/configure.py +++ b/configure.py @@ -26,9 +26,10 @@ except 
ImportError: from distutils.spawn import find_executable as which _COMPAT_TENSORFLOW_VERSION = "1.15.0" -_PYTHON_BIN_PATH_ENV = "ASCEND_TARGET_PYTHON_BIN_PATH" -_ASCEND_INSTALL_PATH_ENV = "ASCEND_INSTALL_PATH" -_SWIG_INSTALL_PATH_ENV = "SWIG_INSTALL_PATH" +_COMPAT_PYTHON_VERSION = "Python 3.7" +_COMPAT_SWIG_VERSION = "SWIG Version " +_ASCEND_INSTALL_PATH_ENV = "ASCEND_CUSTOM_PATH" + def run_command(cmd): @@ -49,13 +50,26 @@ def get_input(question): def real_config_path(file): return os.path.join("tools", file) -def setup_python(env_path): +def setup_python(): """Get python install path.""" - default_python_bin_path = sys.executable - ask_python_bin_path = ('Please specify the location of python with valid ' - 'tensorflow 1.15.0 site-packages installed. [Default ' - 'is %s]\n(You can make this quiet by set env [ASCEND_TARGET_PYTHON_BIN_PATH]): ') % default_python_bin_path - custom_python_bin_path = env_path + default_python_bin_path = which('python3') + custom_python_bin_path = '' + ask_python_bin_path = '' + if default_python_bin_path: + custom_python_bin_path = default_python_bin_path + compile_args = run_command([ + custom_python_bin_path, '--version']) + if not _COMPAT_PYTHON_VERSION in compile_args: + print('Invalid default python version: %s, only Python 3.7 is supported.' % compile_args) + ask_python_bin_path = ('Please specify the location of python with valid ' + 'tensorflow 1.15.0 site-packages installed. [Default ' + 'is %s]\n(Please enter the correct python path: ') % default_python_bin_path + custom_python_bin_path = '' + else: + ask_python_bin_path = ('Please specify the location of python with valid ' + 'tensorflow 1.15.0 site-packages installed. [Default ' + 'is %s]\n(Please enter the correct python path: ') % default_python_bin_path + while True: if not custom_python_bin_path: python_bin_path = get_input(ask_python_bin_path) @@ -106,17 +120,12 @@ def setup_python(env_path): def setup_ascend(env_path): """Get ascend install path.""" default_ascend_path = "/usr/local/Ascend" - ask_ascend_path = ('Please specify the location of ascend. [Default is ' - '%s]\n(You can make this quiet by set env [ASCEND_INSTALL_PATH]): ') % default_ascend_path custom_ascend_path = env_path while True: if not custom_ascend_path: - ascend_path = get_input(ask_ascend_path) + ascend_path = default_ascend_path else: ascend_path = custom_ascend_path - custom_ascend_path = None - if not ascend_path: - ascend_path = default_ascend_path # Check if the path is valid if os.path.isdir(ascend_path) and os.access(ascend_path, os.X_OK): break @@ -130,12 +139,24 @@ def setup_ascend(env_path): f.write(os.path.join(ascend_path, "driver", "lib64", "driver", "libtsdclient.so\n")) f.write(os.path.join(ascend_path, "driver", "lib64", "common", "libc_sec.so\n")) -def setup_swig(env_path): +def setup_swig(): """Get swig install path.""" - default_swig_path = which('swig') or "/usr/bin/swig" - ask_swig_path = ('Please specify the location of swig. [Default is ' - '%s]\n(You can make this quiet by set env [SWIG_INSTALL_PATH]): ') % default_swig_path - custom_swig_path = env_path + default_swig_path = which('swig') + custom_swig_path = '' + ask_swig_path = '' + if default_swig_path: + custom_swig_path = default_swig_path + compile_args = run_command([ + custom_swig_path, '-version']) + if not _COMPAT_SWIG_VERSION in compile_args: + print('Invalid default swig version: %s.' % compile_args) + ask_swig_path = ('Please specify the location of swig. 
[Default is ' + '%s]\n(Please enter the correct swig path: ') % default_swig_path + custom_swig_path = '' + else: + ask_swig_path = ('Please specify the location of swig. [Default is ' + '%s]\n(Please enter the correct swig path: ') % default_swig_path + while True: if not custom_swig_path: swig_path = get_input(ask_swig_path) @@ -159,9 +180,9 @@ def setup_swig(env_path): def main(): env_snapshot = dict(os.environ) - setup_python(env_snapshot.get(_PYTHON_BIN_PATH_ENV)) + setup_python() setup_ascend(env_snapshot.get(_ASCEND_INSTALL_PATH_ENV)) - setup_swig(env_snapshot.get(_SWIG_INSTALL_PATH_ENV)) + setup_swig() if __name__ == '__main__': diff --git a/docs/framework.jpg b/docs/framework.jpg deleted file mode 100644 index e7729bfb507a80ad8423c67d7dd41d6a16ff691d..0000000000000000000000000000000000000000 Binary files a/docs/framework.jpg and /dev/null differ diff --git a/inc/common/proto/fusion_model.proto b/inc/common/proto/fusion_model.proto index fab31788e4b83658d54fc43f20675dc81746b5bb..002bef42e9c252c1f6f438f098d89fef1ef4d1a8 100644 --- a/inc/common/proto/fusion_model.proto +++ b/inc/common/proto/fusion_model.proto @@ -1,17 +1,13 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * This program is free software; you can redistribute it and/or modify + * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Apache License for more details at * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ syntax = "proto3"; diff --git a/inc/common/proto/ge_api.proto b/inc/common/proto/ge_api.proto index 59be2e5a9b8864d6e24529df0fab908d050c02bc..331c5aeae79ac80a36cf1d34e0096ed11f8af4de 100644 --- a/inc/common/proto/ge_api.proto +++ b/inc/common/proto/ge_api.proto @@ -1,18 +1,3 @@ -/* - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ syntax = "proto3"; package ge.api_pb; diff --git a/inc/common/proto/ge_ir.proto b/inc/common/proto/ge_ir.proto index 225a4adb8a2caa9073a3744856f679c5d479fc7e..ab21f672308b332aa0a5feb6dcce40a5c4a38532 100644 --- a/inc/common/proto/ge_ir.proto +++ b/inc/common/proto/ge_ir.proto @@ -1,18 +1,3 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ syntax = "proto3"; package ge.proto; diff --git a/inc/common/proto/insert_op.proto b/inc/common/proto/insert_op.proto index ac6a4162f10b70e635082886fe8fb1de4e6c4213..032b5cf43d00778b9b56752a83f39948e7fa2bb3 100644 --- a/inc/common/proto/insert_op.proto +++ b/inc/common/proto/insert_op.proto @@ -1,18 +1,3 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ syntax = "proto3"; package domi; @@ -40,35 +25,35 @@ message AippOpParams { RAW16 = 13; RAW24 = 14; } - + enum AippMode { undefined = 0; static = 1; dynamic = 2; } + + // AIPPģʽ־̬AIPPͶ̬AIPP + AippMode aipp_mode = 1; - // AIPPģʽ�����־�̬AIPP�Ͷ�̬AIPP - AippMode aipp_mode = 1; - - // related_input_rank����Ϊ�������Ϊ���ͣ����÷�Χ>=0, <=����Data���ӵĸ�����Ĭ��ֵΪ0�� - // ��ʶ��ģ�͵ĵڼ���������AIPP����������ģ�����������룬��Ҫ�Ե�2��������AIPP��������related_input_rankΪ1�� + // related_input_rankΪΪͣ÷Χ>=0, <=DataӵĸĬֵΪ0 + // ʶģ͵ĵڼAIPPģ룬ҪԵ2AIPPrelated_input_rankΪ1 uint32 related_input_rank = 2; - // input_edge_idx����Ϊ��ѡ������Ϊ���ͣ����÷�ΧΪ>=0�� - // ���øò��������ã����ڶ�Data���Ӳ�ͬ���������ͬ��AIPP����������ò���û�����ã�Ĭ�϶�related_input_rankָ����ģ������������������AIPP�� - // ����ֵ <= Data��������ߵĸ����� + // input_edge_idxΪѡΪͣ÷ΧΪ>=0 + // øòãڶDataӲͬͬAIPPòûãĬ϶related_input_rankָģAIPP + // ֵ <= Dataߵĸ repeated uint32 input_edge_idx = 3; - // [Begin] ��̬AIPP���������þ�̬AIPPʱ��Ч + // [Begin] ̬AIPPþ̬AIPPʱЧ uint32 max_src_image_size = 4; - // �Ƿ�֧����ת��Ĭ�ϲ�֧�֣�����֧����תʱ�����ж���Ŀռ��������ʧ + // Ƿ֧תĬϲ֧֣֧תʱжĿռʧ bool support_rotation = 5; - // [End] ��̬AIPP���� + // [End] ̬AIPP - // [Begin] ��̬AIPP���������ö�̬AIPPʱ��Ч + // [Begin] ̬AIPPö̬AIPPʱЧ InputFormat input_format = 51; bool csc_switch = 52; float cpadding_value = 53; @@ -124,18 +109,18 @@ message AippOpParams { repeated int32 input_bias_1 = 43; repeated int32 input_bias_2 = 44; - // [End] ��̬AIPP���� + // [End] ̬AIPP } message MultiShapeOpParams { enum MultiShapeMode { - batch = 0; //��̬batch - resolution = 1; //��̬�ֱ��ʣ���չ�� + batch = 0; //̬batch + resolution = 1; //ֱ̬ʣչ } - MultiShapeMode mode = 1; //����ģʽ - uint32 related_input_rank = 2; //�������Ӳ��뵽�ĸ����� + MultiShapeMode mode = 1; //ģʽ + uint32 related_input_rank = 2; //Ӳ뵽ĸ - repeated uint32 batch_list = 11; //batch_listֵ��batch_list�ĸ�����2��8֮�� + repeated uint32 batch_list = 11; //batch_listֵbatch_listĸ28֮ } diff --git a/inc/common/proto/om.proto b/inc/common/proto/om.proto index 2f7fa86dddf4a0f191329c053ffcdbcdca544cf0..e15e5f808640e77f52d3df6cf0dacfec1aff7d50 100644 --- a/inc/common/proto/om.proto +++ b/inc/common/proto/om.proto @@ -1,17 +1,13 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * This program is free software; you can redistribute it and/or modify + * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Apache License for more details at * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
*/ syntax = "proto3"; diff --git a/inc/common/proto/op_mapping_info.proto b/inc/common/proto/op_mapping_info.proto index 7511f8b8a44950821c392127fa03faba89daa3d2..e23b7ebeabfbdbab9ed0fc0491ab294195b8fc18 100644 --- a/inc/common/proto/op_mapping_info.proto +++ b/inc/common/proto/op_mapping_info.proto @@ -1,18 +1,3 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ syntax = "proto3"; package aicpu.dump; @@ -40,6 +25,16 @@ message Input { uint64 size = 5; } +enum BufferType { + L1 = 0; +} + +message OpBuffer { + BufferType buffer_type = 1; + uint64 address = 2; + uint64 size = 3; +} + message Op { string op_name = 1; string op_type = 2; @@ -52,6 +47,7 @@ message Task { repeated Output output = 4; bool end_graph = 5; repeated Input input = 6; + repeated OpBuffer buffer = 7; } message OpMappingInfo { diff --git a/inc/common/proto/optimizer_priority.proto b/inc/common/proto/optimizer_priority.proto index 383752803eaa5afbb427f758223503f008b2346b..769619cfc031ee8571eb0d6972cb786386c5a206 100644 --- a/inc/common/proto/optimizer_priority.proto +++ b/inc/common/proto/optimizer_priority.proto @@ -1,18 +1,3 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ syntax = "proto3"; package ge.optimizers; diff --git a/inc/common/proto/task.proto b/inc/common/proto/task.proto index 1bb4e199597ec5d11b33f19c0a82348c2ff0f43e..d0c09840e82d51dcba66632fb31b85950cb0fb89 100644 --- a/inc/common/proto/task.proto +++ b/inc/common/proto/task.proto @@ -1,17 +1,13 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * This program is free software; you can redistribute it and/or modify + * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * Apache License for more details at * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ syntax = "proto3"; diff --git a/inc/common/util/ai_core/common/aicore_util_attr_define.h b/inc/common/util/ai_core/common/aicore_util_attr_define.h index 7ad009a96bec26fb25fe343af4dcea33e895dfe9..ac1c2c97661725703316dd8d2f44e99627c8a3b3 100644 --- a/inc/common/util/ai_core/common/aicore_util_attr_define.h +++ b/inc/common/util/ai_core/common/aicore_util_attr_define.h @@ -1,19 +1,13 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * @file aicore_util_attr_define.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. * - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief attribute define + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ - #ifndef INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_ #define INC_COMMON_UTILS_AI_CORE_COMMON_ATTR_DEFINE_H_ diff --git a/inc/common/util/ai_core/common/aicore_util_types.h b/inc/common/util/ai_core/common/aicore_util_types.h index 16855190e14e2463731442488ad2ceec8947d011..25a7979c1119a1ce49e9f1f4cbed4d59e75bb152 100644 --- a/inc/common/util/ai_core/common/aicore_util_types.h +++ b/inc/common/util/ai_core/common/aicore_util_types.h @@ -1,17 +1,12 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * @file aicore_util_types.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. * - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief struct\enuum + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ #ifndef INC_COMMON_UTILS_AI_CORE_COMMON_TYPES_H_ diff --git a/inc/common/util/ai_core/common/graph_comm.h b/inc/common/util/ai_core/common/graph_comm.h index 5ebe204356c6f6ab4e1564fe2e26191015a9d00d..493d644d3092277abc977dc1513a7834cd1ea0ed 100644 --- a/inc/common/util/ai_core/common/graph_comm.h +++ b/inc/common/util/ai_core/common/graph_comm.h @@ -1,17 +1,12 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * @file graph_comm.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright(C), 2017 - 2017, Huawei Tech. Co., Ltd. ALL RIGHTS RESERVED. 
* - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief graph builder + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ #ifndef INC_COMMON_UTILS_AI_CORE_COMMON_GRAPH_COMMON_H_ @@ -41,10 +36,6 @@ public: GraphComm(const GraphComm &in) = delete; GraphComm &operator=(const GraphComm &in) = delete; - Status CreateFusionGraph(ge::ComputeGraph &modelGraph, - ge::ComputeGraph &fusionGraph, - const bool &isMapOpIndex); - Status GetscopeNodeMap(ge::ComputeGraph &graph, kScopeNodeMap_t &fusionMap); Status CopyFusionOpNodes(vector &fusInputEdgeList, diff --git a/inc/common/util/ai_core/common/scope_allocator.h b/inc/common/util/ai_core/common/scope_allocator.h index 4848fbe7dafa29d80961e36d291c31c1bc38f746..50b2ba564f53b50463b1850b0d5046ee02507f7a 100644 --- a/inc/common/util/ai_core/common/scope_allocator.h +++ b/inc/common/util/ai_core/common/scope_allocator.h @@ -1,17 +1,12 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * @file scope_allocator.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. * - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief tbe fusion scope id allocator + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ #ifndef INC_COMMON_UTILS_AI_CORE_COMMON_SCOPE_ALLOCATOR_H_ diff --git a/inc/common/util/ai_core/param_calculate/aicore_param_calculator.h b/inc/common/util/ai_core/param_calculate/aicore_param_calculator.h index 22064ccd002e6215c89da24abacb0f8d1293d892..0957c662530e52a8025a0578cf1a849bd2852dbb 100644 --- a/inc/common/util/ai_core/param_calculate/aicore_param_calculator.h +++ b/inc/common/util/ai_core/param_calculate/aicore_param_calculator.h @@ -1,17 +1,12 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * @file aicore_param_calculator.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved. * - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief aicore param calculator + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
*/ #ifndef AICORE_PARAM_CALCULATOR diff --git a/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h b/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h index 2fdec1392e3d3665b90b22b0b32eff32520445c8..d9f8f68727d4b55c0040ce2454b2ffd3bbc56a19 100644 --- a/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h +++ b/inc/common/util/ai_core/param_calculate/tensorsize_calculator.h @@ -1,19 +1,14 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * @file tensorsize_calculator.h * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. * - * http://www.apache.org/licenses/LICENSE-2.0 + * @brief provide the capability of calculating + * workspace and input/output size + * + * @version 1.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ - #ifndef TENSORSIZE_CALCULATOR_H #define TENSORSIZE_CALCULATOR_H diff --git a/inc/common/util/compress/compress.h b/inc/common/util/compress/compress.h index b702324eb87dadab74ee20ed9a755bf2c7abd231..7f0c135fb735a0093e653e5a1053a752d904a1a0 100644 --- a/inc/common/util/compress/compress.h +++ b/inc/common/util/compress/compress.h @@ -1,19 +1,21 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * This program is free software; you can redistribute it and/or modify + * it under the terms of the Apache License Version 2.0.You may not use this file except in compliance with the License. * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Apache License for more details at * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * @brief compress header file + * + * @file compress.h + * + * @version 1.0 */ - #ifndef COMPRESS_H #define COMPRESS_H diff --git a/inc/common/util/compress/compress_weight.h b/inc/common/util/compress/compress_weight.h index aa48d4a07f72f73e937a6249b99239c1a73148a4..38c2523069f655682f48460362ac7f1158f1cc57 100644 --- a/inc/common/util/compress/compress_weight.h +++ b/inc/common/util/compress/compress_weight.h @@ -1,19 +1,12 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * @brief header file of compress weight * - * http://www.apache.org/licenses/LICENSE-2.0 + * @file compress_weight.h * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * @version 1.0 */ - #ifndef COMPRESS_WEIGHT_H #define COMPRESS_WEIGHT_H diff --git a/inc/common/util/platform_info.h b/inc/common/util/platform_info.h index 55baadad2b11cc66a97a4233d2237349f2fafd54..40ee762afa233e18d2be50ece86bb4bae6cdb3f8 100644 --- a/inc/common/util/platform_info.h +++ b/inc/common/util/platform_info.h @@ -1,17 +1,12 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved. - * http://www.apache.org/licenses/LICENSE-2.0 + * Description: platform info init + + * Author: + * Create: 2020-03-22 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. */ #ifndef PLATFORM_INFO_H diff --git a/inc/common/util/platform_info_def.h b/inc/common/util/platform_info_def.h index 05e6caeced8c4f5a561ea0cb9a807beb59de69ba..7824c75ba604475aab085f27ae3a305a38f975e2 100644 --- a/inc/common/util/platform_info_def.h +++ b/inc/common/util/platform_info_def.h @@ -1,17 +1,12 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd +/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved. - * http://www.apache.org/licenses/LICENSE-2.0 + * Description: platform info init + + * Author: + * Create: 2020-03-22 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
*/ #ifndef PLATFORM_INFO_DEF_H diff --git a/inc/external/ge/ge_api.h b/inc/external/ge/ge_api.h index e711bf61ad6c7cd6561c231e66d5cfe3c0a4bbd0..b4b9bb2afc26fc61b3ab00d1808ad54cc848bd74 100644 --- a/inc/external/ge/ge_api.h +++ b/inc/external/ge/ge_api.h @@ -57,7 +57,7 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Session { /// @param [in] options graph options /// @return Status result of function /// - Status AddGraph(uint32_t graphId, const Graph& graph, const std::map& options); + Status AddGraph(uint32_t graphId, const Graph &graph, const std::map &options); /// /// @ingroup ge_graph @@ -98,6 +98,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Session { /// Status RunGraphAsync(uint32_t graphId, const std::vector &inputs, RunAsyncCallback callback); + /// + /// @ingroup ge_graph + /// @brief get variables in the session with specific session id + /// @param [in] var_names: variable names + /// @param [out] var_values: variable values + /// @return Status result of function + /// + Status GetVariables(const std::vector &var_names, std::vector &var_values); + /// /// @ingroup ge_graph /// @brief register callback func with specific summary or checkpoint by users @@ -111,8 +120,6 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Session { bool IsGraphNeedRebuild(uint32_t graphId); - std::map GetAllVariable(); - private: uint64_t sessionId_; }; diff --git a/inc/external/ge/ge_api_types.h b/inc/external/ge/ge_api_types.h index e2041847589054aef3a84065add91d5f4986c745..68743bc8f7f050d80714604901ade5e391c19335 100644 --- a/inc/external/ge/ge_api_types.h +++ b/inc/external/ge/ge_api_types.h @@ -40,6 +40,7 @@ const char *const OPTION_EXEC_DEPLOY_MODE = "ge.exec.deployMode"; const char *const OPTION_EXEC_RANK_TABLE_FILE = "ge.exec.rankTableFile"; const char *const GE_AICPU_FLAG = "ge.aicpuFlag"; const char *const OPTION_EXEC_EXTERN_PLUGIN_PATH = "ge.soLoadPath"; +// Dump flag and para const char *const OPTION_EXEC_ENABLE_DUMP = "ge.exec.enableDump"; const char *const OPTION_EXEC_DUMP_PATH = "ge.exec.dumpPath"; const char *const OPTION_EXEC_DUMP_STEP = "ge.exec.dumpStep"; @@ -48,7 +49,10 @@ const char *const OPTION_EXEC_ENABLE_DUMP_DEBUG = "ge.exec.enableDumpDebug"; const char *const OPTION_EXEC_DUMP_DEBUG_MODE = "ge.exec.dumpDebugMode"; const char *const OPTION_EXEC_ENABLE_INCRE_BUILD = "ge.exec.enableIncreBuild"; const char *const OPTION_EXEC_INCRE_BUILD_CACHE_PATH = "ge.exec.increBuildCachePath"; +const char *const OPTION_EXEC_ENABLE_EXCEPTION_DUMP = "ge.exec.enable_exception_dump"; const char *const OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES = "ge.exec.enableScopeFusionPasses"; +const char *const OPTION_EXEC_PROFILING_FPPONIT_OPTIONS = "ge.exec.profilingFpPointOptions"; +const char *const OPTION_EXEC_PROFILING_BPPONIT_OPTIONS = "ge.exec.profilingBpPointOptions"; // profiling flag const char *const OPTION_EXEC_PROFILING_MODE = "ge.exec.profilingMode"; const char *const OPTION_EXEC_PROFILING_OPTIONS = "ge.exec.profilingOptions"; @@ -234,10 +238,10 @@ enum GraphRunMode { PREDICTION = 0, TRAIN }; // Input/Output tensor info struct InputTensorInfo { - uint32_t data_type; // data type - std::vector dims; // shape description - void *data; // tensor data - int64_t length; // tensor length + uint32_t data_type; // data type + std::vector dims; // shape description + void *data; // tensor data + int64_t length; // tensor length }; struct OutputTensorInfo { @@ -246,11 +250,8 @@ struct OutputTensorInfo { std::unique_ptr data; // tensor data int64_t length; // tensor length 
OutputTensorInfo() : data_type(0), dims({}), data(nullptr), length(0) {} - OutputTensorInfo(OutputTensorInfo &&out) : - data_type(out.data_type), - dims(out.dims), - data(std::move(out.data)), - length(out.length) {} + OutputTensorInfo(OutputTensorInfo &&out) + : data_type(out.data_type), dims(out.dims), data(std::move(out.data)), length(out.length) {} OutputTensorInfo &operator=(OutputTensorInfo &&out) { if (this != &out) { @@ -269,67 +270,55 @@ using Status = uint32_t; using RunAsyncCallback = std::function &)>; // for ir build namespace ir_option { - static const char *const INPUT_FORMAT = "input_format"; - static const char *const INPUT_SHAPE = "input_shape"; - static const char *const OP_NAME_MAP = "op_name_map"; - static const char *const DYNAMIC_BATCH_SIZE = kDynamicBatchSize; - static const char *const DYNAMIC_IMAGE_SIZE = kDynamicImageSize; - static const char *const DYNAMIC_DIMS = kDynamicDims; - static const char *const INSERT_OP_FILE = ge::INSERT_OP_FILE.c_str(); - static const char *const PRECISION_MODE = ge::PRECISION_MODE.c_str(); - static const char *const EXEC_DISABLE_REUSED_MEMORY = ge::OPTION_EXEC_DISABLE_REUSED_MEMORY; - static const char *const AUTO_TUNE_MODE = ge::AUTO_TUNE_MODE.c_str(); - static const char *const CORE_TYPE = ge::CORE_TYPE.c_str(); - static const char *const SOC_VERSION = ge::SOC_VERSION.c_str(); - static const char *const ENABLE_SINGLE_STREAM = ge::ENABLE_SINGLE_STREAM; - static const char *const AICORE_NUM = ge::AICORE_NUM.c_str(); - static const char *const FUSION_SWITCH_FILE = ge::FUSION_SWITCH_FILE.c_str(); - static const char *const ENABLE_SMALL_CHANNEL = ge::ENABLE_SMALL_CHANNEL.c_str(); - static const char *const OP_SELECT_IMPL_MODE = ge::OP_SELECT_IMPL_MODE.c_str(); - static const char *const OUTPUT_TYPE = ge::OUTPUT_DATATYPE.c_str(); - static const char *const BUFFER_OPTIMIZE = ge::BUFFER_OPTIMIZE.c_str(); - static const char *const ENABLE_COMPRESS_WEIGHT = ge::ENABLE_COMPRESS_WEIGHT.c_str(); - static const char *const COMPRESS_WEIGHT_CONF = "compress_weight_conf"; - static const char *const OUT_NODES = ge::OUTPUT_NODE_NAME.c_str(); - static const char *const INPUT_FP16_NODES = ge::INPUT_FP16_NODES.c_str(); - static const char *const LOG_LEVEL = "log"; - static const char *const OPTYPELIST_FOR_IMPLMODE = ge::OPTYPELIST_FOR_IMPLMODE.c_str(); - - // for interface: aclgrphBuildModel - const std::set ir_builder_suppported_options = { - INPUT_FORMAT, - INPUT_SHAPE, - OP_NAME_MAP, - DYNAMIC_BATCH_SIZE, - DYNAMIC_IMAGE_SIZE, - DYNAMIC_DIMS, - INSERT_OP_FILE, - PRECISION_MODE, - EXEC_DISABLE_REUSED_MEMORY, - AUTO_TUNE_MODE, - OUTPUT_TYPE, - OUT_NODES, - INPUT_FP16_NODES, - LOG_LEVEL - }; - // for interface: aclgrphBuildInitialize - const std::set global_options = { - CORE_TYPE, - SOC_VERSION, - BUFFER_OPTIMIZE, - ENABLE_COMPRESS_WEIGHT, - COMPRESS_WEIGHT_CONF, - PRECISION_MODE, - EXEC_DISABLE_REUSED_MEMORY, - AUTO_TUNE_MODE, - ENABLE_SINGLE_STREAM, - AICORE_NUM, - FUSION_SWITCH_FILE, - ENABLE_SMALL_CHANNEL, - OP_SELECT_IMPL_MODE, - OPTYPELIST_FOR_IMPLMODE - }; -} +static const char *const INPUT_FORMAT = "input_format"; +static const char *const INPUT_SHAPE = "input_shape"; +static const char *const OP_NAME_MAP = "op_name_map"; +static const char *const DYNAMIC_BATCH_SIZE = kDynamicBatchSize; +static const char *const DYNAMIC_IMAGE_SIZE = kDynamicImageSize; +static const char *const DYNAMIC_DIMS = kDynamicDims; +static const char *const INSERT_OP_FILE = ge::INSERT_OP_FILE.c_str(); +static const char *const PRECISION_MODE = ge::PRECISION_MODE.c_str(); 
+static const char *const EXEC_DISABLE_REUSED_MEMORY = ge::OPTION_EXEC_DISABLE_REUSED_MEMORY; +static const char *const AUTO_TUNE_MODE = ge::AUTO_TUNE_MODE.c_str(); +static const char *const CORE_TYPE = ge::CORE_TYPE.c_str(); +static const char *const SOC_VERSION = ge::SOC_VERSION.c_str(); +static const char *const ENABLE_SINGLE_STREAM = ge::ENABLE_SINGLE_STREAM; +static const char *const AICORE_NUM = ge::AICORE_NUM.c_str(); +static const char *const FUSION_SWITCH_FILE = ge::FUSION_SWITCH_FILE.c_str(); +static const char *const ENABLE_SMALL_CHANNEL = ge::ENABLE_SMALL_CHANNEL.c_str(); +static const char *const OP_SELECT_IMPL_MODE = ge::OP_SELECT_IMPL_MODE.c_str(); +static const char *const OUTPUT_TYPE = ge::OUTPUT_DATATYPE.c_str(); +static const char *const BUFFER_OPTIMIZE = ge::BUFFER_OPTIMIZE.c_str(); +static const char *const ENABLE_COMPRESS_WEIGHT = ge::ENABLE_COMPRESS_WEIGHT.c_str(); +static const char *const COMPRESS_WEIGHT_CONF = "compress_weight_conf"; +static const char *const OUT_NODES = ge::OUTPUT_NODE_NAME.c_str(); +static const char *const INPUT_FP16_NODES = ge::INPUT_FP16_NODES.c_str(); +static const char *const LOG_LEVEL = "log"; +static const char *const OPTYPELIST_FOR_IMPLMODE = ge::OPTYPELIST_FOR_IMPLMODE.c_str(); + +// for interface: aclgrphBuildModel +const std::set ir_builder_suppported_options = { + INPUT_FORMAT, INPUT_SHAPE, OP_NAME_MAP, + DYNAMIC_BATCH_SIZE, DYNAMIC_IMAGE_SIZE, DYNAMIC_DIMS, + INSERT_OP_FILE, PRECISION_MODE, EXEC_DISABLE_REUSED_MEMORY, + AUTO_TUNE_MODE, OUTPUT_TYPE, OUT_NODES, + INPUT_FP16_NODES, LOG_LEVEL}; +// for interface: aclgrphBuildInitialize +const std::set global_options = {CORE_TYPE, + SOC_VERSION, + BUFFER_OPTIMIZE, + ENABLE_COMPRESS_WEIGHT, + COMPRESS_WEIGHT_CONF, + PRECISION_MODE, + EXEC_DISABLE_REUSED_MEMORY, + AUTO_TUNE_MODE, + ENABLE_SINGLE_STREAM, + AICORE_NUM, + FUSION_SWITCH_FILE, + ENABLE_SMALL_CHANNEL, + OP_SELECT_IMPL_MODE, + OPTYPELIST_FOR_IMPLMODE}; +} // namespace ir_option } // namespace ge #endif // INC_EXTERNAL_GE_GE_API_TYPES_H_ diff --git a/inc/external/ge/ge_ir_build.h b/inc/external/ge/ge_ir_build.h index dd48687f6e262d0831036196798c9256d56c3885..acf6991a967f204dba5bf65341a07e79c711d69c 100644 --- a/inc/external/ge/ge_ir_build.h +++ b/inc/external/ge/ge_ir_build.h @@ -1,18 +1,18 @@ /** -* Copyright 2020 Huawei Technologies Co., Ltd - -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at - -* http://www.apache.org/licenses/LICENSE-2.0 - -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ #ifndef INC_EXTERNAL_GE_IR_BUILD_H_ #define INC_EXTERNAL_GE_IR_BUILD_H_ @@ -27,12 +27,11 @@ namespace { #define IR_MAJOR_VERSION (int(1)) #define IR_MINOR_VERSION (int(0)) #define IR_PATCH_VERSION (int(0)) -} +} // namespace -namespace ge{ +namespace ge { -struct ModelBufferData -{ +struct ModelBufferData { std::shared_ptr data = nullptr; uint64_t length; }; @@ -64,7 +63,8 @@ void aclgrphBuildFinalize(); * @retval GRAPH_SUCCESS The function is successfully executed. * @retval OtherValues Failure */ -graphStatus aclgrphBuildModel(const ge::Graph &graph, const std::map &build_options, ModelBufferData& model); +graphStatus aclgrphBuildModel(const ge::Graph &graph, const std::map &build_options, + ModelBufferData &model); /** * @ingroup AscendCL @@ -75,7 +75,7 @@ graphStatus aclgrphBuildModel(const ge::Graph &graph, const std::map +#include +#include + +#include "ge/ge_api_error_codes.h" + +namespace ge { +enum ProfDataTypeConfig { + kProfTaskTime = 0x0002, + kProfAiCoreMetrics = 0x0004, + kProfAicpuTrace = 0x0008, + kProfTrainingTrace = 0x0800, + kProfHcclTrace = 0x1000 +}; + +enum ProfilingAicoreMetrics { + kAicoreArithmaticThroughput = 0, + kAicorePipeline = 1, + kAicoreSynchronization = 2, + kAicoreMemory = 3, + kAicoreInternalMemory = 4, + kAicoreStall = 5 +}; + +typedef struct ProfAicoreEvents ProfAicoreEvents; +typedef struct aclgrphProfConfig aclgrphProfConfig; + +/// +/// @ingroup AscendCL +/// @brief Initialize the profiling and set profiling configuration path +/// @param [in] profiler_path: configuration path of profiling +/// @param [in] length: length of configuration path +/// @return Status result of function +/// +Status aclgrphProfInit(const char *profiler_path, uint32_t length); + +/// +/// @ingroup AscendCL +/// @brief Finalize profiling +/// @return Status result of function +/// +Status aclgrphProfFinalize(); + +/// +/// @ingroup AscendCL +/// @brief Create data of type aclgrphProfConfig +/// @param [in] deviceid_list: device id list +/// @param [in] device_nums: device numbers +/// @param [in] aicore_metrics: type of aicore metrics +/// @param [in] aicore_events: pointer to aicore events be reserved, only support NULL now +/// @param [in] data_type_config: modules need profiling +/// @return Status result of function +/// +aclgrphProfConfig *aclgrphProfCreateConfig(uint32_t *deviceid_list, uint32_t device_nums, + ProfilingAicoreMetrics aicore_metrics, ProfAicoreEvents *aicore_events, + uint64_t data_type_config); + +/// +/// @ingroup AscendCL +/// @brief Destroy data of type aclgrphProfConfig +/// @param [in] profiler_config: config of profiling +/// @return Status result of function +/// +Status aclgrphProfDestroyConfig(aclgrphProfConfig *profiler_config); + +/// +/// @ingroup AscendCL +/// @brief Start profiling of modules which is configured by profiler config +/// @param [in] profiler_config: config of profiling +/// @return Status result of function +/// +Status aclgrphProfStart(aclgrphProfConfig *profiler_config); + +/// +/// @ingroup AscendCL +/// @brief Stop profiling of modules which is configured by profiler config +/// @param [in] profiler_config: config of profiling +/// @return Status result of function +/// +Status aclgrphProfStop(aclgrphProfConfig *profiler_config); +} // namespace ge + +#endif // INC_EXTERNAL_GE_GE_PROF_H_ diff --git a/inc/external/graph/operator.h b/inc/external/graph/operator.h index b84ae1d483deb6be4268e960d7ba621c26a0eadd..a02b6207533b5a870cd9ba5820ac07651fbbbd8d 100644 --- a/inc/external/graph/operator.h +++ 
b/inc/external/graph/operator.h @@ -45,9 +45,11 @@ namespace ge { class Operator; class OperatorImpl; +class NodeUtils; class NamedAttrs; class Graph; class AttrValue; +class Node; using SubgraphBuilder = std::function; using OperatorImplPtr = std::shared_ptr; @@ -65,8 +67,8 @@ using std::string; class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator { public: friend class OperatorImpl; - friend class GraphBuilderImpl; + friend class NodeUtils; using OpInt = int64_t; using OpFloat = float; @@ -279,6 +281,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY Operator { OperatorImplPtr operator_impl_{nullptr}; graphStatus GetInputConstDataOut(const string &dst_name, Tensor &data) const; + + std::shared_ptr GetNode() const; }; /*lint +e148*/ } // namespace ge diff --git a/inc/tdt/train_mode.h b/inc/external/parser/caffe_parser.h similarity index 61% rename from inc/tdt/train_mode.h rename to inc/external/parser/caffe_parser.h index 0b1475fcc5988b6d8a56e555db6f46aa716b024c..2a687d0b39d5ece2db1925a2ba01ac9c84b7379d 100644 --- a/inc/tdt/train_mode.h +++ b/inc/external/parser/caffe_parser.h @@ -14,17 +14,19 @@ * limitations under the License. */ -#ifndef INC_TDT_TRAIN_MODE_H -#define INC_TDT_TRAIN_MODE_H +#ifndef INC_EXTERNAL_ACL_GRAPH_CAFFE_H_ +#define INC_EXTERNAL_ACL_GRAPH_CAFFE_H_ -enum TrainMode { - NOFLAG = -1, - DPFLAG = 0, - MEFLAG = 1 -}; +#include +#include +#include -TrainMode GetTrainMode(); +#include "graph/ge_error_codes.h" +#include "graph/types.h" +#include "graph/graph.h" -void SetTrainMode(TrainMode mode); +namespace ge { +graphStatus aclgrphParseCaffe(const char *model_file, const char *weights_file, ge::Graph &graph); +} // namespace ge -#endif +#endif // INC_EXTERNAL_ACL_GRAPH_CAFFE_H_ diff --git a/inc/toolchain/stackcore/stackcore.h b/inc/external/parser/tensorflow_parser.h similarity index 59% rename from inc/toolchain/stackcore/stackcore.h rename to inc/external/parser/tensorflow_parser.h index d4b74cc76967a9b9da260f4da4f8dc834751b2ac..b7c1c8ce7634d0ba7f6109a5cc439f718b49f30e 100644 --- a/inc/toolchain/stackcore/stackcore.h +++ b/inc/external/parser/tensorflow_parser.h @@ -14,19 +14,20 @@ * limitations under the License. 
*/ -/** @defgroup stackcore StackCore */ -#ifndef LIB_STACKCORE_H -#define LIB_STACKCORE_H +#ifndef INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_ +#define INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_ -/** - * @ingroup stackcore - * @brief init stackcore, which register signal hander for exception core - */ -#ifdef __cplusplus -extern "C"{ -#endif -int StackInit(); -#ifdef __cplusplus -} -#endif -#endif +#include +#include +#include +#include + +#include "graph/ge_error_codes.h" +#include "graph/types.h" +#include "graph/graph.h" + +namespace ge { +graphStatus aclgrphParseTensorFlow(const char *model_file, ge::Graph &graph); +} // namespace ge + +#endif // INC_EXTERNAL_ACL_PARSER_TENSORFLOW_H_ \ No newline at end of file diff --git a/inc/external/register/scope/scope_fusion_pass_register.h b/inc/external/register/scope/scope_fusion_pass_register.h index 05eaecbdfd24e4da39ea9c9a4b043ae5214d7914..93d97033b471bce71f6bcdffd2e4e5663906dae3 100644 --- a/inc/external/register/scope/scope_fusion_pass_register.h +++ b/inc/external/register/scope/scope_fusion_pass_register.h @@ -26,11 +26,25 @@ #include "register/register_types.h" #include "graph/operator.h" +#define CHECK_INNER_NODE_CONDITION(cond, fusion_rlt) \ + do { \ + if (!(cond)) { \ + if ((fusion_rlt) != nullptr) { \ + (fusion_rlt)->SetType(ge::kScopeInvalidType); \ + } \ + return; \ + } \ + } while (0) + namespace domi { class TensorFlowModelParser; } // namespace domi namespace ge { const int32_t kFusionDisableIndex = 99999; +const char *const kScopeToMultiNodes = "ScopeToMultiNodes"; +const char *const kScopeInvalidType = "ScopeInvalidType"; +const char *const kInputFromFusionScope = "InputFromFusionScope"; +const char *const kOutputToFusionScope = "OutputToFusionScope"; class ScopePattern; using ScopeFusionPatterns = std::vector>; @@ -71,12 +85,47 @@ class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY FusionScopesResult { void InsertInputs(const std::string &inner_op_name, const std::vector &index_map); void InsertOutputs(const std::string &inner_op_name, const std::vector &index_map); + class InnerNodeInfo { + public: + explicit InnerNodeInfo(const std::string &fusion_node_name); + InnerNodeInfo(const std::string &fusion_node_name, const std::string &name, const std::string &type); + InnerNodeInfo(InnerNodeInfo &&other) noexcept; + InnerNodeInfo &operator=(InnerNodeInfo &&other) noexcept; + InnerNodeInfo(const InnerNodeInfo &) = delete; + InnerNodeInfo &operator=(const InnerNodeInfo &) = delete; + ~InnerNodeInfo(); + InnerNodeInfo &SetName(const std::string &name); + InnerNodeInfo &SetType(const std::string &type); + InnerNodeInfo &InsertInput(const std::string &input_node, int32_t peer_out_idx); + InnerNodeInfo &InsertOutput(const std::string &output_node, int32_t peer_in_idx); + ge::graphStatus BuildInnerNode(); + ge::graphStatus SetInputFormat(const std::string &input_name, const std::string &format); + ge::graphStatus SetOutputFormat(const std::string &output_name, const std::string &format); + ge::graphStatus SetDynamicInputFormat(const std::string &input_name, uint32_t index, const std::string &format); + ge::graphStatus SetDynamicOutputFormat(const std::string &output_name, uint32_t index, const std::string &format); + ge::Operator *MutableOperator(); + + std::string GetName() const; + std::string GetType() const; + std::vector> GetInputs() const; + std::vector> GetOutputs() const; + + private: + class InnerNodeInfoImpl; + std::unique_ptr impl_; + }; + + InnerNodeInfo *AddInnerNode(const std::string &name, const std::string &type); + 
InnerNodeInfo *MutableRecentInnerNode(); + InnerNodeInfo *MutableInnerNode(uint32_t index); + ge::graphStatus CheckInnerNodesInfo(); + private: class FusionScopesResultImpl; std::unique_ptr impl_; friend class ScopeGraph; friend class ScopeBasePass; - friend class domi::TensorFlowModelParser; + friend class TensorFlowModelParser; }; class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeTree { @@ -112,7 +161,7 @@ class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeGraph { std::unique_ptr impl_; friend class ScopePassManager; friend class ScopeBasePass; - friend class domi::TensorFlowModelParser; + friend class TensorFlowModelParser; }; class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeAttrValue { @@ -251,7 +300,7 @@ class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeFusionPassRegistry { class ScopeFusionPassRegistryImpl; /*lint -e148*/ std::unique_ptr impl_; - friend class domi::TensorFlowModelParser; + friend class TensorFlowModelParser; }; class GE_FUNC_HOST_VISIBILITY GE_FUNC_DEV_VISIBILITY ScopeUtil { diff --git a/inc/framework/common/debug/log.h b/inc/framework/common/debug/log.h index 6d4499191edfef9c111e9da8565a9814ecc97acf..dbf22ead8e0c7997fe556c970c8109b7fa3ecb3c 100644 --- a/inc/framework/common/debug/log.h +++ b/inc/framework/common/debug/log.h @@ -28,7 +28,7 @@ #if !defined(__ANDROID__) && !defined(ANDROID) #define DOMI_LOGE(...) GE_LOG_ERROR(GE_MODULE_NAME, ge::FAILED, __VA_ARGS__) #else -#include +#include #if defined(BUILD_VERSION_PERF) #define DOMI_LOGE(fmt, ...) #else @@ -83,12 +83,12 @@ } while (0); // If expr is not GRAPH_SUCCESS, print the log and return FAILED -#define GE_CHK_GRAPH_STATUS_RET(expr, ...) \ - do { \ - if ((expr) != ge::GRAPH_SUCCESS) { \ - DOMI_LOGE(__VA_ARGS__); \ - return FAILED; \ - } \ +#define GE_CHK_GRAPH_STATUS_RET(expr, ...) \ + do { \ + if ((expr) != ge::GRAPH_SUCCESS) { \ + DOMI_LOGE(__VA_ARGS__); \ + return FAILED; \ + } \ } while (0); // If expr is not SUCCESS, print the log and execute a custom statement @@ -99,13 +99,13 @@ } while (0); // If expr is not true, print the log and return the specified status -#define GE_CHK_BOOL_RET_STATUS(expr, _status, ...) \ - do { \ - bool b = (expr); \ - if (!b) { \ - GELOGE(_status, __VA_ARGS__); \ - return _status; \ - } \ +#define GE_CHK_BOOL_RET_STATUS(expr, _status, ...) \ + do { \ + bool b = (expr); \ + if (!b) { \ + GELOGE(_status, __VA_ARGS__); \ + return _status; \ + } \ } while (0); // If expr is not true, print the log and return the specified status diff --git a/inc/framework/common/ge_inner_error_codes.h b/inc/framework/common/ge_inner_error_codes.h index cc044cb1c96099a370e9922cb902db1c11367da7..3ab6cf06ca38d6c4cf7a117d87faee19ae696c32 100644 --- a/inc/framework/common/ge_inner_error_codes.h +++ b/inc/framework/common/ge_inner_error_codes.h @@ -97,6 +97,7 @@ GE_ERRORNO_COMMON(INTERNAL_ERROR, 4, "Internal errors"); // 1343225 GE_ERRORNO_COMMON(CSEC_ERROR, 5, "Failed to call libc_sec API!"); // 1343225861 GE_ERRORNO_COMMON(TEE_ERROR, 6, "Failed to call tee API!"); // 1343225862 GE_ERRORNO_COMMON(END_OF_SEQUENCE, 7, "End of sequence!"); // 1343225863 +GE_ERRORNO_COMMON(PATH_INVALID, 8, "Path is invalid!"); // 1343225864 // Error code for plugin manager GE_ERRORNO_COMMON(GE_PLGMGR_PATH_INVALID, 30, "Path is invalid!"); // 1343225886 @@ -124,9 +125,13 @@ GE_ERRORNO_CLIENT(GE_CLI_GE_ALREADY_INITIALIZED, 10, "GE is already initialized. 
GE_ERRORNO_CLIENT(GE_CLI_GE_NOT_INITIALIZED, 11, "GE is not yet initialized or is finalized."); // 1343229963 // Init module error code definition -GE_ERRORNO_INIT(GE_MULTI_INIT, 0, "Multiple initializations are not supported."); // 1343234048 -GE_ERRORNO_INIT(GE_FINALIZE_NOT_INIT, 1, "Finalize is not allowed before initialization."); // 1343234049 -GE_ERRORNO_INIT(GE_MULTI_FINALIZE, 2, "Multiple finalizations are not supported."); // 1343234050 +GE_ERRORNO_INIT(GE_MULTI_INIT, 0, "Multiple initializations are not supported."); // 1343234048 +GE_ERRORNO_INIT(GE_FINALIZE_NOT_INIT, 1, "Finalize is not allowed before initialization."); // 1343234049 +GE_ERRORNO_INIT(GE_MULTI_FINALIZE, 2, "Multiple finalizations are not supported."); // 1343234050 +GE_ERRORNO_INIT(GE_PROF_MULTI_INIT, 3, "Multiple profiling initializations are not supported."); // 1343234051 +GE_ERRORNO_INIT(GE_PROF_NOT_INIT, 4, "Profiling initializations have not been done."); // 1343234052 +GE_ERRORNO_INIT(GE_PROF_MODE_CONFLICT, 5, + "Profiling command mode which is preferred is running, the api mode will not work."); // 1343234053 // Session module error code definition GE_ERRORNO_SESSION(GE_SESS_INIT_FAILED, 0, "Failed to initialize session."); // 1343238144 @@ -211,8 +216,8 @@ GE_ERRORNO_ENGINE(GE_ENG_FINALIZE_FAILED, 1, "Engine finalize failed."); GE_ERRORNO_ENGINE(GE_ENG_MEMTYPE_ERROR, 2, "Memory type HBM is necessary when engine is in device"); // 1343246338 // Optimize errocode -GE_ERRORNO_GRAPH(TO_BE_DELETED, 63, "The node of the graph to be deleted."); // 1343242303 -GE_ERRORNO_GRAPH(NOT_CHANGED, 64, "The node of the graph no changed."); // 1343242304 +GE_ERRORNO_GRAPH(TO_BE_DELETED, 63, "The node of the graph to be deleted."); // 1343242303 +GE_ERRORNO_GRAPH(NOT_CHANGED, 64, "The node of the graph no changed."); // 1343242304 // Ops module error code definition GE_ERRORNO_OPS(GE_OPS_KERNEL_STORE_INIT_FAILED, 0, "Failed to initialize OpsKernelInfoStore."); // 1343250432 diff --git a/inc/framework/common/ge_types.h b/inc/framework/common/ge_types.h index d1eb95b71e9b756f755f832194b660f423a7656f..6033521ce359538186ffc78102fe7fe32ddf7c04 100644 --- a/inc/framework/common/ge_types.h +++ b/inc/framework/common/ge_types.h @@ -28,16 +28,9 @@ #include "external/ge/ge_api_types.h" namespace ge { -enum RuntimeType { - HOST = 0, - DEVICE = 1 -}; +enum RuntimeType { HOST = 0, DEVICE = 1 }; -enum PerfLevel { - GEN_TASK_WITH_FUSION = -1, - GEN_TASK_WITHOUT_L2FUSION = 3, - GEN_TASK_WITHOUT_FUSION = 4 -}; +enum PerfLevel { GEN_TASK_WITH_FUSION = -1, GEN_TASK_WITHOUT_L2FUSION = 3, GEN_TASK_WITHOUT_FUSION = 4 }; enum FrameworkType { CAFFE = 0, @@ -48,13 +41,15 @@ enum FrameworkType { }; enum OpEngineType { - ENGINE_SYS = 0, // default engine - ENGINE_AICORE = 1, - ENGINE_VECTOR = 2, - ENGINE_AICUBE = 3, // not support - ENGINE_AIVECTOR = 4 // not support + ENGINE_SYS = 0, // default engine + ENGINE_AICORE = 1, + ENGINE_VECTOR = 2, + ENGINE_AICUBE = 3, // not support + ENGINE_AIVECTOR = 4 // not support }; +enum InputAippType { DATA_WITHOUT_AIPP = 0, DATA_WITH_STATIC_AIPP, DATA_WITH_DYNAMIC_AIPP, DYNAMIC_AIPP_NODE }; + const char *const GE_ENGINE_ATTR_MEM_TYPE_HBM = "HBM"; const char *const GE_OPTION_EXEC_PLACEMENT = "ge.exec.placement"; @@ -100,6 +95,7 @@ struct OutputData { struct Command { std::string cmd_type; // Command type std::vector<std::string> cmd_params; // Command params + uint64_t module_index; // prof module }; // The definition of I/O shape description @@ -136,6 +132,7 @@ struct OriginInputInfo { // The structure of AIPP info struct
AippConfigInfo { + int8_t aipp_mode; int8_t input_format; int32_t src_image_size_w; int32_t src_image_size_h; @@ -183,6 +180,9 @@ struct AippConfigInfo { float var_reci_chn_1; float var_reci_chn_2; float var_reci_chn_3; + int8_t support_rotation; + uint32_t related_input_rank; + uint32_t max_src_image_size; }; // The structure of offline Modeldata @@ -261,16 +261,31 @@ struct ComputeGraphDescInfo { struct OpDescInfo { std::string op_name; + std::string op_type; uint32_t task_id; uint32_t stream_id; std::vector input_format; std::vector> input_shape; std::vector input_data_type; std::vector input_addrs; + std::vector input_size; std::vector output_format; std::vector> output_shape; std::vector output_data_type; std::vector output_addrs; + std::vector output_size; +}; +struct ModelDumpConfig { + std::string model_name; + std::vector layers; +}; + +struct DumpConfig { + std::string dump_path; + std::string dump_mode; + std::string dump_status; + std::string dump_op_switch; + std::vector dump_list; }; } // namespace ge #endif // INC_FRAMEWORK_COMMON_GE_TYPES_H_ diff --git a/inc/framework/common/helper/model_helper.h b/inc/framework/common/helper/model_helper.h index 27f1bc4d78e1de86ef485e8fff27a6ea98f80d6e..fbe7e73fd2030cdbefaaf25724c8f102eb95da51 100644 --- a/inc/framework/common/helper/model_helper.h +++ b/inc/framework/common/helper/model_helper.h @@ -32,10 +32,10 @@ class ModelHelper { ModelHelper() = default; ~ModelHelper(); - Status SaveToOmModel(const GeModelPtr &ge_model, const SaveParam &save_param, - const std::string &output_file, ge::ModelBufferData &model); + Status SaveToOmModel(const GeModelPtr& ge_model, const SaveParam& save_param, const std::string& output_file, + ge::ModelBufferData& model); Status SaveOriginalGraphToOmModel(const ge::Graph& graph, const std::string& output_file); - Status LoadModel(const ge::ModelData &model_data); + Status LoadModel(const ge::ModelData& model_data); Status GetModelBufferData(ge::ModelBufferData& model); const ModelFileHeader* GetFileHeader() const { return file_header_; } @@ -44,15 +44,15 @@ class ModelHelper { void SetSaveMode(bool val) { is_offline_ = val; } bool GetSaveMode(void) const { return is_offline_; } - Status GetBaseNameFromFileName(const std::string &file_name, std::string &base_name); - Status GetModelNameFromMergedGraphName(const std::string &graph_name, std::string &model_name); + Status GetBaseNameFromFileName(const std::string& file_name, std::string& base_name); + Status GetModelNameFromMergedGraphName(const std::string& graph_name, std::string& model_name); private: bool is_assign_model_ = false; bool is_offline_ = true; ModelFileHeader* file_header_ = nullptr; // Encrypted model need delete temp model and unencrypted model need not delete model - uint8_t *model_addr_tmp_ = nullptr; + uint8_t* model_addr_tmp_ = nullptr; uint32_t model_len_tmp_ = 0; GeModelPtr model_; @@ -64,9 +64,10 @@ class ModelHelper { Status LoadWeights(OmFileLoadHelper& om_load_helper); Status LoadTask(OmFileLoadHelper& om_load_helper); Status LoadTBEKernelStore(OmFileLoadHelper& om_load_helper); + Status LoadCustAICPUKernelStore(OmFileLoadHelper& om_load_helper); Status ReleaseLocalModelData() noexcept; - Status SaveModelPartition(std::shared_ptr& om_file_save_helper, - ModelPartitionType type, const uint8_t* data, size_t size); + Status SaveModelPartition(std::shared_ptr& om_file_save_helper, ModelPartitionType type, + const uint8_t* data, size_t size); }; } // namespace ge #endif // INC_FRAMEWORK_COMMON_HELPER_MODEL_HELPER_H_ diff --git 
a/inc/framework/common/helper/om_file_helper.h b/inc/framework/common/helper/om_file_helper.h index 4ca54b507a7e871f365fb766a0d5f172a09431b0..fec7e2940c251423dbff621057d779a104d2557d 100644 --- a/inc/framework/common/helper/om_file_helper.h +++ b/inc/framework/common/helper/om_file_helper.h @@ -32,7 +32,7 @@ using std::vector; namespace ge { struct ModelPartition { ModelPartitionType type; - uint8_t* data = 0; + uint8_t *data = 0; uint32_t size = 0; }; @@ -81,8 +81,8 @@ class OmFileSaveHelper { const std::vector &GetModelPartitions() const; - Status SaveModel(const SaveParam &save_param, const char *target_file, - ge::ModelBufferData& model, bool is_offline = true); + Status SaveModel(const SaveParam &save_param, const char *target_file, ge::ModelBufferData &model, + bool is_offline = true); Status SaveModelToFile(const char *output_file, ge::ModelBufferData &model, bool is_offline = true); diff --git a/inc/framework/common/op/attr_value_util.h b/inc/framework/common/op/attr_value_util.h index 6ef9b11de5e0c71f92aad2ea8736cae793f9358b..8a90cfa2a990b3ddbe8881d5b93e785f5f74ccfa 100644 --- a/inc/framework/common/op/attr_value_util.h +++ b/inc/framework/common/op/attr_value_util.h @@ -156,6 +156,6 @@ bool GetAttrDefListValue(const std::string &key, int idx, int32_t *value, const bool GetAttrDefListValue(const std::string &key, int idx, uint32_t *value, const AttrDefMap &attr); bool GetAttrDefListValue(const std::string &key, int idx, float *value, const AttrDefMap &attr); bool GetAttrDefListValue(const std::string &key, int idx, double *value, const AttrDefMap &attr); -} +} // namespace ge #endif // INC_FRAMEWORK_COMMON_OP_ATTR_VALUE_UTIL_H_ diff --git a/inc/framework/common/scope_guard.h b/inc/framework/common/scope_guard.h index 001a0e757e934faffa4bf9c44059bd8196ab4f59..2154648dec24280495596547d183de1ca2610aa8 100644 --- a/inc/framework/common/scope_guard.h +++ b/inc/framework/common/scope_guard.h @@ -42,8 +42,9 @@ class ScopeGuard { if (on_exit_scope_ != nullptr) { try { on_exit_scope_(); - } catch (std::bad_function_call &e) { } - catch (...) { } + } catch (std::bad_function_call &e) { + } catch (...) 
{ + } } } } diff --git a/inc/framework/common/string_util.h b/inc/framework/common/string_util.h index 47e80e759975d47909beaac0d31531bc12382a13..3e4bf093554d026c84fd5cbb27eee2da7974ac91 100644 --- a/inc/framework/common/string_util.h +++ b/inc/framework/common/string_util.h @@ -37,7 +37,7 @@ class StringUtils { return s; } // lint -esym(551,*) - static std::string &Rtrim(std::string &s) { /*lint !e618*/ + static std::string &Rtrim(std::string &s) { /*lint !e618*/ #if __cplusplus >= 201103L (void)s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) { return !std::isspace(c); })); #else @@ -61,8 +61,10 @@ class StringUtils { /// @param [in] delim separator /// @return string array after segmentation /// + /*lint -e1077*/ static std::vector Split(const std::string &str, char delim) { std::vector elems; + /*lint +e1077*/ if (str.empty()) { elems.emplace_back(""); diff --git a/inc/framework/common/types.h b/inc/framework/common/types.h index 55e0870cac1096af0d86abff77565b6254de18be..ad284d076931ccbd1736563e9d91ce9e03a00b89 100644 --- a/inc/framework/common/types.h +++ b/inc/framework/common/types.h @@ -565,10 +565,10 @@ enum ModelCheckType { /// @brief dynamic input type /// enum DynamicInputType { - FIXED = 0, // default mode - DYNAMIC_BATCH = 1, - DYNAMIC_IMAGE = 2, - DYNAMIC_DIMS = 3 + FIXED = 0, // default mode + DYNAMIC_BATCH = 1, + DYNAMIC_IMAGE = 2, + DYNAMIC_DIMS = 3 }; /// @@ -851,9 +851,9 @@ static constexpr int32_t PARTITION_TYPE_WEIGHTS = 1; static constexpr int32_t PARTITION_TYPE_TASK_INFO = 2; // number of partitions in the current model -static constexpr uint32_t PARTITION_SIZE = 4; +static constexpr uint32_t PARTITION_SIZE = 5; -enum ModelPartitionType { MODEL_DEF = 0, WEIGHTS_DATA, TASK_INFO, TBE_KERNELS }; +enum ModelPartitionType { MODEL_DEF = 0, WEIGHTS_DATA, TASK_INFO, TBE_KERNELS, CUST_AICPU_KERNELS }; struct ModelPartitionMemInfo { ModelPartitionType type; diff --git a/inc/framework/common/util.h b/inc/framework/common/util.h index 8ba2333a6b8a238e4c17a4d45bc0fdbe709016b4..b1c278d8d4539cd9aaf29de1b28c353f311ba939 100644 --- a/inc/framework/common/util.h +++ b/inc/framework/common/util.h @@ -30,12 +30,12 @@ #include "framework/common/ge_inner_error_codes.h" #include "mmpa/mmpa_api.h" -#define GE_CHECK_POSITIVE_SIZE_RANGE(size) \ - do { \ - if (size <= 0) { \ - DOMI_LOGE("param[%s] is not a positive number", #size); \ - return PARAM_INVALID; \ - } \ +#define GE_CHECK_POSITIVE_SIZE_RANGE(size) \ + do { \ + if (size <= 0) { \ + DOMI_LOGE("param[%s] is not a positive number", #size); \ + return PARAM_INVALID; \ + } \ } while (0) #define CHECK_FALSE_EXEC(expr, exec_expr, ...) \ @@ -113,84 +113,84 @@ } while (0) // Check if the parameter is null. If yes, return PARAM_INVALID and record the error -#define GE_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return ge::PARAM_INVALID; \ - } \ +#define GE_CHECK_NOTNULL(val) \ + do { \ + if (val == nullptr) { \ + DOMI_LOGE("param[%s] must not be null.", #val); \ + return ge::PARAM_INVALID; \ + } \ } while (0) // Check if the parameter is null. If yes, just return and record the error -#define GE_CHECK_NOTNULL_JUST_RETURN(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return; \ - } \ +#define GE_CHECK_NOTNULL_JUST_RETURN(val) \ + do { \ + if (val == nullptr) { \ + DOMI_LOGE("param[%s] must not be null.", #val); \ + return; \ + } \ } while (0) // Check whether the parameter is null. 
If so, execute the exec_expr expression and record the error log -#define GE_CHECK_NOTNULL_EXEC(val, exec_expr) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - exec_expr; \ - } \ +#define GE_CHECK_NOTNULL_EXEC(val, exec_expr) \ + do { \ + if (val == nullptr) { \ + DOMI_LOGE("param[%s] must not be null.", #val); \ + exec_expr; \ + } \ } while (0) // Check whether the parameter is null. If yes, return directly and record the error log -#define GE_RT_VOID_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return; \ - } \ +#define GE_RT_VOID_CHECK_NOTNULL(val) \ + do { \ + if (val == nullptr) { \ + DOMI_LOGE("param[%s] must not be null.", #val); \ + return; \ + } \ } while (0) // Check if the parameter is null. If yes, return false and record the error log -#define GE_RT_FALSE_CHECK_NOTNULL(val) \ - do { \ - if (val == nullptr) { \ - DOMI_LOGE("param[%s] must not be null.", #val); \ - return false; \ - } \ +#define GE_RT_FALSE_CHECK_NOTNULL(val) \ + do { \ + if (val == nullptr) { \ + DOMI_LOGE("param[%s] must not be null.", #val); \ + return false; \ + } \ } while (0) // Check if the parameter is out of bounds -#define GE_CHECK_SIZE(size) \ - do { \ - if (size == 0) { \ - DOMI_LOGE("param[%s] is out of range", #size); \ - return ge::PARAM_INVALID; \ - } \ +#define GE_CHECK_SIZE(size) \ + do { \ + if (size == 0) { \ + DOMI_LOGE("param[%s] is out of range", #size); \ + return ge::PARAM_INVALID; \ + } \ } while (0) // Check if the container is empty -#define GE_CHECK_VECTOR_NOT_EMPTY(vector) \ - do { \ - if (vector.empty()) { \ - DOMI_LOGE("param[%s] is empty!", #vector); \ - return ge::FAILED; \ - } \ +#define GE_CHECK_VECTOR_NOT_EMPTY(vector) \ + do { \ + if (vector.empty()) { \ + DOMI_LOGE("param[%s] is empty!", #vector); \ + return ge::FAILED; \ + } \ } while (0) // Check if the value on the left is greater than or equal to the value on the right -#define GE_CHECK_GE(lhs, rhs) \ - do { \ - if (lhs < rhs) { \ - DOMI_LOGE("param[%s] is less than[%s]", #lhs, #rhs); \ - return ge::PARAM_INVALID; \ - } \ +#define GE_CHECK_GE(lhs, rhs) \ + do { \ + if (lhs < rhs) { \ + DOMI_LOGE("param[%s] is less than[%s]", #lhs, #rhs); \ + return ge::PARAM_INVALID; \ + } \ } while (0) // Check if the value on the left is less than or equal to the value on the right -#define GE_CHECK_LE(lhs, rhs) \ - do { \ - if (lhs > rhs) { \ - DOMI_LOGE("param[%s] is greater than[%s]", #lhs, #rhs); \ - return ge::PARAM_INVALID; \ - } \ +#define GE_CHECK_LE(lhs, rhs) \ + do { \ + if (lhs > rhs) { \ + DOMI_LOGE("param[%s] is greater than[%s]", #lhs, #rhs); \ + return ge::PARAM_INVALID; \ + } \ } while (0) #define GE_DELETE_NEW_SINGLE(var) \ @@ -347,6 +347,14 @@ std::string ToString(const google::protobuf::RepeatedField &rpd_field) { /// uint64_t GetCurrentTimestap(); +/// +/// @ingroup domi_common +/// @brief Obtains the absolute time (timestamp) of the current system. +/// @return Timestamp, in seconds (US) +/// +/// +uint32_t GetCurrentSecondTimestap(); + /// /// @ingroup domi_common /// @brief Check whether the product of two int64 numbers exceeds the int64 range. @@ -390,6 +398,24 @@ bool CheckOutputPathValid(const std::string &file_path, const std::string &atc_p /// @param [out] result /// bool ValidateStr(const std::string &filePath, const std::string &mode); + +/// +/// @ingroup domi_common +/// @brief Check whether the file is normal file. 
+/// @param [in] file_path file path +/// @param [out] result +/// +bool IsValidFile(const char *file_path); + +/// +/// @ingroup domi_common +/// @brief Check path invalid +/// @param [in] path, path to be checked +/// @param [in] length, length of path +/// @return 0 success +/// @return -1 fail +/// +Status CheckPath(const char *path, size_t length); } // namespace ge #endif // INC_FRAMEWORK_COMMON_UTIL_H_ diff --git a/inc/framework/executor/ge_executor.h b/inc/framework/executor/ge_executor.h index 613152e9e05e25242556bbac75127bb73a0d41a3..6e82bb96f74c3788b248b08b3c28d752ee5a705e 100644 --- a/inc/framework/executor/ge_executor.h +++ b/inc/framework/executor/ge_executor.h @@ -38,14 +38,14 @@ class DynamicSingleOp; struct RunModelData { uint32_t index; // Data index uint32_t modelId; - std::vector blobs; // All input/output data buffer - uint32_t timestamp; // Data creation time - uint32_t timeout; // Processing timeout - uint64_t request_id = 0; // Request ID - uint64_t dynamic_batch_size = 0; // Dynamic batch size scene, set dynamic size, not supported by default:0 - uint64_t dynamic_image_height = 0; // Dynamic image size scene, set image height, not supported by default:0 - uint64_t dynamic_image_width = 0; // Dynamic image size scene, set image width, not supported by default:0 - std::vector dynamic_dims; // Dynamic dims scene, set dynamic dims, not supported by default:empty + std::vector blobs; // All input/output data buffer + uint32_t timestamp; // Data creation time + uint32_t timeout; // Processing timeout + uint64_t request_id = 0; // Request ID + uint64_t dynamic_batch_size = 0; // Dynamic batch size scene, set dynamic size, not supported by default:0 + uint64_t dynamic_image_height = 0; // Dynamic image size scene, set image height, not supported by default:0 + uint64_t dynamic_image_width = 0; // Dynamic image size scene, set image width, not supported by default:0 + std::vector dynamic_dims; // Dynamic dims scene, set dynamic dims, not supported by default:empty }; class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { @@ -108,11 +108,11 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { /// @ingroup ge /// @brief Get current dynamic dims info by combined dims /// @param [in] model_id: model id allocate from manager - /// @param [in] combined_dims: array of combined dimensions + /// @param [in] dynamic_dims: cur gear dynamic dims value /// @param [out] cur_dynamic_dims: current dynamic dims /// @return execute result /// - ge::Status GetCurDynamicDims(uint32_t model_id, const std::vector &combined_dims, + ge::Status GetCurDynamicDims(uint32_t model_id, const std::vector &dynamic_dims, std::vector &cur_dynamic_dims); /// @@ -135,6 +135,15 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { /// ge::Status GetCombinedDynamicDims(uint32_t model_id, std::vector> &batch_info); + /// + /// @ingroup ge + /// @brief Get user designeate shape order + /// @param [in] model_id + /// @param [out] user_designate_shape_order + /// @return execute result + /// + ge::Status GetUserDesignateShapeOrder(uint32_t model_id, std::vector &user_designate_shape_order); + ge::Status GetCurShape(const uint32_t model_id, std::vector &batch_info, int32_t &dynamic_type); /// @@ -154,6 +163,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { ge::Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info); ge::Status GetModelAttr(uint32_t model_id, std::vector &dynamic_output_shape_info); + ge::Status 
GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index); + ge::Status GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector &input_desc, std::vector &output_desc); @@ -162,6 +173,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { ge::Status CommandHandle(const ge::Command &command); + ge::Status SetDump(const DumpConfig &dump_config); + /// /// @ingroup ge /// @brief Query model memory consuming interface @@ -251,10 +264,8 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor { static ge::Status LoadDynamicSingleOp(const std::string &model_name, const ge::ModelData &modelData, void *stream, DynamicSingleOp **single_op); - static ge::Status ExecuteAsync(DynamicSingleOp *executor, - const std::vector &input_desc, - const std::vector &inputs, - std::vector &output_desc, + static ge::Status ExecuteAsync(DynamicSingleOp *executor, const std::vector &input_desc, + const std::vector &inputs, std::vector &output_desc, std::vector &outputs); static ge::Status ReleaseSingleOpResource(void *stream); diff --git a/inc/framework/ge_runtime/davinci_model.h b/inc/framework/ge_runtime/davinci_model.h index 8b6ca978fd9b1904d81048d4bb717cfc899d4ffb..91e70159132e79ecdecad89753a1551da306fe1f 100644 --- a/inc/framework/ge_runtime/davinci_model.h +++ b/inc/framework/ge_runtime/davinci_model.h @@ -27,10 +27,10 @@ namespace ge { namespace model_runner { class DavinciModel { public: - DavinciModel(const std::vector> &task_info_list, /*lint !e151*/ + DavinciModel(const std::vector> &task_info_list, const std::vector> &data_info_list, - const std::vector> &output_info_list, /*lint !e151*/ - const std::vector> &constant_info_list, /*lint !e1049*/ + const std::vector> &output_info_list, + const std::vector> &constant_info_list, const std::vector &variable_info_list, const std::vector &wait_active_stream_list, const std::vector &force_copy_stream_list, uint64_t mem_size = 0, uint64_t weight_size = 0, @@ -68,12 +68,12 @@ class DavinciModel { uint32_t GetBatchNum() const { return batch_num_; } uint32_t GetEventNum() const { return event_num_; } - const std::vector &GetWaitActiveStreams() const { return wait_active_stream_list_; } /*lint !e1413*/ - const std::vector &GetForceCopyStreams() const { return force_copy_stream_list_; } /*lint !e1413*/ + const std::vector &GetWaitActiveStreams() const { return wait_active_stream_list_; } + const std::vector &GetForceCopyStreams() const { return force_copy_stream_list_; } int32_t GetPriority() const { return priority_; } - const std::vector> &GetTaskInfoList() const { return task_info_list_; } /*lint !e151*/ + const std::vector> &GetTaskInfoList() const { return task_info_list_; } const std::vector> &GetDataInfoList() const { return data_info_list_; } const std::vector> &GetOutputInfoList() const { return output_info_list_; } const std::vector> &GetConstantInfoList() const { return output_info_list_; } @@ -81,7 +81,7 @@ class DavinciModel { private: std::vector> task_info_list_; - std::vector> data_info_list_; /*lint !e151*/ + std::vector> data_info_list_; std::vector> output_info_list_; std::vector> constant_info_list_; std::vector variable_info_list_; diff --git a/inc/framework/ge_runtime/model_runner.h b/inc/framework/ge_runtime/model_runner.h index a5256af761509f0818adfd406534cc6284fd7d38..e495dfdfd6f290dba3a6eb7b9b44e06cc09a3b77 100644 --- a/inc/framework/ge_runtime/model_runner.h +++ b/inc/framework/ge_runtime/model_runner.h @@ -52,11 +52,8 @@ class ModelRunner { bool RunModel(uint32_t model_id, const 
InputData &input_data, OutputData *output_data); - bool GetInputOutputDescInfo(uint32_t model_id, - bool zero_copy, - std::vector *input_desc, - std::vector *output_desc, - std::vector *input_format, + bool GetInputOutputDescInfo(uint32_t model_id, bool zero_copy, std::vector *input_desc, + std::vector *output_desc, std::vector *input_format, std::vector *output_format); private: diff --git a/inc/framework/ge_runtime/task_info.h b/inc/framework/ge_runtime/task_info.h index 861192198d1d40fe3c6c190591383e28f94ee2d6..e36c43334033b1011cd687b3ae4f7b1df9214f97 100644 --- a/inc/framework/ge_runtime/task_info.h +++ b/inc/framework/ge_runtime/task_info.h @@ -161,12 +161,13 @@ class TbeTaskInfo : public TaskInfo { class AicpuTaskInfo : public TaskInfo { public: AicpuTaskInfo(const std::string &op_name, uint32_t stream_id, const string &so_name, const std::string &kernel_name, - const std::string &node_def, const std::vector &input_data_addrs, + const std::string &node_def, const std::string &ext_info, const std::vector &input_data_addrs, const std::vector &output_data_addrs, bool dump_flag) : TaskInfo(op_name, stream_id, TaskInfoType::AICPU, dump_flag), so_name_(so_name), kernel_name_(kernel_name), node_def_(node_def), + ext_info_(ext_info), input_data_addrs_(input_data_addrs), output_data_addrs_(output_data_addrs) {} ~AicpuTaskInfo() override {} @@ -176,11 +177,13 @@ class AicpuTaskInfo : public TaskInfo { const std::string &node_def() const { return node_def_; } const std::vector &input_data_addrs() const { return input_data_addrs_; } const std::vector &output_data_addrs() const { return output_data_addrs_; } + const std::string &ext_info() const { return ext_info_; } private: std::string so_name_; std::string kernel_name_; std::string node_def_; + std::string ext_info_; std::vector input_data_addrs_; std::vector output_data_addrs_; }; @@ -293,19 +296,19 @@ class HcclTaskInfo : public TaskInfo { hcom_distribute_task_(hcom_distribute_task) {} ~HcclTaskInfo() override {} - const std::string &hccl_type() const { return hccl_type_; } /*lint !e1413*/ + const std::string &hccl_type() const { return hccl_type_; } void *input_data_addr() const { return input_data_addr_; } void *output_data_addr() const { return output_data_addr_; } void *workspace_addr() const { return workspace_addr_; } int64_t workspace_size() const { return workspace_size_; } int64_t hccl_stream_num() const { return hccl_stream_num_; } - const std::vector &private_def() const { return private_def_; } /*lint !e1413*/ + const std::vector &private_def() const { return private_def_; } void *ops_kernel_store() const { return ops_kernel_store_; } int32_t count() const { return count_; } int64_t root_id() const { return root_id_; } int64_t op_type() const { return op_type_; } int64_t data_type() const { return data_type_; } - const std::string group() const { return group_; } + const std::string &group() const { return group_; } std::function hcom_bind_model() const { return hcom_bind_model_; } std::function hcom_unbind_model() const { return hcom_unbind_model_; } std::function, void *)> hcom_distribute_task() const { diff --git a/inc/framework/generator/ge_generator.h b/inc/framework/generator/ge_generator.h index 931dfccec7d0d443f4891cddbfb43ff99e9797bf..37bca89717a7f202e6e1096176c5893375f618f6 100644 --- a/inc/framework/generator/ge_generator.h +++ b/inc/framework/generator/ge_generator.h @@ -28,6 +28,7 @@ #include "graph/graph.h" #include "graph/op_desc.h" #include "graph/detail/attributes_holder.h" +#include "omg/omg_inner_types.h" 
namespace ge { class GeGenerator { @@ -45,13 +46,14 @@ class GeGenerator { GeGenerator &operator=(const GeGenerator &) = delete; Status Initialize(const std::map &options); + Status Initialize(const std::map &options, OmgContext &context); Status Finalize(); Status GenerateOfflineModel(const Graph &graph, const std::string &file_name_prefix, const std::vector &inputs = std::vector()); - Status GenerateOnlineModel(const Graph &graph, const vector &inputs, ge::ModelBufferData& model); + Status GenerateOnlineModel(const Graph &graph, const vector &inputs, ge::ModelBufferData &model); Status GenerateInfershapeGraph(const Graph &graph); @@ -75,16 +77,15 @@ class GeGenerator { /// @param [in] engine_type: specific engine. /// @param [out] model_buff: model buff of single op. /// @return SUCCESS or FAILED - Status BuildSingleOpModel(OpDescPtr &op_desc, const vector &inputs, - const vector &outputs, OpEngineType engine_type, - ModelBufferData &model_buff); + Status BuildSingleOpModel(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, + OpEngineType engine_type, ModelBufferData &model_buff); private: - Status GenerateModel(const Graph &graph, const string &file_name_prefix, - const vector &inputs, ge::ModelBufferData& model, bool is_offline = true); + Status GenerateModel(const Graph &graph, const string &file_name_prefix, const vector &inputs, + ge::ModelBufferData &model, bool is_offline = true); Status BuildSingleOp(OpDescPtr &op_desc, const vector &inputs, const vector &outputs, - const string &model_file_name, OpEngineType engine_type, - ModelBufferData &model_buff, bool is_offline = true); + const string &model_file_name, OpEngineType engine_type, ModelBufferData &model_buff, + bool is_offline = true); class Impl; diff --git a/inc/framework/memory/memory_api.h b/inc/framework/memory/memory_api.h index 52ac682a11502d72c6f6a124e9dd14f59c1b0b28..ebb7e68cc32742a7393e02ea9793c8b158269d69 100644 --- a/inc/framework/memory/memory_api.h +++ b/inc/framework/memory/memory_api.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ #include "runtime/mem.h" namespace ge { -enum MemStorageType{ +enum MemStorageType { HBM = 0, RDMA_HBM, HOST_DDR, diff --git a/inc/framework/omg/omg.h b/inc/framework/omg/omg.h index 623f49af929648ed94bfcd97f4132fc3987101fe..45a8896dae1376eb60a2444ce97db73165eaf1cb 100644 --- a/inc/framework/omg/omg.h +++ b/inc/framework/omg/omg.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/inc/framework/omg/omg_inner_types.h b/inc/framework/omg/omg_inner_types.h index 80361232287326fcc707d15e9a89cd62d5e72bc1..e1a7da0b33c1f6721b8519bd47ed7540f59907ee 100644 --- a/inc/framework/omg/omg_inner_types.h +++ b/inc/framework/omg/omg_inner_types.h @@ -92,30 +92,23 @@ struct OmgContext { std::map> out_nodes_map; // user-designate out nodes (this is used for determing the orders) std::vector> user_out_nodes; + // save the output node of the network, value = topName, + // topName indicates the output name of the operator. 
+ std::vector user_out_nodes_top_vec; // net out nodes (where user_out_nodes or leaf nodes) std::vector net_out_nodes; // net out nodes top names(only caffe has top) std::vector out_top_names; // path for the aicpu custom operator so_file std::vector aicpu_op_run_paths; - // ddk version - std::string ddk_version; // preferential format used by the entire network domiTensorFormat_t net_format = DOMI_TENSOR_RESERVED; domi::FrameworkType type = domi::FRAMEWORK_RESERVED; RunMode run_mode = ONLY_PRE_CHECK; bool train_flag = false; - // whether to use FP16 high precision - int32_t fp16_high_precision = HIGH_PRECISION_DEFAULT; std::string output_type; - // Save the name of the entire network: Some special operators are used to determine a network. Some operators in the - // network require special processing based on the specific network. e.g:faster-rcnn, the FirstStageProcessor module - // is determined as the Faster-R-CNN network based on the scope fusion. Then, the conv+reshape operators in the - // FirstStageBoxPredictor/BoxEncodingPredictor scope are combined. The convolution kernel rearrangement reshape - // operator needs to be deleted for the convolution kernel. - std::string net_name; // Whether to use dynamic batch size or dynamic image size bool is_dynamic_input = false; std::string dynamic_batch_size; diff --git a/inc/framework/omg/parser/model_parser.h b/inc/framework/omg/parser/model_parser.h index 3a8aa6ced98a7040dbf9804bfa161699ec0de023..20bfcef4e801e3a3755df019e0466570a6246ff6 100644 --- a/inc/framework/omg/parser/model_parser.h +++ b/inc/framework/omg/parser/model_parser.h @@ -18,7 +18,7 @@ #define INC_FRAMEWORK_OMG_PARSER_MODEL_PARSER_H_ #include -#include "framework/common/types.h" +#include "framework/omg/parser/parser_types.h" #include "framework/omg/omg_inner_types.h" #include "graph/attr_value.h" #include "graph/compute_graph.h" diff --git a/inc/framework/omg/parser/op_parser.h b/inc/framework/omg/parser/op_parser.h index 251c04479a7e3fbeb8a462aa64f9bb9c840f2c44..087bad32e8174243c072c0d66873cde577479dd8 100644 --- a/inc/framework/omg/parser/op_parser.h +++ b/inc/framework/omg/parser/op_parser.h @@ -18,7 +18,7 @@ #define INC_FRAMEWORK_OMG_PARSER_OP_PARSER_H_ #include -#include "common/types.h" +#include "framework/omg/parser/parser_types.h" #include "omg/omg_inner_types.h" #include "proto/om.pb.h" #include "graph/ge_tensor.h" diff --git a/inc/framework/omg/parser/parser_factory.h b/inc/framework/omg/parser/parser_factory.h index 90d441d71b014cba944d9d8fb82904c1b456ce29..4845606f78731291186f211c95e4402019d49f6d 100644 --- a/inc/framework/omg/parser/parser_factory.h +++ b/inc/framework/omg/parser/parser_factory.h @@ -21,8 +21,8 @@ #include #include #include -#include "framework/common/types.h" #include "framework/omg/omg_inner_types.h" +#include "framework/omg/parser/parser_types.h" using Status = domi::Status; diff --git a/inc/framework/omg/parser/parser_inner_ctx.h b/inc/framework/omg/parser/parser_inner_ctx.h index 53f79895512bd17856b638a1514bca2c166ea051..b57420eb1a98464e7417a8dfdb574e38b0fb55d9 100644 --- a/inc/framework/omg/parser/parser_inner_ctx.h +++ b/inc/framework/omg/parser/parser_inner_ctx.h @@ -29,8 +29,33 @@ namespace ge { struct ParserContext { + // format of the input specified by the command line + std::unordered_map input_nodes_format_map; + // user-designate input dims + std::vector>> user_input_dims; std::unordered_map> input_dims; + // resolve the mapping between operators with the same name and corresponding network. format e.g. 
+ // Detectionoutput:SsdDetectiontOutput + std::map op_conf_map; + // user-designate out nodes (this is used for determing the orders) + std::vector> user_out_nodes; + // default out nodes (this is used for determing the orders) + std::vector> default_out_nodes; + // save the output node of the network. key = operator name, value = index, index indicates the output index of the + // operator + std::map> out_nodes_map; + // save the output node of the network, value = topName, + // topName indicates the output name of the operator. + std::vector user_out_nodes_top_vec; + // net out nodes (where user_out_nodes or leaf nodes) + std::vector net_out_nodes; + // net out nodes top names(only caffe has top) + std::vector out_top_names; + // Whether to use dynamic batch size or dynamic image size + bool is_dynamic_input = false; + bool train_flag = false; domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND; + domi::FrameworkType type = domi::FRAMEWORK_RESERVED; RunMode run_mode = ONLY_PRE_CHECK; std::string custom_proto_path; // save caffe custom proto path, used by caffe parse std::string caffe_proto_path; // save caffe proto path, used by caffe parse diff --git a/inc/framework/omg/parser/parser_types.h b/inc/framework/omg/parser/parser_types.h new file mode 100644 index 0000000000000000000000000000000000000000..62c9c750b20a02ec32ac8e7dc7b7816558501577 --- /dev/null +++ b/inc/framework/omg/parser/parser_types.h @@ -0,0 +1,508 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef PARSER_COMMON_TYPES_H_ +#define PARSER_COMMON_TYPES_H_ + +#include +#include + +#include "register/register_types.h" + +#if !defined(__ANDROID__) && !defined(ANDROID) +#ifndef DOMI_DYNAMIC_CAST +#define DOMI_DYNAMIC_CAST static_cast +#endif +#ifndef DOMI_DYNAMIC_POINTER_CAST +#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast +#endif +#else +#ifndef DOMI_DYNAMIC_CAST +#define DOMI_DYNAMIC_CAST static_cast +#endif +#ifndef DOMI_DYNAMIC_POINTER_CAST +#define DOMI_DYNAMIC_POINTER_CAST std::static_pointer_cast +#endif +#endif + +namespace ge { +namespace parser { +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AIPPDATA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONVOLUTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CORRELATION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CORRELATIONV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DECONVOLUTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POOLING; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELTWISE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU6; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIGMOID; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ABSVAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TANH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PRELU; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHNORM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSIONBATCHNORM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SCALE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FULL_CONNECTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLUS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACTIVATION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLATTEN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUB; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MUL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MATMUL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RSQRT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BIASADD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESHAPE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFORMAT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPCONVOLUTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTGENMASK; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTDOMASK; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCAT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROIPOOLING; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PROPOSAL; +FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *FSRDETECTIONOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DETECTIONPOSTPROCESS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LRN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSDATA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PERMUTE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDNORMALIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDPRIORBOX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NETOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDDETECTIONOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFINEDETDETECTIONOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CHANNELAXPY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PSROIPOOLING; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POWER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *POW; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROIALIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PYTHON; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FREESPACEEXTRACT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPATIALTF; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHAPE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHAPEN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ARGMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GATHERND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GATHER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REALDIV; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PACK; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SLICE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SLICED; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOORDIV; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUEEZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNSQUEEZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STRIDEDSLICE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANGE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RPNPROPOSALS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DECODEBBOX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PADV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MIRRORPAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TILE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CLIPBOXES; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTRCNNPREDICTIONS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPLIT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPLITV; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXPANDDIMS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const 
char *EMPTY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GREATER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SWITCH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SWITCHN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MERGE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SYMBOLICGRADIENT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REMOTECALL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *_IF; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATELESSIF; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IF; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CASE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *_WHILE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *WHILE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATELESSWHILE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PARTITIONEDCALL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STATEFULPARTITIONEDCALL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FAKEPARAM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSPOSE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSPOSED; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CAST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REGION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLO; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLODETECTIONOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FILL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REVERSE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNPACK; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *YOLO2REORG; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCESUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONSTANT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZEBILINEAR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZEBILINEARGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXIMUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FRAMEWORKOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ARG; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSEDBATCHNORMGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LSTM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HIGHWAY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RNN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATTENTIONDECODER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_NOT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_AND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOGICAL_OR; +FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *EQUAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NOTEQUAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INTERP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SHUFFLECHANNEL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AIPP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTISHAPE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RECIPROCAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SELU; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELU; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOSH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASINH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MINIMUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CLIP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *L2NORMALIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CROPANDRESIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UNUSEDCONST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSETODENSE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NONMAXSUPPRESSION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TOPKV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INVERTPERMUTATION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTINOMIAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REVERSESEQUENCE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEPROD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMIN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXTRACTIMAGEPATCHES; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQRT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEALL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESIZENEARESTNEIGHBOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPACETOBATCHND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHTOSPACEND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSERT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GREATEREQUAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANDOMUNIFORM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BATCHMATMUL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPACETODEPTH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHTOSPACE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RINT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATANH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASIN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY 
extern const char *NEG; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOG; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ROUND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *UPSAMPLE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FLOORMOD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LESS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LESSEQUAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ONEHOT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFSWITCH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFMERGE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENTER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFENTER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LOOPCOND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NEXTITERATION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFNEXTITERATION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXIT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFEXIT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONTROLTRIGGER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ZEROSLIKE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EXP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *WHERE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FAKEQUANTWITHMINMAXVARS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTPLUS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SOFTSIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *COSH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SINH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUAREDDIFFERENCE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char + *REQUIREDSPACETOBATCHPADDINGS; // for retinanet scope fusion +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDPOSTPROCESSOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETBOXES; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINAMULTIANCHORS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETCLIPPEDBOXES; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETFILTEREDDETECTIONS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETPOSTPROCESSOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RETINANETANCHORS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNMAP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNMAP1; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNSECONDSTAGEPOSTPROCESSOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNROIINTERPOOLING; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNFIRSTSTAGEPOSTPROCESSOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNGRIDANCHORGENERATOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern 
const char *ROIINTERPOOLING; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FASTERRCNNCLIPTOWINDOW; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *EMBEDLOOKUP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HASHLOOKUP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LSH_PROJ; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SVDF; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDANCHORGENERATOR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IDENTITY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *IDENTITYN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLACEHOLDERWITHDEFAULT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SELECT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GETSPAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STOPGRADIENT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PREVENTGRADIENT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GUARANTEECONST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCASTGRADIENTARGS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCASTARGS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONFUSIONMATRIX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RANK; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PLACEHOLDER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *END; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BASICLSTMCELL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *GETNEXT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *INITDATA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REFIDENTITY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BITCAST; + +/***************Ann special operator*************************/ +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_MEAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_CONVOLUTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DEPCONVOLUTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_FULLCONNECTION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_NETOUTPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DATA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_RESHAPE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_ADD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_MUL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_SUB; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DIV; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_DEQUANTIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_QUANTIZE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_PAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANN_RESIZE_BILINEAR; + +/***************************************************/ +/******************Training operator*************************/ +FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *GATHERV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONVGRADFILTER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONV2D; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONV2DBACKPROPINPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FUSEDBATCHNORM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BIASADDGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACTIVATIONGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXPOOLWITHARGMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MAXPOOLGRADWITHARGMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPYWITHLOGITS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SNAPSHOT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VAR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEANGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSLATE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADDN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *L2LOSS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MULTIPLY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HUBERLOSSGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HUBERLOSS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *NEGATIVE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDCAST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SPARSESOFTMAXCROSSENTROPYGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDSQUEEZEFUSION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATFOUR2FIVE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATFIVE2FOUR; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDREALDIVTILEMUL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SSDSUMMULREALDIVMEAN; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARIABLEV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARHANDLEOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TEMPORARYVARIABLE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DESTROYTEMPORARYVARIABLE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARIABLE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNVARIABLEOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNADD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNADDVARIABLEOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNSUB; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASSIGNSUBVARIABLEOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYMOMENTUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RESOURCEAPPLYMOMENTUM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SGD; +FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *NOOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *READVARIABLEOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *PARALLELCONCATSTART; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONSTANTOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DBACKPROPFILTER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DBACKPORPINPUT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISECONV2DFORWARDNATIVE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DROPOUTGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYRMSPROPMIXEDPRECISION; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYRMSPROP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RELU6GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *AVGPOOLGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCATOFFSET; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LAYERNORMGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LAYERNORM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LARS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DYNAMICSTITCH; + +/***************************************************/ +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQUARE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMBROADCAST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMALLGATHER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMALLREDUCE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREDUCESCATTER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMSEND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMRECEIVE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTEREAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HCOMREMOTEWRITE; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARASSIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *VARISINITIALIZEDOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LogTimeStamp; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ISVARIABLEINITIALIZED; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMSWITCH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMSWITCHN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMACTIVE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEMCPYASYNC; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *MEMCPYADDRASYNC; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *STREAMMERGE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENDGRAPH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SEND; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *RECV; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ENDOFSEQUENCE; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char 
*LABELSET; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELGOTO; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELGOTOEX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELSWITCH; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *LABELSWITCHBYINDEX; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATOMICADDRCLEAN; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ABS_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACCUMULATE_N_V2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOS_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ACOSH_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ANY; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPROXIMATE_EQUAL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASIN_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ASINH_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ATAN_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BROADCAST_TO; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ELU_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ADD_V2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATAFORMATDIMMAP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DATAFORMATVECPERMUTE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BESSELI0E; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *BESSELI1E; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADADELTA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAGRADDA; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAM; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADAMAX; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYADDSIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYCENTEREDRMSPROP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYFTRL; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYFTRLV2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYGRADIENTDESCENT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPOWERSIGN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPROXIMALADAGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *APPLYPROXIMALGRADIENTDESCENT; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEQUANTIZE; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOCAL_LOSS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *FOCAL_LOSS_GRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SMOOTHL1_LOSS; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SMOOTHL1_LOSS_grad; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *REDUCEMEAN; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CONCAT_V2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *ONEHOT_V2; +FMK_FUNC_HOST_VISIBILITY 
FMK_FUNC_DEV_VISIBILITY extern const char *SLICE_V2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TILE_V2; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SUM_V2; +// Common type when the operator has the same name +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DETECTIONOUTPUT; +// Custom operator +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NCHW; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NHWC; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *CUSTOMOP_NC1HWC0; + +// Depthwise 4d_2_6d,6d_2_4d +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISEWEIGHT4D26D; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *DEPTHWISEWEIGHT6D24D; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SQRTGRAD; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *SIGMOIDGRAD; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *TRANSSHAPE; + +// Horovod operator +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKALLREDUCE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKALLGATHER; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDCALLBACKBROADCAST; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const char *HVDWAIT; + +/// +/// @brief Magic number of model file +/// +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_MAGIC_NUM; // magic number + +/// +/// @brief Model head length +/// +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_FILE_HEAD_LEN; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const uint32_t MODEL_VERSION; ///< Model version 1.0/// + +// alpha default value +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float ALPHA_DEFAULT_VALUE; + +// beta default value +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const float BETA_DEFAULT_VALUE; + +/// +/// @ingroup domi_omg +/// @brief INPUT node type +/// +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string INPUT_TYPE; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DUMMY_DATA; + +// dim default size value +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY static const int32_t DIM_DEFAULT_SIZE = 4; + +// for fusion op plugin +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_FUSIONOP_ORIGINAL_TYPE; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_INPUT_TENSOR_DESC; +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string ATTR_NAME_OUTPUT_TENSOR_DESC; + +// DATA node type +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string DATA_TYPE; + +// framework Operator Type +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string FRAMEWORK_OP_TYPE; + +FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY extern const std::string NODE_NAME_NET_OUTPUT; + +#pragma pack() // Cancels single-byte alignment +} // namespace parser +} // namespace ge + +#endif // PARSER_COMMON_TYPES_H_ diff --git a/inc/graph/debug/ge_attr_define.h b/inc/graph/debug/ge_attr_define.h index 7ec6e1a8e2a398dcfe22a4eb12524d9d7a75dbee..375dc0e2ad0b6ac424f64c5251e51b094a14290b 100644 --- 
a/inc/graph/debug/ge_attr_define.h
+++ b/inc/graph/debug/ge_attr_define.h
@@ -946,6 +946,7 @@ GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAM
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_NUM;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_BATCH_LABEL;
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_COMBINED_BATCH;
+GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_USER_DESIGNEATE_SHAPE_ORDER;
 
 // Control flow
 GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY extern const std::string ATTR_NAME_STREAM_SWITCH_COND;
diff --git a/inc/graph/op_desc.h b/inc/graph/op_desc.h
index e29d839402028421faaea6bde63f994f1a8520a6..d2109d3f5bcce1249ca2302f16c04a17bae929aa 100644
--- a/inc/graph/op_desc.h
+++ b/inc/graph/op_desc.h
@@ -157,9 +157,6 @@ class OpDesc : public std::enable_shared_from_this, public AttrHolder {
   graphStatus AddDynamicOutputDesc(const string &name, const unsigned int num, bool isPushBack = true);
 
-  void RemoveInputDesc(uint32_t index);
-  void RemoveOutputDesc(uint32_t index);
-
   bool IsOptionalInput(const string &name) const;
   bool IsOptionalInput(uint32_t index) const;
diff --git a/inc/graph/utils/node_utils.h b/inc/graph/utils/node_utils.h
index ea6e1696b021c324c2438a17e548a0b5ea0bea40..7d17c51f066861a0eeffd13635a079b2f47287c2 100644
--- a/inc/graph/utils/node_utils.h
+++ b/inc/graph/utils/node_utils.h
@@ -20,6 +20,7 @@
 #include 
 #include 
 #include 
+#include "external/graph/operator.h"
 #include "graph/node.h"
 
 namespace ge {
@@ -63,8 +64,11 @@ class NodeUtils {
   static void UnlinkAll(const Node &node);
 
   static graphStatus UpdatePeerNodeInputDesc(const NodePtr &node_ptr);
-  static graphStatus AppendInputAnchor(const NodePtr &node, uint32_t index);
-  static graphStatus RemoveInputAnchor(const NodePtr &node, uint32_t index);
+  static graphStatus AppendInputAnchor(const NodePtr &node, uint32_t num);
+  static graphStatus RemoveInputAnchor(const NodePtr &node, uint32_t num);
+
+  static graphStatus AppendOutputAnchor(const NodePtr &node, uint32_t num);
+  static graphStatus RemoveOutputAnchor(const NodePtr &node, uint32_t num);
 
   static bool IsInNodesEmpty(const Node &node);
   static GeTensorDesc GetOutputDesc(const Node &node, uint32_t index);
@@ -104,6 +108,14 @@ class NodeUtils {
   static NodePtr GetParentInput(const Node &node);
   static NodePtr GetParentInput(const NodePtr &node);
 
+  ///
+  /// @brief Get is dynamic shape graph from node.
+  /// @param [in] node
+  /// @return bool
+  ///
+  static bool IsDynamicShape(const Node &node);
+  static bool IsDynamicShape(const NodePtr &node);
+
   ///
   /// @brief Check is varying_input for while node
   /// @param [in] node: Data node for subgraph
@@ -140,9 +152,15 @@ class NodeUtils {
   ///
   static vector GetSubgraphOutputNodes(const Node &node);
 
-  static NodePtr GetInDataNodeByIndex(const Node &node, int index);
+  static NodePtr GetInDataNodeByIndex(const Node &node, const int index);
+
+  static vector> GetOutDataNodesWithAnchorByIndex(const Node &node, const int index);
+
+  static ge::ConstNodePtr GetNodeFromOperator(const Operator &oprt);
+
+  static graphStatus GetInputConstData(const ConstNodePtr& node_ptr, const string &dst_name, GeTensorPtr &ge_tensor);
 
-  static vector GetOutDataNodesByIndex(const Node &node, int index);
+  static graphStatus GetInputConstData(const Node &node, const string &dst_name, GeTensorPtr &ge_tensor);
 
  private:
   static std::map> map_send_info_;
diff --git a/inc/graph/utils/type_utils.h b/inc/graph/utils/type_utils.h
index 92f39f4ae0961057d0174e5819a828fc7cc97329..4e7dfc70cdbb4aba61f9813b0eb47f2ed62ba80b 100644
--- a/inc/graph/utils/type_utils.h
+++ b/inc/graph/utils/type_utils.h
@@ -34,6 +34,7 @@ class TypeUtils {
   static bool IsFormatValid(Format format);
   static bool IsInternalFormat(Format format);
 
+  static std::string ImplyTypeToSerialString(domi::ImplyType imply_type);
   static std::string DataTypeToSerialString(DataType data_type);
   static DataType SerialStringToDataType(const std::string &str);
   static std::string FormatToSerialString(Format format);
diff --git a/inc/hccl/hccl_op_base.h b/inc/hccl/hccl_op_base.h
deleted file mode 100644
index 017e4f8a9093ca39fb82f41cc7373ae8d4c48463..0000000000000000000000000000000000000000
--- a/inc/hccl/hccl_op_base.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * @file hccl_op_base.h
- * @brief HCCL OP_BASE API
- */
-
-#ifndef HCCL_OPBASE_H_
-#define HCCL_OPBASE_H_
-
-#include 
-
-#ifdef __cplusplus
-extern "C" {
-#endif // __cplusplus
-
-/**
- * @brief Initialize HCCL in OP_BASE mode.
- *
- * @param rank_table A string identifying the rank table file path, include file name.
- * @param identify A string identifying the identify for the rank.
- * @param comm A pointer identifying the initialized communication resource.
- * @return hcclResult_t
- * @see hcclCommDestroy()
- */
-extern hcclResult_t hcclCommInitClusterInfo(const char *rankTable, const char *identify, hcclComm_t *comm);
-
-/**
- * @brief Get hccl unique id in OP_BASE mode.
- *
- * @param id A pointer identifying the hccl unique id.
- * @return hcclResult_t
- */
-extern hcclResult_t hcclGetUniqueId(hcclUniqueId* id);
-
-/**
- * @brief Initialize HCCL with unique id in OP_BASE mode.
- *
- * @param comm A pointer identifying the initialized communication resource.
- * @param nranks A integer identifying the rank size of the cluster.
- * @param commId A struct identifying the hccl unique id. - * @param myrank A integer identifying the identify for the rank. - * @return hcclResult_t - * @see hcclCommDestroy() - */ -extern hcclResult_t hcclCommInitUniqueId(hcclComm_t* comm, u32 nranks, hcclUniqueId commId, u32 myrank); - -/** - * @brief AllReduce operator in OP_BASE mode. - * - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the output data. -* @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. - * @param comm A pointer identifying the communication resource based on. - * @param stream A pointer identifying the stream information. - * @return hcclResult_t - */ -extern hcclResult_t hcclAllReduce(void *inputPtr, void *outputPtr, u64 count, hcclDataType_t dataType, - hcclRedOp_t op, hcclComm_t comm, rtStream_t stream); - -/** - * @brief Broadcast operator in OP_BASE mode. - * - * @param ptr A pointer identifying the data address of the operator. - * @param count An integer(u64) identifying the number of the data. - * @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param root An integer(u32) identifying the the root rank in the operator. - * @param comm A pointer identifying the communication resource based on - * @param stream A pointer identifying the stream information. - * @return hcclResult_t - */ -extern hcclResult_t hcclBroadcast(void *ptr, u64 count, hcclDataType_t dataType, u32 root, hcclComm_t comm, - rtStream_t stream); - -/** - * @brief ReduceScatter operator in OP_BASE mode. - * - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the output data. -* @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param op The reduction type of the operator, must be one of the following types: sum, min, max, prod. - * @param comm A pointer identifying the communication resource based on. - * @param stream A pointer identifying the stream information. - * @return hcclResult_t - */ -extern hcclResult_t hcclReduceScatter(void* inputPtr, void* outputPtr, u64 count, hcclDataType_t dataType, - hcclRedOp_t op, hcclComm_t comm, rtStream_t stream); - -/** - * @brief AllGather operator in OP_BASE mode. - * - * @param inputPtr A pointer identifying the input data address of the operator. - * @param outputPtr A pointer identifying the output data address of the operator. - * @param count An integer(u64) identifying the number of the input data. -* @param dataType The data type of the operator, must be one of the following types: int8, int32, float16, float32. - * @param comm A pointer identifying the communication resource based on. - * @param stream A pointer identifying the stream information. 
- * @return hcclResult_t - */ -extern hcclResult_t hcclAllGather(void* inputPtr, void* outputPtr, u64 count, hcclDataType_t dataType, - hcclComm_t comm, rtStream_t stream); - -/** - * @brief Destroy HCCL comm - * - * @param comm A pointer identifying the communication resource targetting - * @return hcclResult_t - * @see hcclCommInitClusterInfo() - */ -extern hcclResult_t hcclCommDestroy(hcclComm_t comm); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HCCL_OPBASE_H_ diff --git a/inc/hccl/hcom_ops_stores.h b/inc/hccl/hcom_ops_stores.h deleted file mode 100644 index 274af6fde20c6f62261ce038705d09357dfd43fa..0000000000000000000000000000000000000000 --- a/inc/hccl/hcom_ops_stores.h +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file hcom_ops_stores.h - * @brief HCOM operators plugin API - */ - -#ifndef HCOM_OPS_STORES_H_ -#define HCOM_OPS_STORES_H_ - -#include "common/opskernel/ops_kernel_info_store.h" -#include "common/optimizer/graph_optimizer.h" -#include "framework/common/ge_inner_error_codes.h" - -using OpsKernelInfoStorePtr = std::shared_ptr; -using GraphOptimizerPtr = std::shared_ptr; - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** - * @brief Initialize HCOM operators plugin. - * - * @param options Input parameter. Options must contain rank table path, deploy mode, rank id, pod name. - * @return ge::SUCCESS success; others:fail. - */ -ge::Status Initialize(const std::map &options); - -/** - * @brief Finalize HCOM operators plugin. - * - * @return ge::SUCCESS success; others: fail. - */ -ge::Status Finalize(); - -/** - * @brief Get the information store of HCOM operators. - * - * @param opKernInfos A map identifying the information store of HCOM operators. - */ -void GetOpsKernelInfoStores(std::map &opKernInfos); - -/** - * @brief Get the graph optimizer of HCOM operators. - * - * @param graphOptimizers A map identifying the graph optimizer of HCOM operators. - */ -void GetGraphOptimizerObjs(std::map &graphOptimizers); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HCOM_OPS_STORES_H_ diff --git a/inc/hccl/hvd_adapter_pub.h b/inc/hccl/hvd_adapter_pub.h deleted file mode 100644 index 9ded323b49bd553acff26864f229f8549ea31b50..0000000000000000000000000000000000000000 --- a/inc/hccl/hvd_adapter_pub.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * @file hvd_adapter_pub.h - * @brief Horovod Adapter API - */ - -#ifndef HVD_ADAPTER_ -#define HVD_ADAPTER_ - -#include - -namespace hccl { -using HvdCbDataProcessFunc = hcclResult_t (*)(void *fnData); - -/** - * @brief Add function to process fndata. - * @param fn A hvdCbDataProcessFunc type function. - * @return void - */ -extern void HvdCbDataProcessFuncAdd(HvdCbDataProcessFunc fn); // 注册处理回host侧数据的函数 -} -#endif // HVD_ADAPTER_ diff --git a/inc/mmpa/mmpa_api.h b/inc/mmpa/mmpa_api.h new file mode 100644 index 0000000000000000000000000000000000000000..ce1c9720b7dd0cde457aa51180ac9bac5949190f --- /dev/null +++ b/inc/mmpa/mmpa_api.h @@ -0,0 +1,125 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef _MMPA_API_H_ +#define _MMPA_API_H_ + +#define LINUX 0 +#define WIN 1 + +#if(OS_TYPE == LINUX) //lint !e553 + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "securec.h" + +#include "./sub_inc/mmpa_typedef_linux.h" +#include "./sub_inc/mmpa_linux.h" + +#endif + + +#if(OS_TYPE == WIN) //lint !e553 +#include +#include +#include "Windows.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "shlwapi.h" +#include +#include "sub_inc/mmpa_typedef_win.h" +#include "sub_inc/mmpa_win.h" +#include +#include +#include +#include + +#include +#include + +#pragma comment(lib, "ws2_32.lib") +#pragma comment(lib, "mswsock.lib") +#pragma comment(lib, "Kernel32.lib") +#pragma comment(lib, "shlwapi.lib") +#pragma comment(lib, "wbemuuid.lib") +#pragma comment(lib, "Iphlpapi.lib") +#endif + +#endif // MMPA_API_H_ + diff --git a/inc/mmpa/sub_inc/mmpa_linux.h b/inc/mmpa/sub_inc/mmpa_linux.h new file mode 100644 index 0000000000000000000000000000000000000000..6ac8f8f6cd388bd6b7699fd210511dbc9bc19973 --- /dev/null +++ b/inc/mmpa/sub_inc/mmpa_linux.h @@ -0,0 +1,450 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MMPA_LINUX_MMPA_LINUX_H +#define MMPA_LINUX_MMPA_LINUX_H + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif // __cpluscplus +#endif // __cpluscplus + +#define MMPA_MACINFO_DEFAULT_SIZE 18 +#define MMPA_CPUDESC_DEFAULT_SIZE 64 + +typedef pthread_t mmThread; +typedef pthread_mutex_t mmMutex_t; +typedef pthread_cond_t mmCond; +typedef pthread_mutex_t mmMutexFC; +typedef signed int mmProcess; +typedef int mmPollHandle; +typedef int mmPipeHandle; +typedef int mmComPletionKey; +typedef int mmCompletionHandle; + +typedef VOID *mmExitCode; +typedef key_t mmKey_t; +typedef int mmMsgid; +typedef struct dirent mmDirent; +typedef int (*mmFilter)(const mmDirent *entry); +typedef int (*mmSort)(const mmDirent **a, const mmDirent **b); + +typedef VOID *(*userProcFunc)(VOID *pulArg); + +typedef struct { + userProcFunc procFunc; // Callback function pointer + VOID *pulArg; // Callback function parameters +} mmUserBlock_t; + +typedef struct { + int wSecond; // Seconds. [0-60] (1 leap second) + int wMinute; // Minutes. [0-59] + int wHour; // Hours. [0-23] + int wDay; // Day. [1-31] + int wMonth; // Month. [1-12] + int wYear; // Year + int wDayOfWeek; // Day of week. [0-6] + int tm_yday; // Days in year.[0-365] + int tm_isdst; // DST. [-1/0/1] + long int wMilliseconds; // milliseconds +} mmSystemTime_t; + +typedef sem_t mmSem_t; +typedef struct sockaddr mmSockAddr; +typedef socklen_t mmSocklen_t; +typedef int mmSockHandle; +typedef timer_t mmTimer; +typedef pthread_key_t mmThreadKey; + +typedef int mmOverLap; + +typedef ssize_t mmSsize_t; + +typedef struct { + UINT32 createFlag; + INT32 oaFlag; +} mmCreateFlag; + +typedef struct { + VOID *sendBuf; + INT32 sendLen; +} mmIovSegment; +typedef struct in_addr mmInAddr; + +typedef struct { + VOID *inbuf; + INT32 inbufLen; + VOID *outbuf; + INT32 outbufLen; + mmOverLap *oa; +} mmIoctlBuf; + +typedef int mmAtomicType; + +typedef enum { + pollTypeRead = 1, // pipe read + pollTypeRecv, // socket recv + pollTypeIoctl, // ioctl +} mmPollType; + +typedef struct { + mmPollHandle handle; // The file descriptor or handle of poll is required + mmPollType pollType; // Operation type requiring poll + // read or recv or ioctl + INT32 ioctlCode; // IOCTL operation code, dedicated to IOCTL + mmComPletionKey completionKey; // The default value is blank, which is used in windows + // The data used to receive the difference between which handle is readable +} mmPollfd; + +typedef struct { + VOID *priv; // User defined private content + mmPollHandle bufHandle; // Value of handle corresponding to buf + mmPollType bufType; // Data types polled to + VOID *buf; // Data used in poll + UINT32 bufLen; // Data length used in poll + UINT32 bufRes; // Actual return length +} mmPollData, *pmmPollData; + +typedef VOID (*mmPollBack)(pmmPollData); + +typedef struct { + INT32 tz_minuteswest; // How many minutes is it different from Greenwich + INT32 tz_dsttime; // type of DST correction +} mmTimezone; + +typedef struct { + LONG tv_sec; + LONG tv_usec; +} mmTimeval; + +typedef struct { + LONG tv_sec; + LONG tv_nsec; +} mmTimespec; + +typedef struct { + 
ULONGLONG totalSize; + ULONGLONG freeSize; + ULONGLONG availSize; +} mmDiskSize; + +#define mmTLS __thread +typedef struct stat mmStat_t; +typedef struct stat64 mmStat64_t; +typedef mode_t mmMode_t; + +typedef struct option mmStructOption; + +typedef struct { + char addr[MMPA_MACINFO_DEFAULT_SIZE]; // ex:aa-bb-cc-dd-ee-ff\0 +} mmMacInfo; + +typedef struct { + char **argv; + INT32 argvCount; + char **envp; + INT32 envpCount; +} mmArgvEnv; + +typedef struct { + char arch[MMPA_CPUDESC_DEFAULT_SIZE]; + char manufacturer[MMPA_CPUDESC_DEFAULT_SIZE]; // vendor + char version[MMPA_CPUDESC_DEFAULT_SIZE]; // modelname + INT32 frequency; // cpu frequency + INT32 maxFrequency; // max speed + INT32 ncores; // cpu cores + INT32 nthreads; // cpu thread count + INT32 ncounts; // logical cpu nums +} mmCpuDesc; + +typedef mode_t MODE; + +typedef struct { + INT32 detachFlag; // Determine whether to set separation property 0, not to separate 1 + INT32 priorityFlag; // Determine whether to set priority 0 and not set 1 + INT32 priority; // Priority value range to be set 1-99 + INT32 policyFlag; // Set scheduling policy or not 0 do not set 1 setting + INT32 policy; // Scheduling policy value value + // MMPA_THREAD_SCHED_RR + // MMPA_THREAD_SCHED_OTHER + // MMPA_THREAD_SCHED_FIFO + INT32 stackFlag; // Set stack size or not: 0 does not set 1 setting + UINT32 stackSize; // The stack size unit bytes to be set cannot be less than MMPA_THREAD_STACK_MIN +} mmThreadAttr; + +#ifdef __ANDROID__ +#define S_IREAD S_IRUSR +#define S_IWRITE S_IWUSR +#endif + +#define M_FILE_RDONLY O_RDONLY +#define M_FILE_WRONLY O_WRONLY +#define M_FILE_RDWR O_RDWR +#define M_FILE_CREAT O_CREAT + +#define M_RDONLY O_RDONLY +#define M_WRONLY O_WRONLY +#define M_RDWR O_RDWR +#define M_CREAT O_CREAT +#define M_BINARY O_RDONLY + +#define M_IREAD S_IREAD +#define M_IRUSR S_IRUSR +#define M_IWRITE S_IWRITE +#define M_IWUSR S_IWUSR +#define M_IXUSR S_IXUSR +#define FDSIZE 64 +#define M_MSG_CREAT IPC_CREAT +#define M_MSG_EXCL (IPC_CREAT | IPC_EXCL) +#define M_MSG_NOWAIT IPC_NOWAIT + +#define M_WAIT_NOHANG WNOHANG // Non blocking waiting +#define M_WAIT_UNTRACED \ + WUNTRACED // If the subprocess enters the suspended state, it will return immediately + // But the end state of the subprocess is ignored +#define M_UMASK_USRREAD S_IRUSR +#define M_UMASK_GRPREAD S_IRGRP +#define M_UMASK_OTHREAD S_IROTH + +#define M_UMASK_USRWRITE S_IWUSR +#define M_UMASK_GRPWRITE S_IWGRP +#define M_UMASK_OTHWRITE S_IWOTH + +#define M_UMASK_USREXEC S_IXUSR +#define M_UMASK_GRPEXEC S_IXGRP +#define M_UMASK_OTHEXEC S_IXOTH + +#define mmConstructor(x) __attribute__((constructor)) VOID x() +#define mmDestructor(x) __attribute__((destructor)) VOID x() + +#define MMPA_NO_ARGUMENT 0 +#define MMPA_REQUIRED_ARGUMENT 1 +#define MMPA_OPTIONAL_ARGUMENT 2 + +#define MMPA_MAX_PATH PATH_MAX + +#define M_F_OK F_OK +#define M_R_OK R_OK +#define M_W_OK W_OK + +#define MMPA_RTLD_NOW RTLD_NOW +#define MMPA_RTLD_GLOBAL RTLD_GLOBAL + +#define MMPA_DL_EXT_NAME ".so" + +extern INT32 mmCreateTask(mmThread *threadHandle, mmUserBlock_t *funcBlock); +extern INT32 mmJoinTask(mmThread *threadHandle); +extern INT32 mmMutexInit(mmMutex_t *mutex); +extern INT32 mmMutexLock(mmMutex_t *mutex); +extern INT32 mmMutexUnLock(mmMutex_t *mutex); +extern INT32 mmMutexDestroy(mmMutex_t *mutex); +extern INT32 mmCondInit(mmCond *cond); +extern INT32 mmCondLockInit(mmMutexFC *mutex); +extern INT32 mmCondLock(mmMutexFC *mutex); +extern INT32 mmCondUnLock(mmMutexFC *mutex); +extern INT32 mmCondLockDestroy(mmMutexFC 
*mutex); +extern INT32 mmCondWait(mmCond *cond, mmMutexFC *mutex); +extern INT32 mmCondTimedWait(mmCond *cond, mmMutexFC *mutex, UINT32 milliSecond); +extern INT32 mmCondNotify(mmCond *cond); +extern INT32 mmCondNotifyAll(mmCond *cond); +extern INT32 mmCondDestroy(mmCond *cond); +extern INT32 mmGetPid(); +extern INT32 mmGetTid(); +extern INT32 mmGetPidHandle(mmProcess *processHandle); +extern INT32 mmGetLocalTime(mmSystemTime_t *sysTime); + +extern INT32 mmSemInit(mmSem_t *sem, UINT32 value); +extern INT32 mmSemWait(mmSem_t *sem); +extern INT32 mmSemPost(mmSem_t *sem); +extern INT32 mmSemDestroy(mmSem_t *sem); +extern INT32 mmOpen(const CHAR *pathName, INT32 flags); +extern INT32 mmOpen2(const CHAR *pathName, INT32 flags, MODE mode); +extern INT32 mmClose(INT32 fd); +extern mmSsize_t mmWrite(INT32 fd, VOID *buf, UINT32 bufLen); +extern mmSsize_t mmRead(INT32 fd, VOID *buf, UINT32 bufLen); +extern mmSockHandle mmSocket(INT32 sockFamily, INT32 type, INT32 protocol); +extern INT32 mmBind(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); +extern INT32 mmListen(mmSockHandle sockFd, INT32 backLog); +extern mmSockHandle mmAccept(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t *addrLen); +extern INT32 mmConnect(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); +extern INT32 mmCloseSocket(mmSockHandle sockFd); +extern mmSsize_t mmSocketSend(mmSockHandle sockFd, VOID *sendBuf, INT32 sendLen, INT32 sendFlag); +extern mmSsize_t mmSocketRecv(mmSockHandle sockFd, VOID *recvBuf, INT32 recvLen, INT32 recvFlag); +extern INT32 mmSAStartup(); +extern INT32 mmSACleanup(); +extern VOID *mmDlopen(const CHAR *fileName, INT32 mode); +extern VOID *mmDlsym(VOID *handle, CHAR *funcName); +extern INT32 mmDlclose(VOID *handle); +extern CHAR *mmDlerror(); +extern INT32 mmCreateAndSetTimer(mmTimer *timerHandle, mmUserBlock_t *timerBlock, UINT milliSecond, UINT period); +extern INT32 mmDeleteTimer(mmTimer timerHandle); +extern INT32 mmStatGet(const CHAR *path, mmStat_t *buffer); +extern INT32 mmStat64Get(const CHAR *path, mmStat64_t *buffer); +extern INT32 mmMkdir(const CHAR *pathName, mmMode_t mode); +extern INT32 mmSleep(UINT32 milliSecond); + +extern INT32 mmCreateTaskWithAttr(mmThread *threadHandle, mmUserBlock_t *funcBlock); +extern INT32 mmGetProcessPrio(mmProcess pid); +extern INT32 mmSetProcessPrio(mmProcess pid, INT32 processPrio); +extern INT32 mmGetThreadPrio(mmThread *threadHandle); +extern INT32 mmSetThreadPrio(mmThread *threadHandle, INT32 threadPrio); +extern INT32 mmAccess(const CHAR *pathName); +extern INT32 mmAccess2(const CHAR *pathName, INT32 mode); +extern INT32 mmRmdir(const CHAR *pathName); + +extern INT32 mmIoctl(mmProcess fd, INT32 ioctlCode, mmIoctlBuf *bufPtr); +extern INT32 mmSemTimedWait(mmSem_t *sem, INT32 timeout); +extern mmSsize_t mmWritev(mmProcess fd, mmIovSegment *iov, INT32 iovcnt); +extern VOID mmMb(); +extern INT32 mmInetAton(const CHAR *addrStr, mmInAddr *addr); + +extern mmProcess mmOpenFile(const CHAR *fileName, UINT32 access, mmCreateFlag fileFlag); +extern mmSsize_t mmReadFile(mmProcess fileId, VOID *buffer, INT32 len); +extern mmSsize_t mmWriteFile(mmProcess fileId, VOID *buffer, INT32 len); +extern INT32 mmCloseFile(mmProcess fileId); + +extern mmAtomicType mmSetData(mmAtomicType *ptr, mmAtomicType value); +extern mmAtomicType mmValueInc(mmAtomicType *ptr, mmAtomicType value); +extern mmAtomicType mmValueSub(mmAtomicType *ptr, mmAtomicType value); +extern INT32 mmCreateTaskWithDetach(mmThread *threadHandle, mmUserBlock_t *funcBlock); + +// The 
following 3 interfaces are to be deleted +extern INT32 mmCreateNamedPipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); +extern INT32 mmOpenNamePipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); +extern VOID mmCloseNamedPipe(mmPipeHandle namedPipe[]); + +extern INT32 mmCreatePipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); +extern INT32 mmOpenPipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); +extern VOID mmClosePipe(mmPipeHandle pipe[], UINT32 pipeCount); + +// Poll related interface +extern mmCompletionHandle mmCreateCompletionPort(); +extern VOID mmCloseCompletionPort(mmCompletionHandle handle); +extern INT32 mmPoll(mmPollfd *fds, INT32 fdCount, INT32 timeout, mmCompletionHandle handleIOCP, pmmPollData polledData, + mmPollBack pollBack); +extern INT32 mmGetErrorCode(); +extern INT32 mmGetTimeOfDay(mmTimeval *timeVal, mmTimezone *timeZone); +extern mmTimespec mmGetTickCount(); +extern INT32 mmGetRealPath(CHAR *path, CHAR *realPath); +extern INT32 mmRealPath(const CHAR *path, CHAR *realPath, INT32 realPathLen); + +extern INT32 mmDup2(INT32 oldFd, INT32 newFd); + +extern INT32 mmDup(INT32 fd); + +extern INT32 mmUnlink(const CHAR *filename); + +extern INT32 mmChmod(const CHAR *filename, INT32 mode); + +extern INT32 mmFileno(FILE *stream); + +extern INT32 mmScandir(const CHAR *path, mmDirent ***entryList, mmFilter filterFunc, mmSort sort); + +extern VOID mmScandirFree(mmDirent **entryList, INT32 count); + +extern mmMsgid mmMsgCreate(mmKey_t key, INT32 msgFlag); + +extern mmMsgid mmMsgOpen(mmKey_t key, INT32 msgFlag); + +extern INT32 mmMsgSnd(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); + +extern INT32 mmMsgRcv(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag); + +extern INT32 mmMsgClose(mmMsgid msqid); + +extern INT32 mmLocalTimeR(const time_t *timep, struct tm *result); + +extern INT32 mmGetOpt(INT32 argc, char *const *argv, const char *opts); +extern INT32 mmGetOptLong(INT32 argc, char *const *argv, const char *opts, const mmStructOption *longOpts, + INT32 *longIndex); + +extern LONG mmLseek(INT32 fd, INT64 offset, INT32 seekFlag); +extern INT32 mmFtruncate(mmProcess fd, UINT32 length); + +extern INT32 mmTlsCreate(mmThreadKey *key, VOID (*destructor)(VOID *)); +extern INT32 mmTlsSet(mmThreadKey key, const VOID *value); +extern VOID *mmTlsGet(mmThreadKey key); +extern INT32 mmTlsDelete(mmThreadKey key); +extern INT32 mmGetOsType(); + +extern INT32 mmFsync(mmProcess fd); +extern INT32 mmChdir(const CHAR *path); +extern INT32 mmUmask(INT32 pmode); +extern INT32 mmThreadKill(mmThread id); +extern INT32 mmWaitPid(mmProcess pid, INT32 *status, INT32 options); + +extern INT32 mmGetCwd(CHAR *buffer, INT32 maxLen); +extern INT32 mmGetEnv(const CHAR *name, CHAR *value, UINT32 len); +extern INT32 mmSetEnv(const CHAR *name, const CHAR *value, INT32 overwrite); +extern CHAR *mmStrTokR(CHAR *str, const CHAR *delim, CHAR **saveptr); +extern CHAR *mmDirName(CHAR *path); +extern CHAR *mmBaseName(CHAR *path); +extern INT32 mmGetDiskFreeSpace(const char *path, mmDiskSize *diskSize); + +/* + * Function: set the thread name created by mmcreatetask + * Input: pstThreadHandle: thread ID + * name: thread name, the actual length of name must be < MMPA_THREADNAME_SIZE + * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the + * execution failure returns EN_ERROR + */ +extern INT32 mmSetThreadName(mmThread *threadHandle, const CHAR *name); + +/* + * Function: get thread name + * 
Input: pstThreadHandle: thread ID + * size: Cache length of thread name + * name:User allocated cache for thread name, Cache length must be >= MMPA_THREADNAME_SIZE + * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the + * execution failure returns EN_ERROR + */ +extern INT32 mmGetThreadName(mmThread *threadHandle, CHAR *name, INT32 size); +/* + * Function:Set the thread name of the currently executing thread - call inside the thread body + * Input:name:Thread name to be set + * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the + * execution failure returns EN_ERROR + */ +extern INT32 mmSetCurrentThreadName(const CHAR *name); +/* + * Function:Get the thread name of the currently executing thread - in body call + * Input:name:The name of the thread to get, and the cache is allocated by the user,size>=MMPA_THREADNAME_SIZE + * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the + * execution failure returns EN_ERROR + */ +extern INT32 mmGetCurrentThreadName(CHAR *name, INT32 size); +extern INT32 mmGetFileSize(const CHAR *fileName, ULONGLONG *length); +extern INT32 mmIsDir(const CHAR *fileName); +extern INT32 mmGetOsName(CHAR *name, INT32 nameSize); +extern INT32 mmGetOsVersion(CHAR *versionInfo, INT32 versionLength); +extern INT32 mmGetMac(mmMacInfo **list, INT32 *count); +extern INT32 mmGetMacFree(mmMacInfo *list, INT32 count); +extern INT32 mmGetCpuInfo(mmCpuDesc **cpuInfo, INT32 *count); +extern INT32 mmCpuInfoFree(mmCpuDesc *cpuInfo, INT32 count); +extern INT32 mmCreateProcess(const CHAR *fileName, const mmArgvEnv *env, const char *stdoutRedirectFile, mmProcess *id); + +extern INT32 mmCreateTaskWithThreadAttr(mmThread *threadHandle, const mmUserBlock_t *funcBlock, + const mmThreadAttr *threadAttr); +#define MMPA_DLL_API + +#ifdef __cplusplus +#if __cplusplus +} +#endif /* __cpluscplus */ +#endif // __cpluscplus + +#endif // MMPA_LINUX_MMPA_LINUX_H_ diff --git a/inc/mmpa/sub_inc/mmpa_typedef_linux.h b/inc/mmpa/sub_inc/mmpa_typedef_linux.h new file mode 100644 index 0000000000000000000000000000000000000000..fc862a72795a2fd66ad1e520fd917f115954b4ce --- /dev/null +++ b/inc/mmpa/sub_inc/mmpa_typedef_linux.h @@ -0,0 +1,95 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MMPA_TYPEDEF_LINUX_H +#define MMPA_TYPEDEF_LINUX_H + +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif // __cpluscplus +#endif // __cpluscplus + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +typedef unsigned char UINT8; +typedef signed char INT8; +typedef unsigned short UINT16; +typedef signed short INT16; +typedef unsigned int UINT32; +typedef signed int INT32; +typedef unsigned long long UINT64; +typedef signed long long INT64; +typedef float FLOAT; +typedef double DOUBLE; +typedef void VOID; +typedef unsigned char UCHAR; +typedef char CHAR; +typedef unsigned short USHORT; +typedef short SHORT; +typedef unsigned int UINT; +typedef int INT; +typedef unsigned long ULONG; +typedef unsigned long long ULONGLONG; + +typedef long LONG; + +#define HANDLE_INVALID_VALUE (-1) +#define MMPA_MEM_MAX_LEN (0x7fffffff) +#define MMPA_PROCESS_ERROR (0x7fffffff) +#define PATH_SIZE 256 +#define MAX_IOVEC_SIZE 32 +#define MMPA_MAX_SLEEP_MILLSECOND 4294967 +#define MAX_PIPE_COUNT 2 +#define MMPA_PIPE_COUNT 2 +#define MMPA_THREADNAME_SIZE 16 +#define MMPA_MIN_OS_NAME_SIZE 64 +#define MMPA_MIN_OS_VERSION_SIZE 128 + +#define MMPA_ONE_THOUSAND 1000 +#define MMPA_ONE_BILLION 1000000000 +#define MMPA_COMPUTER_BEGIN_YEAR 1900 +#define MMPA_ZERO 0 +#define MMPA_MAX_THREAD_PIO 99 +#define MMPA_MIN_THREAD_PIO 1 +#define MMPA_DEFAULT_PIPE_PERMISSION 0777 +#define MMPA_DEFAULT_MSG_TYPE 1 + +#define MMPA_THREAD_SCHED_RR SCHED_RR +#define MMPA_THREAD_SCHED_FIFO SCHED_FIFO +#define MMPA_THREAD_SCHED_OTHER SCHED_OTHER +#define MMPA_THREAD_MIN_STACK_SIZE PTHREAD_STACK_MIN + +#define MMPA_MAX_NI 19 +#define MMPA_MIN_NI (-20) + +#define EN_OK 0 +#define EN_ERR 1 +#define EN_ERROR (-1) +#define EN_INVALID_PARAM (-2) + +#ifdef __cplusplus +#if __cplusplus +} +#endif // __cpluscplus +#endif // __cpluscplus +#endif // MMPA_TYPEDEF_LINUX_H_ diff --git a/inc/hccl/cltm.h b/inc/mmpa/sub_inc/mmpa_typedef_win.h similarity index 33% rename from inc/hccl/cltm.h rename to inc/mmpa/sub_inc/mmpa_typedef_win.h index b741f46b6cf564b1178788f742ad01a774c7aadf..78ce6a8637e511008d783c0dc8382a770eb617da 100644 --- a/inc/hccl/cltm.h +++ b/inc/mmpa/sub_inc/mmpa_typedef_win.h @@ -1,4 +1,4 @@ -/** +/** * Copyright 2019-2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,43 +14,66 @@ * limitations under the License. */ -/** - * @file cltm.h - * @brief Cluster Logical Topology Management API - * - */ - -#ifndef CLTM_INC_H_ -#define CLTM_INC_H_ +#ifndef MMPA_TYPEDEF_WIN_H +#define MMPA_TYPEDEF_WIN_H #ifdef __cplusplus +#if __cplusplus extern "C" { -#endif // __cplusplus +#endif // __cpluscplus +#endif // __cpluscplus -/** - * @brief CLTM functions return value definition - */ -typedef enum tagCltmResult { - CLTM_SUCCESS = 0, /**< success */ - CTLM_E_PTR, /**< empty pointer */ - CLTM_E_PARA, /**< parameter error */ - CLTM_E_NO_RESOURCE, /**< resource not enough error */ - CLTM_E_RESERVED /**< reserved */ -} cltmResult_t; - -/** - * @brief Generate rank table - * - * @param allocatedResource A string identifying the resource list allocate by the CSM. - * @param rankTableBuf A string identifying the buffer of . - * @param maxBufSize An integer(u32) identifying the size of rank table buffer. - * @param usedBufSize A pointer identifying the used size of rank table buffer. 
- * @return cltmResult_t - */ -cltmResult_t cltm_generate_ranktable(const char *allocatedResource, char* rankTableBuf, - unsigned int maxBufSize, unsigned int *usedBufSize); +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#define EN_OK 0 +#define EN_ERR 1 +#define EN_ERROR (-1) +#define EN_INVALID_PARAM (-2) + +#define HANDLE_INVALID_VALUE (-1) +#define INVALID_SOCKET_HANDLE INVALID_SOCKET +#define MMPA_MEM_MAX_LEN (0x7fffffff) +#define MMPA_PROCESS_ERROR (0x7fffffff) + +#define MMPA_ONE_THOUSAND 1000 +#define MMPA_COMPUTER_BEGIN_YEAR 1900 +#define SUMMER_TIME_OR_NOT (-1) +#define MMPA_ZERO 0 +#define MMPA_VALUE_ONE 1 +#define MMPA_SOCKET_MAIN_EDITION 2 +#define MMPA_SOCKET_SECOND_EDITION 0 +#define MMPA_PIPE_BUF_SIZE 1024 +#define MMPA_MAX_SCANDIR_COUNT 1024 +#define MAX_IOVEC_SIZE 32 +#define MMPA_PIPE_COUNT 2 +#define MMPA_THREADNAME_SIZE 16 +#define MMPA_MIN_OS_NAME_SIZE (MAX_COMPUTERNAME_LENGTH + 1) +#define MMPA_MIN_OS_VERSION_SIZE 64 + +#define MMPA_MAX_NI 19 +#define MMPA_MIDDLE_NI 5 +#define MMPA_LOW_NI (-5) +#define MMPA_MIN_NI (-20) + +#define MMPA_MAX_THREAD_PIO 99 +#define MMPA_MIDDLE_THREAD_PIO 66 +#define MMPA_LOW_THREAD_PIO 33 +#define MMPA_MIN_THREAD_PIO 1 + +#define MMPA_THREAD_SCHED_RR 0 +#define MMPA_THREAD_SCHED_FIFO 0 +#define MMPA_THREAD_SCHED_OTHER 0 +#define MMPA_THREAD_MIN_STACK_SIZE 0 #ifdef __cplusplus +#if __cplusplus } -#endif -#endif // CLTM_INC_H_ +#endif // __cpluscplus +#endif // __cpluscplus +#endif // _MMPA_TYPEDEF_WIN_H_ diff --git a/inc/mmpa/sub_inc/mmpa_win.h b/inc/mmpa/sub_inc/mmpa_win.h new file mode 100644 index 0000000000000000000000000000000000000000..a78ceb21894540428e3a88d0e2e49a9440f370fd --- /dev/null +++ b/inc/mmpa/sub_inc/mmpa_win.h @@ -0,0 +1,465 @@ +/** + * Copyright 2019-2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MMPA_WIN_MMPA_WIN_H +#define MMPA_WIN_MMPA_WIN_H +#ifdef __cplusplus +#if __cplusplus +extern "C" { +#endif // __cpluscplus +#endif // __cpluscplus +#ifdef MMPA_DLL +#define MMPA_DLL_API __declspec(dllexport) +#else +#define MMPA_DLL_API __declspec(dllimport) +#endif + +#define MMPA_MACINFO_DEFAULT_SIZE 18 +#define MMPA_CPUDESC_DEFAULT_SIZE 64 + +MMPA_DLL_API extern char *optarg; +MMPA_DLL_API extern int opterr; +MMPA_DLL_API extern int optind; +MMPA_DLL_API extern int optopt; + +#pragma section(".CRT$XCU", long, read) +#pragma section(".CRT$XPU", long, read) + +typedef HANDLE mmMutex_t; +typedef HANDLE mmThread; +typedef HANDLE mmProcess; +typedef HANDLE mmPollHandle; +typedef HANDLE mmPipeHandle; +typedef HANDLE mmCompletionHandle; + +typedef CRITICAL_SECTION mmMutexFC; +typedef CONDITION_VARIABLE mmCond; + +typedef VOID *(*userProcFunc)(VOID *pulArg); +typedef struct { + userProcFunc procFunc; + VOID *pulArg; +} mmUserBlock_t; + +typedef DWORD mmThreadKey; +typedef SYSTEMTIME mmSystemTime_t; + +typedef HANDLE mmSem_t; +typedef SOCKET mmSockHandle; +typedef struct sockaddr mmSockAddr; +typedef int mmSocklen_t; +typedef int mmSemTimeout_t; +typedef long mmAtomicType; +typedef DWORD mmExitCode; +typedef int mmKey_t; +typedef HANDLE mmMsgid; + +typedef INT32 mmSsize_t; + +typedef enum { + DT_DIR = FILE_ATTRIBUTE_DIRECTORY, +} mmDtype; + +typedef struct { + unsigned char d_type; + char d_name[MAX_PATH]; // file name +} mmDirent; + +typedef int (*mmFilter)(const mmDirent *entry); +typedef int (*mmSort)(const mmDirent **a, const mmDirent **b); + +typedef struct { + VOID *sendBuf; + INT32 sendLen; +} mmIovSegment; +typedef PVOID mmInAddr; + +typedef enum { + pollTypeRead = 1, // pipeline reading + pollTypeRecv, // socket receive + pollTypeIoctl, // ioctl read +} mmPollType; + +typedef struct { + HANDLE completionHandle; + mmPollType overlapType; + OVERLAPPED oa; +} mmComPletionKey, *pmmComPletionKey; + +typedef struct { + VOID *priv; // User defined private content + mmPollHandle bufHandle; // Value of handle corresponding to buf + mmPollType bufType; // Data types polled to + VOID *buf; + UINT32 bufLen; + UINT32 bufRes; +} mmPollData, *pmmPollData; + +typedef VOID (*mmPollBack)(pmmPollData); +typedef struct { + mmPollHandle handle; // The file descriptor or handle of poll is required + mmPollType pollType; // Operation type requiring poll,read or recv or ioctl + INT32 ioctlCode; // IOCTL operation code, dedicated to IOCTL + mmComPletionKey completionKey; // The default value is blank, which will be used in windows to receive the data with + // different handle +} mmPollfd; + +typedef struct { + OVERLAPPED oa; + HANDLE completionHandle; + WSABUF DataBuf; +} PRE_IO_DATA, *PPRE_IO_DATA; + +typedef OVERLAPPED mmOverLap; + +typedef struct { + UINT32 createFlag; + INT32 oaFlag; // Overlap operation is supported if it is not 0 +} mmCreateFlag; + +typedef struct { + VOID *inbuf; + INT32 inbufLen; + VOID *outbuf; + INT32 outbufLen; + mmOverLap *oa; +} mmIoctlBuf; + +typedef struct { + HANDLE timerQueue; + HANDLE timerHandle; +} mmTimerHandle; + +typedef struct { + LONG tv_sec; + LONG tv_usec; +} mmTimeval; + +typedef struct { + INT32 tz_minuteswest; // How many minutes is it different from Greenwich + INT32 tz_dsttime; // DST correction type +} mmTimezone; + +typedef struct { + LONG tv_sec; + LONG tv_nsec; +} mmTimespec; + +typedef mmTimerHandle mmTimer; + +#define mmTLS __declspec(thread) + +typedef struct stat mmStat_t; +typedef struct _stat64 mmStat64_t; +typedef int mmMode_t; + 
+typedef int MODE; + +typedef struct { + const char *name; + int has_arg; + int *flag; + int val; +} mmStructOption; + +typedef struct { + ULONGLONG totalSize; + ULONGLONG freeSize; + ULONGLONG availSize; +} mmDiskSize; + +typedef struct { + char addr[MMPA_MACINFO_DEFAULT_SIZE]; // ex:aa-bb-cc-dd-ee-ff\0 +} mmMacInfo; + +typedef struct { + char arch[MMPA_CPUDESC_DEFAULT_SIZE]; + char manufacturer[MMPA_CPUDESC_DEFAULT_SIZE]; // vendor + char version[MMPA_CPUDESC_DEFAULT_SIZE]; // modelname + INT32 frequency; // cpu frequency + INT32 maxFrequency; // max speed + INT32 ncores; // cpu cores + INT32 nthreads; // cpu thread count + INT32 ncounts; // logical cpu nums +} mmCpuDesc; + +typedef struct { + char **argv; + INT32 argvCount; + char **envp; + INT32 envpCount; +} mmArgvEnv; + +// Windows currently does not support properties other than thread separation properties +typedef struct { + INT32 detachFlag; // Thread detach property: 0 do not detach 1 detach + INT32 priorityFlag; + INT32 priority; + INT32 policyFlag; + INT32 policy; + INT32 stackFlag; + UINT32 stackSize; +} mmThreadAttr; + +typedef VOID (*mmPf)(VOID); +#define M_FILE_RDONLY GENERIC_READ +#define M_FILE_WRONLY GENERIC_WRITE +#define M_FILE_RDWR (GENERIC_READ | GENERIC_WRITE) +#define M_FILE_CREAT OPEN_ALWAYS + +#define M_RDONLY _O_RDONLY +#define M_WRONLY _O_WRONLY +#define M_RDWR _O_RDWR +#define M_CREAT _O_CREAT +#define M_BINARY _O_BINARY + +#define M_IREAD _S_IREAD +#define M_IRUSR _S_IREAD +#define M_IWRITE _S_IWRITE +#define M_IWUSR _S_IWRITE +#define M_IXUSR 0 + +#define M_MSG_CREAT 1 +#define M_MSG_EXCL 2 +#define M_MSG_NOWAIT 3 + +#define M_WAIT_NOHANG 1 +#define M_WAIT_UNTRACED 2 + +#define M_UMASK_USRREAD _S_IREAD +#define M_UMASK_GRPREAD _S_IREAD +#define M_UMASK_OTHREAD _S_IREAD + +#define M_UMASK_USRWRITE _S_IWRITE +#define M_UMASK_GRPWRITE _S_IWRITE +#define M_UMASK_OTHWRITE _S_IWRITE + +#define M_UMASK_USREXEC 0 +#define M_UMASK_GRPEXEC 0 +#define M_UMASK_OTHEXEC 0 + +#define mmConstructor(x) __declspec(allocate(".CRT$XCU")) mmPf con = x +#define mmDestructor(x) __declspec(allocate(".CRT$XPU")) mmPf de = x + +#define MMPA_PRINT_ERROR ((opterr) && (*options != ':')) +#define MMPA_FLAG_PERMUTE 0x01 // permute non-options to the end of argv +#define MMPA_FLAG_ALLARGS 0x02 // treat non-options as args to option "-1" +#define MMPA_FLAG_LONGONLY 0x04 // operate as getopt_long_only +// return values +#define MMPA_BADCH (INT32)'?' +#define MMPA_BADARG ((*options == ':') ? 
(INT32)':' : (INT32)'?') +#define MMPA_INORDER (INT32)1 + +#define MMPA_NO_ARGUMENT 0 +#define MMPA_REQUIRED_ARGUMENT 1 +#define MMPA_OPTIONAL_ARGUMENT 2 + +#define MMPA_EMSG "" +#define MMPA_MAX_PATH MAX_PATH + +#define M_F_OK 0 +#define M_W_OK 2 +#define M_R_OK 4 + +#define MMPA_RTLD_NOW 0 +#define MMPA_RTLD_GLOBAL 0 + +#define MMPA_DL_EXT_NAME ".dll" + +#define __attribute__(v) + +_declspec(dllexport) INT32 mmCreateTask(mmThread *threadHandle, mmUserBlock_t *funcBlock); +_declspec(dllexport) INT32 mmJoinTask(mmThread *threadHandle); +_declspec(dllexport) INT32 mmMutexInit(mmMutex_t *mutex); +_declspec(dllexport) INT32 mmMutexLock(mmMutex_t *mutex); +_declspec(dllexport) INT32 mmMutexUnLock(mmMutex_t *mutex); +_declspec(dllexport) INT32 mmMutexDestroy(mmMutex_t *mutex); +_declspec(dllexport) INT32 mmCondInit(mmCond *cond); +_declspec(dllexport) INT32 mmCondLockInit(mmMutexFC *mutex); +_declspec(dllexport) INT32 mmCondLock(mmMutexFC *mutex); +_declspec(dllexport) INT32 mmCondUnLock(mmMutexFC *mutex); +_declspec(dllexport) INT32 mmCondLockDestroy(mmMutexFC *mutex); +_declspec(dllexport) INT32 mmCondWait(mmCond *cond, mmMutexFC *mutex); +_declspec(dllexport) INT32 mmCondTimedWait(mmCond *cond, mmMutexFC *mutex, UINT32 milliSecond); + +_declspec(dllexport) INT32 mmCondNotify(mmCond *cond); +_declspec(dllexport) INT32 mmCondNotifyAll(mmCond *cond); +_declspec(dllexport) INT32 mmCondDestroy(mmCond *cond); +_declspec(dllexport) INT32 mmGetPid(VOID); +_declspec(dllexport) INT32 mmGetTid(VOID); +_declspec(dllexport) INT32 mmGetPidHandle(mmProcess *processHandle); +_declspec(dllexport) INT32 mmGetLocalTime(mmSystemTime_t *sysTime); +_declspec(dllexport) INT32 mmSemInit(mmSem_t *sem, UINT32 value); +_declspec(dllexport) INT32 mmSemWait(mmSem_t *sem); +_declspec(dllexport) INT32 mmSemPost(mmSem_t *sem); +_declspec(dllexport) INT32 mmSemDestroy(mmSem_t *sem); +_declspec(dllexport) INT32 mmOpen(const CHAR *pathName, INT32 flags); +_declspec(dllexport) INT32 mmOpen2(const CHAR *pathName, INT32 flags, MODE mode); +_declspec(dllexport) INT32 mmClose(INT32 fd); +_declspec(dllexport) mmSsize_t mmWrite(INT32 fd, VOID *buf, UINT32 bufLen); +_declspec(dllexport) mmSsize_t mmRead(INT32 fd, VOID *buf, UINT32 bufLen); +_declspec(dllexport) mmSockHandle mmSocket(INT32 sockFamily, INT32 type, INT32 protocol); +_declspec(dllexport) INT32 mmBind(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); +_declspec(dllexport) INT32 mmListen(mmSockHandle sockFd, INT32 backLog); +_declspec(dllexport) mmSockHandle mmAccept(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t *addrLen); +_declspec(dllexport) INT32 mmConnect(mmSockHandle sockFd, mmSockAddr *addr, mmSocklen_t addrLen); +_declspec(dllexport) INT32 mmCloseSocket(mmSockHandle sockFd); +_declspec(dllexport) mmSsize_t mmSocketRecv(mmSockHandle sockFd, VOID *recvBuf, INT32 recvLen, INT32 recvFlag); +_declspec(dllexport) mmSsize_t mmSocketSend(mmSockHandle sockFd, VOID *sendBuf, INT32 sendLen, INT32 sendFlag); +_declspec(dllexport) INT32 mmSAStartup(VOID); +_declspec(dllexport) INT32 mmSACleanup(VOID); +_declspec(dllexport) VOID *mmDlopen(const CHAR *fileName, INT mode); +_declspec(dllexport) VOID *mmDlsym(VOID *handle, CHAR *fileName); +_declspec(dllexport) INT32 mmDlclose(VOID *handle); +_declspec(dllexport) CHAR *mmDlerror(VOID); +_declspec(dllexport) INT32 + mmCreateAndSetTimer(mmTimer *timerHandle, mmUserBlock_t *timerBlock, UINT milliSecond, UINT period); +_declspec(dllexport) INT32 mmDeleteTimer(mmTimer timerHandle); +_declspec(dllexport) INT32 
mmStatGet(const CHAR *path, mmStat_t *buffer); +_declspec(dllexport) INT32 mmStat64Get(const CHAR *path, mmStat64_t *buffer); +_declspec(dllexport) INT32 mmMkdir(const CHAR *pathName, mmMode_t mode); +_declspec(dllexport) INT32 mmSleep(UINT32 milliSecond); +_declspec(dllexport) INT32 mmCreateTaskWithAttr(mmThread *threadHandle, mmUserBlock_t *funcBlock); +_declspec(dllexport) INT32 mmGetProcessPrio(mmProcess pid); +_declspec(dllexport) INT32 mmSetProcessPrio(mmProcess pid, INT32 processPrio); +_declspec(dllexport) INT32 mmGetThreadPrio(mmThread *threadHandle); +_declspec(dllexport) INT32 mmSetThreadPrio(mmThread *threadHandle, INT32 threadPrio); +_declspec(dllexport) INT32 mmAccess(const CHAR *pathName); +_declspec(dllexport) INT32 mmAccess2(const CHAR *pathName, INT32 mode); +_declspec(dllexport) INT32 mmRmdir(const CHAR *pathName); + +_declspec(dllexport) INT32 mmIoctl(mmProcess fd, INT32 ioctlCode, mmIoctlBuf *bufPtr); +_declspec(dllexport) INT32 mmSemTimedWait(mmSem_t *sem, INT32 timeout); +_declspec(dllexport) mmSsize_t mmWritev(mmSockHandle fd, mmIovSegment *iov, INT32 iovcnt); +_declspec(dllexport) VOID mmMb(); +_declspec(dllexport) INT32 mmInetAton(const CHAR *addrStr, mmInAddr *addr); + +_declspec(dllexport) mmProcess mmOpenFile(const CHAR *fileName, UINT32 access, mmCreateFlag fileFlag); +_declspec(dllexport) mmSsize_t mmReadFile(mmProcess fileId, VOID *buffer, INT32 len); +_declspec(dllexport) mmSsize_t mmWriteFile(mmProcess fileId, VOID *buffer, INT32 len); +_declspec(dllexport) INT32 mmCloseFile(mmProcess fileId); + +_declspec(dllexport) mmAtomicType mmSetData(mmAtomicType *ptr, mmAtomicType value); +_declspec(dllexport) mmAtomicType mmValueInc(mmAtomicType *ptr, mmAtomicType value); +_declspec(dllexport) mmAtomicType mmValueSub(mmAtomicType *ptr, mmAtomicType value); +_declspec(dllexport) INT32 mmCreateTaskWithDetach(mmThread *threadHandle, mmUserBlock_t *funcBlock); + +_declspec(dllexport) INT32 mmCreateNamedPipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); +_declspec(dllexport) INT32 mmOpenNamePipe(mmPipeHandle pipe[], CHAR *pipeName[], INT32 waitMode); +_declspec(dllexport) VOID mmCloseNamedPipe(mmPipeHandle namedPipe[]); + +_declspec(dllexport) INT32 mmCreatePipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); +_declspec(dllexport) INT32 mmOpenPipe(mmPipeHandle pipe[], CHAR *pipeName[], UINT32 pipeCount, INT32 waitMode); +_declspec(dllexport) VOID mmClosePipe(mmPipeHandle pipe[], UINT32 pipeCount); + +_declspec(dllexport) mmCompletionHandle mmCreateCompletionPort(); +_declspec(dllexport) VOID mmCloseCompletionPort(mmCompletionHandle handle); +_declspec(dllexport) INT32 mmPoll(mmPollfd *fds, INT32 fdCount, INT32 timeout, mmCompletionHandle handleIOCP, + pmmPollData polledData, mmPollBack pollBack); + +_declspec(dllexport) INT32 mmGetErrorCode(); +_declspec(dllexport) INT32 mmGetTimeOfDay(mmTimeval *timeVal, mmTimezone *timeZone); +_declspec(dllexport) mmTimespec mmGetTickCount(); +_declspec(dllexport) INT32 mmGetRealPath(CHAR *path, CHAR *realPath); + +_declspec(dllexport) INT32 mmRealPath(const CHAR *path, CHAR *realPath, INT32 realPathLen); + +_declspec(dllexport) INT32 mmDup2(INT32 oldFd, INT32 newFd); +_declspec(dllexport) INT32 mmDup(INT32 fd); +_declspec(dllexport) INT32 mmUnlink(const CHAR *filename); +_declspec(dllexport) INT32 mmChmod(const CHAR *filename, INT32 mode); +_declspec(dllexport) INT32 mmFileno(FILE *stream); +_declspec(dllexport) INT32 mmScandir(const CHAR *path, mmDirent ***entryList, mmFilter filterFunc, mmSort 
sort);
+_declspec(dllexport) VOID mmScandirFree(mmDirent **entryList, INT32 count);
+
+_declspec(dllexport) mmMsgid mmMsgCreate(mmKey_t key, INT32 msgFlag);
+_declspec(dllexport) mmMsgid mmMsgOpen(mmKey_t key, INT32 msgFlag);
+_declspec(dllexport) INT32 mmMsgRcv(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag);
+_declspec(dllexport) INT32 mmMsgSnd(mmMsgid msqid, VOID *buf, INT32 bufLen, INT32 msgFlag);
+
+_declspec(dllexport) INT32 mmMsgClose(mmMsgid msqid);
+
+_declspec(dllexport) INT32 mmLocalTimeR(const time_t *timep, struct tm *result);
+_declspec(dllexport) INT32 mmGetOpt(INT32 argc, char *const *argv, const char *opts);
+_declspec(dllexport) INT32
+    mmGetOptLong(INT32 argc, CHAR *const *argv, const CHAR *opts, const mmStructOption *longopts, INT32 *longindex);
+
+_declspec(dllexport) LONG mmLseek(INT32 fd, INT64 offset, INT32 seekFlag);
+_declspec(dllexport) INT32 mmFtruncate(mmProcess fd, UINT32 length);
+
+_declspec(dllexport) INT32 mmTlsCreate(mmThreadKey *key, VOID (*destructor)(VOID *));
+_declspec(dllexport) INT32 mmTlsSet(mmThreadKey key, const VOID *value);
+_declspec(dllexport) VOID *mmTlsGet(mmThreadKey key);
+_declspec(dllexport) INT32 mmTlsDelete(mmThreadKey key);
+_declspec(dllexport) INT32 mmGetOsType();
+
+_declspec(dllexport) INT32 mmFsync(mmProcess fd);
+
+_declspec(dllexport) INT32 mmChdir(const CHAR *path);
+_declspec(dllexport) INT32 mmUmask(INT32 pmode);
+_declspec(dllexport) INT32 mmWaitPid(mmProcess pid, INT32 *status, INT32 options);
+
+_declspec(dllexport) INT32 mmGetCwd(CHAR *buffer, INT32 maxLen);
+_declspec(dllexport) CHAR *mmStrTokR(CHAR *str, const CHAR *delim, CHAR **saveptr);
+
+_declspec(dllexport) INT32 mmGetEnv(const CHAR *name, CHAR *value, UINT32 len);
+_declspec(dllexport) INT32 mmSetEnv(const CHAR *name, const CHAR *value, INT32 overwrite);
+_declspec(dllexport) CHAR *mmDirName(CHAR *path);
+_declspec(dllexport) CHAR *mmBaseName(CHAR *path);
+_declspec(dllexport) INT32 mmGetDiskFreeSpace(const char *path, mmDiskSize *diskSize);
+
+_declspec(dllexport) INT32 mmSetThreadName(mmThread *threadHandle, const CHAR *name);
+_declspec(dllexport) INT32 mmGetThreadName(mmThread *threadHandle, CHAR *name, INT32 size);
+
+/*
+ * Function: Set the thread name of the currently executing thread - called inside the thread body. Not yet
+ * supported on Windows; this is a null implementation.
+ * Input: name: thread name to be set
+ * The input parameter error returns EN_INVALID_PARAM, the execution success returns EN_OK, and the
+ * execution failure returns EN_ERROR
+ */
+_declspec(dllexport) INT32 mmSetCurrentThreadName(const CHAR *name);
+
+/*
+ * Function: Get the thread name of the currently executing thread - called inside the thread body. Not supported on
+ * Windows; this is a null implementation.
+ * Input: name: user-allocated buffer that receives the thread name; size must be >= MMPA_THREADNAME_SIZE.
+ * The input parameter error returns EN_INVALID_PARAM, the execution success returns + * EN_OK, and the execution failure returns EN_ERROR + */ +_declspec(dllexport) INT32 mmGetCurrentThreadName(CHAR *name, INT32 size); + +_declspec(dllexport) INT32 mmGetFileSize(const CHAR *fileName, ULONGLONG *length); +_declspec(dllexport) INT32 mmIsDir(const CHAR *fileName); +_declspec(dllexport) INT32 mmGetOsName(CHAR *name, INT32 nameSize); +_declspec(dllexport) INT32 mmGetOsVersion(CHAR *versionInfo, INT32 versionLength); +_declspec(dllexport) INT32 mmGetMac(mmMacInfo **list, INT32 *count); +_declspec(dllexport) INT32 mmGetMacFree(mmMacInfo *list, INT32 count); +_declspec(dllexport) INT32 mmGetCpuInfo(mmCpuDesc **cpuInfo, INT32 *count); +_declspec(dllexport) INT32 mmCpuInfoFree(mmCpuDesc *cpuInfo, INT32 count); +_declspec(dllexport) INT32 + mmCreateProcess(const CHAR *fileName, const mmArgvEnv *env, const char *stdoutRedirectFile, mmProcess *id); + +_declspec(dllexport) INT32 + mmCreateTaskWithThreadAttr(mmThread *threadHandle, const mmUserBlock_t *funcBlock, const mmThreadAttr *threadAttr); + +#ifdef __cplusplus +#if __cplusplus +} +#endif /* __cpluscplus */ +#endif // __cpluscplus + +#endif // MMPA_WIN_MMPA_WIN_H_ diff --git a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h b/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h deleted file mode 100644 index 8a661e4290d30ff1d432f0bae2f71bdf8cbc9991..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h +++ /dev/null @@ -1,74 +0,0 @@ -/** - * @file buffer_fusion_constant.h - * - * Copyright(C), 2017 - 2017, Huawei Tech. Co., Ltd. ALL RIGHTS RESERVED. - * - * @brief define the pattern. - * - * @author Huawei - * - * @version 1.0 - * - */ -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ -#include -#include - -namespace fe { -// add the op pattern -static const std::string TBE_PATTERN_INPUT_NODE = "InputData"; -static const std::string TBE_PATTERN_OP_TYPE_ANY = "OpTypeAny"; -static const std::string TBE_PATTERN_OUTPUT_NODE = "OutputData"; -static const std::string OP_PATTERN_ELEMWISE = "ElemWise"; -static const std::string OP_PATTERN_COMMONREDUCE = "CommReduce"; -static const std::string OP_PATTERN_SEGMENT = "Segment"; -static const std::string OP_PATTERN_MAXPOOL = "MaxPool"; -static const std::string OP_PATTERN_CONV = "Convolution"; -static const std::string OP_PATTERN_MATMUL = "Matmul"; -static const std::string OP_PATTERN_BNUPDATE = "bn_update"; -static const std::string OP_PATTERN_BNREDUCE = "bn_reduce"; -static const std::string OP_PATTERN_CONV_BACKPROP_INPUT = - "Conv2d_backprop_input"; -static const std::string OP_PATTERN_DEPTHWISE_CONV = "DepthwiseConvolution"; -static const std::string OP_PATTERN_QUANT = "quant"; -static const std::string OP_PATTERN_DEQUANT = "dequant"; -static const std::string OP_PATTERN_REQUANT = "requant"; -static const std::string OP_PATTERN_POOL2D = "Pool2d"; -static const std::string OP_PATTERN_ANTIQUANT = "anti_quant"; -static const std::string OP_PATTERN_STRIDED_WRITE = "strided_write"; -static const std::string OP_PATTERN_STRIDED_READ = "strided_read"; -static const std::string OP_PATTERN_AIPP = "aipp"; -static const std::string OP_PATTERN_CONFUSION_TRANSPOSE = "confusiontranspose"; -static const std::string OP_PATTERN_DEQUANTS16 = "dequant_s16"; -static const std::string OP_PATTERN_REQUANTS16 = "requant_s16"; -static const std::string 
OP_PATTERN_READ_SELECT = "read_select"; -static const std::string OP_PATTERN_WRITE_SELECT = "write_select"; - -static const std::vector OP_PATTERN_VEC{ - OP_PATTERN_ELEMWISE, - OP_PATTERN_COMMONREDUCE, - OP_PATTERN_SEGMENT, - OP_PATTERN_MAXPOOL, - OP_PATTERN_CONV, - OP_PATTERN_MATMUL, - OP_PATTERN_BNUPDATE, - OP_PATTERN_BNREDUCE, - OP_PATTERN_CONV_BACKPROP_INPUT, - OP_PATTERN_DEPTHWISE_CONV, - OP_PATTERN_QUANT, - OP_PATTERN_DEQUANT, - OP_PATTERN_REQUANT, - OP_PATTERN_POOL2D, - OP_PATTERN_ANTIQUANT, - OP_PATTERN_STRIDED_WRITE, - OP_PATTERN_STRIDED_READ, - OP_PATTERN_AIPP, - OP_PATTERN_CONFUSION_TRANSPOSE, - OP_PATTERN_DEQUANTS16, - OP_PATTERN_REQUANTS16, - OP_PATTERN_READ_SELECT, - OP_PATTERN_WRITE_SELECT}; -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_CONSTANT_H_ diff --git a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h b/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h deleted file mode 100644 index d5dc2a834a97d5eea03fe6cfa51d48ccd59cfbd6..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ - -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_constant.h" -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" -#include -#include -#include -#include - -namespace fe { -enum BufferFusionPassType { - BUILT_IN_AI_CORE_BUFFER_FUSION_PASS, - BUILT_IN_VECTOR_CORE_BUFFER_FUSION_PASS, - CUSTOM_AI_CORE_BUFFER_FUSION_PASS, - CUSTOM_VECTOR_CORE_BUFFER_FUSION_PASS, - BUFFER_FUSION_PASS_TYPE_RESERVED -}; -class BufferFusionPassBase { -public: - explicit BufferFusionPassBase(); - virtual ~BufferFusionPassBase(); - virtual std::vector DefinePatterns() = 0; - virtual Status GetFusionNodes(const BufferFusionMapping &mapping, - vector &fusionNodes); - std::vector GetMatchedNodes(const BufferFusionMapping &mapping); - std::vector - GetMatchedNodesByDescName(const std::string &descName, - const BufferFusionMapping &mapping); - ge::NodePtr GetMatchedHeadNode(const std::vector &matchedNodes); -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_BASE_H_ diff --git a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h b/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h deleted file mode 100644 index fb7f70eb5242b67c1281f84a6b5ada8d752b5d0b..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pass_registry.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - * @file buffer_fusion_pass_registry.h - * - * Copyright(C), 2017 - 2017, Huawei Tech. Co., Ltd. ALL RIGHTS RESERVED. 
- * - * @brief provide interface: BufferFusionByPass - * - * @author Huawei - * - * @version 1.0 - * - */ -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ -#include "register/graph_optimizer/buffer_fusion/buffer_fusion_pass_base.h" -#include -#include -#include -#include - -namespace fe { -class BufferFusionPassRegistry { -public: - using CreateFn = BufferFusionPassBase *(*)(); - ~BufferFusionPassRegistry(); - - static BufferFusionPassRegistry &GetInstance(); - - void RegisterPass(const BufferFusionPassType &pass_type, - const std::string &pass_name, CreateFn create_fn); - - std::map - GetCreateFnByType(const BufferFusionPassType &pass_type); - -private: - BufferFusionPassRegistry(); - class BufferFusionPassRegistryImpl; - std::unique_ptr impl_; -}; - -class BufferFusionPassRegistrar { -public: - BufferFusionPassRegistrar(const BufferFusionPassType &pass_type, - const std::string &pass_name, - BufferFusionPassBase *(*create_fun)()); - ~BufferFusionPassRegistrar() {} -}; - -#define REGISTER_BUFFER_FUSION_PASS(pass_name, pass_type, pass_class) \ - REGISTER_BUFFER_FUSION_PASS_UNIQ_HELPER(__COUNTER__, pass_name, pass_type, \ - pass_class) - -#define REGISTER_BUFFER_FUSION_PASS_UNIQ_HELPER(ctr, pass_name, pass_type, \ - pass_class) \ - REGISTER_BUFFER_FUSION_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) - -#define REGISTER_BUFFER_FUSION_PASS_UNIQ(ctr, pass_name, pass_type, \ - pass_class) \ - static ::fe::BufferFusionPassRegistrar register_buffer_fusion_pass##ctr \ - __attribute__((unused)) = ::fe::BufferFusionPassRegistrar( \ - pass_type, pass_name, []() -> ::fe::BufferFusionPassBase * { \ - return new (std::nothrow) pass_class(); \ - }) - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PASS_REGISTRY_H_ diff --git a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h b/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h deleted file mode 100644 index 575b55664fabb7c1a787ed54e0e2bab4b603d172..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/buffer_fusion/buffer_fusion_pattern.h +++ /dev/null @@ -1,74 +0,0 @@ -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/graph_utils.h" -#include -#include -#include - -namespace fe { -static const int TBE_FUSION_OP_NUM_MAX = 5; -static const int TBE_PATTERN_NUM_MAX = 5; -static const int TBE_PATTERN_NUM_NONE = 0; -static const int TBE_PATTERN_NUM_DEFAULT = 1; -static const int TBE_OUTPUT_BRANCH_SINGLE = 1; -static const int TBE_OUTPUT_BRANCH_MULTI = 2; -static const int TBE_PATTERN_GROUPID_INVALID = -1; - -struct BufferFusionOpDesc { - std::string descName; // description name - std::vector types; // description type - std::vector inputs; // all input op - std::vector outputs; // all output op - int64_t outBranchType; // out desc type, 1:single, 2: multi - int64_t repeateMin; // opdesc min repeat num - int64_t repeateMax; // opdesc max repeat num - int64_t repeateCurr; // opdesc current repeat num - bool matchStatus; - int64_t groupId; // record desc groupid, need one desc matched at least in - // the same group -}; -using BufferFusionMapping = - std::map>; -using BufferFusionMappings = std::vector; - -class BufferFusionPattern { -public: - explicit BufferFusionPattern(std::string name = "", - int64_t opMaxCount = 
TBE_FUSION_OP_NUM_MAX); - - virtual ~BufferFusionPattern(); - - BufferFusionPattern &AddOpDesc(const std::string &descName, - const std::vector &patterns, - int64_t repeatMin = TBE_PATTERN_NUM_DEFAULT, - int64_t repeatMax = TBE_PATTERN_NUM_DEFAULT, - int64_t groupId = TBE_PATTERN_GROUPID_INVALID); - - BufferFusionPattern &SetOutputs(const std::string &descName, - const std::vector &patterns, - int64_t relation = TBE_OUTPUT_BRANCH_SINGLE); - - BufferFusionPattern &SetHead(const std::vector &opPatterns); - - std::string GetName(); - int64_t GetOpMaxCount(); - std::vector GetOpDescs(); - bool GetOutputs(BufferFusionOpDesc *opDesc, - std::vector &outputs); - std::vector GetHead(); - int64_t GetErrorCnt(); - void InitRepeatCurr(const BufferFusionPattern &pattern); - -private: - BufferFusionOpDesc *GetOpDesc(const std::string &descName); - std::string name_; - int64_t opMaxCount_; - std::vector ops_; - std::map opMap_; - std::vector head_; - int64_t errorCount_; -}; -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_BUFFER_FUSION_PATTERN_H_ \ No newline at end of file diff --git a/inc/register/graph_optimizer/fusion_common/graph_pass_util.h b/inc/register/graph_optimizer/fusion_common/graph_pass_util.h deleted file mode 100644 index c01000f8d1c709e6d55d4f367e9ebb608c79b3ad..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/fusion_common/graph_pass_util.h +++ /dev/null @@ -1,250 +0,0 @@ -/** - * @file graph_pass.h - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. - * - * @brief define graph pass, which provides two interface: 1. run pass; - * 2. record op names before fusion - * - * @author Huawei - * - * @version 1.0 - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ -#include "graph/compute_graph.h" -#include "graph/debug/ge_attr_define.h" -#include "graph/utils/attr_utils.h" -#include "graph/utils/node_utils.h" -#include "graph/utils/type_utils.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -#include -#include -#include -#include -#include - -namespace fe { -using NodeTypeMap = std::unordered_map>; -using NodeTypeMapPtr = std::shared_ptr; -struct NodeMapInfo { - int64_t runCount; - NodeTypeMapPtr nodeTypeMap; -}; -using NodeMapInfoPtr = std::shared_ptr; - -class GraphPassUtil { -public: - /** set outputdesc attr for data dump - * - * @param originIndex,usually is origin node output index - * - * @param fusionIndex,usually is fusion node output index - * - * @param originNode, usually is origin node - * - * @param fusionNode, usually is fusion node - */ - static void SetOutputDescAttr(uint32_t originIndex, uint32_t fusionIndex, - ge::NodePtr originNode, - ge::NodePtr fusionNode) { - if (fusionNode->GetOpDesc() == nullptr) { - return; - } - - auto fusionNodeOutputDesc = - fusionNode->GetOpDesc()->MutableOutputDesc(fusionIndex); - if (fusionNodeOutputDesc == nullptr) { - return; - } - if (originNode->GetOpDesc() == nullptr) { - return; - } - auto originNodeOutputDesc = - originNode->GetOpDesc()->MutableOutputDesc(originIndex); - if (originNodeOutputDesc == nullptr) { - return; - } - - std::vector originalNames; - if (ge::AttrUtils::GetListStr(originNode->GetOpDesc(), - ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, - originalNames) && - originalNames.size() > 0) { - std::string originalName; - if (ge::AttrUtils::GetStr(originNodeOutputDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, - originalName)) { - 
(void)ge::AttrUtils::SetStr(fusionNodeOutputDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, - originalName); - - std::int64_t originOutputIndex = 0; - if (ge::AttrUtils::GetInt(originNodeOutputDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, - originOutputIndex)) { - (void)ge::AttrUtils::SetInt( - fusionNodeOutputDesc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, - originOutputIndex); - } - - ge::DataType originDataType = - GetDataDumpOriginDataType(originNodeOutputDesc); - if (originDataType != ge::DT_UNDEFINED) { - SetDataDumpOriginDataType(originDataType, fusionNodeOutputDesc); - } - ge::Format originFormat = GetDataDumpOriginFormat(originNodeOutputDesc); - if (originFormat != ge::FORMAT_RESERVED) { - SetDataDumpOriginFormat(originFormat, fusionNodeOutputDesc); - } - } - } else { - (void)ge::AttrUtils::SetStr(fusionNodeOutputDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_NAME, - originNode->GetName()); - (void)ge::AttrUtils::SetInt(fusionNodeOutputDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_OUTPUT_INDEX, - originIndex); - SetDataDumpOriginDataType(originNodeOutputDesc->GetOriginDataType(), - fusionNodeOutputDesc); - SetDataDumpOriginFormat(originNodeOutputDesc->GetOriginFormat(), - fusionNodeOutputDesc); - } - } - - /** get origin format for data dump - * - * @param tensorDesc,usually is outputDesc - * - * @return format of this tensorDesc - */ - static ge::Format GetDataDumpOriginFormat(ge::GeTensorDescPtr tensorDesc) { - std::string originFormatStr; - if (!ge::AttrUtils::GetStr(tensorDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_FORMAT, - originFormatStr)) { - // Can not get the certificate and it's not set,return directly - return ge::FORMAT_RESERVED; - } - if (originFormatStr == "RESERVED") { - return ge::FORMAT_RESERVED; - } - return ge::TypeUtils::SerialStringToFormat(originFormatStr); - } - - /** set origin format for data dump - * - * @param origin format - * - * @param tensorDesc,usually is outputDesc - */ - static void SetDataDumpOriginFormat(ge::Format originFormat, - ge::GeTensorDescPtr tensorDesc) { - std::string originFormatStr = "RESERVED"; - if (originFormat != ge::FORMAT_RESERVED) { - originFormatStr = ge::TypeUtils::FormatToSerialString(originFormat); - } - (void)ge::AttrUtils::SetStr( - tensorDesc, ge::ATTR_NAME_DATA_DUMP_ORIGIN_FORMAT, originFormatStr); - } - - /** set origin datatype for data dump - * - * @param origin datatype - * - * @param tensorDesc,usually is outputDesc - */ - static void SetDataDumpOriginDataType(ge::DataType originDataType, - ge::GeTensorDescPtr tensorDesc) { - std::string originDataTypeStr = "RESERVED"; - if (originDataType != ge::DT_UNDEFINED) { - originDataTypeStr = ge::TypeUtils::DataTypeToSerialString(originDataType); - } - (void)ge::AttrUtils::SetStr(tensorDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_DATA_TYPE, - originDataTypeStr); - } - - /** get origin datatype for data dump - * - * @param tensorDesc,usually is outputDesc - * - * @return format of this tensorDesc - */ - static ge::DataType - GetDataDumpOriginDataType(ge::GeTensorDescPtr tensorDesc) { - std::string originDataTypeStr; - if (!ge::AttrUtils::GetStr(tensorDesc, - ge::ATTR_NAME_DATA_DUMP_ORIGIN_DATA_TYPE, - originDataTypeStr)) { - return ge::DT_UNDEFINED; - } - if (originDataTypeStr == "RESERVED") { - return ge::DT_UNDEFINED; - } - return ge::TypeUtils::SerialStringToDataType(originDataTypeStr); - } - - static void AddNodeFromOpTypeMap(NodeMapInfoPtr &nodeMapInfo, - ge::NodePtr &nodePtr) { - if (nodeMapInfo == nullptr || nodePtr == nullptr) { - return; - } - NodeTypeMapPtr nodeTypeMap = 
nodeMapInfo->nodeTypeMap; - string realOpType = ge::NodeUtils::GetNodeType(*nodePtr); - auto iter = nodeTypeMap->find(realOpType); - if (iter != nodeTypeMap->end()) { - iter->second.insert(nodePtr); - } else { - nodeTypeMap->emplace( - std::make_pair(realOpType, std::unordered_set{nodePtr})); - } - } - - static Status GetOpTypeMapToGraph(NodeMapInfoPtr &nodeMapInfo, - const ge::ComputeGraph &graph) { - nodeMapInfo = graph.TryGetExtAttr("NodeMapInfo", nodeMapInfo); - if (nodeMapInfo == nullptr) { - return FAILED; - } - return SUCCESS; - } - - static void RecordOriginalNames(std::vector originalNodes, - ge::NodePtr node) { - // 1. get the originalNames - std::vector originalNames; - for (ge::NodePtr originalNode : originalNodes) { - if (originalNode == nullptr || originalNode->GetOpDesc() == nullptr) { - return; - } - - ge::OpDescPtr originOpDescPtr = originalNode->GetOpDesc(); - std::vector names_tmp; - bool isHasAttr = ge::AttrUtils::GetListStr( - originOpDescPtr, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, names_tmp); - if (isHasAttr) { - for (const auto &node_name : names_tmp) { - if (!node_name.empty()) { - originalNames.push_back(node_name); - } - } - } else { - originalNames.push_back(originOpDescPtr->GetName()); - } - } - - // 2. set the dump attr - if (node == nullptr || node->GetOpDesc() == nullptr) { - return; - } - ge::OpDescPtr nodeOpDescPtr = node->GetOpDesc(); - (void)ge::AttrUtils::SetListStr( - nodeOpDescPtr, ge::ATTR_NAME_DATA_DUMP_ORIGIN_OP_NAMES, originalNames); - } -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_UTIL_H_ diff --git a/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h b/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h deleted file mode 100644 index 9bfa73b40ff432f9d34521070c05eb8bf2643c89..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/fusion_common/pattern_fusion_base_pass.h +++ /dev/null @@ -1,106 +0,0 @@ -/** - * @file pattern_fusion_base_pass.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. 
- * - * @brief define fusion pass based on pattern - * - * @author Huawei - * - * @version 1.0 - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ - -#include "common/opskernel/ops_kernel_info_store.h" -#include "register/graph_optimizer/graph_fusion/fusion_pattern.h" -#include "register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h" -#include "register/graph_optimizer/graph_fusion/graph_pass.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" -#include -#include -#include -#include -#include - -using std::initializer_list; -using std::map; -using std::string; -using std::vector; - -using namespace std; - -namespace fe { -using OpsKernelInfoStorePtr = std::shared_ptr; -class PatternFusionBasePassImpl; -using PatternFusionBasePassImplPtr = std::shared_ptr; - -/** Pass based on pattern - * @ingroup FUSION_PASS_GROUP - * @note New virtual methods should be append at the end of this class - */ -class PatternFusionBasePass : public GraphPass { -public: - using OpDesc = FusionPattern::OpDesc; - using Mapping = map, vector>; - using Mappings = vector; - std::map originOpAnchorsMap_; - - PatternFusionBasePass(); - virtual ~PatternFusionBasePass(); - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - Status Run(ge::ComputeGraph &graph) override; - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @param [opsKernelInfoStorePtr, OP info kernel instance - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph, - OpsKernelInfoStorePtr opsKernelInfoStorePtr); - -protected: - virtual vector DefinePatterns() = 0; - virtual Status Fusion(ge::ComputeGraph &graph, Mapping &mapping, - vector &newNodes) = 0; - - std::vector GetNodesFromMapping(const Mapping &mapping); - ge::NodePtr GetNodeFromMapping(const string &id, const Mapping &mapping); - - void RecordOutputAnchorMap(ge::NodePtr outputNode); - Status SetDataDumpAttr(vector &originalNodes, - vector &fusNodes); - - bool CheckOpSupported(const ge::OpDescPtr &opDescPtr); - -private: - /** match all nodes in graph according to pattern - * - * @param pattern fusion pattern defined - * @param mappings match result - * @return SUCCESS, successfully add edge - * @return FAILED, fail - */ - bool MatchAll(ge::ComputeGraph &graph, const FusionPattern &pattern, - Mappings &mappings); - - Status RunOnePattern(ge::ComputeGraph &graph, const FusionPattern &pattern, - bool &changed); // lint !e148 - - /** Internal implement class ptr */ - std::shared_ptr patternFusionBasePassImplPtr_; -}; -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_PATTERN_FUSION_BASE_PASS_H_ diff --git a/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h b/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h deleted file mode 100644 index a954c053e00abf1a919820305ff98b087edb180b..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_fusion/fusion_pass_manager/fusion_pass_registry.h +++ /dev/null @@ -1,63 +0,0 @@ -/** - * @file fusion_pass_registry.h - * - * Copyright(C), 2017 - 
2017, Huawei Tech. Co., Ltd. ALL RIGHTS RESERVED. - * - * @brief provide interface: GraphFusionByPass - * - * @author Huawei - * - * @version 1.0 - * - */ -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ - -#include "register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h" -#include -#include -#include -#include - -namespace fe { -class FusionPassRegistry { -public: - using CreateFn = GraphPass *(*)(); - ~FusionPassRegistry(); - - static FusionPassRegistry &GetInstance(); - - void RegisterPass(const GraphFusionPassType &pass_type, - const std::string &pass_name, CreateFn create_fn); - - std::map - GetCreateFnByType(const GraphFusionPassType &pass_type); - -private: - FusionPassRegistry(); - class FusionPassRegistryImpl; - std::unique_ptr impl_; -}; - -class FusionPassRegistrar { -public: - FusionPassRegistrar(const GraphFusionPassType &pass_type, - const std::string &pass_name, GraphPass *(*create_fun)()); - ~FusionPassRegistrar() {} -}; - -#define REGISTER_PASS(pass_name, pass_type, pass_class) \ - REGISTER_PASS_UNIQ_HELPER(__COUNTER__, pass_name, pass_type, pass_class) - -#define REGISTER_PASS_UNIQ_HELPER(ctr, pass_name, pass_type, pass_class) \ - REGISTER_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) - -#define REGISTER_PASS_UNIQ(ctr, pass_name, pass_type, pass_class) \ - static ::fe::FusionPassRegistrar register_fusion_pass##ctr \ - __attribute__((unused)) = ::fe::FusionPassRegistrar( \ - pass_type, pass_name, []() -> ::fe::GraphPass * { \ - return new (std::nothrow) pass_class(); \ - }) - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PASS_REGISTRY_H_ diff --git a/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h b/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h deleted file mode 100644 index 697f5f3e24c735119fe2a154fb84c9b216a192f2..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_fusion/fusion_pattern.h +++ /dev/null @@ -1,172 +0,0 @@ -/** - * @file pattern_fusion_base_pass.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. 
- * - * @brief define fusion pass based on pattern - * - * @author Huawei - * - * @version 1.0 - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ -#include -#include -#include -#include -#include - -using std::initializer_list; -using std::map; -using std::string; -using std::vector; - -using namespace std; - -namespace fe { - -/** Fusion pattern - * @ingroup FUSION_PASS_GROUP - * Describe Pattern of Ops waiting for fusion(Op type, etc) - */ -class FusionPattern { -public: - struct OpDesc; - using OpDescPtr = std::shared_ptr; - /** - * @ingroup fe - * @brief description of Ops - */ - struct OpDesc { - string id; // Identifier - std::vector types; // the Op types of Ops - std::vector inputs; // all input Ops - bool repeatable; // flag to show if match multiple Ops or not - bool is_output; // flag to show if the op is output node - }; - -public: - explicit FusionPattern(string name = ""); - ~FusionPattern(); - - /** set pattern name - * - * @param name pattern name - * @return FusionPattern - */ - FusionPattern &SetName(const string &name); - - /** add Op description with unknown number of args - * - * @param id pattern id - * @param types op type list - * @return FusionPattern - */ - FusionPattern &AddOpDesc(const string &id, - const initializer_list &types = {}); - - /** add Op description with vector - * - * @param id pattern id - * @param types op type list - * - * @return FusionPattern - */ - FusionPattern &AddOpDesc(const string &id, const vector &types); - - /** set input Ops with unknown number of args - * - * @param id pattern id - * - * @param inputIds inputs to id op - * - * @return FusionPattern - */ - FusionPattern &SetInputs(const string &id, - const initializer_list &inputIds); - - /** set input Ops with unknown number of args - * - * @param id pattern id - * - * @param inputIds inputs to id op - * - * @return FusionPattern - */ - FusionPattern &SetInputs(const string &id, const vector &inputIds); - - /** set output Op - * - * @param id pattern id - * - * @return FusionPattern - */ - FusionPattern &SetOutput(const string &id); - - /** build pattern and check if error exists - * - * @return True or False - */ - bool Build(); - - /** get pattern name - * - * @param id pattern id - * - * @return fusion pattern name - */ - const string &GetName() const; - - /** get the OpDesc of input Ops (const) - * - * @param op_desc op_desc for getting inputs - * - * @return op_desc's iniput opdesc list - */ - static const vector> * - GetInputs(std::shared_ptr op_desc); - - /** get the OpDesc of output Op - * - * @return pattern's output opdesc list - */ - const std::shared_ptr GetOutput() const; - - /** print pattern - * - */ - void Dump() const; - - void GetOpDescList(vector> &op_desc_list); - - /** get OpDesc based on ID, return nullptr if failed - * - * @param id pattern id - * - * @return pattern's output opdesc list - */ - std::shared_ptr GetOpDesc(const string &id) const; - -private: - FusionPattern(const FusionPattern &) = default; - FusionPattern &operator=(const FusionPattern &) = default; - - void SetError(); - -private: - string name_; - - vector> ops_; - - map> op_map_; - - std::shared_ptr output_; - - bool has_error_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_FUSION_PATTERN_H_ diff --git a/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h b/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h deleted file mode 100644 index 
68e07605ba096e22e51fa371e51fe9f9b8bee682..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_fusion/graph_fusion_pass_base.h +++ /dev/null @@ -1,113 +0,0 @@ -/** - * @file custom_pattern_fusion_base_pass.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. - * - * @brief custom_pattern_fusion_base_pass - * - * @author Huawei - * - * @version 1.0 - * - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ - -#include -#include -#include -#include -#include - -#include "register/graph_optimizer/graph_fusion/fusion_pattern.h" -#include "register/graph_optimizer/graph_fusion/graph_pass.h" - -using std::initializer_list; -using std::map; -using std::string; -using std::vector; - -using namespace std; - -namespace fe { -enum GraphFusionPassType { - BUILT_IN_GRAPH_PASS = 0, - BUILT_IN_VECTOR_CORE_GRAPH_PASS, - CUSTOM_AI_CORE_GRAPH_PASS, - CUSTOM_VECTOR_CORE_GRAPH_PASS, - SECOND_ROUND_BUILT_IN_GRAPH_PASS, - GRAPH_FUSION_PASS_TYPE_RESERVED, -}; -class PatternFusionBasePassImpl; -using PatternFusionBasePassImplPtr = std::shared_ptr; - -/** Pass based on pattern - * @ingroup FUSION_PASS_GROUP - * @note New virtual methods should be append at the end of this class - */ -class GraphFusionPassBase : public GraphPass { -public: - using OpDesc = FusionPattern::OpDesc; - using Mapping = map, vector>; - using Mappings = vector; - - GraphFusionPassBase(); - virtual ~GraphFusionPassBase(); - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - Status Run(ge::ComputeGraph &graph) override; - -protected: - /** define pattern - * - * @return NA - */ - virtual vector DefinePatterns() = 0; - - /** do fusion according to nodes matched - * - * @param graph the graph waiting for pass level optimization - * @param newNodes fusion result node(s) - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Fusion(ge::ComputeGraph &graph, Mapping &mapping, - vector &newNodes) = 0; // lint !e148 - - /** get nodes from matched result - * - * @param mapping match result - * @return nodes result - */ - static ge::NodePtr GetNodeFromMapping(const string &id, - const Mapping &mapping); - -private: - /** match all nodes in graph according to pattern - * - * @param pattern fusion pattern defined - * @param mappings match result - * @return SUCCESS, successfully add edge - * @return FAILED, fail - */ - bool MatchAll(ge::ComputeGraph &graph, const FusionPattern &pattern, - Mappings &mappings); - - Status RunOnePattern(ge::ComputeGraph &graph, const FusionPattern &pattern, - bool &changed); // lint !e148 - - /** Internal implement class ptr */ - std::shared_ptr patternFusionBasePassImplPtr_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_FUSION_PASS_BASE_H_ diff --git a/inc/register/graph_optimizer/graph_fusion/graph_pass.h b/inc/register/graph_optimizer/graph_fusion/graph_pass.h deleted file mode 100644 index 87e219101d38dcc55dc8b8a788c503b001eb4865..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_fusion/graph_pass.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * @file graph_pass.h - * Copyright (c) Huawei Technologies Co., 
Ltd. 2019-2019. All rights reserved. - * - * @brief define graph pass, which provides two interface: 1. run pass; - * 2. record op names before fusion - * - * @author Huawei - * - * @version 1.0 - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ - -#include "register/graph_optimizer/graph_fusion/pass.h" -#include - -namespace fe { - -/** graph pass - * @ingroup GRAPH_PASS_GROUP - * graph level pass - */ -class GraphPass : public Pass { -public: - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph) = 0; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_GRAPH_PASS_H_ diff --git a/inc/register/graph_optimizer/graph_fusion/pass.h b/inc/register/graph_optimizer/graph_fusion/pass.h deleted file mode 100644 index eb31e1d1b474dba6352570907bc2bf752b8f57e1..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_fusion/pass.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * @file pass.h - * Copyright (c) Huawei Technologies Co., Ltd. 2019-2019. All rights reserved. - * - * @brief define pass - * - * @author Huawei - * - * @version 1.0 - */ -/** @defgroup FUSION_PASS_GROUP Fusion Pass Interface */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ -#define INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ - -#include "graph/compute_graph.h" -#include "register/graph_optimizer/graph_optimize_register_error_codes.h" - -namespace fe { - -/** fusion pass - * @ingroup GRAPH_PASS_GROUP - * network level pass - */ -template class Pass { -public: - virtual ~Pass() {} - - /** execute pass - * - * @param [in] graph, the graph waiting for pass level optimization - * @return SUCCESS, successfully optimized the graph by the pass - * @return NOT_CHANGED, the graph did not change - * @return FAILED, fail to modify graph - */ - virtual Status Run(ge::ComputeGraph &graph) = 0; - - void SetName(const string &name) { name_ = name; } - - string GetName() { return name_; } - -private: - string name_; -}; - -} // namespace fe - -#endif // INC_REGISTER_GRAPH_OPTIMIZER_PASS_H_ diff --git a/inc/register/graph_optimizer/graph_optimize_register_error_codes.h b/inc/register/graph_optimizer/graph_optimize_register_error_codes.h deleted file mode 100644 index d88f1275e16f4fede3696dc41b3687d3f1ded3e2..0000000000000000000000000000000000000000 --- a/inc/register/graph_optimizer/graph_optimize_register_error_codes.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * @file graph_optimize_register_error_codes.h - * - * Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. 
- * - * @brief fe_error_codes - * - * @author Huawei - * - * @version 1.0 - * - */ - -#ifndef INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ -#define INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ - -#include -#include - -/** Assigned SYS ID */ -const uint8_t SYSID_FE = 3; - -/** Common module ID */ -const uint8_t FE_MODID_COMMON = 50; - -namespace fe { - -/** FE error code definiton Macro -* Build error code -*/ -#define FE_DEF_ERRORNO(sysid, modid, name, value, desc) \ - static constexpr fe::Status name = \ - (((((uint32_t)(0xFF & ((uint8_t)(sysid)))) << 24) | \ - (((uint32_t)(0xFF & ((uint8_t)(modid)))) << 16)) | \ - (0xFFFF & ((uint16_t)(value)))); - -using Status = uint32_t; - -#define FE_DEF_ERRORNO_COMMON(name, value, desc) \ - FE_DEF_ERRORNO(SYSID_FE, FE_MODID_COMMON, name, value, desc) - -using Status = uint32_t; - -FE_DEF_ERRORNO(0, 0, SUCCESS, 0, "success"); -FE_DEF_ERRORNO(0xFF, 0xFF, FAILED, 0xFFFF, "failed"); -FE_DEF_ERRORNO_COMMON(NOT_CHANGED, 201, "The nodes of the graph not changed."); -FE_DEF_ERRORNO_COMMON(PARAM_INVALID, 1, "Parameter's invalid!"); - -} // namespace fe -#endif // INC_REGISTER_GRAPH_OPTIMIZE_REGISTER_ERROR_CODES_H_ diff --git a/inc/register/op_kernel_registry.h b/inc/register/op_kernel_registry.h deleted file mode 100644 index 5fed8960e0615cb520045625cdf799859412be7a..0000000000000000000000000000000000000000 --- a/inc/register/op_kernel_registry.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_OP_KERNEL_REGISTRY_H_ -#define INC_REGISTER_OP_KERNEL_REGISTRY_H_ -#include -#include -#include "register/register_types.h" -#include "register.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpKernelRegistry { - public: - using CreateFn = HostCpuOp* (*)(); - ~OpKernelRegistry(); - - static OpKernelRegistry& GetInstance() { - static OpKernelRegistry instance; - return instance; - } - - bool IsRegistered(const std::string &op_type); - - void RegisterHostCpuOp(const std::string &op_type, CreateFn create_fn); - - std::unique_ptr CreateHostCpuOp(const std::string &op_type); - - private: - OpKernelRegistry(); - class OpKernelRegistryImpl; - /*lint -e148*/ - std::unique_ptr impl_; -}; -} // namespace ge - -#endif // INC_REGISTER_OP_KERNEL_REGISTRY_H_ diff --git a/inc/register/op_registry.h b/inc/register/op_registry.h deleted file mode 100644 index 1dc14b8b6f783787e6df0a134f5d6ac9e27fff0c..0000000000000000000000000000000000000000 --- a/inc/register/op_registry.h +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_OP_REGISTRY_H_ -#define INC_REGISTER_OP_REGISTRY_H_ - -#include -#include -#include -#include -#include - -#include "register/register.h" - -namespace domi { -enum RemoveInputType { - OMG_MOVE_TYPE_DTYPE = 0, - OMG_MOVE_TYPE_VALUE, - OMG_MOVE_TYPE_SHAPE, - OMG_MOVE_TYPE_FORMAT, - OMG_MOVE_TYPE_AXIS, - OMG_MOVE_TYPE_SCALAR_VALUE, - OMG_REMOVE_TYPE_WITH_COND = 1000, - OMG_REMOVE_INPUT_WITH_ORIGINAL_TYPE, - OMG_INPUT_REORDER, -}; - -struct RemoveInputConfigure { - int inputIdx = INT_MAX; - std::string attrName; - RemoveInputType moveType; - bool attrValue = false; - std::string originalType; - std::vector input_order; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY OpRegistry { - public: - static OpRegistry *Instance(); - - std::vector registrationDatas; - - bool Register(const OpRegistrationData ®_data); - - domi::ImplyType GetImplyType(const std::string &op_type); - - void GetOpTypeByImplyType(std::vector &vec_op_type, const domi::ImplyType &imply_type); - - domi::ParseParamFunc GetParseParamFunc(const std::string &op_type, const std::string &ori_type); - - domi::ParseParamByOpFunc GetParseParamByOperatorFunc(const std::string &ori_type); - - domi::FusionParseParamFunc GetFusionParseParamFunc(const std::string &op_type, const std::string &ori_type); - - domi::ParseSubgraphFunc GetParseSubgraphPostFunc(const std::string &op_type); - - domi::ImplyType GetImplyTypeByOriOpType(const std::string &ori_optype); - - const std::vector &GetRemoveInputConfigure(const std::string &ori_optype) const; - - bool GetOmTypeByOriOpType(const std::string &ori_optype, std::string &om_type); - - private: - std::unordered_map op_run_mode_map_; - std::unordered_map op_parse_params_fn_map_; - std::unordered_map parse_params_by_op_func_map_; - std::unordered_map fusion_op_parse_params_fn_map_; - std::unordered_map op_types_to_parse_subgraph_post_func_; - std::unordered_map> remove_input_configure_map_; - std::unordered_map origin_type_to_om_type_; -}; -} // namespace domi -#endif // INC_REGISTER_OP_REGISTRY_H_ diff --git a/inc/register/op_tiling.h b/inc/register/op_tiling.h deleted file mode 100644 index e9d19f942a835c10311dbce7f8337add1aeda655..0000000000000000000000000000000000000000 --- a/inc/register/op_tiling.h +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef INC_OP_TILING_H_
-#define INC_OP_TILING_H_
-
-#include "external/register/register_types.h"
-#include "graph/debug/ge_attr_define.h"
-#include "graph/node.h"
-
-#include <functional>
-#include <map>
-#include <sstream>
-#include <string>
-#include <tuple>
-#include <vector>
-#include <nlohmann/json.hpp>
-#include "graph/node.h"
-
-#define REGISTER_OP_TILING_FUNC(optype, opfunc) \
-    REGISTER_OP_TILING_FUNC_UNIQ_HELPER(optype, opfunc, __COUNTER__)
-
-#define REGISTER_OP_TILING_FUNC_UNIQ_HELPER(optype, opfunc, counter) \
-    REGISTER_OP_TILING_FUNC_UNIQ(optype, opfunc, counter)
-
-#define REGISTER_OP_TILING_FUNC_UNIQ(optype, opfunc, counter) \
-    static OpTilingInterf g_##optype##TilingInterf##counter(#optype, opfunc)
-
-namespace optiling {
-
-enum TensorArgType {
-    TA_NONE,
-    TA_SINGLE,
-    TA_LIST,
-};
-
-
-using ByteBuffer = std::stringstream;
-
-struct TeOpTensor {
-    std::vector<int64_t> shape;
-    std::vector<int64_t> ori_shape;
-    std::string format;
-    std::string ori_format;
-    std::string dtype;
-    std::map<std::string, std::string> attrs;
-};
-
-
-struct TeOpTensorArg {
-    TensorArgType arg_type;
-    std::vector<TeOpTensor> tensor;
-};
-
-struct OpRunInfo {
-    uint32_t block_dim;
-    std::vector<int64_t> workspaces;
-    ByteBuffer tiling_data;
-};
-
-
-using TeOpAttrArgs = std::vector<std::string>;
-using TeConstTensorData = std::tuple<const uint8_t*, size_t, ge::Tensor>;
-
-struct TeOpParas {
-    std::vector<TeOpTensorArg> inputs;
-    std::vector<TeOpTensorArg> outputs;
-    std::map<std::string, TeConstTensorData> const_inputs;
-    TeOpAttrArgs attrs;
-};
-
-
-using OpTilingFunc = std::function<bool(const std::string&, const TeOpParas&, const nlohmann::json& , OpRunInfo&)>;
-
-using OpTilingFuncPtr = bool(*)(const std::string&, const TeOpParas&, const nlohmann::json& , OpRunInfo&);
-
-class FMK_FUNC_HOST_VISIBILITY OpTilingInterf
-{
-public:
-    OpTilingInterf(std::string op_type, OpTilingFunc func);
-    ~OpTilingInterf() = default;
-    static std::map<std::string, OpTilingFunc> &RegisteredOpInterf();
-};
-
-
-template <class T>
-ByteBuffer& ByteBufferPut(ByteBuffer &buf, const T &value)
-{
-    buf.write(reinterpret_cast<const char *>(&value), sizeof(value));
-    buf.flush();
-    return buf;
-}
-
-template <class T>
-ByteBuffer& ByteBufferGet(ByteBuffer &buf, T &value)
-{
-    buf.read(reinterpret_cast<char *>(&value), sizeof(value));
-    return buf;
-}
-
-inline size_t ByteBufferGetAll(ByteBuffer &buf, char *dest, size_t dest_len)
-{
-    size_t nread = 0;
-    size_t rn = 0;
-    do {
-        rn = buf.readsome(dest + nread, dest_len - nread);
-        nread += rn;
-    } while (rn > 0 && dest_len > nread);
-
-    return nread;
-}
-
-
-extern "C" ge::graphStatus OpParaCalculate(const ge::Node &node, OpRunInfo &run_info);
-extern "C" ge::graphStatus OpAtomicCalculate(const ge::Node &node, OpRunInfo &run_info);
-
-}
-
-#endif  // INC_OP_TILING_H_
diff --git a/inc/register/proto/caffe/caffe.proto b/inc/register/proto/caffe/caffe.proto
deleted file mode 100644
index f2d1acc64dfc97ca7f827a95dc5be5c9fc4d7498..0000000000000000000000000000000000000000
--- a/inc/register/proto/caffe/caffe.proto
+++ /dev/null
@@ -1,1802 +0,0 @@
-syntax = "proto2";
-
-package domi.caffe;
-
-// Specifies the shape (dimensions) of a Blob.
-message BlobShape {
-  repeated int64 dim = 1 [packed = true];
-}
-
-message BlobProto {
-  optional BlobShape shape = 7;
-  repeated float data = 5 [packed = true];
-  repeated float diff = 6 [packed = true];
-  repeated double double_data = 8 [packed = true];
-  repeated double double_diff = 9 [packed = true];
-  optional bytes int8_data = 10;
-  repeated int32 int32_data = 11 [packed = true];
-  repeated uint64 uint64_data = 12 [packed = true];
-  // 4D dimensions -- deprecated. Use "shape" instead.
- optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. -message BlobProtoVector { - repeated BlobProto blobs = 1; -} - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. - repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [default = false]; -} - -message FillerParameter { - // The filler type. - optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. - optional bool debug_info = 7 [default = false]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. 
-// -// SolverParameter next available ID: 42 (last added: layer_wise_reduce) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. - ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. - // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. 
return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. - // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [default = "L2"]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. - optional float clip_gradients = 35 [default = -1]; - - optional int32 snapshot = 14 [default = 0]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [default = false]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [default = GPU]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. - optional int64 random_seed = 20 [default = -1]; - - // type of the solver - optional string type = 40 [default = "SGD"]; - - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38 [default = 0.99]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. - optional bool snapshot_after_train = 28 [default = true]; - - // DEPRECATED: old solver enum types, use string instead - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - // DEPRECATED: use type instead of solver_type - optional SolverType solver_type = 30 [default = SGD]; - - // Overlap compute and communication for data parallel training - optional bool layer_wise_reduce = 41 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. 
- repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) - repeated string stage = 4; - repeated string not_stage = 5; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. - optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -// LayerParameter next available layer-specific ID: 151 (last added: smooth_l1_loss_param) -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies whether to backpropagate to each bottom. If unspecified, - // Caffe will automatically infer whether each input needs backpropagation - // to compute parameter gradients. If set to true for some inputs, - // backpropagation to those inputs is forced; if set false for some inputs, - // backpropagation to those inputs is skipped. - // - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. 
You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. - optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional BatchNormParameter batch_norm_param = 139; - optional BiasParameter bias_param = 141; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional CropParameter crop_param = 144; - optional DataParameter data_param = 107; - optional DetectionOutputParameter detection_output_param = 150; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ELUParameter elu_param = 140; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional InputParameter input_param = 143; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional ParameterParameter parameter_param = 145; - optional PoolingParameter pooling_param = 121; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional RecurrentParameter recurrent_param = 146; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional ScaleParameter scale_param = 142; - optional SigmoidParameter sigmoid_param = 124; - optional SmoothL1LossParameter smooth_l1_loss_param = 148; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; - optional PermuteParameter permute_param = 202; - optional PriorBoxParameter prior_box_param = 203; - optional NormalizeParameter norm_param = 206; - optional PSROIPoolingParameter psroi_pooling_param = 207; - optional FreespaceExtractParameter freespace_extract_param = 151; - optional PostprocessParameter postprocess_param = 152; - optional 
SpatialTransformParameter spatial_transform_param = 153; - optional ROIAlignParameter roi_align_param = 154; - optional ReorgParameter reorg_param = 155; - optional RegionParameter region_param = 156; - optional ReverseParameter reverse_param = 157; - optional InterpParameter interp_param = 158; - optional ShuffleChannelParameter shuffle_channel_param = 159; - optional UpsampleParameter upsample_param = 160; - optional ROIPoolingParameter roi_pooling_param = 161; - optional YoloParameter yolo_param = 199; - optional YoloV3DetectionOutputParameter yolov3_detection_output_param = 200; - optional ProposalParameter proposal_param = 201; - optional FSRDetectionOutputParameter fsrdetectionoutput_param = 222; - optional SSDDetectionOutputParameter ssddetectionoutput_param = 232; - optional YoloV2DetectionOutputParameter yolov2_detection_output_param = 204; - optional QuantParameter quant_param = 208; - optional CondTakeParameter condtake_param = 233; - optional MatrixInverseParameter matrix_inverse_param = 210; - optional WarpPerspectiveParameter warp_perspective_param = 234; - optional BatchMatMulParameter batch_matmul_param = 235; - optional SpatialTransformerParameter st_param = 5000; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would substract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. - optional int32 ignore_label = 1; - // How to normalize the loss for loss layers that aggregate across batches, - // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. - enum NormalizationMode { - // Divide by the number of examples in the batch times spatial dimensions. - // Outputs that receive the ignore label will NOT be ignored in computing - // the normalization factor. - FULL = 0; - // Divide by the total number of output locations that do not take the - // ignore_label. If ignore_label is not set, this behaves like FULL. - VALID = 1; - // Divide by the batch size. - BATCH_SIZE = 2; - // Do not normalize the loss. - NONE = 3; - } - // For historical reasons, the default normalization for - // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. - optional NormalizationMode normalization = 3 [default = VALID]; - // Deprecated. Ignored if normalization is specified. 
If normalization - // is not specified, then setting this to false will be equivalent to - // normalization = BATCH_SIZE to be consistent with previous behavior. - optional bool normalize = 2; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; - // The axis along which to maximise -- may be negative to index from the - // end (e.g., -1 for the last axis). - // By default ArgMaxLayer maximizes over the flattened trailing dimensions - // for each index of the first / num dimension. - optional int32 axis = 3; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message BatchNormParameter { - // If false, normalization is performed over the current mini-batch - // and global statistics are accumulated (but not yet used) by a moving - // average. - // If true, those accumulated mean and variance values are used for the - // normalization. - // By default, it is set to false when the network is in the training - // phase and true when the network is in the testing phase. - optional bool use_global_stats = 1; - // What fraction of the moving average remains each iteration? - // Smaller values make the moving average decay faster, giving more - // weight to the recent values. - // Each iteration updates the moving average @f$S_{t-1}@f$ with the - // current mean @f$ Y_t @f$ by - // @f$ S_t = (1-\beta)Y_t + \beta \cdot S_{t-1} @f$, where @f$ \beta @f$ - // is the moving_average_fraction parameter. - optional float moving_average_fraction = 2 [default = .999]; - // Small value to add to the variance estimate so that we don't divide by - // zero. - optional float eps = 3 [default = 1e-5]; -} - -message BiasParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). 
- // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar bias. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the bias - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer.) - // The initialization for the learned bias parameter. - // Default is the zero (0) initialization, resulting in the BiasLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - optional bool bias_from_blob = 4 [default = true]; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - // Factor used to dilate the kernel, (implicitly) zero-filling the resulting - // holes. (Kernel dilation is sometimes referred to by its use in the - // algorithme à trous from Holschneider et al. 1987.) - repeated uint32 dilation = 18; // The dilation; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. 
- optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - - optional uint32 group = 5 [default = 1]; // The group size for group conv - - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [default = 1]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [default = false]; -} - -message CropParameter { - // To crop, elements of the first bottom are selected to fit the dimensions - // of the second, reference bottom. The crop is configured by - // - the crop `axis` to pick the dimensions for cropping - // - the crop `offset` to set the shift for all/each dimension - // to align the cropped bottom with the reference bottom. - // All dimensions up to but excluding `axis` are preserved, while - // the dimensions including and trailing `axis` are cropped. - // If only one `offset` is set, then all dimensions are offset by this amount. - // Otherwise, the number of offsets must equal the number of cropped axes to - // shift the crop in each dimension accordingly. - // Note: standard dimensions are N,C,H,W so the default is a spatial crop, - // and `axis` may be negative to index from the end (e.g., -1 for the last - // axis). - optional int32 axis = 1 [default = 2]; - repeated uint32 offset = 2; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. 
Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Increase if data feeding bandwidth varies, within the - // limit of device memory for GPU training) - optional uint32 prefetch = 10 [default = 4]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio - optional bool scale_train = 2 [default = true]; // scale train or test phase -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. - repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -// Message that stores parameters used by ELULayer -message ELUParameter { - // Described in: - // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate - // Deep Network Learning by Exponential Linear Units (ELUs). arXiv - optional float alpha = 1 [default = 1]; -} - -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. - optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -// Message that stores parameters used by ExpLayer -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). 
- optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. - optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. 
- optional string source = 1; - optional int32 axis = 2 [default = 1]; // axis of prob -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; - // Specify whether to transpose the weight matrix or not. - // If transpose == true, any operations will be performed on the transpose - // of the weight matrix. The weight matrix itself is not going to be transposed - // but rather the transfer flag of operations will be toggled accordingly. - optional bool transpose = 6 [default = false]; -} - -message InputParameter { - // This layer produces N >= 1 top blob(s) to be assigned manually. - // Define N shapes to set a shape for each top. - // Define 1 shape to set the same shape for every top. - // Define no shape to defer to reshaping manually. - repeated BlobShape shape = 1; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message ParameterParameter { - optional BlobShape shape = 1; -} - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
- optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; - optional bool ceil_mode = 13 [default = true]; - // How to calculate the output size - using ceil (default) or floor rounding. - enum RoundMode { - CEIL = 0; - FLOOR = 1; - } - optional RoundMode round_mode = 14 [default = CEIL]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by RecurrentLayer -message RecurrentParameter { - // The dimension of the output (and usually hidden state) representation -- - // must be explicitly set to non-zero. - optional uint32 num_output = 1 [default = 0]; - - optional FillerParameter weight_filler = 2; // The filler for the weight - optional FillerParameter bias_filler = 3; // The filler for the bias - - // Whether to enable displaying debug_info in the unrolled recurrent net. - optional bool debug_info = 4 [default = false]; - - // Whether to add as additional inputs (bottoms) the initial hidden state - // blobs, and add as additional outputs (tops) the final timestep hidden state - // blobs. The number of additional bottom/top blobs required depends on the - // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. - optional bool expose_hidden = 5 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) 
- // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. - // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. 
- // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - - -message ScaleParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar multiplier. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the scale - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer.) - // The initialization for the learned scale parameter. - // Default is the unit (1) initialization, resulting in the ScaleLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - - // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but - // may be more efficient). Initialized with bias_filler (defaults to 0). - optional bool bias_term = 4 [default = false]; - optional FillerParameter bias_filler = 5; - optional bool scale_from_blob = 6 [default = true]; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; -} - -message SmoothL1LossParameter { - // SmoothL1Loss(x) = - // 0.5 * (sigma * x) ** 2 -- if x < 1.0 / sigma / sigma - // |x| - 0.5 / sigma / sigma -- otherwise - optional float sigma = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). 
- // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. - optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. 
-message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - QUANT = 208; - DEQUANT = 209; - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. 
- optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. - repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. 
- // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope parameters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} - -// Message that stores parameters used by DetectionOutputLayer -//message DetectionOutputParameter { -// optional int32 num_classes = 1 [default = 21]; -// optional float nms_threshold = 2 [default = 0.3]; -// optional int32 top_k = 3; -// optional float confidence_threshold = 4 [default = 0.8]; -//} - -// Message that store parameters used by PriorBoxLayer -message PriorBoxParameter { - // Encode/decode type. - enum CodeType { - CORNER = 1; - CENTER_SIZE = 2; - CORNER_SIZE = 3; - } - // Minimum box size (in pixels). Required! - repeated float min_size = 1; - // Maximum box size (in pixels). Required! - repeated float max_size = 2; - // Various of aspect ratios. Duplicate ratios will be ignored. - // If none is provided, we use default ratio 1. - repeated float aspect_ratio = 3; - // If true, will flip each aspect ratio. - // For example, if there is aspect ratio "r", - // we will generate aspect ratio "1.0/r" as well. - optional bool flip = 4 [default = true]; - // If true, will clip the prior so that it is within [0, 1] - optional bool clip = 5 [default = false]; - // Variance for adjusting the prior bboxes. - repeated float variance = 6; - // By default, we calculate img_height, img_width, step_x, step_y based on - // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely - // provided. - // Explicitly provide the img_size. - optional uint32 img_size = 7; - // Either img_size or img_h/img_w should be specified; not both. - optional uint32 img_h = 8; - optional uint32 img_w = 9; - - // Explicitly provide the step size. - optional float step = 10; - // Either step or step_h/step_w should be specified; not both. - optional float step_h = 11; - optional float step_w = 12; - - // Offset to the top left corner of each cell. - optional float offset = 13 [default = 0.5]; -} - -// Message that stores parameters used by PermutetLayer -message PermuteParameter { - // The new orders of the axes of data. Notice it should be with - // in the same range as the input data, and it starts from 0. - // Do not provide repeated order. - repeated uint32 order = 1; -} - -message NormalizeParameter { - optional bool across_spatial = 1 [default = true]; - // Initial value of scale. Default is 1.0 for all - optional FillerParameter scale_filler = 2; - // Whether or not scale parameters are shared across channels. - optional bool channel_shared = 3 [default = true]; - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 4 [default = 1e-10]; -} - -// needed by ssd -message SaveOutputParameter { - // Output directory. If not empty, we will save the results. 
- optional string output_directory = 1; - // Output name prefix. - optional string output_name_prefix = 2; - // Output format. - // VOC - PASCAL VOC output format. - // COCO - MS COCO output format. - optional string output_format = 3; - // If you want to output results, must also provide the following two files. - // Otherwise, we will ignore saving results. - // label map file. - optional string label_map_file = 4; - // A file which contains a list of names and sizes with same order - // of the input DB. The file is in the following format: - // name height width - // ... - optional string name_size_file = 5; - // Number of test images. It can be less than the lines specified in - // name_size_file. For example, when we only want to evaluate on part - // of the test images. - optional uint32 num_test_image = 6; - // The resize parameter used in saving the data. - // optional ResizeParameter resize_param = 7; -} - -message NonMaximumSuppressionParameter { - // Threshold to be used in nms. - optional float nms_threshold = 1 [default = 0.3]; - // Maximum number of results to be kept. - optional int32 top_k = 2; - // Parameter for adaptive nms. - optional float eta = 3 [default = 1.0]; -} - -message GeneralNmsParameter { - optional int32 post_top_k = 1 ; - optional float nms_threshold = 2 [default = 0]; - optional float iou_threshold_decay = 3 [default = 1.0]; - optional float coor_scale_factor = 4 [default = 1.0]; -} - -// Message that store parameters used by DetectionOutputLayer, ssd/fasterRcnn -message DetectionOutputParameter { - optional int32 num_classes = 1; - optional bool share_location = 2 [default = true]; - optional int32 background_label_id = 3 [default = 0]; - optional NonMaximumSuppressionParameter nms_param = 4; - optional SaveOutputParameter save_output_param = 5; - optional PriorBoxParameter.CodeType code_type = 6 [default = CENTER_SIZE]; - optional bool variance_encoded_in_target = 8 [default = true]; - optional int32 keep_top_k = 7; - optional float confidence_threshold = 9; - optional float nms_threshold = 13; - optional int32 top_k = 14; - optional int32 boxes = 15 [default = 1]; - optional bool relative = 17 [default = true]; - optional float objectness_threshold = 18 [default = 0.5]; - optional float class_threshold = 19 [default = 0.5]; - repeated float biases = 20; - optional GeneralNmsParameter general_nms_param = 21; - optional float objectness_score = 22; -} -message PSROIPoolingParameter { - required float spatial_scale = 1; - required int32 output_dim = 2; // output channel number - required int32 group_size = 3; // number of groups to encode position-sensitive score maps -} -// Message that stores parameters used by FreespaceExtractLayer -message FreespaceExtractParameter { - optional float org_height = 1; -} - -// Message that stores parameters used by DetectpostprocessLayer -message PostprocessParameter { - optional float nms_thresh = 1 [default = 0.3]; - optional float conf_thresh = 2 [default = 0.5]; - optional uint32 post_nms_topn = 3 [default = 100]; - optional uint32 cls_num = 4 [default = 12]; - repeated float bbox_reg_weights = 5; -} - -// Message that stores parameters used by SpatialTransformLayer -message SpatialTransformParameter { - optional uint32 output_h = 1 [default = 0]; - optional uint32 output_w = 2 [default = 0]; - optional float border_value = 3 [default = 0]; - repeated float affine_transform = 4; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; -} -message 
ROIAlignParameter { - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pooled_h = 1 [default = 0]; // The pooled output height - optional uint32 pooled_w = 2 [default = 0]; // The pooled output width - // Multiplicative spatial scale factor to translate ROI coords from their - // input scale to the scale used when pooling - optional float spatial_scale = 3 [default = 1]; - optional int32 sampling_ratio = 4 [default = -1]; - optional int32 roi_end_mode = 5 [default = 0]; -} - -message RegionParameter { - optional uint32 classes = 1 [default = 20]; // Category of classification - optional uint32 coords = 2 [default = 4]; // Coordinates of box - optional uint32 boxes = 3 [default = 1]; // Number of boxes predicted per grid - optional uint32 softmax = 4 [default = 0]; - optional string softmax_tree = 5 [default = ""]; - optional uint32 background = 6 [default = 0]; -} -message ReorgParameter{ - optional uint32 stride = 2 [default = 2]; - optional bool reverse = 1 [default = false]; -} -message ReverseParameter{ - repeated int32 axis = 1; -} -message InterpParameter{ - optional int32 height = 1 [default = 0];//Height of output - optional int32 width = 2 [default = 0];//Width of output - optional int32 zoom_factor = 3 [default = 1];//zoom factor - optional int32 shrink_factor = 4 [default = 1];//shrink factor - optional int32 pad_beg = 5 [default = 0];//padding at begin of input - optional int32 pad_end = 6 [default = 0];//padding at end of input -} -message ShuffleChannelParameter{ - optional uint32 group = 1[default = 1]; // The number of group -} -message UpsampleParameter{ - optional float scale = 1[default = 1]; - optional int32 stride = 2[default = 2]; - optional int32 stride_h = 3[default = 2]; - optional int32 stride_w = 4[default=2]; -} -message ROIPoolingParameter { - required int32 pooled_h = 1; - required int32 pooled_w = 2; - optional float spatial_scale = 3 [default=0.0625]; - optional float spatial_scale_h = 4; - optional float spatial_scale_w = 5; -} - -message YoloParameter { - optional int32 boxes = 1 [default = 3]; - optional int32 coords = 2 [default = 4]; - optional int32 classes = 3 [default = 80]; - optional string yolo_version = 4 [default = "V3"]; - optional bool softmax = 5 [default = false]; - optional bool background = 6 [default = false]; - optional bool softmaxtree = 7 [default = false]; -} - -message YoloV3DetectionOutputParameter { - optional int32 boxes = 1 [default = 3]; - optional int32 classes = 2 [default = 80]; - optional bool relative = 3 [default = true]; - optional float obj_threshold = 4 [default = 0.5]; - optional float score_threshold = 5 [default = 0.5]; - optional float iou_threshold = 6 [default = 0.45]; - optional int32 pre_nms_topn = 7 [default = 512]; - optional int32 post_nms_topn = 8 [default = 1024]; - repeated float biases_high = 9; - repeated float biases_mid = 10; - repeated float biases_low = 11; - optional int32 coords = 12 [default = 4]; - repeated float biases = 13; - optional bool resize_origin_img_to_net = 14 [default = false]; -} - -message ProposalParameter { - optional float feat_stride = 1 [default = 16]; - optional float base_size = 2 [default = 16]; - optional float min_size = 3 [default = 16]; - repeated float ratio = 4; - repeated float scale = 5; - optional int32 pre_nms_topn = 6 [default = 3000]; - optional int32 post_nms_topn = 7 [default = 304]; - optional float iou_threshold = 8 [default = 0.7]; - optional bool 
output_actual_rois_num = 9 [default = false]; -} - -message FSRDetectionOutputParameter { - required int32 num_classes = 1; - required float score_threshold = 2; - required float iou_threshold = 3; - optional int32 batch_rois = 4 [default = 1]; -} - -message SSDDetectionOutputParameter { - required int32 num_classes= 1 [default = 2]; - optional bool share_location = 2 [default = true]; - optional int32 background_label_id = 3 [default = 0]; - optional float iou_threshold = 4 [default = 0.3]; - optional int32 top_k = 5 [default = 200]; - optional float eta = 6 [default = 1.0]; - optional bool variance_encoded_in_target = 7 [default = false]; - optional int32 code_type = 8 [default = 1]; - optional int32 keep_top_k = 9 [default = -1]; - optional float confidence_threshold = 10 [default = 0.0]; -} -message YoloV2DetectionOutputParameter { - optional int32 boxes = 1 [default = 5]; - optional int32 classes = 2 [default = 80]; - optional bool relative = 3 [default = true]; - optional float obj_threshold = 4 [default = 0.5]; - optional float score_threshold = 5 [default = 0.5]; - optional float iou_threshold = 6 [default = 0.45]; - optional int32 pre_nms_topn = 7 [default = 512]; - optional int32 post_nms_topn = 8 [default = 1024]; - repeated float biases = 9; - optional int32 coords = 10 [default = 4]; - optional bool resize_origin_img_to_net = 11 [default = false]; -} - -message QuantParameter { - optional float scale = 2; - optional bytes offset = 3; -} - -message BatchMatMulParameter{ - optional bool adj_x1 = 1 [default = false]; - optional bool adj_x2 = 2 [default = false]; -} - -message CondTakeParameter { - required string mode = 1; - required float val = 2; - optional float eps = 3 [default = 1e-06]; -} - -message MatrixInverseParameter { - optional bool adjoint = 1 [default = false]; -} - -message WarpPerspectiveParameter { - required int32 out_height = 1; - required int32 out_width = 2; - optional float constant = 3; - optional string border_type = 4 [default = 'BORDER_CONSTANT']; -} - -message SpatialTransformerParameter { - // How to use the parameter passed by localisation network - optional string transform_type = 1 [default = "affine"]; - // What is the sampling technique - optional string sampler_type = 2 [default = "bilinear"]; - - // If not set,stay same with the input dimension H and W - optional int32 output_H = 3; - optional int32 output_W = 4; - // If false, only compute dTheta, DO NOT compute dU - optional bool to_compute_dU = 5 [default = true]; - - // The default value for some parameters - optional double theta_1_1 = 6; - optional double theta_1_2 = 7; - optional double theta_1_3 = 8; - optional double theta_2_1 = 9; - optional double theta_2_2 = 10; - optional double theta_2_3 = 11; -} diff --git a/inc/register/proto/onnx/ge_onnx.proto b/inc/register/proto/onnx/ge_onnx.proto deleted file mode 100644 index 4cd77f3ae5a9ebb1746ea63a9d23d682f9b9fbbf..0000000000000000000000000000000000000000 --- a/inc/register/proto/onnx/ge_onnx.proto +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright (c) ONNX Project Contributors. -// Licensed under the MIT license. - -syntax = "proto3"; - -package ge.onnx; - -// Overview -// -// ONNX is an open specification that is comprised of the following components: -// -// 1) A definition of an extensible computation graph model. -// 2) Definitions of standard data types. -// 3) Definitions of built-in operators. -// -// This document describes the syntax of models and their computation graphs, -// as well as the standard data types. 
Together, they are referred to as the ONNX -// Intermediate Representation, or 'IR' for short. -// -// The normative semantic specification of the ONNX IR is found in docs/IR.md. -// Definitions of the built-in neural network operators may be found in docs/Operators.md. - -// Notes -// -// Release -// -// We are still in the very early stage of defining ONNX. The current -// version of ONNX is a starting point. While we are actively working -// towards a complete spec, we would like to get the community involved -// by sharing our working version of ONNX. -// -// Protobuf compatibility -// -// To simplify framework compatibility, ONNX is defined using the subset of protobuf -// that is compatible with both protobuf v2 and v3. This means that we do not use any -// protobuf features that are only available in one of the two versions. -// -// Here are the most notable contortions we have to carry out to work around -// these limitations: -// -// - No 'map' (added protobuf 3.0). We instead represent mappings as lists -// of key-value pairs, where order does not matter and duplicates -// are not allowed. - - -// Versioning -// -// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md -// -// To be compatible with both proto2 and proto3, we will use a version number -// that is not defined by the default value but an explicit enum number. -enum Version { - // proto3 requires the first enum value to be zero. - // We add this just to appease the compiler. - _START_VERSION = 0; - // The version field is always serialized and we will use it to store the - // version that the graph is generated from. This helps us set up version - // control. - // For the IR, we are using simple numbers starting with with 0x00000001, - // which was the version we published on Oct 10, 2017. - IR_VERSION_2017_10_10 = 0x0000000000000001; - - // IR_VERSION 2 published on Oct 30, 2017 - // - Added type discriminator to AttributeProto to support proto3 users - IR_VERSION_2017_10_30 = 0x0000000000000002; - - // IR VERSION 3 published on Nov 3, 2017 - // - For operator versioning: - // - Added new message OperatorSetIdProto - // - Added opset_import in ModelProto - // - For vendor extensions, added domain in NodeProto - IR_VERSION_2017_11_3 = 0x0000000000000003; - - // IR VERSION 4 published on Jan 22, 2019 - // - Relax constraint that initializers should be a subset of graph inputs - // - Add type BFLOAT16 - IR_VERSION_2019_1_22 = 0x0000000000000004; - - // IR VERSION 5 published on March 18, 2019 - // - Add message TensorAnnotation. - // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters. - IR_VERSION_2019_3_18 = 0x0000000000000005; - - // IR VERSION 6 published on Sep 19, 2019 - // - Add support for sparse tensor constants stored in model. - // - Add message SparseTensorProto - // - Add sparse initializers - IR_VERSION = 0x0000000000000006; -} - -// Attributes -// -// A named attribute containing either singular float, integer, string, graph, -// and tensor values, or repeated float, integer, string, graph, and tensor values. -// An AttributeProto MUST contain the name field, and *only one* of the -// following content fields, effectively enforcing a C/C++ union equivalent. -message AttributeProto { - - // Note: this enum is structurally identical to the OpSchema::AttrType - // enum defined in schema.h. If you rev one, you likely need to rev the other. 
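As the comments above spell out, AttributeProto behaves like a tagged union: `name` must be present, exactly one value field may be filled, and `type` is the discriminator. A minimal sketch, assuming the upstream `onnx` Python package (whose AttributeProto mirrors this vendored `ge.onnx` copy) is available:

```python
# Illustrative only: uses the upstream `onnx` package, whose AttributeProto
# mirrors the vendored ge.onnx definition being removed in this diff.
from onnx import helper, AttributeProto

attr = helper.make_attribute("alpha", 0.5)   # fills `name`, `f`, and `type`
assert attr.name == "alpha"
assert attr.type == AttributeProto.FLOAT     # discriminator for the union
assert attr.f == 0.5                         # exactly one value field is set
```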
- enum AttributeType { - UNDEFINED = 0; - FLOAT = 1; - INT = 2; - STRING = 3; - TENSOR = 4; - GRAPH = 5; - SPARSE_TENSOR = 11; - - FLOATS = 6; - INTS = 7; - STRINGS = 8; - TENSORS = 9; - GRAPHS = 10; - SPARSE_TENSORS = 12; - } - - // The name field MUST be present for this version of the IR. - string name = 1; // namespace Attribute - - // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function. - // In this case, this AttributeProto does not contain data, and it's a reference of attribute - // in parent scope. - // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph. - string ref_attr_name = 21; - - // A human-readable documentation for this attribute. Markdown is allowed. - string doc_string = 13; - - // The type field MUST be present for this version of the IR. - // For 0.0.1 versions of the IR, this field was not defined, and - // implementations needed to use has_field hueristics to determine - // which value field was in use. For IR_VERSION 0.0.2 or later, this - // field MUST be set and match the f|i|s|t|... field in use. This - // change was made to accomodate proto3 implementations. - AttributeType type = 20; // discriminator that indicates which field below is in use - - // Exactly ONE of the following fields must be present for this version of the IR - float f = 2; // float - int64 i = 3; // int - bytes s = 4; // UTF-8 string - TensorProto t = 5; // tensor value - GraphProto g = 6; // graph - SparseTensorProto sparse_tensor = 22; // sparse tensor value - // Do not use field below, it's deprecated. - // optional ValueProto v = 12; // value - subsumes everything but graph - - repeated float floats = 7; // list of floats - repeated int64 ints = 8; // list of ints - repeated bytes strings = 9; // list of UTF-8 strings - repeated TensorProto tensors = 10; // list of tensors - repeated GraphProto graphs = 11; // list of graph - repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors -} - -// Defines information on value, including the name, the type, and -// the shape of the value. -message ValueInfoProto { - // This field MUST be present in this version of the IR. - string name = 1; // namespace Value - // This field MUST be present in this version of the IR for - // inputs and outputs of the top-level graph. - TypeProto type = 2; - // A human-readable documentation for this value. Markdown is allowed. - string doc_string = 3; -} - -// Nodes -// -// Computation graphs are made up of a DAG of nodes, which represent what is -// commonly called a "layer" or "pipeline stage" in machine learning frameworks. -// -// For example, it can be a node of type "Conv" that takes in an image, a filter -// tensor and a bias tensor, and produces the convolved output. -message NodeProto { - repeated string input = 1; // namespace Value - repeated string output = 2; // namespace Value - - // An optional identifier for this node in a graph. - // This field MAY be absent in ths version of the IR. - string name = 3; // namespace Node - - // The symbolic identifier of the Operator to execute. - string op_type = 4; // namespace Operator - // The domain of the OperatorSet that specifies the operator named by op_type. - string domain = 7; // namespace Domain - - // Additional named attributes. - repeated AttributeProto attribute = 5; - - // A human-readable documentation for this node. Markdown is allowed. 
- string doc_string = 6; -} - -// Models -// -// ModelProto is a top-level file/container format for bundling a ML model and -// associating its computation graph with metadata. -// -// The semantics of the model are described by the associated GraphProto. -message ModelProto { - // The version of the IR this model targets. See Version enum above. - // This field MUST be present. - int64 ir_version = 1; - - // The OperatorSets this model relies on. - // All ModelProtos MUST have at least one entry that - // specifies which version of the ONNX OperatorSet is - // being imported. - // - // All nodes in the ModelProto's graph will bind against the operator - // with the same-domain/same-op_type operator with the HIGHEST version - // in the referenced operator sets. - repeated OperatorSetIdProto opset_import = 8; - - // The name of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_name = 2; - - // The version of the framework or tool used to generate this model. - // This field SHOULD be present to indicate which implementation/tool/framework - // emitted the model. - string producer_version = 3; - - // Domain name of the model. - // We use reverse domain names as name space indicators. For example: - // `com.facebook.fair` or `com.microsoft.cognitiveservices` - // - // Together with `model_version` and GraphProto.name, this forms the unique identity of - // the graph. - string domain = 4; - - // The version of the graph encoded. See Version enum below. - int64 model_version = 5; - - // A human-readable documentation for this model. Markdown is allowed. - string doc_string = 6; - - // The parameterized graph that is evaluated to execute the model. - GraphProto graph = 7; - - // Named metadata values; keys should be distinct. - repeated StringStringEntryProto metadata_props = 14; -}; - -// StringStringEntryProto follows the pattern for cross-proto-version maps. -// See https://developers.google.com/protocol-buffers/docs/proto3#maps -message StringStringEntryProto { - string key = 1; - string value= 2; -}; - -message TensorAnnotation { - string tensor_name = 1; - // pairs to annotate tensor specified by above. - // The keys used in the mapping below must be pre-defined in ONNX spec. - // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as - // quantization parameter keys. - repeated StringStringEntryProto quant_parameter_tensor_names = 2; -} - - - -// Graphs -// -// A graph defines the computational logic of a model and is comprised of a parameterized -// list of nodes that form a directed acyclic graph based on their inputs and outputs. -// This is the equivalent of the "network" or "graph" in many deep learning -// frameworks. -message GraphProto { - // The nodes in the graph, sorted topologically. - repeated NodeProto node = 1; - - // The name of the graph. - string name = 2; // namespace Graph - - // A list of named tensor values, used to specify constant inputs of the graph. - // Each TensorProto entry must have a distinct name (within the list) that - // MAY also appear in the input list. - repeated TensorProto initializer = 5; - - // Initializers (see above) stored in sparse format. - repeated SparseTensorProto sparse_initializer = 15; - - // A human-readable documentation for this graph. Markdown is allowed. - string doc_string = 10; - - // The inputs and outputs of the graph. 
- repeated ValueInfoProto input = 11; - repeated ValueInfoProto output = 12; - - // Information for the values in the graph. The ValueInfoProto.name's - // must be distinct. It is optional for a value to appear in value_info list. - repeated ValueInfoProto value_info = 13; - - // This field carries information to indicate the mapping among a tensor and its - // quantization parameter tensors. For example: - // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated, - // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model. - repeated TensorAnnotation quantization_annotation = 14; - - // DO NOT USE the following fields, they were deprecated from earlier versions. - // repeated string input = 3; - // repeated string output = 4; - // optional int64 ir_version = 6; - // optional int64 producer_version = 7; - // optional string producer_tag = 8; - // optional string domain = 9; -} - -// Tensors -// -// A serialized tensor value. -message TensorProto { - enum DataType { - UNDEFINED = 0; - // Basic types. - FLOAT = 1; // float - UINT8 = 2; // uint8_t - INT8 = 3; // int8_t - UINT16 = 4; // uint16_t - INT16 = 5; // int16_t - INT32 = 6; // int32_t - INT64 = 7; // int64_t - STRING = 8; // string - BOOL = 9; // bool - - // IEEE754 half-precision floating-point format (16 bits wide). - // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits. - FLOAT16 = 10; - - DOUBLE = 11; - UINT32 = 12; - UINT64 = 13; - COMPLEX64 = 14; // complex with float32 real and imaginary components - COMPLEX128 = 15; // complex with float64 real and imaginary components - - // Non-IEEE floating-point format based on IEEE754 single-precision - // floating-point number truncated to 16 bits. - // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits. - BFLOAT16 = 16; - - // Future extensions go here. - } - - // The shape of the tensor. - repeated int64 dims = 1; - - // The data type of the tensor. - // This field MUST have a valid TensorProto.DataType value - int32 data_type = 2; - - // For very large tensors, we may want to store them in chunks, in which - // case the following fields will specify the segment that is stored in - // the current TensorProto. - message Segment { - int64 begin = 1; - int64 end = 2; - } - Segment segment = 3; - - // Tensor content must be organized in row-major order. - // - // Depending on the data_type field, exactly one of the fields below with - // name ending in _data is used to store the elements of the tensor. - - // For float and complex64 values - // Complex64 tensors are encoded as a single array of floats, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be FLOAT or COMPLEX64. - repeated float float_data = 4 [packed = true]; - - // For int32, uint8, int8, uint16, int16, bool, and float16 values - // float16 values must be bit-wise converted to an uint16_t prior - // to writing to the buffer. - // When this field is present, the data_type field MUST be - // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16 - repeated int32 int32_data = 5 [packed = true]; - - // For strings. - // Each element of string_data is a UTF-8 encoded Unicode - // string. No trailing null, no leading BOM. 
The protobuf "string" - // scalar type is not used to match ML community conventions. - // When this field is present, the data_type field MUST be STRING - repeated bytes string_data = 6; - - // For int64. - // When this field is present, the data_type field MUST be INT64 - repeated int64 int64_data = 7 [packed = true]; - - // Optionally, a name for the tensor. - string name = 8; // namespace Value - - // A human-readable documentation for this tensor. Markdown is allowed. - string doc_string = 12; - - // Serializations can either use one of the fields above, or use this - // raw bytes field. The only exception is the string case, where one is - // required to store the content in the repeated bytes string_data field. - // - // When this raw_data field is used to store tensor value, elements MUST - // be stored in as fixed-width, little-endian order. - // Floating-point data types MUST be stored in IEEE 754 format. - // Complex64 elements must be written as two consecutive FLOAT values, real component first. - // Complex128 elements must be written as two consecutive DOUBLE values, real component first. - // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false). - // - // Note: the advantage of specific field rather than the raw_data field is - // that in some cases (e.g. int data), protobuf does a better packing via - // variable length storage, and may lead to smaller binary footprint. - // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED - bytes raw_data = 9; - - // Data can be stored inside the protobuf file using type-specific fields or raw_data. - // Alternatively, raw bytes data can be stored in an external file, using the external_data field. - // external_data stores key-value pairs describing data location. Recognized keys are: - // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX - // protobuf model was stored - // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string. - // Offset values SHOULD be multiples 4096 (page size) to enable mmap support. - // - "length" (optional) - number of bytes containing data. Integer stored as string. - // - "checksum" (optional) - SHA1 digest of file specified in under 'location' key. - repeated StringStringEntryProto external_data = 13; - - // Location of the data for this tensor. MUST be one of: - // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field. - // - EXTERNAL - data stored in an external location as described by external_data field. - enum DataLocation { - DEFAULT = 0; - EXTERNAL = 1; - } - - // If value not set, data is stored in raw_data (if set) otherwise in type-specified field. - DataLocation data_location = 14; - - // For double - // Complex128 tensors are encoded as a single array of doubles, - // with the real components appearing in odd numbered positions, - // and the corresponding imaginary component apparing in the - // subsequent even numbered position. 
(e.g., [1.0 + 2.0i, 3.0 + 4.0i] - // is encoded as [1.0, 2.0 ,3.0 ,4.0] - // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128 - repeated double double_data = 10 [packed = true]; - - // For uint64 and uint32 values - // When this field is present, the data_type field MUST be - // UINT32 or UINT64 - repeated uint64 uint64_data = 11 [packed = true]; -} - -// A serialized sparse-tensor value -message SparseTensorProto { - // The sequence of non-default values are encoded as a tensor of shape [NNZ]. - // The default-value is zero for numeric tensors, and empty-string for string tensors. - TensorProto values = 1; - - // The indices of the non-default values, which may be stored in one of two formats. - // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value - // corresponding to the j-th index of the i-th value (in the values tensor). - // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value - // must be the linearized-index of the i-th value (in the values tensor). - // The linearized-index can be converted into an index tuple (k_1,...,k_rank) - // using the shape provided below. - // The indices must appear in ascending order without duplication. - // In the first format, the ordering is lexicographic-ordering: - // e.g., index-value [1,4] must appear before [2,1] - TensorProto indices = 2; - - // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] - repeated int64 dims = 3; -} - -// Defines a tensor shape. A dimension can be either an integer value -// or a symbolic variable. A symbolic variable represents an unknown -// dimension. -message TensorShapeProto { - message Dimension { - oneof value { - int64 dim_value = 1; - string dim_param = 2; // namespace Shape - }; - // Standard denotation can optionally be used to denote tensor - // dimensions with standard semantic descriptions to ensure - // that operations are applied to the correct axis of a tensor. - // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition - // for pre-defined dimension denotations. - string denotation = 3; - }; - repeated Dimension dim = 1; -} - -// Types -// -// The standard ONNX data types. -message TypeProto { - - message Tensor { - // This field MUST NOT have the value of UNDEFINED - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - int32 elem_type = 1; - TensorShapeProto shape = 2; - } - - // repeated T - message Sequence { - // The type and optional shape of each element of the sequence. - // This field MUST be present for this version of the IR. - TypeProto elem_type = 1; - }; - - // map - message Map { - // This field MUST have a valid TensorProto.DataType value - // This field MUST be present for this version of the IR. - // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING - int32 key_type = 1; - // This field MUST be present for this version of the IR. - TypeProto value_type = 2; - }; - - oneof value { - // The type of a tensor. - Tensor tensor_type = 1; - - // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values - // as input and output to graphs and nodes. These types are needed to naturally - // support classical ML operators. DNN operators SHOULD restrict their input - // and output types to tensors. - - // The type of a sequence. - Sequence sequence_type = 4; - - // The type of a map. 
- Map map_type = 5; - - } - - // An optional denotation can be used to denote the whole - // type with a standard semantic description as to what is - // stored inside. Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition - // for pre-defined type denotations. - string denotation = 6; -} - -// Operator Sets -// -// OperatorSets are uniquely identified by a (domain, opset_version) pair. -message OperatorSetIdProto { - // The domain of the operator set being identified. - // The empty string ("") or absence of this field implies the operator - // set that is defined as part of the ONNX specification. - // This field MUST be present in this version of the IR when referring to any other operator set. - string domain = 1; - - // The version of the operator set being identified. - // This field MUST be present in this version of the IR. - int64 version = 2; -} diff --git a/inc/register/proto/tensorflow/attr_value.proto b/inc/register/proto/tensorflow/attr_value.proto deleted file mode 100644 index 1cc67d627bdb3b01057b19b5d6aca75a0b0416e2..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/attr_value.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "AttrValueProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "tensor.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing the value for an attr used to configure an Op. -// Comment indicates the corresponding attr type. Only the field matching the -// attr type may be filled. -message AttrValue { - // LINT.IfChange - message ListValue { - repeated bytes s = 2; // "list(string)" - repeated int64 i = 3 [packed = true]; // "list(int)" - repeated float f = 4 [packed = true]; // "list(float)" - repeated bool b = 5 [packed = true]; // "list(bool)" - repeated DataType type = 6 [packed = true]; // "list(type)" - repeated TensorShapeProto shape = 7; // "list(shape)" - repeated TensorProto tensor = 8; // "list(tensor)" - repeated NameAttrList func = 9; // "list(attr)" - } - // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc) - - oneof value { - bytes s = 2; // "string" - int64 i = 3; // "int" - float f = 4; // "float" - bool b = 5; // "bool" - DataType type = 6; // "type" - TensorShapeProto shape = 7; // "shape" - TensorProto tensor = 8; // "tensor" - ListValue list = 1; // any "list(...)" - - // "func" represents a function. func.name is a function's name or - // a primitive op's name. func.attr.first is the name of an attr - // defined for that function. func.attr.second is the value for - // that attr in the instantiation. - NameAttrList func = 10; - - // This is a placeholder only used in nodes defined inside a - // function. It indicates the attr value will be supplied when - // the function is instantiated. For example, let us suppose a - // node "N" in function "FN". "N" has an attr "A" with value - // placeholder = "foo". When FN is instantiated with attr "foo" - // set to "bar", the instantiated node N's attr A will have been - // given the value "bar". - string placeholder = 9; - } -} - -// A list of attr names and their values. The whole list is attached -// with a string name. E.g., MatMul[T=float]. 
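The AttrValue message above follows the same tagged-union pattern: exactly one member of the `value` oneof is populated, with `ListValue` covering the `list(...)` attr types. A short sketch using upstream TensorFlow's generated classes (an assumption; the vendored `domi.tensorflow` copy removed here is structurally identical):

```python
# Illustrative only: upstream TensorFlow ships generated classes for the
# same AttrValue schema that this vendored copy duplicates.
from tensorflow.core.framework import attr_value_pb2

scalar = attr_value_pb2.AttrValue(i=64)                   # the "int" variant
lst = attr_value_pb2.AttrValue(
    list=attr_value_pb2.AttrValue.ListValue(i=[1, 2, 3])  # "list(int)" variant
)

# Only one member of the `value` oneof may be set at a time.
assert scalar.WhichOneof("value") == "i"
assert lst.WhichOneof("value") == "list"
```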
-message NameAttrList { - string name = 1; - map attr = 2; -} diff --git a/inc/register/proto/tensorflow/function.proto b/inc/register/proto/tensorflow/function.proto deleted file mode 100644 index 075897c689391c68cc3795687be2f28d118cd196..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/function.proto +++ /dev/null @@ -1,100 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "FunctionProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "node_def.proto"; -import "op_def.proto"; - -// A library is a set of named functions. -message FunctionDefLibrary { - repeated FunctionDef function = 1; - repeated GradientDef gradient = 2; -} - -// A function can be instantiated when the runtime can bind every attr -// with a value. When a GraphDef has a call to a function, it must -// have binding for every attr defined in the signature. -// * device spec, etc. -message FunctionDef { - // The definition of the function's name, arguments, return values, - // attrs etc. - OpDef signature = 1; - - // Attributes specific to this function definition. - map attr = 5; - - // NOTE: field id 2 deleted on Jan 11, 2017, GraphDef version 21. - reserved 2; - - // In both of the following fields, there is the need to specify an - // output that is used as either the input to another node (in - // `node_def`) or as a return value of the function (in `ret`). - // Unlike the NodeDefs in GraphDef, we need to be able to specify a - // list in some cases (instead of just single outputs). Also, we - // need to be able to deal with lists of unknown length (so the - // output index may not be known at function definition time). So - // we use the following format instead: - // * "fun_in" where "fun_in" is the name of a function input arg in - // the `signature` field above. This represents that input, whether - // it is a single tensor or a list. - // * "fun_in:0" gives the first element of a function input arg (a - // non-list input is considered a list of length 1 for these - // purposes). - // * "node:out" where "node" is the name of a node in `node_def` and - // "out" is the name one of its op's output arguments (the name - // comes from the OpDef of the node's op). This represents that - // node's output, whether it is a single tensor or a list. - // Note: We enforce that an op's output arguments are never - // renamed in the backwards-compatibility test. - // * "node:out:0" gives the first element of a node output arg (a - // non-list output is considered a list of length 1 for these - // purposes). - // - // NOT CURRENTLY SUPPORTED (but may be in the future): - // * "node:out:-1" gives last element in a node output list - // * "node:out:1:" gives a list with all but the first element in a - // node output list - // * "node:out::-1" gives a list with all but the last element in a - // node output list - - // The body of the function. Unlike the NodeDefs in a GraphDef, attrs - // may have values of type `placeholder` and the `input` field uses - // the "output" format above. - - // By convention, "op" in node_def is resolved by consulting with a - // user-defined library first. If not resolved, "func" is assumed to - // be a builtin op. - repeated NodeDef node_def = 3; - - // A mapping from the output arg names from `signature` to the - // outputs from `node_def` that should be returned by the function. 
- map ret = 4; -} - -// GradientDef defines the gradient function of a function defined in -// a function library. -// -// A gradient function g (specified by gradient_func) for a function f -// (specified by function_name) must follow the following: -// -// The function 'f' must be a numerical function which takes N inputs -// and produces M outputs. Its gradient function 'g', which is a -// function taking N + M inputs and produces N outputs. -// -// I.e. if we have -// (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), -// then, g is -// (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, -// dL/dy1, dL/dy2, ..., dL/dy_M), -// where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the -// loss function). dL/dx_i is the partial derivative of L with respect -// to x_i. -message GradientDef { - string function_name = 1; // The function name. - string gradient_func = 2; // The gradient function's name. -} diff --git a/inc/register/proto/tensorflow/graph.proto b/inc/register/proto/tensorflow/graph.proto deleted file mode 100644 index d639a7d6c618e2278e7eb60b0ff3bd0743011f0c..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/graph.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "GraphProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "node_def.proto"; -import "function.proto"; -import "versions.proto"; - -// Represents the graph of operations -message GraphDef { - repeated NodeDef node = 1; - - // Compatibility versions of the graph. See core/public/version.h for version - // history. The GraphDef version is distinct from the TensorFlow version, and - // each release of TensorFlow will support a range of GraphDef versions. - VersionDef versions = 4; - - // Deprecated single version field; use versions above instead. Since all - // GraphDef changes before "versions" was introduced were forward - // compatible, this field is entirely ignored. - int32 version = 3 [deprecated = true]; - - // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. - // - // "library" provides user-defined functions. - // - // Naming: - // * library.function.name are in a flat namespace. - // NOTE: We may need to change it to be hierarchical to support - // different orgs. E.g., - // { "/google/nn", { ... }}, - // { "/google/vision", { ... }} - // { "/org_foo/module_bar", { ... }} - // map named_lib; - // * If node[i].op is the name of one function in "library", - // node[i] is deemed as a function call. Otherwise, node[i].op - // must be a primitive operation supported by the runtime. - // - // - // Function call semantics: - // - // * The callee may start execution as soon as some of its inputs - // are ready. The caller may want to use Tuple() mechanism to - // ensure all inputs are ready in the same time. - // - // * The consumer of return values may start executing as soon as - // the return values the consumer depends on are ready. The - // consumer may want to use Tuple() mechanism to ensure the - // consumer does not start until all return values of the callee - // function are ready. 
- FunctionDefLibrary library = 2; -}; diff --git a/inc/register/proto/tensorflow/node_def.proto b/inc/register/proto/tensorflow/node_def.proto deleted file mode 100644 index b9bc97ee6a37fb3dad24a6094a8ca651bde58d1c..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/node_def.proto +++ /dev/null @@ -1,63 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "NodeProto"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; - -message NodeDef { - // The name given to this operator. Used for naming inputs, - // logging, visualization, etc. Unique within a single GraphDef. - // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*". - string name = 1; - - // The operation name. There may be custom parameters in attrs. - // Op names starting with an underscore are reserved for internal use. - string op = 2; - - // Each input is "node:src_output" with "node" being a string name and - // "src_output" indicating which output tensor to use from "node". If - // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs - // may optionally be followed by control inputs that have the format - // "^node". - repeated string input = 3; - - // A (possibly partial) specification for the device on which this - // node should be placed. - // The expected syntax for this string is as follows: - // - // DEVICE_SPEC ::= PARTIAL_SPEC - // - // PARTIAL_SPEC ::= ("/" CONSTRAINT) * - // CONSTRAINT ::= ("job:" JOB_NAME) - // | ("replica:" [1-9][0-9]*) - // | ("task:" [1-9][0-9]*) - // | ("device:" [A-Za-z]* ":" ([1-9][0-9]* | "*") ) - // - // Valid values for this string include: - // * "/job:worker/replica:0/task:1/device:GPU:3" (full specification) - // * "/job:worker/device:GPU:3" (partial specification) - // * "" (no specification) - // - // If the constraints do not resolve to a single device (or if this - // field is empty or not present), the runtime will attempt to - // choose a device automatically. - string device = 4; - - // Operation-specific graph-construction-time configuration. - // Note that this should include all attrs defined in the - // corresponding OpDef, including those with a value matching - // the default -- this allows the default to change and makes - // NodeDefs easier to interpret on their own. However, if - // an attr with a default is not specified in this list, the - // default will be used. - // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and - // one of the names from the corresponding OpDef's attr field). - // The values must have a type matching the corresponding OpDef - // attr's type field. - // Add some examples here showing best practices. - map attr = 5; -}; diff --git a/inc/register/proto/tensorflow/op_def.proto b/inc/register/proto/tensorflow/op_def.proto deleted file mode 100644 index 3485d0453040766ce5434482c4c328b088959528..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/op_def.proto +++ /dev/null @@ -1,164 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "OpDefProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "attr_value.proto"; -import "types.proto"; - -// Defines an operation. A NodeDef in a GraphDef specifies an Op by -// using the "op" field which should match the name of a OpDef. 
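The NodeDef conventions documented just above — `"node:src_output"` inputs with an optional `:0` suffix, `"^node"` control inputs, the `/job:.../device:...` placement grammar, and attrs keyed by the corresponding OpDef attr names — look like this when assembled with upstream TensorFlow's generated classes (the node and input names below are made up for illustration):

```python
# Illustrative only: builds a NodeDef following the conventions documented
# in the node_def.proto removed above, using upstream TensorFlow's classes.
from tensorflow.core.framework import node_def_pb2, types_pb2

node = node_def_pb2.NodeDef(
    name="dense/MatMul",
    op="MatMul",                        # should match an OpDef name
    input=["dense/kernel/read:0",       # "node:src_output" form
           "flatten/Reshape",           # the ":0" suffix may be omitted
           "^init_all_tables"],         # "^node" marks a control input
    device="/job:worker/replica:0/task:1/device:GPU:3",
)
node.attr["T"].type = types_pb2.DT_FLOAT  # attrs keyed by OpDef attr name
```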
-// LINT.IfChange -message OpDef { - // Op names starting with an underscore are reserved for internal use. - // Names should be CamelCase and match the regexp "[A-Z][a-zA-Z0-9_]*". - string name = 1; - - // For describing inputs and outputs. - message ArgDef { - // Name for the input/output. Should match the regexp "[a-z][a-z0-9_]*". - string name = 1; - - // Human readable description. - string description = 2; - - // Describes the type of one or more tensors that are accepted/produced - // by this input/output arg. The only legal combinations are: - // * For a single tensor: either the "type" field is set or the - // "type_attr" field is set to the name of an attr with type "type". - // * For a sequence of tensors with the same type: the "number_attr" - // field will be set to the name of an attr with type "int", and - // either the "type" or "type_attr" field will be set as for - // single tensors. - // * For a sequence of tensors, the "type_list_attr" field will be set - // to the name of an attr with type "list(type)". - DataType type = 3; - string type_attr = 4; // if specified, attr must have type "type" - string number_attr = 5; // if specified, attr must have type "int" - // If specified, attr must have type "list(type)", and none of - // type, type_attr, and number_attr may be specified. - string type_list_attr = 6; - - // For inputs: if true, the inputs are required to be refs. - // By default, inputs can be either refs or non-refs. - // For outputs: if true, outputs are refs, otherwise they are not. - bool is_ref = 16; - }; - - // Description of the input(s). - repeated ArgDef input_arg = 2; - - // Description of the output(s). - repeated ArgDef output_arg = 3; - - // Description of the graph-construction-time configuration of this - // Op. That is to say, this describes the attr fields that will - // be specified in the NodeDef. - message AttrDef { - // A descriptive name for the argument. May be used, e.g. by the - // Python client, as a keyword argument name, and so should match - // the regexp "[a-z][a-z0-9_]+". - string name = 1; - - // One of the type names from attr_value.proto ("string", "list(string)", - // "int", etc.). - string type = 2; - - // A reasonable default for this attribute if the user does not supply - // a value. If not specified, the user must supply a value. - AttrValue default_value = 3; - - // Human-readable description. - string description = 4; - - - // --- Constraints --- - // These constraints are only in effect if specified. Default is no - // constraints. - - // For type == "int", this is a minimum value. For "list(___)" - // types, this is the minimum length. - bool has_minimum = 5; - int64 minimum = 6; - - // The set of allowed values. Has type that is the "list" version - // of the "type" field above (uses the "list" field of AttrValue). - // If type == "type" or "list(type)" above, then the "type" field - // of "allowed_values.list" has the set of allowed DataTypes. - // If type == "string" or "list(string)", then the "s" field of - // "allowed_values.list" has the set of allowed strings. - AttrValue allowed_values = 7; - } - repeated AttrDef attr = 4; - - // Optional deprecation based on GraphDef versions. - OpDeprecation deprecation = 8; - - // One-line human-readable description of what the Op does. - string summary = 5; - - // Additional, longer human-readable description of what the Op does. 
- string description = 6; - - // ------------------------------------------------------------------------- - // Which optimizations this operation can participate in. - - // True if the operation is commutative ("op(a,b) == op(b,a)" for all inputs) - bool is_commutative = 18; - - // If is_aggregate is true, then this operation accepts N >= 2 - // inputs and produces 1 output all of the same type. Should be - // associative and commutative, and produce output with the same - // shape as the input. The optimizer may replace an aggregate op - // taking input from multiple devices with a tree of aggregate ops - // that aggregate locally within each device (and possibly within - // groups of nearby devices) before communicating. - bool is_aggregate = 16; // for things like add - - // Other optimizations go here, like - // can_alias_input, rewrite_when_output_unused, partitioning_strategy, etc. - - // ------------------------------------------------------------------------- - // Optimization constraints. - - // Ops are marked as stateful if their behavior depends on some state beyond - // their input tensors (e.g. variable reading op) or if they have - // a side-effect (e.g. printing or asserting ops). Equivalently, stateless ops - // must always produce the same output for the same input and have - // no side-effects. - // - // By default Ops may be moved between devices. Stateful ops should - // either not be moved, or should only be moved if that state can also - // be moved (e.g. via some sort of save / restore). - // Stateful ops are guaranteed to never be optimized away by Common - // Subexpression Elimination (CSE). - bool is_stateful = 17; // for things like variables, queue - - // ------------------------------------------------------------------------- - // Non-standard options. - - // By default, all inputs to an Op must be initialized Tensors. Ops - // that may initialize tensors for the first time should set this - // field to true, to allow the Op to take an uninitialized Tensor as - // input. - bool allows_uninitialized_input = 19; // for Assign, etc. -}; -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/core/framework/op_def_util.cc) - -// Information about version-dependent deprecation of an op -message OpDeprecation { - // First GraphDef version at which the op is disallowed. - int32 version = 1; - - // Explanation of why it was deprecated and what to use instead. - string explanation = 2; -}; - -// A collection of OpDefs -message OpList { - repeated OpDef op = 1; -}; diff --git a/inc/register/proto/tensorflow/resource_handle.proto b/inc/register/proto/tensorflow/resource_handle.proto deleted file mode 100644 index a34523512e0f698ae92f493ac79336e395446ab0..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/resource_handle.proto +++ /dev/null @@ -1,29 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "ResourceHandle"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Protocol buffer representing a handle to a tensorflow resource. Handles are -// not valid across executions, but can be serialized back and forth from within -// a single run. -message ResourceHandleProto { - // Unique name for the device containing the resource. - string device = 1; - - // Container in which this resource is placed. - string container = 2; - - // Unique name of this resource. - string name = 3; - - // Hash code for the type of the resource. 
Is only valid in the same device - // and in the same execution. - uint64 hash_code = 4; - - // For debug-only, the name of the type pointed to by this handle, if - // available. - string maybe_type_name = 5; -}; diff --git a/inc/register/proto/tensorflow/tensor.proto b/inc/register/proto/tensorflow/tensor.proto deleted file mode 100644 index d0a4d024cc551cb20a60ecbd2396c66badd297d2..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/tensor.proto +++ /dev/null @@ -1,94 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TensorProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -import "resource_handle.proto"; -import "tensor_shape.proto"; -import "types.proto"; - -// Protocol buffer representing a tensor. -message TensorProto { - DataType dtype = 1; - - // Shape of the tensor. - TensorShapeProto tensor_shape = 2; - - // Only one of the representations below is set, one of "tensor_contents" and - // the "xxx_val" attributes. We are not using oneof because as oneofs cannot - // contain repeated fields it would require another extra set of messages. - - // Version number. - // - // In version 0, if the "repeated xxx" representations contain only one - // element, that element is repeated to fill the shape. This makes it easy - // to represent a constant Tensor with a single value. - int32 version_number = 3; - - // Serialized raw tensor content from either Tensor::AsProtoTensorContent or - // memcpy in tensorflow::grpc::EncodeTensorToByteBuffer. This representation - // can be used for all tensor types. The purpose of this representation is to - // reduce serialization overhead during RPC call by avoiding serialization of - // many repeated small items. - bytes tensor_content = 4; - - // Type specific representations that make it easy to create tensor protos in - // all languages. Only the representation corresponding to "dtype" can - // be set. The values hold the flattened representation of the tensor in - // row major order. - - // DT_HALF, DT_BFLOAT16. Note that since protobuf has no int16 type, we'll - // have some pointless zero padding for each value here. - repeated int32 half_val = 13 [packed = true]; - - // DT_FLOAT. - repeated float float_val = 5 [packed = true]; - - // DT_DOUBLE. - repeated double double_val = 6 [packed = true]; - - // DT_INT32, DT_INT16, DT_INT8, DT_UINT8. - repeated int32 int_val = 7 [packed = true]; - - // DT_STRING - repeated bytes string_val = 8; - - // DT_COMPLEX64. scomplex_val(2*i) and scomplex_val(2*i+1) are real - // and imaginary parts of i-th single precision complex. - repeated float scomplex_val = 9 [packed = true]; - - // DT_INT64 - repeated int64 int64_val = 10 [packed = true]; - - // DT_BOOL - repeated bool bool_val = 11 [packed = true]; - - // DT_COMPLEX128. dcomplex_val(2*i) and dcomplex_val(2*i+1) are real - // and imaginary parts of i-th double precision complex. - repeated double dcomplex_val = 12 [packed = true]; - - // DT_RESOURCE - repeated ResourceHandleProto resource_handle_val = 14; - - // DT_VARIANT - repeated VariantTensorDataProto variant_val = 15; - - // DT_UINT32 - repeated uint32 uint32_val = 16 [packed = true]; - - // DT_UINT64 - repeated uint64 uint64_val = 17 [packed = true]; -}; - -// Protocol buffer representing the serialization format of DT_VARIANT tensors. -message VariantTensorDataProto { - // Name of the type of objects being serialized. 
- string type_name = 1; - // Portions of the object that are not Tensors. - bytes metadata = 2; - // Tensors contained within objects being serialized. - repeated TensorProto tensors = 3; -} diff --git a/inc/register/proto/tensorflow/tensor_shape.proto b/inc/register/proto/tensorflow/tensor_shape.proto deleted file mode 100644 index 4225a2e373e0ca643b8d61bc6044251c4f5a3422..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/tensor_shape.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Protocol buffer representing the shape of tensors. - -syntax = "proto3"; -option cc_enable_arenas = true; -option java_outer_classname = "TensorShapeProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -package domi.tensorflow; - -// Dimensions of a tensor. -message TensorShapeProto { - // One dimension of the tensor. - message Dim { - // Size of the tensor in that dimension. - // This value must be >= -1, but values of -1 are reserved for "unknown" - // shapes (values of -1 mean "unknown" dimension). Certain wrappers - // that work with TensorShapeProto may fail at runtime when deserializing - // a TensorShapeProto containing a dim value of -1. - int64 size = 1; - - // Optional name of the tensor dimension. - string name = 2; - }; - - // Dimensions of the tensor, such as {"input", 30}, {"output", 40} - // for a 30 x 40 2D tensor. If an entry has size -1, this - // corresponds to a dimension of unknown size. The names are - // optional. - // - // The order of entries in "dim" matters: It indicates the layout of the - // values in the tensor in-memory representation. - // - // The first entry in "dim" is the outermost dimension used to layout the - // values, the last entry is the innermost dimension. This matches the - // in-memory layout of RowMajor Eigen tensors. - // - // If "dim.size()" > 0, "unknown_rank" must be false. - repeated Dim dim = 2; - - // If true, the number of dimensions in the shape is unknown. - // - // If true, "dim.size()" must be 0. - bool unknown_rank = 3; -}; diff --git a/inc/register/proto/tensorflow/types.proto b/inc/register/proto/tensorflow/types.proto deleted file mode 100644 index ba7a72b302b93bde912c26a4dde71d5a57dfc04f..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/types.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "TypesProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// LINT.IfChange -enum DataType { - // Not a legal value for DataType. Used to indicate a DataType field - // has not been set. - DT_INVALID = 0; - - // Data types that all computation devices are expected to be - // capable to support. - DT_FLOAT = 1; - DT_DOUBLE = 2; - DT_INT32 = 3; - DT_UINT8 = 4; - DT_INT16 = 5; - DT_INT8 = 6; - DT_STRING = 7; - DT_COMPLEX64 = 8; // Single-precision complex - DT_INT64 = 9; - DT_BOOL = 10; - DT_QINT8 = 11; // Quantized int8 - DT_QUINT8 = 12; // Quantized uint8 - DT_QINT32 = 13; // Quantized int32 - DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops. - DT_QINT16 = 15; // Quantized int16 - DT_QUINT16 = 16; // Quantized uint16 - DT_UINT16 = 17; - DT_COMPLEX128 = 18; // Double-precision complex - DT_HALF = 19; - DT_RESOURCE = 20; - DT_VARIANT = 21; // Arbitrary C++ data types - DT_UINT32 = 22; - DT_UINT64 = 23; - - // Do not use! These are only for parameters. 
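The removed tensor.proto, tensor_shape.proto and types.proto definitions fit together as shown in this minimal sketch, which fills a 2x3 DT_FLOAT TensorProto through the protobuf-generated API (the generated header path is an assumption):

```cpp
#include "proto/tensorflow/tensor.pb.h"  // assumed generated header for tensor.proto

domi::tensorflow::TensorProto MakeFloatTensor() {
  domi::tensorflow::TensorProto t;
  t.set_dtype(domi::tensorflow::DT_FLOAT);        // from the DataType enum above
  auto *shape = t.mutable_tensor_shape();
  shape->add_dim()->set_size(2);                  // outermost dimension first
  shape->add_dim()->set_size(3);
  for (int i = 0; i < 6; ++i) {
    t.add_float_val(static_cast<float>(i));       // flattened values in row-major order
  }
  return t;
}
```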
Every enum above - // should have a corresponding value below (verified by types_test). - DT_FLOAT_REF = 101; - DT_DOUBLE_REF = 102; - DT_INT32_REF = 103; - DT_UINT8_REF = 104; - DT_INT16_REF = 105; - DT_INT8_REF = 106; - DT_STRING_REF = 107; - DT_COMPLEX64_REF = 108; - DT_INT64_REF = 109; - DT_BOOL_REF = 110; - DT_QINT8_REF = 111; - DT_QUINT8_REF = 112; - DT_QINT32_REF = 113; - DT_BFLOAT16_REF = 114; - DT_QINT16_REF = 115; - DT_QUINT16_REF = 116; - DT_UINT16_REF = 117; - DT_COMPLEX128_REF = 118; - DT_HALF_REF = 119; - DT_RESOURCE_REF = 120; - DT_VARIANT_REF = 121; - DT_UINT32_REF = 122; - DT_UINT64_REF = 123; -} -// LINT.ThenChange( -// https://www.tensorflow.org/code/tensorflow/c/c_api.h, -// https://www.tensorflow.org/code/tensorflow/go/tensor.go, -// https://www.tensorflow.org/code/tensorflow/core/framework/tensor.cc, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.h, -// https://www.tensorflow.org/code/tensorflow/core/framework/types.cc, -// https://www.tensorflow.org/code/tensorflow/python/framework/dtypes.py, -// https://www.tensorflow.org/code/tensorflow/python/framework/function.py) diff --git a/inc/register/proto/tensorflow/versions.proto b/inc/register/proto/tensorflow/versions.proto deleted file mode 100644 index 48061218af7524b375bad328c18e7d884bd5849d..0000000000000000000000000000000000000000 --- a/inc/register/proto/tensorflow/versions.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; - -package domi.tensorflow; -option cc_enable_arenas = true; -option java_outer_classname = "VersionsProtos"; -option java_multiple_files = true; -option java_package = "org.tensorflow.framework"; - -// Version information for a piece of serialized data -// -// There are different types of versions for each type of data -// (GraphDef, etc.), but they all have the same common shape -// described here. -// -// Each consumer has "consumer" and "min_producer" versions (specified -// elsewhere). A consumer is allowed to consume this data if -// -// producer >= min_producer -// consumer >= min_consumer -// consumer not in bad_consumers -// -message VersionDef { - // The version of the code that produced this data. - int32 producer = 1; - - // Any consumer below this version is not allowed to consume this data. - int32 min_consumer = 2; - - // Specific consumer versions which are disallowed (e.g. due to bugs). - repeated int32 bad_consumers = 3; -}; diff --git a/inc/register/register.h b/inc/register/register.h deleted file mode 100644 index 32adaea06175641013601eb4287e2a600238dc69..0000000000000000000000000000000000000000 --- a/inc/register/register.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef INC_REGISTER_REGISTRY_H_ -#define INC_REGISTER_REGISTRY_H_ - -#include "external/register/register.h" - -namespace ge { -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY HostCpuOp { - public: - HostCpuOp() = default; - virtual ~HostCpuOp() = default; - - virtual graphStatus Compute(Operator &op, - const std::map &inputs, - std::map &outputs) = 0; -}; - -class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY HostCpuOpRegistrar { - public: - HostCpuOpRegistrar(const char *op_type, HostCpuOp *(*create_fn)()); - ~HostCpuOpRegistrar() = default; -}; - -#define REGISTER_HOST_CPU_OP_BUILDER(name, op) \ - REGISTER_HOST_CPU_OP_BUILDER_UNIQ_HELPER(__COUNTER__, name, op) - -#define REGISTER_HOST_CPU_OP_BUILDER_UNIQ_HELPER(ctr, name, op) \ - REGISTER_HOST_CPU_OP_BUILDER_UNIQ(ctr, name, op) - -#define REGISTER_HOST_CPU_OP_BUILDER_UNIQ(ctr, name, op) \ - static ::ge::HostCpuOpRegistrar register_host_cpu_op##ctr \ - __attribute__((unused)) = \ - ::ge::HostCpuOpRegistrar(name, []()->::ge::HostCpuOp* { \ - return new (std::nothrow) op(); \ - }) -} // namespace ge - -#endif //INC_REGISTER_REGISTRY_H_ diff --git a/inc/register/register_format_transfer.h b/inc/register/register_format_transfer.h deleted file mode 100644 index 5cbf4ab4238d882551428fc70d51894a09f5e142..0000000000000000000000000000000000000000 --- a/inc/register/register_format_transfer.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ -#define INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ - -#include -#include -#include - -#include "external/graph/types.h" -#include "ge/ge_api_error_codes.h" - -namespace ge { -namespace formats { -struct TransArgs { - const uint8_t *data; - Format src_format; - Format dst_format; - // For scenes that need to supplement the shape, for example, 5D to 4D - // It is not possible to convert the format normally if you only get the src_shape, - // and must get the shape before you mend the shape. 
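The removed register.h above defines the host CPU op registry. A minimal usage sketch follows; the class name is hypothetical and the map template arguments (stripped in the listing) are assumed to be std::string keys with ge::Tensor values:

```cpp
#include <map>
#include <string>
#include "register/register.h"   // assumed include path for the header above

namespace {
class NoOpHostCpuOp : public ge::HostCpuOp {
 public:
  ge::graphStatus Compute(ge::Operator &op,
                          const std::map<std::string, const ge::Tensor> &inputs,
                          std::map<std::string, ge::Tensor> &outputs) override {
    (void)op;
    (void)inputs;
    outputs.clear();              // a real op would fill its named outputs here
    return ge::GRAPH_SUCCESS;
  }
};

// Expands to a static ge::HostCpuOpRegistrar whose factory creates NoOpHostCpuOp.
REGISTER_HOST_CPU_OP_BUILDER("NoOp", NoOpHostCpuOp);
}  // namespace
```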
- // So the parameters here need to be passed in both src_shape and dst_shape - std::vector src_shape; - std::vector dst_shape; - DataType src_data_type; -}; - -struct TransResult { - std::shared_ptr data; - // data length in bytes - size_t length; -}; - -class FormatTransfer { - public: - virtual ~FormatTransfer() = default; - virtual Status TransFormat(const TransArgs &args, TransResult &result) = 0; - virtual Status TransShape(Format src_format, const std::vector &src_shape, DataType data_type, - Format dst_format, std::vector &dst_shape) = 0; -}; - -using FormatTransferBuilder = std::function()>; - -class FormatTransferRegister { - public: - FormatTransferRegister(FormatTransferBuilder builder, Format src, Format dst); - ~FormatTransferRegister() = default; -}; - -#define REGISTER_FORMAT_TRANSFER(TransferClass, format1, format2) \ - namespace { \ - FormatTransferRegister format_transfer_register_##TransferClass##format1##format2( \ - []() { return std::make_shared(); }, format1, format2); \ - } - -/// Build a formattransfer according to 'args' -/// @param args -/// @param result -/// @return -std::shared_ptr BuildFormatTransfer(const TransArgs &args); - -bool FormatTransferExists(const TransArgs &args); -} // namespace formats -} // namespace ge -#endif // INC_REGISTER_REGISTER_FORMAT_TRANSFER_H_ \ No newline at end of file diff --git a/inc/register/scope/scope_graph_impl.h b/inc/register/scope/scope_graph_impl.h deleted file mode 100644 index 7d022d20f2e9618a8475c33b5378f3ca02ad4d00..0000000000000000000000000000000000000000 --- a/inc/register/scope/scope_graph_impl.h +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
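Similarly, the removed register_format_transfer.h is used by deriving from FormatTransfer and registering the class for a (src, dst) format pair. A sketch under assumptions: the vector element type is int64_t (template arguments are stripped in the listing), SUCCESS/FAILED come from ge_api_error_codes.h, the NCHW/NHWC constants exist in ge::Format, and the transfer class itself is hypothetical:

```cpp
#include <vector>
#include "register/register_format_transfer.h"   // assumed include path for the header above

namespace ge {
namespace formats {
class FormatTransferNchwToNhwc : public FormatTransfer {
 public:
  Status TransFormat(const TransArgs &args, TransResult &result) override {
    (void)args;
    (void)result;
    return SUCCESS;   // a real transfer would permute args.data into result.data
  }
  Status TransShape(Format src_format, const std::vector<int64_t> &src_shape, DataType data_type,
                    Format dst_format, std::vector<int64_t> &dst_shape) override {
    (void)src_format; (void)data_type; (void)dst_format;
    if (src_shape.size() != 4U) {
      return FAILED;                                    // this sketch only handles 4-D NCHW shapes
    }
    dst_shape = {src_shape[0], src_shape[2], src_shape[3], src_shape[1]};  // NCHW -> NHWC
    return SUCCESS;
  }
};

// Registers a builder so BuildFormatTransfer() can find this transfer for the pair.
REGISTER_FORMAT_TRANSFER(FormatTransferNchwToNhwc, FORMAT_NCHW, FORMAT_NHWC);
}  // namespace formats
}  // namespace ge
```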
- */ - -#ifndef REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" -#include "proto/tensorflow/node_def.pb.h" -#include "proto/tensorflow/graph.pb.h" - -namespace ge { -class Scope::ScopeImpl { - public: - explicit ScopeImpl(const std::string &name, const std::string &sub_type = "", Scope *father_scope = nullptr) - : name_(name), sub_type_(sub_type), father_scope_(father_scope) {} - ~ScopeImpl(); - - std::string Name() const { return name_; } - std::string SubType() const { return sub_type_; } - void SetSubType(const std::string &sub_type) { sub_type_ = sub_type; } - void ClearTypeAndSubType(); - void AddNode(ge::OperatorPtr node_def); - std::vector Nodes() const { return nodes_; } - std::vector AllNodes() const; - std::map AllNodesMap() const; - void AddSubScope(Scope *scope) { sub_scopes_[scope->Name()] = scope; } - Scope *GetSubScope(const std::string &scope_name) const; - std::map GetSubScopes() const { return sub_scopes_; } - std::vector GetAllSubScopes() const; - int32_t GetOpTypeNum(const std::string &op_type) const; - void OpsNumInc(const std::string &op_type); - std::string LastName() const; - const Scope *GetFatherScope() const { return father_scope_; } - // trim scope_index - static std::string TrimScopeIndex(const std::string &scope_name); - - private: - std::string name_; - std::string sub_type_; - Scope *father_scope_; - std::map op_nums_; - std::map sub_scopes_; - std::vector nodes_; -}; - -class FusionScopesResult::FusionScopesResultImpl { - public: - FusionScopesResultImpl() {} - ~FusionScopesResultImpl() {}; - void SetName(const std::string &name) { name_ = name; } - void SetType(const std::string &type) { type_ = type; } - void SetDescription(const std::string &description) { description_ = description; } - std::string Name() const { return name_; } - std::string Type() const { return type_; } - std::string Description() const { return description_; } - void AddNodes(std::vector nodes); - std::vector Nodes() const { return nodes_; } - void AddScopes(const std::vector &scopes) { scopes_.insert(scopes_.end(), scopes.begin(), scopes.end()); } - std::vector Scopes() const { return scopes_; } - std::map> GetInputs() const { return inputs_; } - std::map> GetOutputs() const { return outputs_; } - void InsertInputs(const std::string &inner_op_name, const std::vector &index_map); - void InsertOutputs(const std::string &inner_op_name, const std::vector &index_map); - bool FindNodes(const std::string &node_name) const; - bool FindScopes(const std::string &scope_name) const; - - private: - std::string name_; - std::string type_; - std::string description_; - std::vector scopes_; - std::vector nodes_; - std::map> inputs_; - std::map> outputs_; -}; - -class ScopeTree::ScopeTreeImpl { - public: - ScopeTreeImpl() : root_(nullptr) {} - ScopeTreeImpl(const ScopeTreeImpl &) = delete; - ScopeTreeImpl &operator=(const ScopeTreeImpl &) = delete; - Status Init(); - ~ScopeTreeImpl(); - - void AddNodeToScope(ge::OperatorPtr node_def); - std::vector GetAllScopes() const { return scopes_; } - const Scope *Root() const { return root_; } - - private: - std::vector SplitNodeName(const std::string &node_name, char delim) const; - Scope *root_; - std::vector scopes_; -}; - -struct ScopeFusionOpInfo { - std::string node_name; - std::string fusion_node_name; - std::string fusion_op_type; - std::string description; - bool scope_pass = true; -}; - -class ScopeGraph::ScopeGraphImpl { - public: - ScopeGraphImpl() : 
scope_tree_(nullptr) {} - ScopeGraphImpl(const ScopeGraphImpl &) = delete; - ScopeGraphImpl &operator=(const ScopeGraphImpl &) = delete; - Status Init(); - ~ScopeGraphImpl(); - - const ScopeTree *GetScopeTree() const { return scope_tree_; } - void BuildScopeGraph(domi::tensorflow::GraphDef *graph_def); - void AddFusionScopesResult(FusionScopesResult *result); - std::map FusionScopesResults() const { return fusion_results_; } - FusionScopesResult *GetFusionScopesResults(const domi::tensorflow::NodeDef *node_def) const; - std::map GetNodesMap() const { return nodes_map_; } - bool IsFusionOpChild(const std::string &node_name, std::vector &info_list); - bool FusionOpChildIgnore(const ScopeFusionOpInfo &info); - bool IsFusionOp(const domi::tensorflow::NodeDef *node_def); - Status GetInputOrOutputIndex(const ScopeFusionOpInfo &info, int32_t old_index, bool input, int32_t &new_index); - - private: - std::vector GetFusionResultInputOrOutput(const ScopeFusionOpInfo &info, bool input); // input:true,output:false - void CheckScopesResult(FusionScopesResult *fusion_node); - std::map fusion_results_; - std::map nodes_map_; - ScopeTree *scope_tree_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_GRAPH_IMPL_H_ \ No newline at end of file diff --git a/inc/register/scope/scope_pass_impl.h b/inc/register/scope/scope_pass_impl.h deleted file mode 100644 index 52f1328845ff2abef2be21352c0e1089a497665d..0000000000000000000000000000000000000000 --- a/inc/register/scope/scope_pass_impl.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" - -namespace ge { -class ScopesResult::ScopesResultImpl { - public: - void SetScopes(std::vector &scopes) { scopes_ = scopes; } - std::vector GetScopes() const { return scopes_; } - void SetNodes(std::vector nodes) { nodes_ = nodes; } - std::vector GetNodes() const { return nodes_; } - - private: - std::vector scopes_; // multiple scopes - std::vector nodes_; // op outside of scope -}; - -class ScopeBasePass::ScopeBasePassImpl { - public: - ScopeBasePassImpl(ScopeBasePass *parent) : parent_(parent) {} - virtual ~ScopeBasePassImpl(); - - Status Run(std::shared_ptr &scope_graph); - - private: - Status AddFusionScopesResultToScopeGraph(std::shared_ptr &scope_graph, - std::vector &scope_results); - // Match rules one by one, support multiple sets of matching rules, and finally output a single scope - // Note: This function does not have to be rewritten. - // In order to match the fusion rules designed by you better, - // you can implement your specific versions separately. 
- bool MatchAllBatches(const ScopeTree *scope_tree, std::vector &results); - - bool MatchOneBatch(const ScopeTree *scope_tree, const std::vector &patternlist, - std::vector &results); - bool MatchOneScope(const ScopePattern *pattern, Scope *scope, std::vector &results); - Status PrintFusionScopeInfo(std::shared_ptr &scope_graph); - - private: - std::vector patterns_; - ScopeBasePass *parent_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_PASS_IMPL_H_ \ No newline at end of file diff --git a/inc/register/scope/scope_pass_registry_impl.h b/inc/register/scope/scope_pass_registry_impl.h deleted file mode 100644 index 9e68dba06bda7397c18aafe4f813d5cf182d7804..0000000000000000000000000000000000000000 --- a/inc/register/scope/scope_pass_registry_impl.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" -#include - -namespace ge { -struct CreatePassFnPack; -class ScopeFusionPassRegistry::ScopeFusionPassRegistryImpl { - public: - void RegisterScopeFusionPass(const std::string &pass_name, ScopeFusionPassRegistry::CreateFn create_fn, - bool is_general); - ScopeFusionPassRegistry::CreateFn GetCreateFn(const std::string &pass_name); - std::unique_ptr CreateScopeFusionPass(const std::string &pass_name); - std::vector GetAllRegisteredPasses(); - bool SetPassEnableFlag(const std::string pass_name, const bool flag); - - private: - std::mutex mu_; - std::vector pass_names_; // In the order of user registration - std::map create_fn_packs_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_REGISTRY_IMPL_H_ \ No newline at end of file diff --git a/inc/register/scope/scope_pattern_impl.h b/inc/register/scope/scope_pattern_impl.h deleted file mode 100644 index 1c5a7bce3f42d8cab16a37f668140d7fda79bb79..0000000000000000000000000000000000000000 --- a/inc/register/scope/scope_pattern_impl.h +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ -#define REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ - -#include "external/register/scope/scope_fusion_pass_register.h" - -namespace ge { -class ScopeAttrValue::ScopeAttrValueImpl { - public: - ScopeAttrValueImpl() : int_value_(0), float_value_(0.0), string_value_(""), bool_value_(false) {} - ~ScopeAttrValueImpl() {} - - void SetIntValue(int64_t value) { int_value_ = value; } - void SetFloatValue(float value) { float_value_ = value; } - void SetStringValue(std::string value) { string_value_ = value; } - void SetBoolValue(bool value) { bool_value_ = value; } - int64_t GetIntValue() { return int_value_; } - float GetFloatValue() { return float_value_; } - std::string GetStrValue() { return string_value_; } - bool GetBoolValue() { return bool_value_; } - - private: - int64_t int_value_; - float float_value_; - std::string string_value_; - bool bool_value_; -}; - -class NodeOpTypeFeature::NodeOpTypeFeatureImpl : ScopeBaseFeature { - public: - NodeOpTypeFeatureImpl(std::string nodeType, int num, int step = 0) : node_type_(nodeType), num_(num), step_(step) {} - ~NodeOpTypeFeatureImpl() {} - bool Match(const Scope *scope) override; - - public: - std::string node_type_; // Node type - int num_; // Node number - int step_; // step -}; - -class NodeAttrFeature::NodeAttrFeatureImpl : ScopeBaseFeature { - public: - NodeAttrFeatureImpl(std::string nodeType, std::string attr_name, ge::DataType datatype, ScopeAttrValue attr_value) - : node_type_(nodeType), attr_name_(attr_name), datatype_(datatype), attr_value_(attr_value) {} - ~NodeAttrFeatureImpl() {} - bool Match(const Scope *scope) override; - - public: - std::string node_type_; // Node type - std::string attr_name_; // attribute name - ge::DataType datatype_; // datatype - ScopeAttrValue attr_value_; // AttrValue -}; - -class ScopeFeature::ScopeFeatureImpl : ScopeBaseFeature { - public: - ScopeFeatureImpl(std::string sub_type, int32_t num, std::string suffix = "", - std::string sub_scope_mask = "", int step = 0) - : sub_type_(sub_type), num_(num), suffix_(suffix), sub_scope_mask_(sub_scope_mask), step_(step) {} - ~ScopeFeatureImpl() {} - bool Match(const Scope *scope) override; - bool SubScopesMatch(std::vector &scopes); - - public: - std::string sub_type_; - int32_t num_; - std::string suffix_; - std::string sub_scope_mask_; - int step_; -}; - -class ScopePattern::ScopePatternImpl { - public: - ScopePatternImpl() {} - ~ScopePatternImpl() {} - bool Match(const Scope *scope) const; - void SetSubType(const std::string &sub_type); - std::string SubType() const { return sub_type_; } - void AddNodeOpTypeFeature(NodeOpTypeFeature feature); - void AddNodeAttrFeature(NodeAttrFeature feature); - void AddScopeFeature(ScopeFeature feature); - - private: - std::string sub_type_; // get Scope sub type - std::vector node_optype_features_; - std::vector node_attr_features_; - std::vector scopes_features_; -}; -} // namespace ge -#endif // REGISTER_SCOPE_SCOPE_PATTERN_IMPL_H_ \ No newline at end of file diff --git a/inc/register/tensor_assign.h b/inc/register/tensor_assign.h deleted file mode 100644 index 57a37f6c27257cc1fc8f739ed2b9d4a780b4a111..0000000000000000000000000000000000000000 --- a/inc/register/tensor_assign.h +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TENSOR_ASSIGN_H_ -#define TENSOR_ASSIGN_H_ - -#include "graph/ge_tensor.h" -#include "proto/tensorflow/tensor.pb.h" - -namespace domi { -using GeTensorPtr = std::shared_ptr; -using Status = uint32_t; -using domi::tensorflow::TensorProto; -using google::protobuf::int32; -using google::protobuf::int64; - -class TensorAssign { - public: - static Status SetGeTensor(const TensorProto &tensor, GeTensorPtr &weight); - - static Status SetGeTensorDataType(int64_t dataType, GeTensorPtr &weight); - - static ge::DataType ConvertTensorflowDataType(uint32_t tf_data_type); - - private: - static bool CheckBoolVal(tensorflow::DataType data_type); - - static bool CheckHalfVal(tensorflow::DataType data_type); - - static bool CheckFloatVal(tensorflow::DataType data_type); - - static bool CheckDoubleVal(tensorflow::DataType data_type); - - static bool CheckComplex64Val(tensorflow::DataType data_type); - - static bool CheckComplex128Val(tensorflow::DataType data_type); - - static bool CheckStringVal(tensorflow::DataType data_type); - - static bool CheckByte(tensorflow::DataType data_type); - - static bool CheckDoubleByte(tensorflow::DataType data_type); - - static bool CheckSignedFourByte(tensorflow::DataType data_type); - - static bool CheckUnsignedFourByte(tensorflow::DataType data_type); - - static bool CheckSignedEightByte(tensorflow::DataType data_type); - - static bool CheckUnsignedEightByte(tensorflow::DataType data_type); - - static Status GetDoubleByteVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight); - static Status GetByteVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight); - - static Status GetStringVal(int32_t val_size, const google::protobuf::RepeatedPtrField &val_vector, - int count, GeTensorPtr &weight); - - static void SetGeTensorWeightData(const TensorProto &tensor, int32_t val_size, int count, GeTensorPtr &weight); - - static void SetWeightData(tensorflow::DataType data_type, int count, const std::string &tensor_content, - GeTensorPtr &weight); - - template - static Status GetVal(int32_t val_size, const google::protobuf::RepeatedField &val_vector, int count, - GeTensorPtr &weight) { - bool zerosLike = (count != val_size && val_size == 1); - T *addr = new (std::nothrow) T[count](); - GE_CHECK_NOTNULL(addr); - int minCount = (count > val_size) ? 
val_size : count; - if (!zerosLike) { - for (int32_t i = 0; i < minCount; i++) { - *(addr + i) = val_vector.Get(i); - } - for (int32_t i = minCount; i < count; i++) { - *(addr + i) = val_vector.Get(minCount - 1); - } - } else { - for (int32_t i = 0; i < count; i++) { - *(addr + i) = val_vector.Get(0); - } - } - (void)weight->SetData(reinterpret_cast(addr), count * sizeof(T)); - GE_DELETE_NEW_ARRAY(addr); - return SUCCESS; - } -}; -} // namespace domi -#endif // TENSOR_ASSIGN_H_ diff --git a/inc/soft_dp/ExternalSoftDp.h b/inc/soft_dp/ExternalSoftDp.h index 6681fbbac47428b7424700f5e2324a01837dca1a..bef5c05d7000f20535b4452970a305e93d092625 100644 --- a/inc/soft_dp/ExternalSoftDp.h +++ b/inc/soft_dp/ExternalSoftDp.h @@ -1,18 +1,12 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +* @file ExternalSoftDp.h +* +* Copyright (c) Huawei Technologies Co., Ltd. 2012-2018. All rights reserved. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +*/ #ifndef EXTERNALSOFTDP_H #define EXTERNALSOFTDP_H @@ -30,7 +24,7 @@ struct SoftDpProcsessInfo { uint32_t outputWidth; uint32_t outputHeight; - bool isVBeforeU; // uv : true, uv : false + uint32_t reserved; }; struct DpCropInfo { @@ -41,18 +35,18 @@ struct DpCropInfo { }; /* - * @brief 解码、缩放接口 - * @param [in] SoftDpProcsessInfo& inMsg: 软实现结构体 + * @brief decode and resize interface + * @param [in] SoftDpProcsessInfo& softDpProcsessInfo : soft dp struct * @return success: return 0, fail: return error number */ -uint32_t DecodeAndResizeJpeg(SoftDpProcsessInfo& inMsg); +uint32_t DecodeAndResizeJpeg(SoftDpProcsessInfo& softDpProcsessInfo); /* - * @brief 解码、裁剪、缩放接口 - * @param [in] SoftDpProcsessInfo& inMsg: 软实现结构体 - * @param [in] const DpCropInfo& cropInfo: 裁剪结构体 + * @brief decode crop and resize interface + * @param [in] SoftDpProcsessInfo& softDpProcsessInfo : soft dp struct + * @param [in] const DpCropInfo& cropInfo: crop struct * @return success: return 0, fail: return error number */ -uint32_t DecodeAndCropAndResizeJpeg(SoftDpProcsessInfo& inMsg, const DpCropInfo& cropInfo); +uint32_t DecodeAndCropAndResizeJpeg(SoftDpProcsessInfo& softDpProcsessInfo, const DpCropInfo& cropInfo); } #endif // EXTERNALSOFTDP_H \ No newline at end of file diff --git a/inc/tdt/index_transform.h b/inc/tdt/index_transform.h new file mode 100644 index 0000000000000000000000000000000000000000..a5af2c83fab05028310c5ea65dd0ed99c7781c15 --- /dev/null +++ b/inc/tdt/index_transform.h @@ -0,0 +1,29 @@ +/** +* @file index_transform.h +* +* Copyright (C) Huawei Technologies Co., Ltd. 2018-2019. All Rights Reserved. +* +* This program is used to get logical device id by phy device id. +*/ + +#ifndef INC_TDT_INDEX_TRANSFORM_H +#define INC_TDT_INDEX_TRANSFORM_H + +#include "stdint.h" +/** +* @ingroup IndexTransform +* @brief get logical device id by phy device id. 
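The GetVal template above implements a small broadcast/pad rule when copying repeated proto values into a weight buffer: a single value destined for a larger tensor is broadcast, otherwise the available values are copied and the last one is repeated. A standalone restatement of that rule (the function name is hypothetical):

```cpp
#include <vector>

template <typename T>
std::vector<T> FillWeight(const std::vector<T> &vals, int count) {
  std::vector<T> out(static_cast<size_t>(count > 0 ? count : 0), T());
  if (vals.empty() || count <= 0) {
    return out;                                   // zero-filled, like the value-initialized buffer in GetVal
  }
  if (vals.size() == 1 && count != 1) {           // the "zerosLike" broadcast branch
    for (int i = 0; i < count; ++i) out[i] = vals[0];
    return out;
  }
  const int min_count = count < static_cast<int>(vals.size()) ? count : static_cast<int>(vals.size());
  for (int i = 0; i < min_count; ++i) out[i] = vals[i];                   // copy what is present
  for (int i = min_count; i < count; ++i) out[i] = vals[min_count - 1];   // pad with the last value
  return out;
}
```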
+* +* @par Function get logical device id by phy device id. +* +* @param phyId [IN] physical device id +* @param logicalId [OUT] logical device id +* @retval 0 Success +* @retval OtherValues Fail +* +* @par Dependency +* @li libruntime.so: Library to which the interface belongs. +*/ + +int32_t IndexTransform(const uint32_t phyId, uint32_t &logicId); +#endif diff --git a/inc/tdt/tdt_device.h b/inc/tdt/tdt_device.h deleted file mode 100644 index 441e392384464cca75020a916b584098be212195..0000000000000000000000000000000000000000 --- a/inc/tdt/tdt_device.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef HOST_INNER_INC_TDT_DEVICE_H_ -#define HOST_INNER_INC_TDT_DEVICE_H_ - -#include -#include -#include -#include "tdt/data_common.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -namespace tdt { -/** - * @ingroup TdtDevicePushData - * @brief Tdt device push data to queue for ops. - * - * @par Function - * Tdt device push data to queue for ops. - * - * @param channelName [IN] type #String. queue channel name - * @param items [IN] type #vector DataItem is defined in data_common.h. input data - * @retval 0 Success - * @retval OtherValues Fail - * - * @par Dependency - * @li libtdtdevice.so: Library to which the interface belongs. - * @li tdt_device.h: Header file where the interface declaration is located. - * @li data_common.h: Header file where 'DataItem' defined - * - */ -int32_t TdtDevicePushData(const std::string &channelName, std::vector &items); -} // namespace tdt -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // HOST_INNER_INC_TDT_DEVICE_H_ diff --git a/inc/tdt/tdt_server.h b/inc/tdt/tdt_server.h deleted file mode 100644 index 5d45047b216962c05730953dee91ee9372e4b566..0000000000000000000000000000000000000000 --- a/inc/tdt/tdt_server.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_TDT_TDT_SERVER_H -#define INC_TDT_TDT_SERVER_H - -#include -#include "tdt/status.h" - -namespace tdt { -/** -* @ingroup TDTServerInit -* @brief Initialization functions, establish TDT Server, -* provide services such as access services, initialization and tuning channels -* -* @par Function -* Initialization functions, establish TDT Server, -* provide services such as access services, initialization and tuning channels -* -* @param deviceID [IN] type #unsigned int. 
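The newly added index_transform.h above exposes a single lookup. A minimal usage sketch, where the wrapper function and its fallback behaviour are illustrative only:

```cpp
#include <cstdint>
#include "tdt/index_transform.h"

uint32_t GetLogicalDeviceId(uint32_t phy_id) {
  uint32_t logic_id = 0U;
  if (IndexTransform(phy_id, logic_id) != 0) {
    return phy_id;   // non-zero return means the lookup failed; fall back to the physical id
  }
  return logic_id;
}
```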
Physical device ID -* @param bindCoreList [IN] type #List bindCoreList. -* device CPU core sequence, the maximum value of the core sequence should not -* exceed the total number of CPU cores -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtdtserver.so: Library to which the interface belongs. -* @li tdt_server.h: Header file where the interface declaration is located. -*/ -TDT_LIB_EXPORT int32_t TDTServerInit(const uint32_t deviceID, const std::list &bindCoreList); - -/** -* @ingroup TDTServerInit -* @brief End TDT Server -* -* @par Function -* End TDT Server -* -* @param NA -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtdtserver.so: Library to which the interface belongs. -* @li tdt_server.h: Header file where the interface declaration is located. -*/ -TDT_LIB_EXPORT int32_t TDTServerStop(); - -class TdtServer { - public: - private: - /** - * @ingroup TdtServer - * @brief TdtServer is a static class, all delete constructs and destructors - */ - TdtServer() = delete; - - /** - * @ingroup TdtServer - * @brief TdtServer destructor - */ - virtual ~TdtServer() = delete; - TdtServer(const TdtServer &) = delete; - TdtServer(TdtServer &&) = delete; - TdtServer &operator=(const TdtServer &) = delete; - TdtServer &operator=(TdtServer &&) = delete; -}; -}; // namespace tdt -#endif // INC_TDT_TDT_SERVER_H diff --git a/inc/tdt/tsd.h b/inc/tdt/tsd.h deleted file mode 100644 index bb7d7cd40c6b713619f35503abe16e194d29e982..0000000000000000000000000000000000000000 --- a/inc/tdt/tsd.h +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef INC_TDT_TSD_H_ -#define INC_TDT_TSD_H_ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -/** -* @ingroup Tsdaemon. -* -* Identifies that HCCP or Compute_process is waiting for -* Tsdaemon to issue a shutdown command. -*/ -typedef enum { - TSD_HCCP = 0, /**< HCCP*/ - TSD_COMPUTE = 1, /**< Compute_process*/ - TSD_WAITTYPE_MAX /**< Max*/ -} TsdWaitType; - -/** -* @ingroup TsdWaitForShutdown -* @brief Wait for the TSD process to issue the shutdown command -* -* @par Function -* Wait for the TSD process to issue the shutdown command -* -* @param NA -* @param deviceID [IN] type #unsigned int. Physical device ID -* @param waitType [IN] type #TsdWaitType. HCCP or CP -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdppc.so: Library to which the interface belongs. -* @li tsd.h: Header file where the interface declaration is located. -*/ -int32_t TsdWaitForShutdown(const uint32_t deviceId, const TsdWaitType waitType); - -/** -* @ingroup PpcClientSendHeartbeat -* @brief Ppc client send heartbeat msg to ppc server -* -* @par Function -* Ppc client send heartbeat msg to ppc server -* -* @param NA -* @param deviceID [IN] type #unsigned int. Physical device ID -* @param waitType [IN] type #TsdWaitType. 
HCCP or CP -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdppc.so: Library to which the interface belongs. -* @li tsd.h: Header file where the interface declaration is located. -*/ -int32_t TsdHeartbeatSend(const uint32_t deviceId, const TsdWaitType waitType); - -/** -* @ingroup PpcClientSendAbnormalMsg -* @brief Ppc client send abnormal msg to ppc server -* -* @par Function -* Ppc client send abnormal msg to ppc server -* -* @param NA -* @param deviceID [IN] type #unsigned int. Physical device ID -* @param waitType [IN] type #TsdWaitType. HCCP or CP -* @retval 0 Success -* @retval OtherValues 0 Fail -* -* @par Dependency -* @li libtsdppc.so: Library to which the interface belongs. -* @li tsd.h: Header file where the interface declaration is located. -*/ -int32_t TsdDestory(const uint32_t deviceId, const TsdWaitType waitType); - -#ifdef __cplusplus -} -#endif // __cplusplus -#endif // INC_TDT_TSD_H_ diff --git a/inc/toolchain/adx_dump_server.h b/inc/toolchain/adx_dump_server.h new file mode 100644 index 0000000000000000000000000000000000000000..55ead8e6e542862ca0a34e4afbb4aa0cb8f8cff8 --- /dev/null +++ b/inc/toolchain/adx_dump_server.h @@ -0,0 +1,26 @@ +/** +* @file adx_dump_server.h +* +* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +*/ + +#ifndef ADX_DUMP_SERVER_H +#define ADX_DUMP_SERVER_H +#ifdef __cplusplus +extern "C" { +#endif +/** + * @brief get adx workspace path + * @return + * adx workspace path + */ +int AdxCoreDumpServerInit(); +#ifdef __cplusplus +} +#endif +#endif + diff --git a/inc/toolchain/bbox/bbox_ddr_data.h b/inc/toolchain/bbox/bbox_ddr_data.h deleted file mode 100644 index bec57ac1e1a79cc3550f2ec1c58c872d90575242..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_ddr_data.h +++ /dev/null @@ -1,126 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
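For reference, the removed tdt_server.h and tsd.h interfaces above were combined roughly as follows on the device side. This is only a sketch: the std::list element type is assumed to be uint32_t (template arguments are stripped in the listing) and the wrapper function is hypothetical.

```cpp
#include <cstdint>
#include <list>
#include "tdt/tdt_server.h"
#include "tdt/tsd.h"

int32_t RunDeviceServices(uint32_t device_id) {
  std::list<uint32_t> bind_cores = {0U, 1U};            // device CPU cores to bind, per TDTServerInit
  int32_t ret = tdt::TDTServerInit(device_id, bind_cores);
  if (ret != 0) {
    return ret;                                         // non-zero means initialization failed
  }
  // Block until Tsdaemon issues the shutdown command for the compute process.
  (void)TsdWaitForShutdown(device_id, TSD_COMPUTE);
  return tdt::TDTServerStop();
}
```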
- */
-
-#ifndef BBOX_DDR_DATA_H
-#define BBOX_DDR_DATA_H
-
-/* ELEM_OUTPUT_BIN
- * key:
- * 0x00000000 FFFF FFFF FFFF FFFF
- * 0x00000010 FFFF FFFF FFFF FFFF
- *
- * ELEM_OUTPUT_STR
- * key: qwertyuiop
- *
- * ELEM_OUTPUT_STR_NL
- * key:
- * qwertyuiop
- *
- * ELEM_OUTPUT_HEX
- * key: FFFFFFFFFFFFFFFF
- *
- * ELEM_OUTPUT_INT
- * key: 0xFF
- */
-#define ELEM_OUTPUT_CHAR_LEN 1
-#define ELEM_OUTPUT_SHORT_LEN 2
-#define ELEM_OUTPUT_INT_LEN 4
-#define ELEM_OUTPUT_LONG_LEN 8
-#define ELEM_OUTPUT_HEX_MAX_LEN 32
-#define ELEM_OUTPUT_DIVIDE_MAX_LEN 15
-#define ELEMENT_NAME_MAX_LEN 32
-
-enum ModelElementType {
-    ELEM_OUTPUT_TYPE = 0x0,
-    ELEM_OUTPUT_BIN = 0x1,          // name = func(offset, size); whole binary block output, displayed as hex
-    ELEM_OUTPUT_STR = 0x2,          // name = func(offset, maxSize); string output with a trailing newline
-    ELEM_OUTPUT_STR_NL = 0x3,       // name = func(offset, maxSize); string output without a trailing newline
-    ELEM_OUTPUT_HEX = 0x4,          // name = func(offset, size); each byte printed as hex, at most 16 bytes
-    ELEM_OUTPUT_INT = 0x5,          // name = func(offset, size); integer output of 1, 2, 4 or 8 bytes
-    ELEM_OUTPUT_CHAR = 0x6,         // name = func(offset, size); character output of the given length
-    ELEM_OUTPUT_INT_CONST = 0x7,    // name = value; integer constant: value(size)
-    ELEM_OUTPUT_STR_CONST = 0x8,    // name; string constant: value(0) size(0)
-    ELEM_OUTPUT_NL = 0x9,           // \n
-    ELEM_OUTPUT_DIVIDE = 0xa,       // ==========name==========
-    ELEM_OUTPUT_MAX = 0xFFF,
-
-    ELEM_FEATURE_TYPE = 0x1000,
-    ELEM_FEATURE_TABLE = 0x1001,
-    ELEM_FEATURE_COMPARE = 0x1002,
-    ELEM_FEATURE_LOOPBUF = 0x1003,
-    ELEM_FEATURE_CHARLOG = 0x1004,
-    ELEM_FEATURE_STRUCTLOG = 0x1005,
-    ELEM_FEATURE_MAX = 0x1FFF,
-
-    ELEM_CTRL_TYPE = 0x2000,
-    ELEM_CTRL_TABLE = 0x2000,       // ELEM_FEATURE_TABLE control class
-    ELEM_CTRL_TABLE_GOTO = 0x2000,  // (tableEnumType, 0); PlaintextTableType enum value of the jump table, not displayed
-    ELEM_CTRL_TABLE_RANGE = 0x2001, // (indexOffset, indexCnt); start address and length of the sub-table, not displayed
-    ELEM_CTRL_COMPARE = 0x2100,     // ELEM_FEATURE_COMPARE control class
-    ELEM_CTRL_COM_VALUE = 0x2100,   // (offset, size); position and length of the value to compare, not displayed
-    ELEM_CTRL_CMP_JUMP_NE = 0x2101, // (compareValue, jumpIndex); jump if not equal, not displayed
-    ELEM_CTRL_CMP_JUMP_LE = 0x2102, // (compareValue, jumpIndex); jump if not greater, not displayed
-    ELEM_CTRL_CMP_JUMP_LT = 0x2103, // (compareValue, jumpIndex); jump if less, not displayed
-    ELEM_CTRL_CMP_JUMP_GE = 0x2104, // (compareValue, jumpIndex); jump if not less, not displayed
-    ELEM_CTRL_CMP_JUMP_GT = 0x2105, // (compareValue, jumpIndex); jump if greater, not displayed
-    ELEM_CTRL_CMP_JUMP_EQ = 0x2106, // (compareValue, jumpIndex); jump if equal, not displayed
-    ELEM_CTRL_LOOPBUF = 0x2200,     // ELEM_FEATURE_LOOPBUF control class
-    ELEM_CTRL_LPBF_HEAD = 0x2200,   // value; length of the loop-buffer header struct, not displayed
-    ELEM_CTRL_LPBF_READ = 0x2201,   // name: OutPutFunc(offset, size); offset of the loop-buffer read pointer in the struct
-    ELEM_CTRL_LPBF_WRITE = 0x2203,  // name: OutPutFunc(offset, size); offset of the loop-buffer write pointer in the struct
-    ELEM_CTRL_LPBF_SIZE = 0x2202,   // name: OutPutFunc(offset, size); offset of the total loop-buffer size in the struct
-    ELEM_CTRL_LPBF_SIZE_C = 0x2202, // name: value; total loop-buffer length, set as a fixed value
-    ELEM_CTRL_LPBF_ROLLBK = 0x2203, // (offset, size); roll-back flag marking whether the buffer has wrapped, not displayed
-    ELEM_CTRL_MAX = 0xFFFF,
-};
-
-enum ElemConditionType {
-    ELEM_EQUAL = 1 << 0,   // 0x001
-    ELEM_GRATER = 1 << 1,  // 0x010
-    ELEM_LESS = 1 << 2,    // 0x100
-};
-
-struct ModelElement {
-    char name[ELEMENT_NAME_MAX_LEN];
-    unsigned int type;
-    union {
-        unsigned int arg1;
-        unsigned int offset;
-        unsigned int value;
-        unsigned int index;
-    };
-    union {
-        unsigned int arg2;
-        unsigned int size;
-        unsigned int maxSize;
-        unsigned int mark;
-        unsigned int indexOffset;
-    };
-};
-
-#define MODEL_VECTOR(NAME) struct ModelElement MODEL_VECTOR_OBJECT_##NAME[]
-#define MODEL_VECTOR_OBJECT(NAME) (&MODEL_VECTOR_OBJECT_##NAME[0])
-#define MODEL_VECTOR_ITEM(NAME, i)
(&MODEL_VECTOR_OBJECT_##NAME[i]) -#define MODEL_VECTOR_SIZE(NAME) (sizeof(MODEL_VECTOR_OBJECT_##NAME) / sizeof(struct ModelElement)) -#define DEFINE_DATA_MODEL(name) DATA_MODEL_##name - -#define ELEMENT_CLASSIFY(type) ((type) & 0xFFFF) -#define OUTPUT_ELEMENT(type) (ELEMENT_CLASSIFY(type) > ELEM_OUTPUT_TYPE && ELEMENT_CLASSIFY(type) < ELEM_OUTPUT_MAX) -#define CTRL_ELEMENT(type) (ELEMENT_CLASSIFY(type) >= ELEM_CTRL_TYPE && ELEMENT_CLASSIFY(type) < ELEM_CTRL_MAX) -#define CMP_ELEMENT(type) (((type) & 0xFFF0) == ELEM_CTRL_COM_VALUE) -#define ELEM_CMP_CONDITION(type) ((type) & 0x000F) - -#endif diff --git a/inc/toolchain/bbox/bbox_ddr_data_cloud.h b/inc/toolchain/bbox/bbox_ddr_data_cloud.h deleted file mode 100644 index 32bdd61337d02a475844f4d1ca236b276e027d48..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_ddr_data_cloud.h +++ /dev/null @@ -1,1183 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BBOX_DDR_DATA_CLOUD_H -#define BBOX_DDR_DATA_CLOUD_H - -#include "bbox_ddr_data.h" - -/* each Module need define as follows */ -#define DATA_MODEL_LPFW MODEL_VECTOR(LPFW) = { \ - {"****exc****reg**", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reset_reason", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {"slice", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"rtc", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {"REGSP", ELEM_OUTPUT_INT, {0x98}, {0x8}}, \ - {"REGPC", ELEM_OUTPUT_INT, {0xa0}, {0x8}}, \ - {"REGELR", ELEM_OUTPUT_INT, {0xa8}, {0x8}}, \ - {"REGCPSR", ELEM_OUTPUT_INT, {0xb0}, {0x8}}, \ - {"REGSPSR", ELEM_OUTPUT_INT, {0xb8}, {0x8}}, \ - {"ESR", ELEM_OUTPUT_INT, {0xc0}, {0x8}}, \ - {"FAR", ELEM_OUTPUT_INT, {0xc8}, {0x8}}, \ - {"excTrace", ELEM_OUTPUT_INT, {0xd0}, {0x1}}, \ - {"ddrExc", ELEM_OUTPUT_INT, {0xd1}, {0x1}}, \ - {"irqId", ELEM_OUTPUT_INT, {0xd2}, {0x2}}, \ - {"taskId", ELEM_OUTPUT_INT, {0xd4}, {0x4}}, \ - {"**backup**reg***", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reg_backup_index", ELEM_OUTPUT_INT, {0x280}, {0x4}}, \ - {"reason_0", ELEM_OUTPUT_INT, {0x284}, {0x4}}, \ - {"reason_1", ELEM_OUTPUT_INT, {0x288}, {0x4}}, \ - {"reason_2", ELEM_OUTPUT_INT, {0x28C}, {0x4}}, \ - {"x0", ELEM_OUTPUT_INT, {0x290}, {0x8}}, \ - {"x1", ELEM_OUTPUT_INT, {0x298}, {0x8}}, \ - {"x2", ELEM_OUTPUT_INT, {0x2a0}, {0x8}}, \ - {"x3", ELEM_OUTPUT_INT, {0x2a8}, {0x8}}, \ - {"x4", ELEM_OUTPUT_INT, {0x2b0}, {0x8}}, \ - {"x5", ELEM_OUTPUT_INT, {0x2b8}, {0x8}}, \ - {"x6", ELEM_OUTPUT_INT, {0x2c0}, {0x8}}, \ - {"x7", ELEM_OUTPUT_INT, {0x2c8}, {0x8}}, \ - {"x8", ELEM_OUTPUT_INT, {0x2d0}, {0x8}}, \ - {"x9", ELEM_OUTPUT_INT, {0x2d8}, {0x8}}, \ - {"x10", ELEM_OUTPUT_INT, {0x2e0}, {0x8}}, \ - {"x11", ELEM_OUTPUT_INT, {0x2e8}, {0x8}}, \ - {"x12", ELEM_OUTPUT_INT, {0x2f0}, {0x8}}, \ - {"x13", ELEM_OUTPUT_INT, {0x2f8}, {0x8}}, \ - {"x14", ELEM_OUTPUT_INT, {0x300}, {0x8}}, \ - {"x15", ELEM_OUTPUT_INT, {0x308}, {0x8}}, \ - {"x16", ELEM_OUTPUT_INT, {0x310}, {0x8}}, \ - {"x17", ELEM_OUTPUT_INT, {0x318}, {0x8}}, \ - {"x18", ELEM_OUTPUT_INT, 
{0x320}, {0x8}}, \ - {"x19", ELEM_OUTPUT_INT, {0x328}, {0x8}}, \ - {"x20", ELEM_OUTPUT_INT, {0x330}, {0x8}}, \ - {"x21", ELEM_OUTPUT_INT, {0x338}, {0x8}}, \ - {"x22", ELEM_OUTPUT_INT, {0x340}, {0x8}}, \ - {"x23", ELEM_OUTPUT_INT, {0x348}, {0x8}}, \ - {"x24", ELEM_OUTPUT_INT, {0x350}, {0x8}}, \ - {"x25", ELEM_OUTPUT_INT, {0x358}, {0x8}}, \ - {"x26", ELEM_OUTPUT_INT, {0x360}, {0x8}}, \ - {"x27", ELEM_OUTPUT_INT, {0x368}, {0x8}}, \ - {"x28", ELEM_OUTPUT_INT, {0x370}, {0x8}}, \ - {"x29", ELEM_OUTPUT_INT, {0x378}, {0x8}}, \ - {"x30", ELEM_OUTPUT_INT, {0x380}, {0x8}}, \ - {"XZR", ELEM_OUTPUT_INT, {0x388}, {0x8}}, \ - {"ESR", ELEM_OUTPUT_INT, {0x390}, {0x8}}, \ - {"FAR", ELEM_OUTPUT_INT, {0x398}, {0x8}}, \ - {"SPSR", ELEM_OUTPUT_INT, {0x3a0}, {0x8}}, \ - {"ELR", ELEM_OUTPUT_INT, {0x3a8}, {0x8}}, \ - {"PC", ELEM_OUTPUT_INT, {0x3b0}, {0x8}}, \ - {"SP", ELEM_OUTPUT_INT, {0x3b8}, {0x8}}, \ - {"CPSR", ELEM_OUTPUT_INT, {0x3c0}, {0x8}}, \ - {"Exceptioncode", ELEM_OUTPUT_INT, {0x3c8}, {0x8}}, \ - {"**runtime*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"T-AIC00", ELEM_OUTPUT_INT, {0xD380}, {0x1}}, \ - {"T-AIC01", ELEM_OUTPUT_INT, {0xD381}, {0x1}}, \ - {"T-AIC02", ELEM_OUTPUT_INT, {0xD382}, {0x1}}, \ - {"T-AIC03", ELEM_OUTPUT_INT, {0xD383}, {0x1}}, \ - {"T-AIC04", ELEM_OUTPUT_INT, {0xD384}, {0x1}}, \ - {"T-AIC05", ELEM_OUTPUT_INT, {0xD385}, {0x1}}, \ - {"T-AIC06", ELEM_OUTPUT_INT, {0xD386}, {0x1}}, \ - {"T-AIC07", ELEM_OUTPUT_INT, {0xD387}, {0x1}}, \ - {"T-AIC08", ELEM_OUTPUT_INT, {0xD388}, {0x1}}, \ - {"T-AIC09", ELEM_OUTPUT_INT, {0xD389}, {0x1}}, \ - {"T-AIC10", ELEM_OUTPUT_INT, {0xD38A}, {0x1}}, \ - {"T-AIC11", ELEM_OUTPUT_INT, {0xD38B}, {0x1}}, \ - {"T-AIC12", ELEM_OUTPUT_INT, {0xD38C}, {0x1}}, \ - {"T-AIC13", ELEM_OUTPUT_INT, {0xD38D}, {0x1}}, \ - {"T-AIC14", ELEM_OUTPUT_INT, {0xD38E}, {0x1}}, \ - {"T-AIC15", ELEM_OUTPUT_INT, {0xD38F}, {0x1}}, \ - {"T-AIC16", ELEM_OUTPUT_INT, {0xD390}, {0x1}}, \ - {"T-AIC17", ELEM_OUTPUT_INT, {0xD391}, {0x1}}, \ - {"T-AIC18", ELEM_OUTPUT_INT, {0xD392}, {0x1}}, \ - {"T-AIC19", ELEM_OUTPUT_INT, {0xD393}, {0x1}}, \ - {"T-AIC20", ELEM_OUTPUT_INT, {0xD394}, {0x1}}, \ - {"T-AIC21", ELEM_OUTPUT_INT, {0xD395}, {0x1}}, \ - {"T-AIC22", ELEM_OUTPUT_INT, {0xD396}, {0x1}}, \ - {"T-AIC23", ELEM_OUTPUT_INT, {0xD397}, {0x1}}, \ - {"T-AIC24", ELEM_OUTPUT_INT, {0xD398}, {0x1}}, \ - {"T-AIC25", ELEM_OUTPUT_INT, {0xD399}, {0x1}}, \ - {"T-AIC26", ELEM_OUTPUT_INT, {0xD39A}, {0x1}}, \ - {"T-AIC27", ELEM_OUTPUT_INT, {0xD39B}, {0x1}}, \ - {"T-AIC28", ELEM_OUTPUT_INT, {0xD39C}, {0x1}}, \ - {"T-AIC29", ELEM_OUTPUT_INT, {0xD39D}, {0x1}}, \ - {"T-AIC30", ELEM_OUTPUT_INT, {0xD39E}, {0x1}}, \ - {"T-AIC31", ELEM_OUTPUT_INT, {0xD39F}, {0x1}}, \ - {"T-AICPU0", ELEM_OUTPUT_INT, {0xD3A0}, {0x1}}, \ - {"T-AICPU1", ELEM_OUTPUT_INT, {0xD3A1}, {0x1}}, \ - {"T-AICPU2", ELEM_OUTPUT_INT, {0xD3A2}, {0x1}}, \ - {"T-AICPU3", ELEM_OUTPUT_INT, {0xD3A3}, {0x1}}, \ - {"T-HBMPHY0", ELEM_OUTPUT_INT, {0xD3A4}, {0x1}}, \ - {"T-HBMPHY1", ELEM_OUTPUT_INT, {0xD3A5}, {0x1}}, \ - {"T-HBMPHY2", ELEM_OUTPUT_INT, {0xD3A6}, {0x1}}, \ - {"T-HBMPHY3", ELEM_OUTPUT_INT, {0xD3A7}, {0x1}}, \ - {"T-DDRPHY", ELEM_OUTPUT_INT, {0xD3A8}, {0x1}}, \ - {"T-NIMBUS", ELEM_OUTPUT_INT, {0xD3A9}, {0x1}}, \ - {"T-HBMDEV0", ELEM_OUTPUT_INT, {0xD3AA}, {0x1}}, \ - {"T-HBMDEV1", ELEM_OUTPUT_INT, {0xD3AB}, {0x1}}, \ - {"T-HBMDEV2", ELEM_OUTPUT_INT, {0xD3AC}, {0x1}}, \ - {"T-HBMDEV3", ELEM_OUTPUT_INT, {0xD3AD}, {0x1}}, \ - {"T-ZONE-AIC", ELEM_OUTPUT_INT, {0xD3B0}, {0x1}}, \ - {"T-ZONE-AICPU", ELEM_OUTPUT_INT, {0xD3B1}, {0x1}}, \ - {"T-ZONE-HBMPHY", 
ELEM_OUTPUT_INT, {0xD3B2}, {0x1}}, \ - {"T-ZONE-DDRPHY", ELEM_OUTPUT_INT, {0xD3B3}, {0x1}}, \ - {"T-ZONE-NIMBUS", ELEM_OUTPUT_INT, {0xD3B4}, {0x1}}, \ - {"T-ZONE-HBMDEV", ELEM_OUTPUT_INT, {0xD3B5}, {0x1}}, \ - {"TMP_STATUS", ELEM_OUTPUT_INT, {0xD3B8}, {0x2}}, \ - {"EDP_AVG_CURRENT", ELEM_OUTPUT_INT, {0xD3BA}, {0x2}}, \ - {"EDP_HEART_ADDR", ELEM_OUTPUT_INT, {0xD3BC}, {0x4}}, \ - {"EDP_IRQ_COUNT", ELEM_OUTPUT_INT, {0xD3C0}, {0x4}}, \ - {"EDP_DOWN_COUNT", ELEM_OUTPUT_INT, {0xD3C4}, {0x2}}, \ - {"EDP_UP_COUNT", ELEM_OUTPUT_INT, {0xD3C6}, {0x2}}, \ - {"EDP_TIMER_COUNT", ELEM_OUTPUT_INT, {0xD3C8}, {0x4}}, \ - {"THERMAL_TIMER_CNT",ELEM_OUTPUT_INT, {0xD3CC}, {0x4}}, \ - {"VOLT_VALUE", ELEM_OUTPUT_INT, {0XD3D4}, {0x4}}, \ - {"CURRENT_VALUE", ELEM_OUTPUT_INT, {0XD3D8}, {0x4}}, \ - {"POWER_VALUE", ELEM_OUTPUT_INT, {0XD3DC}, {0x4}}, \ - {"LPNV_MAGIC", ELEM_OUTPUT_INT, {0xD3E0}, {0x4}}, \ - {"HI_EDP", ELEM_OUTPUT_INT, {0xD3E4}, {0x1}}, \ - {"EDP_SCALE", ELEM_OUTPUT_INT, {0xD3E5}, {0x1}}, \ - {"EDP_PERIOD", ELEM_OUTPUT_INT, {0xD3E6}, {0x2}}, \ - {"EDP_MAX_CURRENT", ELEM_OUTPUT_INT, {0xD3E8}, {0x2}}, \ - {"EDP_AVE_CURRENT", ELEM_OUTPUT_INT, {0xD3EA}, {0x2}}, \ - {"AVS_NV", ELEM_OUTPUT_INT, {0xD3EC}, {0x1}}, \ - {"SVFD_NV", ELEM_OUTPUT_INT, {0xD3ED}, {0x1}}, \ - {"PLLMODE", ELEM_OUTPUT_INT, {0xD3EE}, {0x1}}, \ - {"HOT_RESET", ELEM_OUTPUT_INT, {0xD3EF}, {0x1}}, \ - {"RESERVED_CTRL", ELEM_OUTPUT_INT, {0xD3F0}, {0x2}}, \ - {"RESERVED_EN", ELEM_OUTPUT_INT, {0xD3F2}, {0x2}}, \ - {"RESERVED_IMU", ELEM_OUTPUT_INT, {0xD3F4}, {0x2}}, \ - {"LP_MNTN", ELEM_OUTPUT_INT, {0xD3F6}, {0x1}}, \ - {"THERMAL_CTRL", ELEM_OUTPUT_INT, {0xD3F7}, {0x1}}, \ - {"THERMAL_SHUTDOWN", ELEM_OUTPUT_INT, {0xD3F8}, {0x1}}, \ - {"THERMAL_FREQ_STEP",ELEM_OUTPUT_INT, {0xD3F9}, {0x1}}, \ - {"THERMAL_HWRST", ELEM_OUTPUT_INT, {0xD3FA}, {0x1}}, \ - {"THERMAL_SWRST", ELEM_OUTPUT_INT, {0xD3FB}, {0x1}}, \ - {"THERMAL_HIGH_TEMP",ELEM_OUTPUT_INT, {0xD3FC}, {0x1}}, \ - {"THERMAL_NOR_TEMP", ELEM_OUTPUT_INT, {0xD3FD}, {0x1}}, \ - {"THERMAL_SLOW_PER", ELEM_OUTPUT_INT, {0xD3FE}, {0x1}}, \ - {"THERMAL_FAST_PER", ELEM_OUTPUT_INT, {0xD3FF}, {0x1}}, \ - {"THERMAL_COOL_CNT", ELEM_OUTPUT_INT, {0xD400}, {0x2}}, \ - {"UTRALSOC_DIS", ELEM_OUTPUT_INT, {0xD402}, {0x1}}, \ - {"POWERBRAKE_EN", ELEM_OUTPUT_INT, {0xD403}, {0x1}}, \ - {"POWERBRAKE_SCALE", ELEM_OUTPUT_INT, {0xD404}, {0x2}}, \ - {"FLOOR_FREQ", ELEM_OUTPUT_INT, {0xD406}, {0x2}}, \ - {"HBM_DEBUG_LEVEL", ELEM_OUTPUT_INT, {0xD410}, {0x1}}, \ - {"HBM_MP_INIT", ELEM_OUTPUT_INT, {0xD411}, {0x1}}, \ - {"HBM_ECC_EN", ELEM_OUTPUT_INT, {0xD412}, {0x1}}, \ - {"HBM_RASC_EN", ELEM_OUTPUT_INT, {0xD413}, {0x1}}, \ - {"HBM_PATROL_SCRUB", ELEM_OUTPUT_INT, {0xD414}, {0x1}}, \ - {"HBM_CLEAN_MEM_EN", ELEM_OUTPUT_INT, {0xD415}, {0x1}}, \ - {"HBM_POISON_EN", ELEM_OUTPUT_INT, {0xD416}, {0x1}}, \ - {"HBM_FW_PATCH_EN", ELEM_OUTPUT_INT, {0xD417}, {0x1}}, \ - {"HBM_FW_SFC_MRS_EN",ELEM_OUTPUT_INT, {0xD418}, {0x1}}, \ - {"HBM_TMON_EN", ELEM_OUTPUT_INT, {0xD419}, {0x1}}, \ - {"HBM_PD", ELEM_OUTPUT_INT, {0xD41A}, {0x1}}, \ - {"HBM_HIGH_TEMP", ELEM_OUTPUT_INT, {0xD41B}, {0x1}}, \ - {"HBM_RESERVED_0", ELEM_OUTPUT_INT, {0xD41C}, {0x1}}, \ - {"HBM_RESERVED_1", ELEM_OUTPUT_INT, {0xD41D}, {0x1}}, \ - {"HBM_RESERVED_2", ELEM_OUTPUT_INT, {0xD41E}, {0x1}}, \ - {"HBM_FREQ", ELEM_OUTPUT_INT, {0xD420}, {0x2}}, \ - {"DDR_DEBUG_LEVEL", ELEM_OUTPUT_INT, {0xD422}, {0x1}}, \ - {"DDR_MP_INIT", ELEM_OUTPUT_INT, {0xD423}, {0x1}}, \ - {"DDR_ECC_EN", ELEM_OUTPUT_INT, {0xD424}, {0x1}}, \ - {"DDR_RASC_EN", ELEM_OUTPUT_INT, {0xD425}, {0x1}}, \ - {"DDR_PD_EN", 
ELEM_OUTPUT_INT, {0xD426}, {0x1}}, \ - {"DDR_LP_EN", ELEM_OUTPUT_INT, {0xD427}, {0x1}}, \ - {"DDR_RASC_ALGO", ELEM_OUTPUT_INT, {0xD428}, {0x1}}, \ - {"DDR_CA_PARITY_EN", ELEM_OUTPUT_INT, {0xD429}, {0x1}}, \ - {"DDR_POISON_EN", ELEM_OUTPUT_INT, {0xD42A}, {0x1}}, \ - {"DDR_PATROL_SCRUB", ELEM_OUTPUT_INT, {0xD42B}, {0x1}}, \ - {"DDR_TMON_EN", ELEM_OUTPUT_INT, {0xD42C}, {0x1}}, \ - {"DDR_HIGH_TEMP", ELEM_OUTPUT_INT, {0xD42D}, {0x1}}, \ - {"DDR_RESERVED_0", ELEM_OUTPUT_INT, {0xD42E}, {0x1}}, \ - {"DDR_RESERVED_1", ELEM_OUTPUT_INT, {0xD42F}, {0x1}}, \ - {"DDR_RESERVED_2", ELEM_OUTPUT_INT, {0xD430}, {0x1}}, \ - {"DDR_FREQ", ELEM_OUTPUT_INT, {0xD432}, {0x2}}, \ - {"AVS_CALCU_VOLT", ELEM_OUTPUT_INT, {0xD444}, {0x4}}, \ - {"AVS_WORK_VOLT", ELEM_OUTPUT_INT, {0xD448}, {0x4}}, \ - {"BBOX_BASE", ELEM_OUTPUT_INT, {0xD460}, {0x8}}, \ - {"BBOX_SIZE", ELEM_OUTPUT_INT, {0xD468}, {0x4}}, \ - {"BACKUP_BBOX_ADDR", ELEM_OUTPUT_INT, {0xD470}, {0x8}}, \ - {"BACKUP_BBOX_SIZE", ELEM_OUTPUT_INT, {0xD478}, {0x4}}, \ - {"PMBUS_CHECK0", ELEM_OUTPUT_INT, {0xD4A0}, {0x2}}, \ - {"PMBUS_CHECK1", ELEM_OUTPUT_INT, {0xD4A2}, {0x2}}, \ - {"PMBUS_CHECK2", ELEM_OUTPUT_INT, {0xD4A4}, {0x2}}, \ - {"PMBUS_CHECK3", ELEM_OUTPUT_INT, {0xD4A6}, {0x2}}, \ - {"PMBUS_CHECK4", ELEM_OUTPUT_INT, {0xD4A8}, {0x2}}, \ - {"PMBUS_CHECK5", ELEM_OUTPUT_INT, {0xD4AA}, {0x2}}, \ - {"LP_STARTUP_EXCEPTION", ELEM_OUTPUT_INT, {0xD4B0}, {0x4}}, \ - {"**GIC*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"ENABLE[0]", ELEM_OUTPUT_INT, {0x1280}, {0x4}}, \ - {"ENABLE[1]", ELEM_OUTPUT_INT, {0x1284}, {0x4}}, \ - {"ENABLE[2]", ELEM_OUTPUT_INT, {0x1288}, {0x4}}, \ - {"ENABLE[3]", ELEM_OUTPUT_INT, {0x128C}, {0x4}}, \ - {"ENABLE[4]", ELEM_OUTPUT_INT, {0x1290}, {0x4}}, \ - {"ENABLE[5]", ELEM_OUTPUT_INT, {0x1294}, {0x4}}, \ - {"ENABLE[6]", ELEM_OUTPUT_INT, {0x1298}, {0x4}}, \ - {"ENABLE[7]", ELEM_OUTPUT_INT, {0x129C}, {0x4}}, \ - {"ENABLE[8]", ELEM_OUTPUT_INT, {0x12A0}, {0x4}}, \ - {"ENABLE[9]", ELEM_OUTPUT_INT, {0x12A4}, {0x4}}, \ - {"ENABLE[10]", ELEM_OUTPUT_INT, {0x12A8}, {0x4}}, \ - {"ENABLE[11]", ELEM_OUTPUT_INT, {0x12AC}, {0x4}}, \ - {"ENABLE[12]", ELEM_OUTPUT_INT, {0x12B0}, {0x4}}, \ - {"PENDING[0]", ELEM_OUTPUT_INT, {0x12B4}, {0x4}}, \ - {"PENDING[1]", ELEM_OUTPUT_INT, {0x12B8}, {0x4}}, \ - {"PENDING[2]", ELEM_OUTPUT_INT, {0x12BC}, {0x4}}, \ - {"**IPC*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"IPC_MBX", ELEM_OUTPUT_INT, {0x15080}, {0x4}}, \ - {"IPC_SRC", ELEM_OUTPUT_INT, {0x15084}, {0x4}}, \ - {"IPC_MODE", ELEM_OUTPUT_INT, {0x15088}, {0x4}}, \ - {"IPC_ICLR", ELEM_OUTPUT_INT, {0x1508c}, {0x4}}, \ - {"IPC_DATA0", ELEM_OUTPUT_INT, {0x15090}, {0x4}}, \ - {"IPC_DATA1", ELEM_OUTPUT_INT, {0x15094}, {0x4}}, \ - {"IPC_DATA2", ELEM_OUTPUT_INT, {0x15098}, {0x4}}, \ - {"IPC_DATA3", ELEM_OUTPUT_INT, {0x1509c}, {0x4}}, \ - {"IPC_DATA4", ELEM_OUTPUT_INT, {0x150a0}, {0x4}}, \ - {"IPC_DATA5", ELEM_OUTPUT_INT, {0x150a4}, {0x4}}, \ - {"IPC_DATA6", ELEM_OUTPUT_INT, {0x150a8}, {0x4}}, \ - {"IPC_DATA7", ELEM_OUTPUT_INT, {0x150ac}, {0x4}}, \ - {"IPC_Q0_DATA0", ELEM_OUTPUT_INT, {0x150c0}, {0x4}}, \ - {"IPC_Q0_DATA1", ELEM_OUTPUT_INT, {0x150c4}, {0x4}}, \ - {"IPC_Q0_DATA2", ELEM_OUTPUT_INT, {0x150c8}, {0x4}}, \ - {"IPC_Q0_DATA3", ELEM_OUTPUT_INT, {0x150cc}, {0x4}}, \ - {"IPC_Q0_DATA4", ELEM_OUTPUT_INT, {0x150d0}, {0x4}}, \ - {"IPC_Q0_DATA5", ELEM_OUTPUT_INT, {0x150d4}, {0x4}}, \ - {"IPC_Q0_DATA6", ELEM_OUTPUT_INT, {0x150d8}, {0x4}}, \ - {"IPC_Q0_DATA7", ELEM_OUTPUT_INT, {0x150dc}, {0x4}}, \ - {"IPC_Q0_SYSCNT", ELEM_OUTPUT_INT, {0x150e0}, {0x4}}, \ - {"IPC_Q1_DATA0", 
ELEM_OUTPUT_INT, {0x150e4}, {0x4}}, \ - {"IPC_Q1_DATA1", ELEM_OUTPUT_INT, {0x150e8}, {0x4}}, \ - {"IPC_Q1_DATA2", ELEM_OUTPUT_INT, {0x150ec}, {0x4}}, \ - {"IPC_Q1_DATA3", ELEM_OUTPUT_INT, {0x150f0}, {0x4}}, \ - {"IPC_Q1_DATA4", ELEM_OUTPUT_INT, {0x150f4}, {0x4}}, \ - {"IPC_Q1_DATA5", ELEM_OUTPUT_INT, {0x150f8}, {0x4}}, \ - {"IPC_Q1_DATA6", ELEM_OUTPUT_INT, {0x150fc}, {0x4}}, \ - {"IPC_Q1_DATA7", ELEM_OUTPUT_INT, {0x15100}, {0x4}}, \ - {"IPC_Q1_SYSCNT", ELEM_OUTPUT_INT, {0x15104}, {0x4}}, \ - {"IPC_Q2_DATA0", ELEM_OUTPUT_INT, {0x15108}, {0x4}}, \ - {"IPC_Q2_DATA1", ELEM_OUTPUT_INT, {0x1510c}, {0x4}}, \ - {"IPC_Q2_DATA2", ELEM_OUTPUT_INT, {0x15110}, {0x4}}, \ - {"IPC_Q2_DATA3", ELEM_OUTPUT_INT, {0x15114}, {0x4}}, \ - {"IPC_Q2_DATA4", ELEM_OUTPUT_INT, {0x15118}, {0x4}}, \ - {"IPC_Q2_DATA5", ELEM_OUTPUT_INT, {0x1511c}, {0x4}}, \ - {"IPC_Q2_DATA6", ELEM_OUTPUT_INT, {0x15120}, {0x4}}, \ - {"IPC_Q2_DATA7", ELEM_OUTPUT_INT, {0x15124}, {0x4}}, \ - {"IPC_Q2_SYSCNT", ELEM_OUTPUT_INT, {0x15128}, {0x4}}, \ - {"IPC_Q3_DATA0", ELEM_OUTPUT_INT, {0x1512c}, {0x4}}, \ - {"IPC_Q3_DATA1", ELEM_OUTPUT_INT, {0x15130}, {0x4}}, \ - {"IPC_Q3_DATA2", ELEM_OUTPUT_INT, {0x15134}, {0x4}}, \ - {"IPC_Q3_DATA3", ELEM_OUTPUT_INT, {0x15138}, {0x4}}, \ - {"IPC_Q3_DATA4", ELEM_OUTPUT_INT, {0x1513c}, {0x4}}, \ - {"IPC_Q3_DATA5", ELEM_OUTPUT_INT, {0x15140}, {0x4}}, \ - {"IPC_Q3_DATA6", ELEM_OUTPUT_INT, {0x15144}, {0x4}}, \ - {"IPC_Q3_DATA7", ELEM_OUTPUT_INT, {0x15148}, {0x4}}, \ - {"IPC_Q3_SYSCNT", ELEM_OUTPUT_INT, {0x1514c}, {0x4}}, \ - {"IPC_Q4_DATA0", ELEM_OUTPUT_INT, {0x15150}, {0x4}}, \ - {"IPC_Q4_DATA1", ELEM_OUTPUT_INT, {0x15154}, {0x4}}, \ - {"IPC_Q4_DATA2", ELEM_OUTPUT_INT, {0x15158}, {0x4}}, \ - {"IPC_Q4_DATA3", ELEM_OUTPUT_INT, {0x1515c}, {0x4}}, \ - {"IPC_Q4_DATA4", ELEM_OUTPUT_INT, {0x15160}, {0x4}}, \ - {"IPC_Q4_DATA5", ELEM_OUTPUT_INT, {0x15164}, {0x4}}, \ - {"IPC_Q4_DATA6", ELEM_OUTPUT_INT, {0x15168}, {0x4}}, \ - {"IPC_Q4_DATA7", ELEM_OUTPUT_INT, {0x1516c}, {0x4}}, \ - {"IPC_Q4_SYSCNT", ELEM_OUTPUT_INT, {0x15170}, {0x4}}, \ - {"IPC_Q5_DATA0", ELEM_OUTPUT_INT, {0x15174}, {0x4}}, \ - {"IPC_Q5_DATA1", ELEM_OUTPUT_INT, {0x15178}, {0x4}}, \ - {"IPC_Q5_DATA2", ELEM_OUTPUT_INT, {0x1517c}, {0x4}}, \ - {"IPC_Q5_DATA3", ELEM_OUTPUT_INT, {0x15180}, {0x4}}, \ - {"IPC_Q5_DATA4", ELEM_OUTPUT_INT, {0x15184}, {0x4}}, \ - {"IPC_Q5_DATA5", ELEM_OUTPUT_INT, {0x15188}, {0x4}}, \ - {"IPC_Q5_DATA6", ELEM_OUTPUT_INT, {0x1518c}, {0x4}}, \ - {"IPC_Q5_DATA7", ELEM_OUTPUT_INT, {0x15190}, {0x4}}, \ - {"IPC_Q5_SYSCNT", ELEM_OUTPUT_INT, {0x15194}, {0x4}}, \ - {"IPC_Q6_DATA0", ELEM_OUTPUT_INT, {0x15198}, {0x4}}, \ - {"IPC_Q6_DATA1", ELEM_OUTPUT_INT, {0x1519C}, {0x4}}, \ - {"IPC_Q6_DATA2", ELEM_OUTPUT_INT, {0x151A0}, {0x4}}, \ - {"IPC_Q6_DATA3", ELEM_OUTPUT_INT, {0x151A4}, {0x4}}, \ - {"IPC_Q6_DATA4", ELEM_OUTPUT_INT, {0x151A8}, {0x4}}, \ - {"IPC_Q6_DATA5", ELEM_OUTPUT_INT, {0x151AC}, {0x4}}, \ - {"IPC_Q6_DATA6", ELEM_OUTPUT_INT, {0x151B0}, {0x4}}, \ - {"IPC_Q6_DATA7", ELEM_OUTPUT_INT, {0x151B4}, {0x4}}, \ - {"IPC_Q6_SYSCNT", ELEM_OUTPUT_INT, {0x151B8}, {0x4}}, \ - {"IPC_Q7_DATA0", ELEM_OUTPUT_INT, {0x151BC}, {0x4}}, \ - {"IPC_Q7_DATA1", ELEM_OUTPUT_INT, {0x151C0}, {0x4}}, \ - {"IPC_Q7_DATA2", ELEM_OUTPUT_INT, {0x151C4}, {0x4}}, \ - {"IPC_Q7_DATA3", ELEM_OUTPUT_INT, {0x151C8}, {0x4}}, \ - {"IPC_Q7_DATA4", ELEM_OUTPUT_INT, {0x151CC}, {0x4}}, \ - {"IPC_Q7_DATA5", ELEM_OUTPUT_INT, {0x151D0}, {0x4}}, \ - {"IPC_Q7_DATA6", ELEM_OUTPUT_INT, {0x151D4}, {0x4}}, \ - {"IPC_Q7_DATA7", ELEM_OUTPUT_INT, {0x151D8}, {0x4}}, \ - {"IPC_Q7_SYSCNT", 
ELEM_OUTPUT_INT, {0x151DC}, {0x4}}, \ - {"IPC_Q8_DATA0", ELEM_OUTPUT_INT, {0x151E0}, {0x4}}, \ - {"IPC_Q8_DATA1", ELEM_OUTPUT_INT, {0x151E4}, {0x4}}, \ - {"IPC_Q8_DATA2", ELEM_OUTPUT_INT, {0x151E8}, {0x4}}, \ - {"IPC_Q8_DATA3", ELEM_OUTPUT_INT, {0x151EC}, {0x4}}, \ - {"IPC_Q8_DATA4", ELEM_OUTPUT_INT, {0x151F0}, {0x4}}, \ - {"IPC_Q8_DATA5", ELEM_OUTPUT_INT, {0x151F4}, {0x4}}, \ - {"IPC_Q8_DATA6", ELEM_OUTPUT_INT, {0x151F8}, {0x4}}, \ - {"IPC_Q8_DATA7", ELEM_OUTPUT_INT, {0x151FC}, {0x4}}, \ - {"IPC_Q8_SYSCNT", ELEM_OUTPUT_INT, {0x15200}, {0x4}}, \ - {"IPC_Q9_DATA0", ELEM_OUTPUT_INT, {0x15204}, {0x4}}, \ - {"IPC_Q9_DATA1", ELEM_OUTPUT_INT, {0x15208}, {0x4}}, \ - {"IPC_Q9_DATA2", ELEM_OUTPUT_INT, {0x1520C}, {0x4}}, \ - {"IPC_Q9_DATA3", ELEM_OUTPUT_INT, {0x15210}, {0x4}}, \ - {"IPC_Q9_DATA4", ELEM_OUTPUT_INT, {0x15214}, {0x4}}, \ - {"IPC_Q9_DATA5", ELEM_OUTPUT_INT, {0x15218}, {0x4}}, \ - {"IPC_Q9_DATA6", ELEM_OUTPUT_INT, {0x1521C}, {0x4}}, \ - {"IPC_Q9_DATA7", ELEM_OUTPUT_INT, {0x15220}, {0x4}}, \ - {"IPC_Q9_SYSCNT", ELEM_OUTPUT_INT, {0x15224}, {0x4}}, \ - {"***RAS*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"RAS_DATA0", ELEM_OUTPUT_INT, {0x13c80}, {0x4}}, \ - {"RAS_DATA1", ELEM_OUTPUT_INT, {0x13c88}, {0x4}}, \ - {"***DDR_REG_DUMP*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"DDR_REG_DUMP0", ELEM_OUTPUT_INT, {0x15580}, {0x8}}, \ - {"DDR_REG_DUMP1", ELEM_OUTPUT_INT, {0x15588}, {0x8}}, \ - {"DDR_REG_DUMP2", ELEM_OUTPUT_INT, {0x15590}, {0x8}}, \ - {"DDR_REG_DUMP3", ELEM_OUTPUT_INT, {0x15598}, {0x8}}, \ - {"DDR_REG_DUMP4", ELEM_OUTPUT_INT, {0x155A0}, {0x8}}, \ - {"DDR_REG_DUMP5", ELEM_OUTPUT_INT, {0x155A8}, {0x8}}, \ - {"DDR_REG_DUMP6", ELEM_OUTPUT_INT, {0x155B0}, {0x8}}, \ - {"DDR_REG_DUMP7", ELEM_OUTPUT_INT, {0x155B8}, {0x8}}, \ -} - -#define DATA_MODEL_LPFW_SRAM MODEL_VECTOR(LPFW_SRAM) = { \ - {"****exc****reg**", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reset_reason", ELEM_OUTPUT_INT, {0x00}, {0x4}}, \ - {"slice", ELEM_OUTPUT_INT, {0x08}, {0x4}}, \ - {"rtc", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"REGSP", ELEM_OUTPUT_INT, {0x18}, {0x8}}, \ - {"REGPC", ELEM_OUTPUT_INT, {0x20}, {0x8}}, \ - {"REGELR", ELEM_OUTPUT_INT, {0x28}, {0x8}}, \ - {"REGCPSR", ELEM_OUTPUT_INT, {0x30}, {0x8}}, \ - {"REGSPSR", ELEM_OUTPUT_INT, {0x38}, {0x8}}, \ - {"ESR", ELEM_OUTPUT_INT, {0x40}, {0x8}}, \ - {"FAR", ELEM_OUTPUT_INT, {0x48}, {0x8}}, \ - {"excTrace", ELEM_OUTPUT_INT, {0x50}, {0x1}}, \ - {"ddrExc", ELEM_OUTPUT_INT, {0x51}, {0x1}}, \ - {"irqId", ELEM_OUTPUT_INT, {0x52}, {0x2}}, \ - {"taskId", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"**backup**reg***", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reg_backup_index", ELEM_OUTPUT_INT, {0x820}, {0x4}}, \ - {"reason_0", ELEM_OUTPUT_INT, {0x824}, {0x4}}, \ - {"reason_1", ELEM_OUTPUT_INT, {0x828}, {0x4}}, \ - {"reason_2", ELEM_OUTPUT_INT, {0x82C}, {0x4}}, \ - {"x0", ELEM_OUTPUT_INT, {0x830}, {0x8}}, \ - {"x1", ELEM_OUTPUT_INT, {0x848}, {0x8}}, \ - {"x2", ELEM_OUTPUT_INT, {0x850}, {0x8}}, \ - {"x3", ELEM_OUTPUT_INT, {0x858}, {0x8}}, \ - {"x4", ELEM_OUTPUT_INT, {0x860}, {0x8}}, \ - {"x5", ELEM_OUTPUT_INT, {0x868}, {0x8}}, \ - {"x6", ELEM_OUTPUT_INT, {0x870}, {0x8}}, \ - {"x7", ELEM_OUTPUT_INT, {0x878}, {0x8}}, \ - {"x8", ELEM_OUTPUT_INT, {0x880}, {0x8}}, \ - {"x9", ELEM_OUTPUT_INT, {0x888}, {0x8}}, \ - {"x10", ELEM_OUTPUT_INT, {0x890}, {0x8}}, \ - {"x11", ELEM_OUTPUT_INT, {0x898}, {0x8}}, \ - {"x12", ELEM_OUTPUT_INT, {0x8A0}, {0x8}}, \ - {"x13", ELEM_OUTPUT_INT, {0x8A8}, {0x8}}, \ - {"x14", ELEM_OUTPUT_INT, {0x8B0}, {0x8}}, \ - {"x15", ELEM_OUTPUT_INT, {0x8B8}, {0x8}}, \ - {"x16", 
ELEM_OUTPUT_INT, {0x8C0}, {0x8}}, \ - {"x17", ELEM_OUTPUT_INT, {0x8C8}, {0x8}}, \ - {"x18", ELEM_OUTPUT_INT, {0x8D0}, {0x8}}, \ - {"x19", ELEM_OUTPUT_INT, {0x8D8}, {0x8}}, \ - {"x20", ELEM_OUTPUT_INT, {0x8E0}, {0x8}}, \ - {"x21", ELEM_OUTPUT_INT, {0x8E8}, {0x8}}, \ - {"x22", ELEM_OUTPUT_INT, {0x8F0}, {0x8}}, \ - {"x23", ELEM_OUTPUT_INT, {0x8F8}, {0x8}}, \ - {"x24", ELEM_OUTPUT_INT, {0x900}, {0x8}}, \ - {"x25", ELEM_OUTPUT_INT, {0x908}, {0x8}}, \ - {"x26", ELEM_OUTPUT_INT, {0x910}, {0x8}}, \ - {"x27", ELEM_OUTPUT_INT, {0x918}, {0x8}}, \ - {"x28", ELEM_OUTPUT_INT, {0x920}, {0x8}}, \ - {"x29", ELEM_OUTPUT_INT, {0x928}, {0x8}}, \ - {"x30", ELEM_OUTPUT_INT, {0x930}, {0x8}}, \ - {"XZR", ELEM_OUTPUT_INT, {0x938}, {0x8}}, \ - {"ESR", ELEM_OUTPUT_INT, {0x940}, {0x8}}, \ - {"FAR", ELEM_OUTPUT_INT, {0x948}, {0x8}}, \ - {"SPSR", ELEM_OUTPUT_INT, {0x950}, {0x8}}, \ - {"ELR", ELEM_OUTPUT_INT, {0x958}, {0x8}}, \ - {"PC", ELEM_OUTPUT_INT, {0x960}, {0x8}}, \ - {"SP", ELEM_OUTPUT_INT, {0x968}, {0x8}}, \ - {"CPSR", ELEM_OUTPUT_INT, {0x970}, {0x8}}, \ - {"**GIC*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"ENABLE[0]", ELEM_OUTPUT_INT, {0x2820}, {0x4}}, \ - {"ENABLE[1]", ELEM_OUTPUT_INT, {0x2824}, {0x4}}, \ - {"ENABLE[2]", ELEM_OUTPUT_INT, {0x2828}, {0x4}}, \ - {"ENABLE[3]", ELEM_OUTPUT_INT, {0x282C}, {0x4}}, \ - {"ENABLE[4]", ELEM_OUTPUT_INT, {0x2830}, {0x4}}, \ - {"ENABLE[5]", ELEM_OUTPUT_INT, {0x2834}, {0x4}}, \ - {"ENABLE[6]", ELEM_OUTPUT_INT, {0x2838}, {0x4}}, \ - {"ENABLE[7]", ELEM_OUTPUT_INT, {0x283C}, {0x4}}, \ - {"ENABLE[8]", ELEM_OUTPUT_INT, {0x2840}, {0x4}}, \ - {"ENABLE[9]", ELEM_OUTPUT_INT, {0x2844}, {0x4}}, \ - {"ENABLE[10]", ELEM_OUTPUT_INT, {0x2848}, {0x4}}, \ - {"ENABLE[11]", ELEM_OUTPUT_INT, {0x284C}, {0x4}}, \ - {"ENABLE[12]", ELEM_OUTPUT_INT, {0x2850}, {0x4}}, \ - {"PENDING[0]", ELEM_OUTPUT_INT, {0x2854}, {0x4}}, \ - {"PENDING[1]", ELEM_OUTPUT_INT, {0x2858}, {0x4}}, \ - {"PENDING[2]", ELEM_OUTPUT_INT, {0x285C}, {0x4}}, \ - {"**IPC*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"IPC_MBX", ELEM_OUTPUT_INT, {0x1000}, {0x4}}, \ - {"IPC_SRC", ELEM_OUTPUT_INT, {0x1004}, {0x4}}, \ - {"IPC_MODE", ELEM_OUTPUT_INT, {0x1008}, {0x4}}, \ - {"IPC_ICLR", ELEM_OUTPUT_INT, {0x100c}, {0x4}}, \ - {"IPC_DATA0", ELEM_OUTPUT_INT, {0x1010}, {0x4}}, \ - {"IPC_DATA1", ELEM_OUTPUT_INT, {0x1014}, {0x4}}, \ - {"IPC_DATA2", ELEM_OUTPUT_INT, {0x1018}, {0x4}}, \ - {"IPC_DATA3", ELEM_OUTPUT_INT, {0x101c}, {0x4}}, \ - {"IPC_DATA4", ELEM_OUTPUT_INT, {0x1020}, {0x4}}, \ - {"IPC_DATA5", ELEM_OUTPUT_INT, {0x1024}, {0x4}}, \ - {"IPC_DATA6", ELEM_OUTPUT_INT, {0x1028}, {0x4}}, \ - {"IPC_DATA7", ELEM_OUTPUT_INT, {0x102c}, {0x4}}, \ - {"IPC_Q0_DATA0", ELEM_OUTPUT_INT, {0x1040}, {0x4}}, \ - {"IPC_Q0_DATA1", ELEM_OUTPUT_INT, {0x1044}, {0x4}}, \ - {"IPC_Q0_DATA2", ELEM_OUTPUT_INT, {0x1048}, {0x4}}, \ - {"IPC_Q0_DATA3", ELEM_OUTPUT_INT, {0x104c}, {0x4}}, \ - {"IPC_Q0_DATA4", ELEM_OUTPUT_INT, {0x1050}, {0x4}}, \ - {"IPC_Q0_DATA5", ELEM_OUTPUT_INT, {0x1054}, {0x4}}, \ - {"IPC_Q0_DATA6", ELEM_OUTPUT_INT, {0x1058}, {0x4}}, \ - {"IPC_Q0_DATA7", ELEM_OUTPUT_INT, {0x105c}, {0x4}}, \ - {"IPC_Q0_SYSCNT", ELEM_OUTPUT_INT, {0x1060}, {0x4}}, \ - {"IPC_Q1_DATA0", ELEM_OUTPUT_INT, {0x1064}, {0x4}}, \ - {"IPC_Q1_DATA1", ELEM_OUTPUT_INT, {0x1068}, {0x4}}, \ - {"IPC_Q1_DATA2", ELEM_OUTPUT_INT, {0x106c}, {0x4}}, \ - {"IPC_Q1_DATA3", ELEM_OUTPUT_INT, {0x1070}, {0x4}}, \ - {"IPC_Q1_DATA4", ELEM_OUTPUT_INT, {0x1074}, {0x4}}, \ - {"IPC_Q1_DATA5", ELEM_OUTPUT_INT, {0x1078}, {0x4}}, \ - {"IPC_Q1_DATA6", ELEM_OUTPUT_INT, {0x107c}, {0x4}}, \ - 
{"IPC_Q1_DATA7", ELEM_OUTPUT_INT, {0x1080}, {0x4}}, \ - {"IPC_Q1_SYSCNT", ELEM_OUTPUT_INT, {0x1084}, {0x4}}, \ - {"IPC_Q2_DATA0", ELEM_OUTPUT_INT, {0x1088}, {0x4}}, \ - {"IPC_Q2_DATA1", ELEM_OUTPUT_INT, {0x108c}, {0x4}}, \ - {"IPC_Q2_DATA2", ELEM_OUTPUT_INT, {0x1190}, {0x4}}, \ - {"IPC_Q2_DATA3", ELEM_OUTPUT_INT, {0x1194}, {0x4}}, \ - {"IPC_Q2_DATA4", ELEM_OUTPUT_INT, {0x1198}, {0x4}}, \ - {"IPC_Q2_DATA5", ELEM_OUTPUT_INT, {0x119c}, {0x4}}, \ - {"IPC_Q2_DATA6", ELEM_OUTPUT_INT, {0x11A0}, {0x4}}, \ - {"IPC_Q2_DATA7", ELEM_OUTPUT_INT, {0x11A4}, {0x4}}, \ - {"IPC_Q2_SYSCNT", ELEM_OUTPUT_INT, {0x11A8}, {0x4}}, \ - {"IPC_Q3_DATA0", ELEM_OUTPUT_INT, {0x11Ac}, {0x4}}, \ - {"IPC_Q3_DATA1", ELEM_OUTPUT_INT, {0x11B0}, {0x4}}, \ - {"IPC_Q3_DATA2", ELEM_OUTPUT_INT, {0x11B4}, {0x4}}, \ - {"IPC_Q3_DATA3", ELEM_OUTPUT_INT, {0x11B8}, {0x4}}, \ - {"IPC_Q3_DATA4", ELEM_OUTPUT_INT, {0x11Bc}, {0x4}}, \ - {"IPC_Q3_DATA5", ELEM_OUTPUT_INT, {0x11C0}, {0x4}}, \ - {"IPC_Q3_DATA6", ELEM_OUTPUT_INT, {0x11C4}, {0x4}}, \ - {"IPC_Q3_DATA7", ELEM_OUTPUT_INT, {0x11C8}, {0x4}}, \ - {"IPC_Q3_SYSCNT", ELEM_OUTPUT_INT, {0x11Cc}, {0x4}}, \ - {"IPC_Q4_DATA0", ELEM_OUTPUT_INT, {0x11D0}, {0x4}}, \ - {"IPC_Q4_DATA1", ELEM_OUTPUT_INT, {0x11D4}, {0x4}}, \ - {"IPC_Q4_DATA2", ELEM_OUTPUT_INT, {0x11D8}, {0x4}}, \ - {"IPC_Q4_DATA3", ELEM_OUTPUT_INT, {0x11Dc}, {0x4}}, \ - {"IPC_Q4_DATA4", ELEM_OUTPUT_INT, {0x11E0}, {0x4}}, \ - {"IPC_Q4_DATA5", ELEM_OUTPUT_INT, {0x11E4}, {0x4}}, \ - {"IPC_Q4_DATA6", ELEM_OUTPUT_INT, {0x11E8}, {0x4}}, \ - {"IPC_Q4_DATA7", ELEM_OUTPUT_INT, {0x11Ec}, {0x4}}, \ - {"IPC_Q4_SYSCNT", ELEM_OUTPUT_INT, {0x11F0}, {0x4}}, \ - {"IPC_Q5_DATA0", ELEM_OUTPUT_INT, {0x11F4}, {0x4}}, \ - {"IPC_Q5_DATA1", ELEM_OUTPUT_INT, {0x11F8}, {0x4}}, \ - {"IPC_Q5_DATA2", ELEM_OUTPUT_INT, {0x11Fc}, {0x4}}, \ - {"IPC_Q5_DATA3", ELEM_OUTPUT_INT, {0x1200}, {0x4}}, \ - {"IPC_Q5_DATA4", ELEM_OUTPUT_INT, {0x1204}, {0x4}}, \ - {"IPC_Q5_DATA5", ELEM_OUTPUT_INT, {0x1208}, {0x4}}, \ - {"IPC_Q5_DATA6", ELEM_OUTPUT_INT, {0x120c}, {0x4}}, \ - {"IPC_Q5_DATA7", ELEM_OUTPUT_INT, {0x1210}, {0x4}}, \ - {"IPC_Q5_SYSCNT", ELEM_OUTPUT_INT, {0x1214}, {0x4}}, \ - {"IPC_Q6_DATA0", ELEM_OUTPUT_INT, {0x1218}, {0x4}}, \ - {"IPC_Q6_DATA1", ELEM_OUTPUT_INT, {0x121C}, {0x4}}, \ - {"IPC_Q6_DATA2", ELEM_OUTPUT_INT, {0x1220}, {0x4}}, \ - {"IPC_Q6_DATA3", ELEM_OUTPUT_INT, {0x1224}, {0x4}}, \ - {"IPC_Q6_DATA4", ELEM_OUTPUT_INT, {0x1228}, {0x4}}, \ - {"IPC_Q6_DATA5", ELEM_OUTPUT_INT, {0x122C}, {0x4}}, \ - {"IPC_Q6_DATA6", ELEM_OUTPUT_INT, {0x1230}, {0x4}}, \ - {"IPC_Q6_DATA7", ELEM_OUTPUT_INT, {0x1234}, {0x4}}, \ - {"IPC_Q6_SYSCNT", ELEM_OUTPUT_INT, {0x1238}, {0x4}}, \ - {"IPC_Q7_DATA0", ELEM_OUTPUT_INT, {0x123C}, {0x4}}, \ - {"IPC_Q7_DATA1", ELEM_OUTPUT_INT, {0x1240}, {0x4}}, \ - {"IPC_Q7_DATA2", ELEM_OUTPUT_INT, {0x1244}, {0x4}}, \ - {"IPC_Q7_DATA3", ELEM_OUTPUT_INT, {0x1248}, {0x4}}, \ - {"IPC_Q7_DATA4", ELEM_OUTPUT_INT, {0x124C}, {0x4}}, \ - {"IPC_Q7_DATA5", ELEM_OUTPUT_INT, {0x1250}, {0x4}}, \ - {"IPC_Q7_DATA6", ELEM_OUTPUT_INT, {0x1254}, {0x4}}, \ - {"IPC_Q7_DATA7", ELEM_OUTPUT_INT, {0x1258}, {0x4}}, \ - {"IPC_Q7_SYSCNT", ELEM_OUTPUT_INT, {0x125C}, {0x4}}, \ - {"IPC_Q8_DATA0", ELEM_OUTPUT_INT, {0x1260}, {0x4}}, \ - {"IPC_Q8_DATA1", ELEM_OUTPUT_INT, {0x1264}, {0x4}}, \ - {"IPC_Q8_DATA2", ELEM_OUTPUT_INT, {0x1268}, {0x4}}, \ - {"IPC_Q8_DATA3", ELEM_OUTPUT_INT, {0x126C}, {0x4}}, \ - {"IPC_Q8_DATA4", ELEM_OUTPUT_INT, {0x1270}, {0x4}}, \ - {"IPC_Q8_DATA5", ELEM_OUTPUT_INT, {0x1274}, {0x4}}, \ - {"IPC_Q8_DATA6", ELEM_OUTPUT_INT, {0x1278}, {0x4}}, \ - {"IPC_Q8_DATA7", 
ELEM_OUTPUT_INT, {0x127C}, {0x4}}, \ - {"IPC_Q8_SYSCNT", ELEM_OUTPUT_INT, {0x1280}, {0x4}}, \ - {"IPC_Q9_DATA0", ELEM_OUTPUT_INT, {0x1284}, {0x4}}, \ - {"IPC_Q9_DATA1", ELEM_OUTPUT_INT, {0x1288}, {0x4}}, \ - {"IPC_Q9_DATA2", ELEM_OUTPUT_INT, {0x128C}, {0x4}}, \ - {"IPC_Q9_DATA3", ELEM_OUTPUT_INT, {0x1290}, {0x4}}, \ - {"IPC_Q9_DATA4", ELEM_OUTPUT_INT, {0x1294}, {0x4}}, \ - {"IPC_Q9_DATA5", ELEM_OUTPUT_INT, {0x1298}, {0x4}}, \ - {"IPC_Q9_DATA6", ELEM_OUTPUT_INT, {0x129C}, {0x4}}, \ - {"IPC_Q9_DATA7", ELEM_OUTPUT_INT, {0x12A0}, {0x4}}, \ - {"IPC_Q9_SYSCNT", ELEM_OUTPUT_INT, {0x12A4}, {0x4}}, \ - {"***DDR_REG_DUMP*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"DDR_REG_DUMP0", ELEM_OUTPUT_INT, {0x3C60}, {0x8}}, \ - {"DDR_REG_DUMP1", ELEM_OUTPUT_INT, {0x3C68}, {0x8}}, \ - {"DDR_REG_DUMP2", ELEM_OUTPUT_INT, {0x3C70}, {0x8}}, \ - {"DDR_REG_DUMP3", ELEM_OUTPUT_INT, {0x3C78}, {0x8}}, \ - {"DDR_REG_DUMP4", ELEM_OUTPUT_INT, {0x3C80}, {0x8}}, \ - {"DDR_REG_DUMP5", ELEM_OUTPUT_INT, {0x3C88}, {0x8}}, \ - {"DDR_REG_DUMP6", ELEM_OUTPUT_INT, {0x3C90}, {0x8}}, \ - {"DDR_REG_DUMP7", ELEM_OUTPUT_INT, {0x3C98}, {0x8}}, \ -} - -#define DATA_MODEL_LPFW_PMU MODEL_VECTOR(LPFW_PMU) = { \ - {"cpuid", ELEM_OUTPUT_INT, {0x0000}, {0x1}}, \ - {"slaveid", ELEM_OUTPUT_INT, {0x0001}, {0x1}}, \ - {"loopid", ELEM_OUTPUT_INT, {0x0002}, {0x1}}, \ - {"79H", ELEM_OUTPUT_HEX, {0x0004}, {0x2}}, \ - {"7AH", ELEM_OUTPUT_HEX, {0x0006}, {0x1}}, \ - {"7BH", ELEM_OUTPUT_HEX, {0x0007}, {0x1}}, \ - {"7CH", ELEM_OUTPUT_HEX, {0x0008}, {0x1}}, \ - {"7DH", ELEM_OUTPUT_HEX, {0x0009}, {0x1}}, \ - {"7EH", ELEM_OUTPUT_HEX, {0x000A}, {0x1}}, \ - {"80H", ELEM_OUTPUT_HEX, {0x000B}, {0x1}}, \ - {"88H", ELEM_OUTPUT_HEX, {0x000C}, {0x2}}, \ - {"89H", ELEM_OUTPUT_HEX, {0x000E}, {0x2}}, \ - {"8BH", ELEM_OUTPUT_HEX, {0x0010}, {0x2}}, \ - {"8CH", ELEM_OUTPUT_HEX, {0x0012}, {0x2}}, \ - {"8DH", ELEM_OUTPUT_HEX, {0x0014}, {0x2}}, \ - {"96H", ELEM_OUTPUT_HEX, {0x0016}, {0x2}}, \ - {"97H", ELEM_OUTPUT_HEX, {0x0018}, {0x2}}, \ - {"20H", ELEM_OUTPUT_HEX, {0x001A}, {0x1}}, \ - {"21H", ELEM_OUTPUT_HEX, {0x001C}, {0x2}}, \ -} - -/* TEE module */ -#define DATA_MODEL_TEE MODEL_VECTOR(TEE) = { \ - {"tee info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* TF module */ -#define DATA_MODEL_TF MODEL_VECTOR(TF) = { \ - {"x0", ELEM_OUTPUT_HEX, {0x0}, {0x8}}, \ - {"x1", ELEM_OUTPUT_HEX, {0x8}, {0x8}}, \ - {"x30", ELEM_OUTPUT_HEX, {0x10}, {0x8}}, \ - {"x2", ELEM_OUTPUT_HEX, {0x18}, {0x8}}, \ - {"x3", ELEM_OUTPUT_HEX, {0x20}, {0x8}}, \ - {"x4", ELEM_OUTPUT_HEX, {0x28}, {0x8}}, \ - {"x5", ELEM_OUTPUT_HEX, {0x30}, {0x8}}, \ - {"x6", ELEM_OUTPUT_HEX, {0x38}, {0x8}}, \ - {"x7", ELEM_OUTPUT_HEX, {0x40}, {0x8}}, \ - {"x8", ELEM_OUTPUT_HEX, {0x48}, {0x8}}, \ - {"x9", ELEM_OUTPUT_HEX, {0x50}, {0x8}}, \ - {"x10", ELEM_OUTPUT_HEX, {0x58}, {0x8}}, \ - {"x11", ELEM_OUTPUT_HEX, {0x60}, {0x8}}, \ - {"x12", ELEM_OUTPUT_HEX, {0x68}, {0x8}}, \ - {"x13", ELEM_OUTPUT_HEX, {0x70}, {0x8}}, \ - {"x14", ELEM_OUTPUT_HEX, {0x78}, {0x8}}, \ - {"x15", ELEM_OUTPUT_HEX, {0x80}, {0x8}}, \ - {"x16", ELEM_OUTPUT_HEX, {0x88}, {0x8}}, \ - {"x17", ELEM_OUTPUT_HEX, {0x90}, {0x8}}, \ - {"x18", ELEM_OUTPUT_HEX, {0x98}, {0x8}}, \ - {"x19", ELEM_OUTPUT_HEX, {0xA0}, {0x8}}, \ - {"x20", ELEM_OUTPUT_HEX, {0xA8}, {0x8}}, \ - {"x21", ELEM_OUTPUT_HEX, {0xB0}, {0x8}}, \ - {"x22", ELEM_OUTPUT_HEX, {0xB8}, {0x8}}, \ - {"x23", ELEM_OUTPUT_HEX, {0xC0}, {0x8}}, \ - {"x24", ELEM_OUTPUT_HEX, {0xC8}, {0x8}}, \ - {"x25", ELEM_OUTPUT_HEX, {0xD0}, {0x8}}, \ - {"x26", ELEM_OUTPUT_HEX, {0xD8}, {0x8}}, \ - {"x27", ELEM_OUTPUT_HEX, 
{0xE0}, {0x8}}, \ - {"x28", ELEM_OUTPUT_HEX, {0xE8}, {0x8}}, \ - {"x29", ELEM_OUTPUT_HEX, {0xF0}, {0x8}}, \ - {"scr_el3", ELEM_OUTPUT_HEX, {0xF8}, {0x8}}, \ - {"sctlr_el3", ELEM_OUTPUT_HEX, {0x100}, {0x8}}, \ - {"cptr_el3", ELEM_OUTPUT_HEX, {0x108}, {0x8}}, \ - {"tcr_el3", ELEM_OUTPUT_HEX, {0x110}, {0x8}}, \ - {"daif", ELEM_OUTPUT_HEX, {0x118}, {0x8}}, \ - {"mair_el3", ELEM_OUTPUT_HEX, {0x120}, {0x8}}, \ - {"spsr_el3", ELEM_OUTPUT_HEX, {0x128}, {0x8}}, \ - {"elr_el3", ELEM_OUTPUT_HEX, {0x130}, {0x8}}, \ - {"ttbr0_el3", ELEM_OUTPUT_HEX, {0x138}, {0x8}}, \ - {"esr_el3", ELEM_OUTPUT_HEX, {0x140}, {0x8}}, \ - {"far_el3", ELEM_OUTPUT_HEX, {0x148}, {0x8}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* DRIVE module */ -#define DATA_MODEL_DRIVER MODEL_VECTOR(DRIVER) = { \ - {"driver info", ELEM_OUTPUT_STR_NL, {0x0}, {0x20000}}, \ -} - -/* TS module */ -#define DATA_MODEL_TS MODEL_VECTOR(TS) = { \ - {"ts info", ELEM_OUTPUT_CHAR, {0x0}, {0x100000}}, \ -} - -/* TS module, start */ -#define DATA_MODEL_TS_START MODEL_VECTOR(TS_START) = { \ - {"ts start info", ELEM_OUTPUT_STR_NL, {0x0}, {0xC800}}, \ -} - -/* AP module, early print */ -#define DATA_MODEL_AP_EPRINT MODEL_VECTOR(AP_EPRINT) = { \ - {"early print info", ELEM_OUTPUT_STR_NL, {0x0}, {0x400}}, \ -} - -/* BIOS module */ -#define DATA_MODEL_BIOS MODEL_VECTOR(BIOS) = { \ - {"bios info", ELEM_OUTPUT_STR_NL, {0x0}, {0x50000}}, \ -} - -/* BIOS module, sram */ -#define DATA_MODEL_BIOS_SRAM MODEL_VECTOR(BIOS_SRAM) = { \ - {"LPM3_WAKE_UP_STATUS", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"DEBUG_TIME_POWERUP_DONE", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"DEBUG_TIME_PERSTHIGH_DONE", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"DEBUG_TIME_PCIEPHY_DONE", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"DEBUG_TIME_PHY_FIRMWARE_DONE", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"DEBUG_TIME_PCIECTRL_DONE", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"DEBUG_TIME_IMG_DONE", ELEM_OUTPUT_INT, {0x3C}, {0x4}}, \ - {"DEBUG_TIME_SECURE_DONE", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"DEBUG_VERSION_ADDR", ELEM_OUTPUT_HEX, {0x50}, {0x10}}, \ - {"XLOADER_RESET_REG", ELEM_OUTPUT_INT, {0x200}, {0x4}}, \ - {"XLOADER_KEY_POINT", ELEM_OUTPUT_INT, {0x204}, {0x4}}, \ - {"XLOADER_TIME_POWERUP_DONE", ELEM_OUTPUT_INT, {0x228}, {0x4}}, \ - {"XLOADER_TIME_PERSTHIGH_DONE", ELEM_OUTPUT_INT, {0x22C}, {0x4}}, \ - {"XLOADER_TIME_PCIEPHY_DONE", ELEM_OUTPUT_INT, {0x230}, {0x4}}, \ - {"XLOADER_TIME_PHY_FIRMWARE_DONE", ELEM_OUTPUT_INT, {0x234}, {0x4}}, \ - {"XLOADER_TIME_PCIECTRL_DONE", ELEM_OUTPUT_INT, {0x238}, {0x4}}, \ - {"XLOADER_TIME_PCIE_DETECT_DONE", ELEM_OUTPUT_INT, {0x23C}, {0x4}}, \ - {"UEFI_LAST_KEYPOINT", ELEM_OUTPUT_INT, {0x320}, {0x4}}, \ - {"SD_LOAD_FILE_STATUS", ELEM_OUTPUT_INT, {0x350}, {0x4}}, \ -} - -/* NETWORK module */ -#define DATA_MODEL_NETWORK MODEL_VECTOR(NETWORK) = { \ - {"network info", ELEM_OUTPUT_STR, {0x0}, {0x20000}}, \ -} - -#define DATA_MODEL_IMU_BOOT_LOG MODEL_VECTOR(IMU_BOOT_LOG) = { \ - {"imu log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"rollback", ELEM_CTRL_LPBF_ROLLBK, {0x10}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"imu log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"imu log", ELEM_OUTPUT_STR_NL, {0x80}, {0xFFF80}}, \ -} - -#define 
DATA_MODEL_IMU_UEFI_BOOT MODEL_VECTOR(IMU_UEFI_BOOT) = { \ - {"imu log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"rollback", ELEM_CTRL_LPBF_ROLLBK, {0x10}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"imu log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"imu log", ELEM_OUTPUT_STR_NL, {0x80}, {0x2FFF80}}, \ -} - -#define DATA_MODEL_IMU_RUN_LOG MODEL_VECTOR(IMU_RUN_LOG) = { \ - {"imu log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"rollback", ELEM_CTRL_LPBF_ROLLBK, {0x10}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"imu log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"imu log", ELEM_OUTPUT_STR_NL, {0x80}, {0x3FFF80}}, \ -} - -// lpfw exception with imu log -#define DATA_MODEL_LPFW_LOG MODEL_VECTOR(LPFW_LOG) = { \ - {"IMU BOOT LOG", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x100000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_IMU_BOOT_LOG}, {0x1}}, \ - {"IMU RUN LOG", ELEM_CTRL_TABLE_GOTO, {0x400000}, {0x400000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_IMU_RUN_LOG}, {0x1}}, \ -} - -// bbox kbox info -#define DATA_MODEL_BBOX_KBOX MODEL_VECTOR(BBOX_KBOX) = { \ - {"CONSOLE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[console info]", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ - {"CONSOLE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"MESSAGE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[message info]", ELEM_OUTPUT_STR_NL, {0x10000}, {0x40000}}, \ - {"MESSAGE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if panic", ELEM_CTRL_COMPARE, {0x50000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"PANIC START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[panic info]", ELEM_OUTPUT_STR_NL, {0x50000}, {0x8000}}, \ - {"PANIC END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if emerge", ELEM_CTRL_COMPARE, {0x58000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"EMERGE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[emerge info]", ELEM_OUTPUT_STR_NL, {0x58000}, {0x8000}}, \ - {"EMERGE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if die", ELEM_CTRL_COMPARE, {0x60000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"DIE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[die info]", ELEM_OUTPUT_STR_NL, {0x60000}, {0x20000}}, \ - {"DIE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ -} - -/** - * the whole space is 512k, used for histroy data record - * the struct distribution is as follows: - * +-------------------+ - * | head info(1k) | region: area: module block: - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | boot region |---->| first area |---->| module block |---->| block head | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | run region | | second area | | module block | | block data | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | reserved | | ...... | | ...... 
| - * +-------------------+ +--------------------+ +-----------------+ - */ -#define DATA_MODEL_HDR_BOOT_BIOS MODEL_VECTOR(HDR_BOOT_BIOS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"bsbc point", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"bsbc exc point", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"hboot1 point", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"hboot1 exc point", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"hboot2 point", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"hboot2 exc point", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"[BIOS info]", ELEM_OUTPUT_STR_NL, {0x480}, {0x2780}}, \ -} - -#define DATA_MODEL_HDR_BOOT_DDR MODEL_VECTOR(HDR_BOOT_DDR) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"magic_begin", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"init_keypoint", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"ldo8_vol", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"buck3_status", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"buck3_vol", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"buck5_status", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"buck5_vol", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"wr_test_result", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"rint_status[0]", ELEM_OUTPUT_INT, {0x3C}, {0x4}}, \ - {"rint_status[1]", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"rint_status[2]", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"rint_status[3]", ELEM_OUTPUT_INT, {0x48}, {0x4}}, \ - {"rint_status[4]", ELEM_OUTPUT_INT, {0x4C}, {0x4}}, \ - {"rint_status[5]", ELEM_OUTPUT_INT, {0x50}, {0x4}}, \ - {"rint_status[6]", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"rint_status[7]", ELEM_OUTPUT_INT, {0x58}, {0x4}}, \ - {"SOC_SCTRL_DDRRETENTION_ADDR", ELEM_OUTPUT_INT, {0x5C}, {0x4}}, \ - {"SOC_SCTRL_DDRRETENTIONCLR_ADDR", ELEM_OUTPUT_INT, {0x60}, {0x4}}, \ - {"SOC_SCTRL_DRAMRETENTION_ADDR", ELEM_OUTPUT_INT, {0x64}, {0x4}}, \ - {"SC_DDRC_0_3_RESET_REQ", ELEM_OUTPUT_INT, {0x68}, {0x4}}, \ - {"SC_DDRC_4_7_RESET_REQ", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"SC_DDRC_0_3_PACK_RESET_REQ", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"SC_DDRC_4_7_PACK_RESET_REQ", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {"SC_DDRC_EXMBIST0_REGS_RESET_REQ", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {"SC_DDRC_EXMBIST1_REGS_RESET_REQ", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {"SOC_SCTRL_DDRC_0_3_AO_RST_ADDR", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {"SOC_SCTRL_DDRC_4_7_AO_RST_ADDR", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {"SOC_PMCTRL_PPLLBYPASS0_ADDR", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"SOC_PMCTRL_PPLLBYPASS1_ADDR", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {"SOC_PMCTRL_PPLL3FCTRL_ADDR", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {"SOC_PMCTRL_PPLL3FCTRL_FRAC_ADDR", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {"SOC_PMCTRL_PPLL4FCTRL_ADDR", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {"SOC_PMCTRL_PPLL4FCTRL_FRAC_ADDR", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {"SOC_PMCTRL_PPLLOCKSTATUS_ADDR", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - 
{"SC_DDRC_0_3_BYPASS_MODE_CTRL", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {"SC_DDRC_4_7_BYPASS_MODE_CTRL", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {"SC_PLL_PROF_CFG1", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ -} - -#define DATA_MODEL_HDR_BOOT_TEE MODEL_VECTOR(HDR_BOOT_TEE) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[BOOT FATAL INFO SIZE]", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"[BOOT FATAL INFO]", ELEM_OUTPUT_STR_NL, {0x20}, {0x7E0}}, \ - {"[run point tail]", ELEM_OUTPUT_INT, {0x800}, {0x4}}, \ - {"[boot point info]", ELEM_OUTPUT_HEX, {0x804}, {0x20}}, \ - {"[run point info]", ELEM_OUTPUT_HEX, {0x884}, {0x20}}, \ - {"[last log size]", ELEM_OUTPUT_INT, {0xC00}, {0x4}}, \ - {"[last log data]", ELEM_OUTPUT_STR_NL, {0xC04}, {0x3FC}}, \ -} - -#define DATA_MODEL_HDR_BOOT_ATF MODEL_VECTOR(HDR_BOOT_ATF) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[ATF info]", ELEM_OUTPUT_STR_NL, {0x1C}, {0xFE4}}, \ -} - -#define DATA_MODEL_HDR_BOOT_AREA MODEL_VECTOR(HDR_BOOT_AREA) = { \ - {"BIOS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_BIOS", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_BIOS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"DDR INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_DDR", ELEM_CTRL_TABLE_GOTO, {0x3000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_DDR}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"TEE INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_TEE", ELEM_CTRL_TABLE_GOTO, {0x4000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_TEE}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"ATF INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_ATF", ELEM_CTRL_TABLE_GOTO, {0x5000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_ATF}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_RUN_OS MODEL_VECTOR(HDR_RUN_OS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[OS info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {"event_flag", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"dump_flag", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"err num", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"[OS log]", ELEM_OUTPUT_STR_NL, {0x100}, {0xF00}}, \ -} - -#define DATA_MODEL_HDR_RUN_LPFW MODEL_VECTOR(HDR_RUN_LPFW) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, 
{0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0x200}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[LPFW log]", ELEM_OUTPUT_STR_NL, {0x40}, {0x400}}, \ -} - -#define DATA_MODEL_HDR_RUN_TEE MODEL_VECTOR(HDR_RUN_TEE) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[RUN FATAL INFO SIZE]", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"[RUN FATAL INFO]", ELEM_OUTPUT_STR_NL, {0x20}, {0x7E0}}, \ -} - -#define DATA_MODEL_HDR_RUN_ATF MODEL_VECTOR(HDR_RUN_ATF) = {\ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[ATF info]", ELEM_OUTPUT_STR_NL, {0x1C}, {0x7E4}}, \ -} - -#define DATA_MODEL_HDR_RUN_AREA MODEL_VECTOR(HDR_RUN_AREA) = { \ - {"TEE INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_TEE", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_TEE}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"ATF INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_ATF", ELEM_CTRL_TABLE_GOTO, {0x800}, {0x800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_ATF}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"LPFW INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_LPFW", ELEM_CTRL_TABLE_GOTO, {0x1000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_LPFW}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"OS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_OS", ELEM_CTRL_TABLE_GOTO, {0x2000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_OS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_BOOT MODEL_VECTOR(HDR_BOOT) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 
4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x1E000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x25800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x2D000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_RUN MODEL_VECTOR(HDR_RUN) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x3C00}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0xB400}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x12C00}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_BOOT_INFO MODEL_VECTOR(HDR_BOOT_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, 
{0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xB0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xB4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xC8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xCC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xE0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xE4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xF8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xFC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x110}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x114}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR_RUN_INFO MODEL_VECTOR(HDR_RUN_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, 
{0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR MODEL_VECTOR(HDR) = { \ - {"head info", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - 
{"reset count", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"boot region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_INFO", ELEM_CTRL_TABLE_GOTO, {0XC}, {0x168}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_BOOT", ELEM_CTRL_TABLE_GOTO, {0x400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"run region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_INFO", ELEM_CTRL_TABLE_GOTO, {0x170}, {0x164}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_RUN", ELEM_CTRL_TABLE_GOTO, {0x4B400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN}, {0x1}}, \ -} - -#endif // BBOX_DDR_DATA_CLOUD_H diff --git a/inc/toolchain/bbox/bbox_ddr_data_dc.h b/inc/toolchain/bbox/bbox_ddr_data_dc.h deleted file mode 100644 index 1fe23d793f16f76f52b4a38f4dcc8103be028661..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_ddr_data_dc.h +++ /dev/null @@ -1,458 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef BBOX_DDR_DATA_DC_H -#define BBOX_DDR_DATA_DC_H - -#include "bbox_ddr_data.h" - -/* each Module need define as follows */ -/* LPM module */ -#define DATA_MODEL_LPM_START MODEL_VECTOR(LPM_START) = { \ - {"start_steps", ELEM_OUTPUT_STR_NL, {0x0}, {0x800}}, \ -} - -#define DATA_MODEL_LPM MODEL_VECTOR(LPM) = { \ - {"****exc****reg**", ELEM_OUTPUT_STR, {0x0}, {0x8}}, \ - {"fault_regs_dfsr", ELEM_OUTPUT_INT, {0x1410}, {0x4}}, \ - {"fault_regs_ifsr", ELEM_OUTPUT_INT, {0x1414}, {0x4}}, \ - {"fault_regs_adfsr", ELEM_OUTPUT_INT, {0x1418}, {0x4}}, \ - {"fault_regs_aifsr", ELEM_OUTPUT_INT, {0x141c}, {0x4}}, \ - {"fault_regs_dfar", ELEM_OUTPUT_INT, {0x1420}, {0x4}}, \ - {"fault_regs_ifar", ELEM_OUTPUT_INT, {0x1424}, {0x4}}, \ - {"usr_regs_r13", ELEM_OUTPUT_INT, {0x1428}, {0x4}}, \ - {"usr_regs_r14", ELEM_OUTPUT_INT, {0x142c}, {0x4}}, \ - {"svc_regs_regs_r13", ELEM_OUTPUT_INT, {0x1430}, {0x4}}, \ - {"svc_regs_regs_r14", ELEM_OUTPUT_INT, {0x1434}, {0x4}}, \ - {"svc_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1438}, {0x4}}, \ - {"irq_regs_regs_r13", ELEM_OUTPUT_INT, {0x143c}, {0x4}}, \ - {"irq_regs_regs_r14", ELEM_OUTPUT_INT, {0x1440}, {0x4}}, \ - {"irq_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1444}, {0x4}}, \ - {"fiq_regs_regs_r13", ELEM_OUTPUT_INT, {0x1448}, {0x4}}, \ - {"fiq_regs_regs_r14", ELEM_OUTPUT_INT, {0x144c}, {0x4}}, \ - {"fiq_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1450}, {0x4}}, \ - {"und_regs_regs_r13", ELEM_OUTPUT_INT, {0x1454}, {0x4}}, \ - {"und_regs_regs_r14", ELEM_OUTPUT_INT, {0x1458}, {0x4}}, \ - {"und_regs_regs_spsr", ELEM_OUTPUT_INT, {0x145c}, {0x4}}, \ - {"abort_regs_regs_r13", ELEM_OUTPUT_INT, {0x1460}, {0x4}}, \ - {"abort_regs_regs_r14", ELEM_OUTPUT_INT, {0x1464}, {0x4}}, \ - {"abort_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1468}, {0x4}}, \ - {"data_regs_r1", ELEM_OUTPUT_INT, {0x146c}, {0x4}}, \ - {"data_regs_r2", ELEM_OUTPUT_INT, {0x1470}, {0x4}}, \ - {"data_regs_r3", ELEM_OUTPUT_INT, {0x1474}, {0x4}}, \ - {"data_regs_r4", ELEM_OUTPUT_INT, {0x1478}, {0x4}}, \ - {"data_regs_r5", ELEM_OUTPUT_INT, {0x147c}, {0x4}}, \ - {"data_regs_r6", ELEM_OUTPUT_INT, {0x1480}, {0x4}}, \ - {"data_regs_r7", ELEM_OUTPUT_INT, {0x1484}, {0x4}}, \ - {"data_regs_r8", ELEM_OUTPUT_INT, {0x1488}, {0x4}}, \ - {"data_regs_r9", ELEM_OUTPUT_INT, {0x148c}, {0x4}}, \ - {"data_regs_r10", ELEM_OUTPUT_INT, {0x1490}, {0x4}}, \ - {"data_regs_r11", ELEM_OUTPUT_INT, {0x1494}, {0x4}}, \ - {"data_regs_r12", ELEM_OUTPUT_INT, {0x1498}, {0x4}}, \ - {"data_regs_r13", ELEM_OUTPUT_INT, {0x149c}, {0x4}}, \ - {"data_regs_r14", ELEM_OUTPUT_INT, {0x14a0}, {0x4}}, \ - {"data_regs_r15", ELEM_OUTPUT_INT, {0x14a4}, {0x4}}, \ - {"prog_regs_cpsr", ELEM_OUTPUT_INT, {0x14a8}, {0x4}}, \ - {"prog_regs_spsr", ELEM_OUTPUT_INT, {0x14ac}, {0x4}}, \ - {"log", ELEM_OUTPUT_STR_NL, {0xDC80}, {0x400}}, \ -} - -#define DATA_MODEL_LPM_PMU MODEL_VECTOR(LPM_PMU) = { \ - {"cpuid", ELEM_OUTPUT_INT, {0x0000}, {0x1}}, \ - {"2CCH", ELEM_OUTPUT_HEX, {0x0001}, {0x1}}, \ - {"2CDH", ELEM_OUTPUT_HEX, {0x0002}, {0x1}}, \ - {"2CEH", ELEM_OUTPUT_HEX, {0x0003}, {0x1}}, \ - {"2CFH", ELEM_OUTPUT_HEX, {0x0004}, {0x1}}, \ - {"2D0H", ELEM_OUTPUT_HEX, {0x0005}, {0x1}}, \ - {"2D1H", ELEM_OUTPUT_HEX, {0x0006}, {0x1}}, \ - {"2D2H", ELEM_OUTPUT_HEX, {0x0007}, {0x1}}, \ - {"2D3H", ELEM_OUTPUT_HEX, {0x0008}, {0x1}}, \ - {"2D4H", ELEM_OUTPUT_HEX, {0x0009}, {0x1}}, \ - {"2D5H", ELEM_OUTPUT_HEX, {0x000A}, {0x1}}, \ - {"2D6H", ELEM_OUTPUT_HEX, {0x000B}, {0x1}}, \ - {"2D7H", ELEM_OUTPUT_HEX, {0x000C}, {0x1}}, \ - {"2D8H", ELEM_OUTPUT_HEX, {0x000D}, {0x1}}, \ - {"2D9H", ELEM_OUTPUT_HEX, 
{0x000E}, {0x1}}, \ - {"2DAH", ELEM_OUTPUT_HEX, {0x000F}, {0x1}}, \ - {"2DBH", ELEM_OUTPUT_HEX, {0x0010}, {0x1}}, \ - {"2DCH", ELEM_OUTPUT_HEX, {0x0011}, {0x1}}, \ - {"2DDH", ELEM_OUTPUT_HEX, {0x0012}, {0x1}}, \ - {"2DEH", ELEM_OUTPUT_HEX, {0x0013}, {0x1}}, \ - {"2DFH", ELEM_OUTPUT_HEX, {0x0014}, {0x1}}, \ - {"2E0H", ELEM_OUTPUT_HEX, {0x0015}, {0x1}}, \ - {"2E1H", ELEM_OUTPUT_HEX, {0x0016}, {0x1}}, \ - {"2E2H", ELEM_OUTPUT_HEX, {0x0017}, {0x1}}, \ - {"2E3H", ELEM_OUTPUT_HEX, {0x0018}, {0x1}}, \ - {"2E4H", ELEM_OUTPUT_HEX, {0x0019}, {0x1}}, \ - {"2E5H", ELEM_OUTPUT_HEX, {0x001A}, {0x1}}, \ - {"2E6H", ELEM_OUTPUT_HEX, {0x001B}, {0x1}}, \ - {"2E7H", ELEM_OUTPUT_HEX, {0x001C}, {0x1}}, \ - {"slave0", ELEM_OUTPUT_HEX, {0x0020}, {0x1}}, \ - {"E0", ELEM_OUTPUT_HEX, {0x0021}, {0x1}}, \ - {"E1", ELEM_OUTPUT_HEX, {0x0022}, {0x1}}, \ - {"E2", ELEM_OUTPUT_HEX, {0x0023}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0024}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0025}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0026}, {0x1}}, \ - {"slave1", ELEM_OUTPUT_HEX, {0x0040}, {0x1}}, \ - {"E0", ELEM_OUTPUT_HEX, {0x0041}, {0x1}}, \ - {"E1", ELEM_OUTPUT_HEX, {0x0042}, {0x1}}, \ - {"E2", ELEM_OUTPUT_HEX, {0x0043}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0044}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0045}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0046}, {0x1}}, \ - {"aicSlave", ELEM_OUTPUT_HEX, {0x0060}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x0061}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x0063}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x0064}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x0065}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x0066}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x0067}, {0x1}}, \ - {"dvppSlave", ELEM_OUTPUT_HEX, {0x0080}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x0081}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x0083}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x0084}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x0085}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x0086}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x0087}, {0x1}}, \ - {"cpuSlave", ELEM_OUTPUT_HEX, {0x00A0}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x00A1}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x00A3}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x00A4}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x00A5}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x00A6}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x00A7}, {0x1}}, \ -} - -/* (LPM)DDR module */ -#define DATA_MODEL_DDR_SRAM MODEL_VECTOR(DDR_SRAM) = { \ - {"dram type", ELEM_OUTPUT_INT, {0x00}, {0x4}}, \ - {"dram size", ELEM_OUTPUT_INT, {0x04}, {0x4}}, \ - {"rank num", ELEM_OUTPUT_INT, {0x08}, {0x4}}, \ - {"chn bit map", ELEM_OUTPUT_INT, {0x0c}, {0x4}}, \ - {"manufacteryId[0]", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"manufacteryId[1]", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"manufacteryId[2]", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"manufacteryId[3]", ELEM_OUTPUT_INT, {0x1c}, {0x4}}, \ - {"manufacteryId[4]", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"manufacteryId[5]", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"manufacteryId[6]", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"manufacteryId[7]", ELEM_OUTPUT_INT, {0x2c}, {0x4}}, \ - {"manufacteryId[8]", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"manufacteryId[9]", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"manufacteryId[10]", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"manufacteryId[11]", ELEM_OUTPUT_INT, {0x3c}, {0x4}}, \ - {"manufacteryId[12]", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"manufacteryId[13]", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"manufacteryId[14]", ELEM_OUTPUT_INT, {0x48}, 
{0x4}}, \ - {"manufacteryId[15]", ELEM_OUTPUT_INT, {0x4c}, {0x4}}, \ - {"manufacteryId[16]", ELEM_OUTPUT_INT, {0x50}, {0x4}}, \ - {"manufacteryId[17]", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"manufacteryId[18]", ELEM_OUTPUT_INT, {0x58}, {0x4}}, \ - {"manufacteryId[19]", ELEM_OUTPUT_INT, {0x5c}, {0x4}}, \ - {"manufacteryId[20]", ELEM_OUTPUT_INT, {0x60}, {0x4}}, \ - {"manufacteryId[21]", ELEM_OUTPUT_INT, {0x64}, {0x4}}, \ - {"manufacteryId[22]", ELEM_OUTPUT_INT, {0x68}, {0x4}}, \ - {"manufacteryId[23]", ELEM_OUTPUT_INT, {0x6c}, {0x4}}, \ - {"iecc", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"swap type", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {"freq", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ -} - -/* TEE module */ -#define DATA_MODEL_TEE MODEL_VECTOR(TEE) = { \ - {"tee info", ELEM_OUTPUT_CHAR, {0x0}, {0x20000}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* DRIVE module */ -#define DATA_MODEL_DRIVER MODEL_VECTOR(DRIVER) = { \ - {"driver info", ELEM_OUTPUT_STR_NL, {0x0}, {0x20000}}, \ -} - -/* TS module, start */ -#define DATA_MODEL_TS_START MODEL_VECTOR(TS_START) = { \ - {"ts start info", ELEM_OUTPUT_STR_NL, {0x0}, {0x19000}}, \ -} - -/* TS module */ -#define DATA_MODEL_TS MODEL_VECTOR(TS) = { \ - {"ts info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1E6E00}}, \ -} - -/* HSM module */ -#define DATA_MODEL_HSM MODEL_VECTOR(HSM) = { \ - {"hsm info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1000}}, \ -} - -/* HSM module, start */ -#define DATA_MODEL_HSM_START MODEL_VECTOR(HSM_START) = { \ - {"hsm start info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1000}}, \ -} - -/* DP module */ -#define DATA_MODEL_DP MODEL_VECTOR(DP) = { \ - {"os kbox info", ELEM_OUTPUT_STR_NL, {0x3000}, {0x4FD000}}, \ -} - -// lpm common log data -#define DATA_MODEL_LPM_LOG MODEL_VECTOR(LPM_LOG) = { \ - {"lpm log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"lpm log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"lpm log", ELEM_OUTPUT_STR_NL, {0x80}, {0x1FF80}}, \ -} - -// hsm common log data -#define DATA_MODEL_HSM_LOG MODEL_VECTOR(HSM_LOG) = { \ - {"hsm log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"hsm log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"hsm log", ELEM_OUTPUT_STR_NL, {0x80}, {0x1FF80}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* TF module */ -#define DATA_MODEL_TF MODEL_VECTOR(TF) = { \ - {"x0", ELEM_OUTPUT_HEX, {0x0}, {0x8}}, \ - {"x1", ELEM_OUTPUT_HEX, {0x8}, {0x8}}, \ - {"x30", ELEM_OUTPUT_HEX, {0x10}, {0x8}}, \ - {"x2", ELEM_OUTPUT_HEX, {0x18}, {0x8}}, \ - {"x3", ELEM_OUTPUT_HEX, {0x20}, {0x8}}, \ - {"x4", ELEM_OUTPUT_HEX, {0x28}, {0x8}}, \ - {"x5", ELEM_OUTPUT_HEX, {0x30}, {0x8}}, \ - {"x6", ELEM_OUTPUT_HEX, {0x38}, {0x8}}, \ - {"x7", ELEM_OUTPUT_HEX, {0x40}, {0x8}}, \ - {"x8", ELEM_OUTPUT_HEX, {0x48}, {0x8}}, \ - {"x9", ELEM_OUTPUT_HEX, {0x50}, {0x8}}, \ - {"x10", ELEM_OUTPUT_HEX, {0x58}, {0x8}}, \ - {"x11", ELEM_OUTPUT_HEX, 
{0x60}, {0x8}}, \ - {"x12", ELEM_OUTPUT_HEX, {0x68}, {0x8}}, \ - {"x13", ELEM_OUTPUT_HEX, {0x70}, {0x8}}, \ - {"x14", ELEM_OUTPUT_HEX, {0x78}, {0x8}}, \ - {"x15", ELEM_OUTPUT_HEX, {0x80}, {0x8}}, \ - {"x16", ELEM_OUTPUT_HEX, {0x88}, {0x8}}, \ - {"x17", ELEM_OUTPUT_HEX, {0x90}, {0x8}}, \ - {"x18", ELEM_OUTPUT_HEX, {0x98}, {0x8}}, \ - {"x19", ELEM_OUTPUT_HEX, {0xA0}, {0x8}}, \ - {"x20", ELEM_OUTPUT_HEX, {0xA8}, {0x8}}, \ - {"x21", ELEM_OUTPUT_HEX, {0xB0}, {0x8}}, \ - {"x22", ELEM_OUTPUT_HEX, {0xB8}, {0x8}}, \ - {"x23", ELEM_OUTPUT_HEX, {0xC0}, {0x8}}, \ - {"x24", ELEM_OUTPUT_HEX, {0xC8}, {0x8}}, \ - {"x25", ELEM_OUTPUT_HEX, {0xD0}, {0x8}}, \ - {"x26", ELEM_OUTPUT_HEX, {0xD8}, {0x8}}, \ - {"x27", ELEM_OUTPUT_HEX, {0xE0}, {0x8}}, \ - {"x28", ELEM_OUTPUT_HEX, {0xE8}, {0x8}}, \ - {"x29", ELEM_OUTPUT_HEX, {0xF0}, {0x8}}, \ - {"scr_el3", ELEM_OUTPUT_HEX, {0xF8}, {0x8}}, \ - {"sctlr_el3", ELEM_OUTPUT_HEX, {0x100}, {0x8}}, \ - {"cptr_el3", ELEM_OUTPUT_HEX, {0x108}, {0x8}}, \ - {"tcr_el3", ELEM_OUTPUT_HEX, {0x110}, {0x8}}, \ - {"daif", ELEM_OUTPUT_HEX, {0x118}, {0x8}}, \ - {"mair_el3", ELEM_OUTPUT_HEX, {0x120}, {0x8}}, \ - {"spsr_el3", ELEM_OUTPUT_HEX, {0x128}, {0x8}}, \ - {"elr_el3", ELEM_OUTPUT_HEX, {0x130}, {0x8}}, \ - {"ttbr0_el3", ELEM_OUTPUT_HEX, {0x138}, {0x8}}, \ - {"esr_el3", ELEM_OUTPUT_HEX, {0x140}, {0x8}}, \ - {"far_el3", ELEM_OUTPUT_HEX, {0x148}, {0x8}}, \ -} - -/** - * the whole space is 512k, used for histroy data record - * the struct distribution is as follows: - * +-------------------+ - * | head info(1k) | region: area: module block: - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | boot region |---->| first area |---->| module block |---->| block head | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | run region | | second area | | module block | | block data | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | reserved | | ...... | | ...... 
| - * +-------------------+ +--------------------+ +-----------------+ - */ -#define DATA_MODEL_HDR_BOOT_BIOS MODEL_VECTOR(HDR_BOOT_BIOS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"bsbc point", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"bsbc exc point", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"hboot1 point", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"hboot1 exc point", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"hboot2 point", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"hboot2 exc point", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"[BIOS info]", ELEM_OUTPUT_STR_NL, {0x480}, {0x2780}}, \ -} - -#define DATA_MODEL_HDR_BOOT_AREA MODEL_VECTOR(HDR_BOOT_AREA) = { \ - {"BIOS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_BIOS", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_BIOS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_BOOT MODEL_VECTOR(HDR_BOOT) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x1E000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x25800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x2D000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_BOOT_INFO MODEL_VECTOR(HDR_BOOT_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, 
{0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xB0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xB4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xC8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xCC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xE0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xE4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xF8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xFC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ 
- {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x110}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x114}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR MODEL_VECTOR(HDR) = { \ - {"head info", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"reset count", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"boot region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_INFO", ELEM_CTRL_TABLE_GOTO, {0XC}, {0x168}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_BOOT", ELEM_CTRL_TABLE_GOTO, {0x400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#endif // BBOX_DDR_DATA_DC_H diff --git a/inc/toolchain/bbox/bbox_ddr_data_mdc.h b/inc/toolchain/bbox/bbox_ddr_data_mdc.h deleted file mode 100644 index 5c35fdfab267c6deb924b92bf2763a066e765489..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_ddr_data_mdc.h +++ /dev/null @@ -1,474 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef BBOX_DDR_DATA_MDC_H -#define BBOX_DDR_DATA_MDC_H - -#include "bbox_ddr_data.h" - -/* each Module need define as follows */ -/* LPM module */ -#define DATA_MODEL_LPM_START MODEL_VECTOR(LPM_START) = { \ - {"start_steps", ELEM_OUTPUT_STR_NL, {0x0}, {0x800}}, \ -} - -#define DATA_MODEL_LPM MODEL_VECTOR(LPM) = { \ - {"****exc****reg**", ELEM_OUTPUT_STR, {0x0}, {0x8}}, \ - {"fault_regs_dfsr", ELEM_OUTPUT_INT, {0x1410}, {0x4}}, \ - {"fault_regs_ifsr", ELEM_OUTPUT_INT, {0x1414}, {0x4}}, \ - {"fault_regs_adfsr", ELEM_OUTPUT_INT, {0x1418}, {0x4}}, \ - {"fault_regs_aifsr", ELEM_OUTPUT_INT, {0x141c}, {0x4}}, \ - {"fault_regs_dfar", ELEM_OUTPUT_INT, {0x1420}, {0x4}}, \ - {"fault_regs_ifar", ELEM_OUTPUT_INT, {0x1424}, {0x4}}, \ - {"usr_regs_r13", ELEM_OUTPUT_INT, {0x1428}, {0x4}}, \ - {"usr_regs_r14", ELEM_OUTPUT_INT, {0x142c}, {0x4}}, \ - {"svc_regs_regs_r13", ELEM_OUTPUT_INT, {0x1430}, {0x4}}, \ - {"svc_regs_regs_r14", ELEM_OUTPUT_INT, {0x1434}, {0x4}}, \ - {"svc_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1438}, {0x4}}, \ - {"irq_regs_regs_r13", ELEM_OUTPUT_INT, {0x143c}, {0x4}}, \ - {"irq_regs_regs_r14", ELEM_OUTPUT_INT, {0x1440}, {0x4}}, \ - {"irq_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1444}, {0x4}}, \ - {"fiq_regs_regs_r13", ELEM_OUTPUT_INT, {0x1448}, {0x4}}, \ - {"fiq_regs_regs_r14", ELEM_OUTPUT_INT, {0x144c}, {0x4}}, \ - {"fiq_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1450}, {0x4}}, \ - {"und_regs_regs_r13", ELEM_OUTPUT_INT, {0x1454}, {0x4}}, \ - {"und_regs_regs_r14", ELEM_OUTPUT_INT, {0x1458}, {0x4}}, \ - {"und_regs_regs_spsr", ELEM_OUTPUT_INT, {0x145c}, {0x4}}, \ - {"abort_regs_regs_r13", ELEM_OUTPUT_INT, {0x1460}, {0x4}}, \ - {"abort_regs_regs_r14", ELEM_OUTPUT_INT, {0x1464}, {0x4}}, \ - {"abort_regs_regs_spsr", ELEM_OUTPUT_INT, {0x1468}, {0x4}}, \ - {"data_regs_r1", ELEM_OUTPUT_INT, {0x146c}, {0x4}}, \ - {"data_regs_r2", ELEM_OUTPUT_INT, {0x1470}, {0x4}}, \ - {"data_regs_r3", ELEM_OUTPUT_INT, {0x1474}, {0x4}}, \ - {"data_regs_r4", ELEM_OUTPUT_INT, {0x1478}, {0x4}}, \ - {"data_regs_r5", ELEM_OUTPUT_INT, {0x147c}, {0x4}}, \ - {"data_regs_r6", ELEM_OUTPUT_INT, {0x1480}, {0x4}}, \ - {"data_regs_r7", ELEM_OUTPUT_INT, {0x1484}, {0x4}}, \ - {"data_regs_r8", ELEM_OUTPUT_INT, {0x1488}, {0x4}}, \ - {"data_regs_r9", ELEM_OUTPUT_INT, {0x148c}, {0x4}}, \ - {"data_regs_r10", ELEM_OUTPUT_INT, {0x1490}, {0x4}}, \ - {"data_regs_r11", ELEM_OUTPUT_INT, {0x1494}, {0x4}}, \ - {"data_regs_r12", ELEM_OUTPUT_INT, {0x1498}, {0x4}}, \ - {"data_regs_r13", ELEM_OUTPUT_INT, {0x149c}, {0x4}}, \ - {"data_regs_r14", ELEM_OUTPUT_INT, {0x14a0}, {0x4}}, \ - {"data_regs_r15", ELEM_OUTPUT_INT, {0x14a4}, {0x4}}, \ - {"prog_regs_cpsr", ELEM_OUTPUT_INT, {0x14a8}, {0x4}}, \ - {"prog_regs_spsr", ELEM_OUTPUT_INT, {0x14ac}, {0x4}}, \ - {"log", ELEM_OUTPUT_STR_NL, {0xDC80}, {0x400}}, \ -} - -#define DATA_MODEL_LPM_PMU MODEL_VECTOR(LPM_PMU) = { \ - {"cpuid", ELEM_OUTPUT_INT, {0x0000}, {0x1}}, \ - {"2CCH", ELEM_OUTPUT_HEX, {0x0001}, {0x1}}, \ - {"2CDH", ELEM_OUTPUT_HEX, {0x0002}, {0x1}}, \ - {"2CEH", ELEM_OUTPUT_HEX, {0x0003}, {0x1}}, \ - {"2CFH", ELEM_OUTPUT_HEX, {0x0004}, {0x1}}, \ - {"2D0H", ELEM_OUTPUT_HEX, {0x0005}, {0x1}}, \ - {"2D1H", ELEM_OUTPUT_HEX, {0x0006}, {0x1}}, \ - {"2D2H", ELEM_OUTPUT_HEX, {0x0007}, {0x1}}, \ - {"2D3H", ELEM_OUTPUT_HEX, {0x0008}, {0x1}}, \ - {"2D4H", ELEM_OUTPUT_HEX, {0x0009}, {0x1}}, \ - {"2D5H", ELEM_OUTPUT_HEX, {0x000A}, {0x1}}, \ - {"2D6H", ELEM_OUTPUT_HEX, {0x000B}, {0x1}}, \ - {"2D7H", ELEM_OUTPUT_HEX, {0x000C}, {0x1}}, \ - {"2D8H", ELEM_OUTPUT_HEX, {0x000D}, {0x1}}, \ - {"2D9H", ELEM_OUTPUT_HEX, 
{0x000E}, {0x1}}, \ - {"2DAH", ELEM_OUTPUT_HEX, {0x000F}, {0x1}}, \ - {"2DBH", ELEM_OUTPUT_HEX, {0x0010}, {0x1}}, \ - {"2DCH", ELEM_OUTPUT_HEX, {0x0011}, {0x1}}, \ - {"2DDH", ELEM_OUTPUT_HEX, {0x0012}, {0x1}}, \ - {"2DEH", ELEM_OUTPUT_HEX, {0x0013}, {0x1}}, \ - {"2DFH", ELEM_OUTPUT_HEX, {0x0014}, {0x1}}, \ - {"2E0H", ELEM_OUTPUT_HEX, {0x0015}, {0x1}}, \ - {"2E1H", ELEM_OUTPUT_HEX, {0x0016}, {0x1}}, \ - {"2E2H", ELEM_OUTPUT_HEX, {0x0017}, {0x1}}, \ - {"2E3H", ELEM_OUTPUT_HEX, {0x0018}, {0x1}}, \ - {"2E4H", ELEM_OUTPUT_HEX, {0x0019}, {0x1}}, \ - {"2E5H", ELEM_OUTPUT_HEX, {0x001A}, {0x1}}, \ - {"2E6H", ELEM_OUTPUT_HEX, {0x001B}, {0x1}}, \ - {"2E7H", ELEM_OUTPUT_HEX, {0x001C}, {0x1}}, \ - {"slave0", ELEM_OUTPUT_HEX, {0x0020}, {0x1}}, \ - {"E0", ELEM_OUTPUT_HEX, {0x0021}, {0x1}}, \ - {"E1", ELEM_OUTPUT_HEX, {0x0022}, {0x1}}, \ - {"E2", ELEM_OUTPUT_HEX, {0x0023}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0024}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0025}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0026}, {0x1}}, \ - {"slave1", ELEM_OUTPUT_HEX, {0x0040}, {0x1}}, \ - {"E0", ELEM_OUTPUT_HEX, {0x0041}, {0x1}}, \ - {"E1", ELEM_OUTPUT_HEX, {0x0042}, {0x1}}, \ - {"E2", ELEM_OUTPUT_HEX, {0x0043}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0044}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0045}, {0x1}}, \ - {"reserve", ELEM_OUTPUT_HEX, {0x0046}, {0x1}}, \ - {"aicSlave", ELEM_OUTPUT_HEX, {0x0060}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x0061}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x0063}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x0064}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x0065}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x0066}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x0067}, {0x1}}, \ - {"dvppSlave", ELEM_OUTPUT_HEX, {0x0080}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x0081}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x0083}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x0084}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x0085}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x0086}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x0087}, {0x1}}, \ - {"cpuSlave", ELEM_OUTPUT_HEX, {0x00A0}, {0x1}}, \ - {"0x79", ELEM_OUTPUT_HEX, {0x00A1}, {0x2}}, \ - {"0x7A", ELEM_OUTPUT_HEX, {0x00A3}, {0x1}}, \ - {"0x7B", ELEM_OUTPUT_HEX, {0x00A4}, {0x1}}, \ - {"0x7C", ELEM_OUTPUT_HEX, {0x00A5}, {0x1}}, \ - {"0x7D", ELEM_OUTPUT_HEX, {0x00A6}, {0x1}}, \ - {"0x7E", ELEM_OUTPUT_HEX, {0x00A7}, {0x1}}, \ -} - -/* (LPM)DDR module */ -#define DATA_MODEL_DDR_SRAM MODEL_VECTOR(DDR_SRAM) = { \ - {"dram type", ELEM_OUTPUT_INT, {0x00}, {0x4}}, \ - {"dram size", ELEM_OUTPUT_INT, {0x04}, {0x4}}, \ - {"rank num", ELEM_OUTPUT_INT, {0x08}, {0x4}}, \ - {"chn bit map", ELEM_OUTPUT_INT, {0x0c}, {0x4}}, \ - {"manufacteryId[0]", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"manufacteryId[1]", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"manufacteryId[2]", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"manufacteryId[3]", ELEM_OUTPUT_INT, {0x1c}, {0x4}}, \ - {"manufacteryId[4]", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"manufacteryId[5]", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"manufacteryId[6]", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"manufacteryId[7]", ELEM_OUTPUT_INT, {0x2c}, {0x4}}, \ - {"manufacteryId[8]", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"manufacteryId[9]", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"manufacteryId[10]", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"manufacteryId[11]", ELEM_OUTPUT_INT, {0x3c}, {0x4}}, \ - {"manufacteryId[12]", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"manufacteryId[13]", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"manufacteryId[14]", ELEM_OUTPUT_INT, {0x48}, 
{0x4}}, \ - {"manufacteryId[15]", ELEM_OUTPUT_INT, {0x4c}, {0x4}}, \ - {"manufacteryId[16]", ELEM_OUTPUT_INT, {0x50}, {0x4}}, \ - {"manufacteryId[17]", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"manufacteryId[18]", ELEM_OUTPUT_INT, {0x58}, {0x4}}, \ - {"manufacteryId[19]", ELEM_OUTPUT_INT, {0x5c}, {0x4}}, \ - {"manufacteryId[20]", ELEM_OUTPUT_INT, {0x60}, {0x4}}, \ - {"manufacteryId[21]", ELEM_OUTPUT_INT, {0x64}, {0x4}}, \ - {"manufacteryId[22]", ELEM_OUTPUT_INT, {0x68}, {0x4}}, \ - {"manufacteryId[23]", ELEM_OUTPUT_INT, {0x6c}, {0x4}}, \ - {"iecc", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"swap type", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {"freq", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ -} - -/* TEE module */ -#define DATA_MODEL_TEE MODEL_VECTOR(TEE) = { \ - {"tee info", ELEM_OUTPUT_CHAR, {0x0}, {0x20000}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* DRIVE module */ -#define DATA_MODEL_DRIVER MODEL_VECTOR(DRIVER) = { \ - {"driver info", ELEM_OUTPUT_STR_NL, {0x0}, {0x20000}}, \ -} - -/* TS module, start */ -#define DATA_MODEL_TS_START MODEL_VECTOR(TS_START) = { \ - {"ts0 start info", ELEM_OUTPUT_STR_NL, {0x0}, {0x19000}}, \ - {"ts1 start info", ELEM_OUTPUT_STR_NL, {0x19000}, {0x19000}}, \ -} - -/* TS module */ -#define DATA_MODEL_TS MODEL_VECTOR(TS) = { \ - {"ts info", ELEM_OUTPUT_STR_NL, {0x0}, {0xE6F00}}, \ -} - -/* HSM module */ -#define DATA_MODEL_HSM MODEL_VECTOR(HSM) = { \ - {"hsm info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1000}}, \ -} - -/* HSM module, start */ -#define DATA_MODEL_HSM_START MODEL_VECTOR(HSM_START) = { \ - {"hsm start info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1000}}, \ -} - -// lpm common log data -#define DATA_MODEL_LPM_LOG MODEL_VECTOR(LPM_LOG) = { \ - {"lpm log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"lpm log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"lpm log", ELEM_OUTPUT_STR_NL, {0x80}, {0x1FF80}}, \ -} - -// hsm common log data -#define DATA_MODEL_HSM_LOG MODEL_VECTOR(HSM_LOG) = { \ - {"hsm log buffer", ELEM_FEATURE_LOOPBUF, {1}, {6}}, \ - {"buf_read", ELEM_CTRL_LPBF_READ, {0x0}, {0x4}}, \ - {"buf_len", ELEM_CTRL_LPBF_SIZE, {0x4}, {0x4}}, \ - {"buf_write", ELEM_CTRL_LPBF_WRITE, {0x40}, {0x4}}, \ - {"log_level", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"buf_head_len", ELEM_CTRL_LPBF_HEAD, {0x80}, {0x4}}, \ - {"hsm log data", ELEM_FEATURE_CHARLOG, {1}, {1}}, \ - {"hsm log", ELEM_OUTPUT_STR_NL, {0x80}, {0x1FF80}}, \ -} - -/* isp bbox data */ -#define DATA_MODEL_ISP MODEL_VECTOR(ISP) = { \ - {"isp log", ELEM_OUTPUT_STR_NL, {0}, {0x40000}}, \ -} - -/* isp module, start */ -#define DATA_MODEL_ISP_START MODEL_VECTOR(ISP_START) = { \ - {"isp start info", ELEM_OUTPUT_STR_NL, {0x0}, {0x1000}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* DP module */ -#define DATA_MODEL_DP MODEL_VECTOR(DP) = { \ - {"os kbox info", ELEM_OUTPUT_STR_NL, {0x3000}, {0x4FD000}}, \ -} - -/* safetyisland module */ -#define DATA_MODEL_SAFETYISLAND MODEL_VECTOR(SAFETYISLAND) = { \ - {"safetyisland info", ELEM_OUTPUT_STR_NL, {0x0}, {0xC800}}, \ -} - -/* TF module */ -#define DATA_MODEL_TF MODEL_VECTOR(TF) = { \ - {"x0", ELEM_OUTPUT_HEX, {0x0}, {0x8}}, \ - 
{"x1", ELEM_OUTPUT_HEX, {0x8}, {0x8}}, \ - {"x30", ELEM_OUTPUT_HEX, {0x10}, {0x8}}, \ - {"x2", ELEM_OUTPUT_HEX, {0x18}, {0x8}}, \ - {"x3", ELEM_OUTPUT_HEX, {0x20}, {0x8}}, \ - {"x4", ELEM_OUTPUT_HEX, {0x28}, {0x8}}, \ - {"x5", ELEM_OUTPUT_HEX, {0x30}, {0x8}}, \ - {"x6", ELEM_OUTPUT_HEX, {0x38}, {0x8}}, \ - {"x7", ELEM_OUTPUT_HEX, {0x40}, {0x8}}, \ - {"x8", ELEM_OUTPUT_HEX, {0x48}, {0x8}}, \ - {"x9", ELEM_OUTPUT_HEX, {0x50}, {0x8}}, \ - {"x10", ELEM_OUTPUT_HEX, {0x58}, {0x8}}, \ - {"x11", ELEM_OUTPUT_HEX, {0x60}, {0x8}}, \ - {"x12", ELEM_OUTPUT_HEX, {0x68}, {0x8}}, \ - {"x13", ELEM_OUTPUT_HEX, {0x70}, {0x8}}, \ - {"x14", ELEM_OUTPUT_HEX, {0x78}, {0x8}}, \ - {"x15", ELEM_OUTPUT_HEX, {0x80}, {0x8}}, \ - {"x16", ELEM_OUTPUT_HEX, {0x88}, {0x8}}, \ - {"x17", ELEM_OUTPUT_HEX, {0x90}, {0x8}}, \ - {"x18", ELEM_OUTPUT_HEX, {0x98}, {0x8}}, \ - {"x19", ELEM_OUTPUT_HEX, {0xA0}, {0x8}}, \ - {"x20", ELEM_OUTPUT_HEX, {0xA8}, {0x8}}, \ - {"x21", ELEM_OUTPUT_HEX, {0xB0}, {0x8}}, \ - {"x22", ELEM_OUTPUT_HEX, {0xB8}, {0x8}}, \ - {"x23", ELEM_OUTPUT_HEX, {0xC0}, {0x8}}, \ - {"x24", ELEM_OUTPUT_HEX, {0xC8}, {0x8}}, \ - {"x25", ELEM_OUTPUT_HEX, {0xD0}, {0x8}}, \ - {"x26", ELEM_OUTPUT_HEX, {0xD8}, {0x8}}, \ - {"x27", ELEM_OUTPUT_HEX, {0xE0}, {0x8}}, \ - {"x28", ELEM_OUTPUT_HEX, {0xE8}, {0x8}}, \ - {"x29", ELEM_OUTPUT_HEX, {0xF0}, {0x8}}, \ - {"scr_el3", ELEM_OUTPUT_HEX, {0xF8}, {0x8}}, \ - {"sctlr_el3", ELEM_OUTPUT_HEX, {0x100}, {0x8}}, \ - {"cptr_el3", ELEM_OUTPUT_HEX, {0x108}, {0x8}}, \ - {"tcr_el3", ELEM_OUTPUT_HEX, {0x110}, {0x8}}, \ - {"daif", ELEM_OUTPUT_HEX, {0x118}, {0x8}}, \ - {"mair_el3", ELEM_OUTPUT_HEX, {0x120}, {0x8}}, \ - {"spsr_el3", ELEM_OUTPUT_HEX, {0x128}, {0x8}}, \ - {"elr_el3", ELEM_OUTPUT_HEX, {0x130}, {0x8}}, \ - {"ttbr0_el3", ELEM_OUTPUT_HEX, {0x138}, {0x8}}, \ - {"esr_el3", ELEM_OUTPUT_HEX, {0x140}, {0x8}}, \ - {"far_el3", ELEM_OUTPUT_HEX, {0x148}, {0x8}}, \ -} - -/** - * the whole space is 512k, used for histroy data record - * the struct distribution is as follows: - * +-------------------+ - * | head info(1k) | region: area: module block: - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | boot region |---->| first area |---->| module block |---->| block head | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | run region | | second area | | module block | | block data | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | reserved | | ...... | | ...... 
| - * +-------------------+ +--------------------+ +-----------------+ - */ -#define DATA_MODEL_HDR_BOOT_BIOS MODEL_VECTOR(HDR_BOOT_BIOS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"bsbc point", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"bsbc exc point", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"hboot1 point", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"hboot1 exc point", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"hboot2 point", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"hboot2 exc point", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"[BIOS info]", ELEM_OUTPUT_STR_NL, {0x480}, {0x2780}}, \ -} - -#define DATA_MODEL_HDR_BOOT_AREA MODEL_VECTOR(HDR_BOOT_AREA) = { \ - {"BIOS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_BIOS", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_BIOS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_BOOT MODEL_VECTOR(HDR_BOOT) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x1E000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x25800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x2D000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_BOOT_INFO MODEL_VECTOR(HDR_BOOT_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, 
{0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xB0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xB4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xC8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xCC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xE0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xE4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xF8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xFC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ 
- {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x110}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x114}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR MODEL_VECTOR(HDR) = { \ - {"head info", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"reset count", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"boot region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_INFO", ELEM_CTRL_TABLE_GOTO, {0XC}, {0x168}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_BOOT", ELEM_CTRL_TABLE_GOTO, {0x400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#endif // BBOX_DDR_DATA_MDC_H diff --git a/inc/toolchain/bbox/bbox_ddr_data_mini.h b/inc/toolchain/bbox/bbox_ddr_data_mini.h deleted file mode 100644 index 02aaeb33d1d8b0198cf767db3ecfd328f5759c3b..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_ddr_data_mini.h +++ /dev/null @@ -1,1278 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef BBOX_DDR_DATA_MINI_H -#define BBOX_DDR_DATA_MINI_H - -#include "bbox_ddr_data.h" - -/* each Module need define as follows */ -/* LPM3 module */ -#define DATA_MODEL_LPM3_START MODEL_VECTOR(LPM3_START) = { \ - {"start_step 1", ELEM_OUTPUT_INT, {0x0}, {0x1}}, \ - {"start_step 2", ELEM_OUTPUT_INT, {0x1}, {0x1}}, \ - {"start_step 3", ELEM_OUTPUT_INT, {0x2}, {0x1}}, \ - {"start_step 4", ELEM_OUTPUT_INT, {0x3}, {0x1}}, \ - {"start_step 5", ELEM_OUTPUT_INT, {0x4}, {0x1}}, \ - {"start_step 6", ELEM_OUTPUT_INT, {0x5}, {0x1}}, \ - {"start_step 7", ELEM_OUTPUT_INT, {0x6}, {0x1}}, \ - {"start_step 8", ELEM_OUTPUT_INT, {0x7}, {0x1}}, \ - {"start_step 9", ELEM_OUTPUT_INT, {0x8}, {0x1}}, \ - {"start_step 10", ELEM_OUTPUT_INT, {0x9}, {0x1}}, \ - {"start_step 11", ELEM_OUTPUT_INT, {0xa}, {0x1}}, \ - {"start_step 12", ELEM_OUTPUT_INT, {0xb}, {0x1}}, \ - {"start_step 13", ELEM_OUTPUT_INT, {0xc}, {0x1}}, \ - {"start_step 14", ELEM_OUTPUT_INT, {0xd}, {0x1}}, \ - {"start_step 15", ELEM_OUTPUT_INT, {0xe}, {0x1}}, \ - {"start_step 16", ELEM_OUTPUT_INT, {0xf}, {0x1}}, \ - {"start_step 17", ELEM_OUTPUT_INT, {0x10}, {0x1}}, \ - {"start_step 18", ELEM_OUTPUT_INT, {0x11}, {0x1}}, \ - {"start_step 19", ELEM_OUTPUT_INT, {0x12}, {0x1}}, \ - {"start_step 20", ELEM_OUTPUT_INT, {0x13}, {0x1}}, \ - {"start_step 21", ELEM_OUTPUT_INT, {0x14}, {0x1}}, \ - {"start_step 22", ELEM_OUTPUT_INT, {0x15}, {0x1}}, \ - {"start_step 23", ELEM_OUTPUT_INT, {0x16}, {0x1}}, \ - {"start_step 24", ELEM_OUTPUT_INT, {0x17}, {0x1}}, \ - {"start_step 25", ELEM_OUTPUT_INT, {0x18}, {0x1}}, \ - {"start_step 26", ELEM_OUTPUT_INT, {0x19}, {0x1}}, \ - {"start_step 27", ELEM_OUTPUT_INT, {0x1a}, {0x1}}, \ -} - -#define DATA_MODEL_LPM3 MODEL_VECTOR(LPM3) = { \ - {"****exc****reg**", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reset_reason", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {"slice", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {"rtc", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"r13", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {"lr1", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {"pc", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {"xpsr", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {"cfsr", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {"hfsr", ELEM_OUTPUT_INT, {0xa0}, {0x4}}, \ - {"bfar", ELEM_OUTPUT_INT, {0xa4}, {0x4}}, \ - {"exc_trace", ELEM_OUTPUT_INT, {0xa8}, {0x1}}, \ - {"ddr_exc", ELEM_OUTPUT_INT, {0xa9}, {0x1}}, \ - {"irq_id", ELEM_OUTPUT_INT, {0xaa}, {0x2}}, \ - {"task_id", ELEM_OUTPUT_INT, {0xac}, {0x4}}, \ - {"**backup**reg***", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"reg_backup_index", ELEM_OUTPUT_INT, {0x200}, {0x4}}, \ - {"reason_0", ELEM_OUTPUT_INT, {0x204}, {0x4}}, \ - {"reason_1", ELEM_OUTPUT_INT, {0x208}, {0x4}}, \ - {"reason_2", ELEM_OUTPUT_INT, {0x20C}, {0x4}}, \ - {"r0", ELEM_OUTPUT_INT, {0x210}, {0x4}}, \ - {"r1", ELEM_OUTPUT_INT, {0x214}, {0x4}}, \ - {"r2", ELEM_OUTPUT_INT, {0x218}, {0x4}}, \ - {"r3", ELEM_OUTPUT_INT, {0x21c}, {0x4}}, \ - {"r4", ELEM_OUTPUT_INT, {0x220}, {0x4}}, \ - {"r5", ELEM_OUTPUT_INT, {0x224}, {0x4}}, \ - {"r6", ELEM_OUTPUT_INT, {0x228}, {0x4}}, \ - {"r7", ELEM_OUTPUT_INT, {0x22c}, {0x4}}, \ - {"r8", ELEM_OUTPUT_INT, {0x230}, {0x4}}, \ - {"r9", ELEM_OUTPUT_INT, {0x234}, {0x4}}, \ - {"r10", ELEM_OUTPUT_INT, {0x238}, {0x4}}, \ - {"r11", ELEM_OUTPUT_INT, {0x23c}, {0x4}}, \ - {"r12", ELEM_OUTPUT_INT, {0x240}, {0x4}}, \ - {"r13", ELEM_OUTPUT_INT, {0x244}, {0x4}}, \ - {"msp", ELEM_OUTPUT_INT, {0x248}, {0x4}}, \ - {"psp", ELEM_OUTPUT_INT, {0x24c}, {0x4}}, \ - {"lr0_ctrl", ELEM_OUTPUT_INT, {0x250}, {0x4}}, \ - {"lr1", ELEM_OUTPUT_INT, {0x254}, {0x4}}, \ - 
{"pc", ELEM_OUTPUT_INT, {0x258}, {0x4}}, \ - {"xpsr", ELEM_OUTPUT_INT, {0x25c}, {0x4}}, \ - {"primask", ELEM_OUTPUT_INT, {0x260}, {0x4}}, \ - {"basepri", ELEM_OUTPUT_INT, {0x264}, {0x4}}, \ - {"faultmask", ELEM_OUTPUT_INT, {0x268}, {0x4}}, \ - {"control", ELEM_OUTPUT_INT, {0x26c}, {0x4}}, \ - {"**runtime*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"RT_BBX_MAGIC_NUM", ELEM_OUTPUT_INT, {0x7940}, {0x4}}, \ - {"RT_BBX_SIZE", ELEM_OUTPUT_INT, {0x7948}, {0x4}}, \ - {"TSENSOR_A55", ELEM_OUTPUT_INT, {0x7960}, {0x1}}, \ - {"TSENSOR_PERI", ELEM_OUTPUT_INT, {0x7961}, {0x1}}, \ - {"TSENSOR_AIC0", ELEM_OUTPUT_INT, {0x7962}, {0x1}}, \ - {"TSENSOR_AIC1", ELEM_OUTPUT_INT, {0x7963}, {0x1}}, \ - {"DDR_T_GRADE", ELEM_OUTPUT_INT, {0x7964}, {0x1}}, \ - {"EDP_SCALE_0", ELEM_OUTPUT_INT, {0x7965}, {0x1}}, \ - {"EDP_SCALE_1", ELEM_OUTPUT_INT, {0x7966}, {0x1}}, \ - {"TMP_STATUS", ELEM_OUTPUT_INT, {0x7967}, {0x1}}, \ - {"TMP_CTRL_ST", ELEM_OUTPUT_INT, {0x7968}, {0x1}}, \ - {"AIC_FREQ_ST", ELEM_OUTPUT_INT, {0x7969}, {0x1}}, \ - {"A55_FREQ_ST", ELEM_OUTPUT_INT, {0x796A}, {0x1}}, \ - {"AIC_NUM_ST", ELEM_OUTPUT_INT, {0x796B}, {0x1}}, \ - {"TMP_RST", ELEM_OUTPUT_INT, {0x796C}, {0x1}}, \ - {"TMP_HIGH", ELEM_OUTPUT_INT, {0x796D}, {0x1}}, \ - {"TMP_NOR", ELEM_OUTPUT_INT, {0x796E}, {0x1}}, \ - {"TMP_PERIOD", ELEM_OUTPUT_INT, {0x796F}, {0x1}}, \ - {"T_RST_STATUS", ELEM_OUTPUT_INT, {0x797D}, {0x1}}, \ - {"T_ERR_TSENSOR", ELEM_OUTPUT_INT, {0x797E}, {0x1}}, \ - {"T_ERR_EFUSE", ELEM_OUTPUT_INT, {0x797F}, {0x1}}, \ - {"**NV*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"LPNV_MAGIC",ELEM_OUTPUT_INT, {0x7980}, {0x4}}, \ - {"LP_LPALL_NV",ELEM_OUTPUT_INT, {0x7984}, {0x1}}, \ - {"LP_AVS_NV",ELEM_OUTPUT_INT, {0x7985}, {0x1}}, \ - {"LP_SVFD_NV",ELEM_OUTPUT_INT, {0x7986}, {0x1}}, \ - {"LP_PLLMOD_SEL_NV",ELEM_OUTPUT_INT, {0x7987}, {0x1}}, \ - {"LP_DEEP_SLEEP_NV",ELEM_OUTPUT_INT, {0x7988}, {0x1}}, \ - {"LP_HIMNTN_NV",ELEM_OUTPUT_INT, {0x7989}, {0x1}}, \ - {"LP_LOGBUF_CTRL",ELEM_OUTPUT_INT, {0x798A}, {0x1}}, \ - {"LP_WDT_RST_NV",ELEM_OUTPUT_INT, {0x798B}, {0x1}}, \ - {"LP_RDRLOG_CTRL",ELEM_OUTPUT_INT, {0x798C}, {0x1}}, \ - {"THERMAL_EN_NV", ELEM_OUTPUT_INT, {0x798D}, {0x1}}, \ - {"TMP_HWRST_EN_NV", ELEM_OUTPUT_INT, {0x798E}, {0x1}}, \ - {"TMP_GCTRL_EN_NV", ELEM_OUTPUT_INT, {0x798F}, {0x1}}, \ - {"TMP_GCTRL_SCALE_NV", ELEM_OUTPUT_INT, {0x7990}, {0x1}}, \ - {"TMP_RST_NV", ELEM_OUTPUT_INT, {0x7991}, {0x1}}, \ - {"TMP_HIGH_NV", ELEM_OUTPUT_INT, {0x7992}, {0x1}}, \ - {"TMP_NOR_NV", ELEM_OUTPUT_INT, {0x7993}, {0x1}}, \ - {"TMP_PERIOD_NV", ELEM_OUTPUT_INT, {0x7994}, {0x1}}, \ - {"DDR_ALL_NV", ELEM_OUTPUT_INT, {0x7995}, {0x1}}, \ - {"DDR_THERMAL_NV", ELEM_OUTPUT_INT, {0x7996}, {0x1}}, \ - {"DDR_EXMBIST_NV", ELEM_OUTPUT_INT, {0x7997}, {0x1}}, \ - {"DDR_SWAP_NV", ELEM_OUTPUT_INT, {0x7998}, {0x1}}, \ - {"DDR_IECC_NV", ELEM_OUTPUT_INT, {0x7999}, {0x1}}, \ - {"DDR_PASR_NV", ELEM_OUTPUT_INT, {0x799A}, {0x1}}, \ - {"DDR_UDIS_NV", ELEM_OUTPUT_INT, {0x799B}, {0x1}}, \ - {"DDR_TDIS_NV", ELEM_OUTPUT_INT, {0x799C}, {0x1}}, \ - {"DDR_FREQ_NV", ELEM_OUTPUT_INT, {0x799D}, {0x1}}, \ - {"**DDR_RUNTIME***", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"DDR_STATUS", ELEM_OUTPUT_INT, {0x79C0}, {0x2}}, \ - {"INITFREQ", ELEM_OUTPUT_INT, {0x79C2}, {0x1}}, \ - {"DDR_TMP_PERIOD", ELEM_OUTPUT_INT, {0x79C3}, {0x1}}, \ - {"DDR_PD_PRD", ELEM_OUTPUT_INT, {0x79C4}, {0x2}}, \ - {"DDR_ASREF_PRD", ELEM_OUTPUT_INT, {0x79C6}, {0x2}}, \ - {"DDR_FREQ_LOAD", ELEM_OUTPUT_HEX, {0x79C8}, {0x10}}, \ - {"DDR_MIN", ELEM_OUTPUT_INT, {0x79D8}, {0x1}}, \ - {"DDR_MAX", ELEM_OUTPUT_INT, {0x79D9}, 
{0x1}}, \ - {"DDR_LAST", ELEM_OUTPUT_INT, {0x79DA}, {0x1}}, \ - {"DDR_CURRENT", ELEM_OUTPUT_INT, {0x79DB}, {0x1}}, \ - {"DDR_TARGET", ELEM_OUTPUT_INT, {0x79DC}, {0x1}}, \ - {"DDR_DN_LIMIT", ELEM_OUTPUT_INT, {0x79DD}, {0x1}}, \ - {"DDR_UP_LIMIT", ELEM_OUTPUT_INT, {0x79DE}, {0x1}}, \ - {"DDR_PLL", ELEM_OUTPUT_INT, {0x79DF}, {0x1}}, \ - {"DDR_LAST_PLL", ELEM_OUTPUT_INT, {0x79E0}, {0x4}}, \ - {"CMD_CNT", ELEM_OUTPUT_HEX, {0x79E4}, {0x10}}, \ - {"DATA_CNT", ELEM_OUTPUT_INT, {0x79F4}, {0x4}}, \ - {"**AVS*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"AICORE0_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x7AE0}, {0x4}}, \ - {"AICORE1_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x7AE4}, {0x4}}, \ - {"CPU_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x7AE8}, {0x4}}, \ - {"AIMEMORY_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x7AEC}, {0x4}}, \ - {"PERI_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x7AF0}, {0x4}}, \ - {"AICORE0_CUR_VOLT", ELEM_OUTPUT_INT, {0x7AF4}, {0x4}}, \ - {"AICORE1_CUR_VOLT", ELEM_OUTPUT_INT, {0x7AF8}, {0x4}}, \ - {"CPU_CUR_VOLT", ELEM_OUTPUT_INT, {0x7AFC}, {0x4}}, \ - {"AIMEMORY_CUR_VOLT", ELEM_OUTPUT_INT, {0x7B00}, {0x4}}, \ - {"PERI_CUR_VOLT", ELEM_OUTPUT_INT, {0x7B04}, {0x4}}, \ - {"AICORE0_SVFD_VOLT", ELEM_OUTPUT_INT, {0x7B08}, {0x4}}, \ - {"AICORE1_SVFD_VOLT", ELEM_OUTPUT_INT, {0x7B0C}, {0x4}}, \ - {"AICORE0_SVFD_CPM", ELEM_OUTPUT_INT, {0x7B10}, {0x2}}, \ - {"AICORE1_SVFD_CPM", ELEM_OUTPUT_INT, {0x7B12}, {0x2}}, \ - {"AICORE0_NOTIFY_ST", ELEM_OUTPUT_INT, {0x7B14}, {0x1}}, \ - {"AICORE1_NOTIFY_ST", ELEM_OUTPUT_INT, {0x7B15}, {0x1}}, \ - {"CPU_NOTIFY_ST", ELEM_OUTPUT_INT, {0x7B16}, {0x1}}, \ - {"AIMEMORY_NOTIFY_ST", ELEM_OUTPUT_INT, {0x7B17}, {0x1}}, \ - {"PERI_NOTIFY_ST", ELEM_OUTPUT_INT, {0x7B18}, {0x1}}, \ - {"AICORE0_TZONE", ELEM_OUTPUT_INT, {0x7B19}, {0x1}}, \ - {"AICORE1_TZONE", ELEM_OUTPUT_INT, {0x7B1A}, {0x1}}, \ - {"CPU_TZONE", ELEM_OUTPUT_INT, {0x7B1B}, {0x1}}, \ - {"AIMEMORY_TZONE", ELEM_OUTPUT_INT, {0x7B1C}, {0x1}}, \ - {"PERI_TZONE", ELEM_OUTPUT_INT, {0x7B1D}, {0x1}}, \ - {"VOLT_RISE_TEMP", ELEM_OUTPUT_INT, {0x7B1E}, {0x1}}, \ - {"VOLT_DECREASE_TEMP", ELEM_OUTPUT_INT, {0x7B1F}, {0x1}}, \ - {"**efuse*******", ELEM_OUTPUT_STR, {0x00}, {0x8}}, \ - {"TSENSOR_EFUSE", ELEM_OUTPUT_INT, {0x7B28}, {0x8}}, \ -} - -#define DATA_MODEL_LPM3_SRAM MODEL_VECTOR(LPM3_SRAM) = { \ - {"magic_begin", ELEM_OUTPUT_INT, {0x5000}, {0x4}}, \ - {"slice_time", ELEM_OUTPUT_INT, {0x5004}, {0x4}}, \ - {"mod_reason", ELEM_OUTPUT_INT, {0x5008}, {0x4}}, \ - {"ddr_freq_id", ELEM_OUTPUT_INT, {0x500C}, {0x4}}, \ - {"uce_exc", ELEM_OUTPUT_INT, {0x5010}, {0x4}}, \ - {"reserved0-2", ELEM_OUTPUT_INT, {0x5014}, {0x4}}, \ - {"reg_save_addr", ELEM_OUTPUT_INT, {0x5020}, {0x4}}, \ - {"DDRRETENTION", ELEM_OUTPUT_INT, {0x5024}, {0x4}}, \ - {"DDRRETENTIONCLR", ELEM_OUTPUT_INT, {0x5028}, {0x4}}, \ - {"DRAMRETENTION", ELEM_OUTPUT_INT, {0x502C}, {0x4}}, \ - {"DDRC_0_3_RESET", ELEM_OUTPUT_INT, {0x5030}, {0x4}}, \ - {"DDRC_4_7_RESET", ELEM_OUTPUT_INT, {0x5034}, {0x4}}, \ - {"DDRC_0_3_PACK_RESET", ELEM_OUTPUT_INT, {0x5038}, {0x4}}, \ - {"DDRC_4_7_PACK_RESET", ELEM_OUTPUT_INT, {0x503C}, {0x4}}, \ - {"DDRC_EXMBIST0_REGS_RESET",ELEM_OUTPUT_INT, {0x5040}, {0x4}}, \ - {"DDRC_EXMBIST1_REGS_RESET",ELEM_OUTPUT_INT, {0x5044}, {0x4}}, \ - {"DDRC_0_3_PACK_RESET", ELEM_OUTPUT_INT, {0x5048}, {0x4}}, \ - {"DDRC_4_7_PACK_RESET", ELEM_OUTPUT_INT, {0x504C}, {0x4}}, \ - {"SCTRL_DDRC_0_3_AO_RST", ELEM_OUTPUT_INT, {0x5050}, {0x4}}, \ - {"SCTRL_DDRC_4_7_AO_RST", ELEM_OUTPUT_INT, {0x5054}, {0x4}}, \ - {"PPLLBYPASS0", ELEM_OUTPUT_INT, {0x5058}, {0x4}}, \ - {"PPLLBYPASS1", ELEM_OUTPUT_INT, 
{0x505C}, {0x4}}, \ - {"PPLL3FCTRL", ELEM_OUTPUT_INT, {0x5060}, {0x4}}, \ - {"PPLL3FCTRL_FRAC", ELEM_OUTPUT_INT, {0x5064}, {0x4}}, \ - {"PPLL4FCTRL", ELEM_OUTPUT_INT, {0x5068}, {0x4}}, \ - {"PPLL4FCTRL_FRAC", ELEM_OUTPUT_INT, {0x506C}, {0x4}}, \ - {"PPLLOCKSTATUS", ELEM_OUTPUT_INT, {0x5070}, {0x4}}, \ - {"DDRC_0_3_BYPASS_MODE", ELEM_OUTPUT_INT, {0x5074}, {0x4}}, \ - {"DDRC_4_7_BYPASS_MODE", ELEM_OUTPUT_INT, {0x5078}, {0x4}}, \ - {"PLL_PROF_CFG1", ELEM_OUTPUT_INT, {0x507C}, {0x4}}, \ -} - -/* TEE module */ -#define DATA_MODEL_TEE MODEL_VECTOR(TEE) = { \ - {"tee info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* TF module */ -#define DATA_MODEL_TF MODEL_VECTOR(TF) = { \ - {"tf info", ELEM_OUTPUT_STR_NL, {0x8}, {0xFFF8}}, \ -} - -/* DVPP module */ -#define DATA_MODEL_DVPP MODEL_VECTOR(DVPP) = { \ - {"dvpp info", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ -} - -/* DRIVE module */ -#define DATA_MODEL_DRIVER MODEL_VECTOR(DRIVER) = { \ - {"driver info", ELEM_OUTPUT_STR_NL, {0x0}, {0x20000}}, \ -} - -/* TS module */ -#define DATA_MODEL_TS MODEL_VECTOR(TS) = { \ - {"ts info", ELEM_OUTPUT_CHAR, {0x0}, {0x100000}}, \ -} - -/* TS module, start */ -#define DATA_MODEL_TS_START MODEL_VECTOR(TS_START) = { \ - {"ts start info", ELEM_OUTPUT_STR_NL, {0x0}, {0xC800}}, \ -} - -/* AP module, early print */ -#define DATA_MODEL_AP_EPRINT MODEL_VECTOR(AP_EPRINT) = { \ - {"early print info", ELEM_OUTPUT_STR_NL, {0x0}, {0x400}}, \ -} - -/* BIOS module */ -#define DATA_MODEL_BIOS MODEL_VECTOR(BIOS) = { \ - {"bios info", ELEM_OUTPUT_STR_NL, {0x0}, {0x50000}}, \ -} - -/* BIOS module, sram */ -#define DATA_MODEL_BIOS_SRAM MODEL_VECTOR(BIOS_SRAM) = { \ - {"LPM3_WAKE_UP_STATUS", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"DEBUG_TIME_POWERUP_DONE", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"DEBUG_TIME_PERSTHIGH_DONE", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"DEBUG_TIME_PCIEPHY_DONE", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"DEBUG_TIME_PHY_FIRMWARE_DONE", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"DEBUG_TIME_PCIECTRL_DONE", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"DEBUG_TIME_IMG_DONE", ELEM_OUTPUT_INT, {0x3C}, {0x4}}, \ - {"DEBUG_TIME_SECURE_DONE", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"DEBUG_VERSION_ADDR", ELEM_OUTPUT_HEX, {0x50}, {0x10}}, \ - {"XLOADER_RESET_REG", ELEM_OUTPUT_INT, {0x200}, {0x4}}, \ - {"XLOADER_KEY_POINT", ELEM_OUTPUT_INT, {0x204}, {0x4}}, \ - {"XLOADER_TIME_POWERUP_DONE", ELEM_OUTPUT_INT, {0x228}, {0x4}}, \ - {"XLOADER_TIME_PERSTHIGH_DONE", ELEM_OUTPUT_INT, {0x22C}, {0x4}}, \ - {"XLOADER_TIME_PCIEPHY_DONE", ELEM_OUTPUT_INT, {0x230}, {0x4}}, \ - {"XLOADER_TIME_PHY_FIRMWARE_DONE", ELEM_OUTPUT_INT, {0x234}, {0x4}}, \ - {"XLOADER_TIME_PCIECTRL_DONE", ELEM_OUTPUT_INT, {0x238}, {0x4}}, \ - {"XLOADER_TIME_PCIE_DETECT_DONE", ELEM_OUTPUT_INT, {0x23C}, {0x4}}, \ - {"UEFI_LAST_KEYPOINT", ELEM_OUTPUT_INT, {0x320}, {0x4}}, \ - {"SD_LOAD_FILE_STATUS", ELEM_OUTPUT_INT, {0x350}, {0x4}}, \ -} - - -/* DDR_SRAM module */ -#define DATA_MODEL_DDR_SRAM MODEL_VECTOR(DDR_SRAM) = {\ - {"magic_begin", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"slice_time", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"mod_reason", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"ddr_freq_id", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"ddr_status", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"uce_exc", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"SC_TSENSOR_INFO_ADDR", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"SC_TSENSOR_AICORE_LIMIT", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"SC_TSENSOR_MAX_TEMP", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"channel_mask", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"channel_num", 
ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"rank_num", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"ddr_size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"manufactery_id", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"iecc_cerr_thresh[0]", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"iecc_cerr_thresh[1]", ELEM_OUTPUT_INT, {0x3C}, {0x4}}, \ - {"iecc_cerr_thresh[2]", ELEM_OUTPUT_INT, {0x40}, {0x4}}, \ - {"iecc_cerr_thresh[3]", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"iecc_cerr_thresh[4]", ELEM_OUTPUT_INT, {0x48}, {0x4}}, \ - {"iecc_cerr_thresh[5]", ELEM_OUTPUT_INT, {0x4C}, {0x4}}, \ - {"iecc_cerr_thresh[6]", ELEM_OUTPUT_INT, {0x50}, {0x4}}, \ - {"iecc_cerr_thresh[7]", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"iecc_ctrl[0]", ELEM_OUTPUT_INT, {0x58}, {0x4}}, \ - {"iecc_ctrl[1]", ELEM_OUTPUT_INT, {0x5C}, {0x4}}, \ - {"iecc_ctrl[2]", ELEM_OUTPUT_INT, {0x60}, {0x4}}, \ - {"iecc_ctrl[3]", ELEM_OUTPUT_INT, {0x64}, {0x4}}, \ - {"iecc_ctrl[4]", ELEM_OUTPUT_INT, {0x68}, {0x4}}, \ - {"iecc_ctrl[5]", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"iecc_ctrl[6]", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"iecc_ctrl[7]", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {"iecc_cerr_cnt[0]", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {"iecc_cerr_cnt[1]", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {"iecc_cerr_cnt[2]", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {"iecc_cerr_cnt[3]", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {"iecc_cerr_cnt[4]", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"iecc_cerr_cnt[5]", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {"iecc_cerr_cnt[6]", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {"iecc_cerr_cnt[7]", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {"iecc_uerr_cnt[0]", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {"iecc_uerr_cnt[1]", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {"iecc_uerr_cnt[2]", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"iecc_uerr_cnt[3]", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {"iecc_uerr_cnt[4]", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {"iecc_uerr_cnt[5]", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {"iecc_uerr_cnt[6]", ELEM_OUTPUT_INT, {0xB0}, {0x4}}, \ - {"iecc_uerr_cnt[7]", ELEM_OUTPUT_INT, {0xB4}, {0x4}}, \ - {"magic_byte", ELEM_OUTPUT_INT, {0x100}, {0x1}}, \ - {"err_max", ELEM_OUTPUT_INT, {0x104}, {0x1}}, \ - {"irq_count", ELEM_OUTPUT_INT, {0x108}, {0x1}}, \ - {"index", ELEM_OUTPUT_INT, {0x10C}, {0x1}}, \ - {"rate[0].time", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"rate[0].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x104}, {0x1}}, \ - {"rate[0].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x105}, {0x1}}, \ - {"rate[0].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x106}, {0x1}}, \ - {"rate[0].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x107}, {0x1}}, \ - {"rate[0].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x108}, {0x1}}, \ - {"rate[0].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x109}, {0x1}}, \ - {"rate[0].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x10A}, {0x1}}, \ - {"rate[0].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x10B}, {0x1}}, \ - {"rate[0].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x10C}, {0x1}}, \ - {"rate[0].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x10D}, {0x1}}, \ - {"rate[0].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x10E}, {0x1}}, \ - {"rate[0].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x10F}, {0x1}}, \ - {"rate[0].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x110}, {0x1}}, \ - {"rate[0].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x111}, {0x1}}, \ - {"rate[0].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x112}, {0x1}}, \ - {"rate[0].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x113}, {0x1}}, \ - {"rate[1].time", ELEM_OUTPUT_INT, {0x114}, {0x4}}, \ - {"rate[1].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x118}, {0x1}}, \ - {"rate[1].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x119}, 
{0x1}}, \ - {"rate[1].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x11A}, {0x1}}, \ - {"rate[1].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x11B}, {0x1}}, \ - {"rate[1].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x11C}, {0x1}}, \ - {"rate[1].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x11D}, {0x1}}, \ - {"rate[1].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x11E}, {0x1}}, \ - {"rate[1].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x11F}, {0x1}}, \ - {"rate[1].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x120}, {0x1}}, \ - {"rate[1].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x121}, {0x1}}, \ - {"rate[1].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x122}, {0x1}}, \ - {"rate[1].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x123}, {0x1}}, \ - {"rate[1].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x124}, {0x1}}, \ - {"rate[1].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x125}, {0x1}}, \ - {"rate[1].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x126}, {0x1}}, \ - {"rate[1].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x127}, {0x1}}, \ - {"rate[2].time", ELEM_OUTPUT_INT, {0x128}, {0x4}}, \ - {"rate[2].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x12C}, {0x1}}, \ - {"rate[2].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x12D}, {0x1}}, \ - {"rate[2].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x12E}, {0x1}}, \ - {"rate[2].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x12F}, {0x1}}, \ - {"rate[2].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x130}, {0x1}}, \ - {"rate[2].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x131}, {0x1}}, \ - {"rate[2].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x132}, {0x1}}, \ - {"rate[2].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x133}, {0x1}}, \ - {"rate[2].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x134}, {0x1}}, \ - {"rate[2].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x135}, {0x1}}, \ - {"rate[2].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x136}, {0x1}}, \ - {"rate[2].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x137}, {0x1}}, \ - {"rate[2].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x138}, {0x1}}, \ - {"rate[2].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x139}, {0x1}}, \ - {"rate[2].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x13A}, {0x1}}, \ - {"rate[2].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x13B}, {0x1}}, \ - {"rate[3].time", ELEM_OUTPUT_INT, {0x13C}, {0x4}}, \ - {"rate[3].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x140}, {0x1}}, \ - {"rate[3].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x141}, {0x1}}, \ - {"rate[3].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x142}, {0x1}}, \ - {"rate[3].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x143}, {0x1}}, \ - {"rate[3].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x144}, {0x1}}, \ - {"rate[3].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x145}, {0x1}}, \ - {"rate[3].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x146}, {0x1}}, \ - {"rate[3].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x147}, {0x1}}, \ - {"rate[3].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x148}, {0x1}}, \ - {"rate[3].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x149}, {0x1}}, \ - {"rate[3].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x14A}, {0x1}}, \ - {"rate[3].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x14B}, {0x1}}, \ - {"rate[3].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x14C}, {0x1}}, \ - {"rate[3].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x14D}, {0x1}}, \ - {"rate[3].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x14E}, {0x1}}, \ - {"rate[3].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x14F}, {0x1}}, \ - {"rate[4].time", ELEM_OUTPUT_INT, {0x150}, {0x4}}, \ - {"rate[4].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x154}, {0x1}}, \ - {"rate[4].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x155}, {0x1}}, \ - {"rate[4].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x156}, {0x1}}, \ - {"rate[4].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x157}, {0x1}}, \ - 
{"rate[4].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x158}, {0x1}}, \ - {"rate[4].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x159}, {0x1}}, \ - {"rate[4].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x15A}, {0x1}}, \ - {"rate[4].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x15B}, {0x1}}, \ - {"rate[4].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x15C}, {0x1}}, \ - {"rate[4].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x15D}, {0x1}}, \ - {"rate[4].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x15E}, {0x1}}, \ - {"rate[4].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x15F}, {0x1}}, \ - {"rate[4].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x160}, {0x1}}, \ - {"rate[4].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x161}, {0x1}}, \ - {"rate[4].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x162}, {0x1}}, \ - {"rate[4].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x163}, {0x1}}, \ - {"rate[5].time", ELEM_OUTPUT_INT, {0x164}, {0x4}}, \ - {"rate[5].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x168}, {0x1}}, \ - {"rate[5].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x169}, {0x1}}, \ - {"rate[5].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x16A}, {0x1}}, \ - {"rate[5].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x16B}, {0x1}}, \ - {"rate[5].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x16C}, {0x1}}, \ - {"rate[5].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x16D}, {0x1}}, \ - {"rate[5].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x16E}, {0x1}}, \ - {"rate[5].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x16F}, {0x1}}, \ - {"rate[5].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x170}, {0x1}}, \ - {"rate[5].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x171}, {0x1}}, \ - {"rate[5].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x172}, {0x1}}, \ - {"rate[5].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x173}, {0x1}}, \ - {"rate[5].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x174}, {0x1}}, \ - {"rate[5].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x175}, {0x1}}, \ - {"rate[5].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x176}, {0x1}}, \ - {"rate[5].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x177}, {0x1}}, \ - {"rate[6].time", ELEM_OUTPUT_INT, {0x178}, {0x4}}, \ - {"rate[6].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x17C}, {0x1}}, \ - {"rate[6].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x17D}, {0x1}}, \ - {"rate[6].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x17E}, {0x1}}, \ - {"rate[6].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x17F}, {0x1}}, \ - {"rate[6].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x180}, {0x1}}, \ - {"rate[6].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x181}, {0x1}}, \ - {"rate[6].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x182}, {0x1}}, \ - {"rate[6].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x183}, {0x1}}, \ - {"rate[6].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x184}, {0x1}}, \ - {"rate[6].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x185}, {0x1}}, \ - {"rate[6].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x186}, {0x1}}, \ - {"rate[6].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x187}, {0x1}}, \ - {"rate[6].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x188}, {0x1}}, \ - {"rate[6].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x189}, {0x1}}, \ - {"rate[6].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x18A}, {0x1}}, \ - {"rate[6].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x18B}, {0x1}}, \ - {"rate[7].time", ELEM_OUTPUT_INT, {0x18C}, {0x4}}, \ - {"rate[7].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x190}, {0x1}}, \ - {"rate[7].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x191}, {0x1}}, \ - {"rate[7].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x192}, {0x1}}, \ - {"rate[7].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x193}, {0x1}}, \ - {"rate[7].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x194}, {0x1}}, \ - {"rate[7].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x195}, {0x1}}, \ - {"rate[7].rate_per_rank[6]", 
ELEM_OUTPUT_INT, {0x196}, {0x1}}, \ - {"rate[7].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x197}, {0x1}}, \ - {"rate[7].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x198}, {0x1}}, \ - {"rate[7].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x199}, {0x1}}, \ - {"rate[7].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x19A}, {0x1}}, \ - {"rate[7].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x19B}, {0x1}}, \ - {"rate[7].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x19C}, {0x1}}, \ - {"rate[7].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x19D}, {0x1}}, \ - {"rate[7].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x19E}, {0x1}}, \ - {"rate[7].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x1AF}, {0x1}}, \ - {"rate[8].time", ELEM_OUTPUT_INT, {0x1B0}, {0x4}}, \ - {"rate[8].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x1B4}, {0x1}}, \ - {"rate[8].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x1B5}, {0x1}}, \ - {"rate[8].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x1B6}, {0x1}}, \ - {"rate[8].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x1B7}, {0x1}}, \ - {"rate[8].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x1B8}, {0x1}}, \ - {"rate[8].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x1B9}, {0x1}}, \ - {"rate[8].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x1BA}, {0x1}}, \ - {"rate[8].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x1BB}, {0x1}}, \ - {"rate[8].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x1BC}, {0x1}}, \ - {"rate[8].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x1BD}, {0x1}}, \ - {"rate[8].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x1BE}, {0x1}}, \ - {"rate[8].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x1BF}, {0x1}}, \ - {"rate[8].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x1C0}, {0x1}}, \ - {"rate[8].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x1C1}, {0x1}}, \ - {"rate[8].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x1C2}, {0x1}}, \ - {"rate[8].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x1C3}, {0x1}}, \ - {"rate[9].time", ELEM_OUTPUT_INT, {0x1C4}, {0x4}}, \ - {"rate[9].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x1C8}, {0x1}}, \ - {"rate[9].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x1C9}, {0x1}}, \ - {"rate[9].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x1CA}, {0x1}}, \ - {"rate[9].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x1CB}, {0x1}}, \ - {"rate[9].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x1CC}, {0x1}}, \ - {"rate[9].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x1CD}, {0x1}}, \ - {"rate[9].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x1CE}, {0x1}}, \ - {"rate[9].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x1CF}, {0x1}}, \ - {"rate[9].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x1D0}, {0x1}}, \ - {"rate[9].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x1D1}, {0x1}}, \ - {"rate[9].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x1D2}, {0x1}}, \ - {"rate[9].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x1D3}, {0x1}}, \ - {"rate[9].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x1D4}, {0x1}}, \ - {"rate[9].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x1D5}, {0x1}}, \ - {"rate[9].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x1D6}, {0x1}}, \ - {"rate[9].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x1D7}, {0x1}}, \ - {"mrr4_0.count", ELEM_OUTPUT_INT, {0x1D8}, {0x1}}, \ - {"mrr4_0.happen", ELEM_OUTPUT_INT, {0x1D9}, {0x1}}, \ - {"shake_count.count", ELEM_OUTPUT_INT, {0x1DA}, {0x1}}, \ - {"shake_count.happen", ELEM_OUTPUT_INT, {0x1DB}, {0x1}}, \ - {"sfc_record0", ELEM_OUTPUT_INT, {0x1DC}, {0x1}}, \ - {"sfc_record1", ELEM_OUTPUT_INT, {0x1DD}, {0x1}}, \ - {"sfc_mr5", ELEM_OUTPUT_INT, {0x1DE}, {0x1}}, \ -} - -// bbox kbox info -#define DATA_MODEL_BBOX_KBOX MODEL_VECTOR(BBOX_KBOX) = { \ - {"CONSOLE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[console info]", ELEM_OUTPUT_STR_NL, {0x0}, {0x10000}}, \ - {"CONSOLE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"MESSAGE 
START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[message info]", ELEM_OUTPUT_STR_NL, {0x10000}, {0x40000}}, \ - {"MESSAGE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if panic", ELEM_CTRL_COMPARE, {0x50000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"PANIC START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[panic info]", ELEM_OUTPUT_STR_NL, {0x50000}, {0x8000}}, \ - {"PANIC END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if emerge", ELEM_CTRL_COMPARE, {0x58000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"EMERGE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[emerge info]", ELEM_OUTPUT_STR_NL, {0x58000}, {0x8000}}, \ - {"EMERGE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"if die", ELEM_CTRL_COMPARE, {0x60000}, {0x1}}, \ - {"", ELEM_CTRL_CMP_JUMP_EQ, {0x0}, {0x3}}, \ - {"DIE START", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"[die info]", ELEM_OUTPUT_STR_NL, {0x60000}, {0x20000}}, \ - {"DIE END", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ -} - -/** - * the whole space is 512k, used for histroy data record - * the struct distribution is as follows: - * +-------------------+ - * | head info(1k) | region: area: module block: - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | boot region |---->| first area |---->| module block |---->| block head | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | run region | | second area | | module block | | block data | - * +-------------------+ +--------------------+ +-----------------+ +-----------------+ - * | reserved | | ...... | | ...... | - * +-------------------+ +--------------------+ +-----------------+ - */ -#define DATA_MODEL_HDR_BOOT_BIOS MODEL_VECTOR(HDR_BOOT_BIOS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"bsbc point", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"bsbc exc point", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"hboot1 point", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"hboot1 exc point", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"hboot2 point", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"hboot2 exc point", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"[BIOS info]", ELEM_OUTPUT_STR_NL, {0x480}, {0x2780}}, \ -} - -#define DATA_MODEL_HDR_BOOT_DDR MODEL_VECTOR(HDR_BOOT_DDR) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"magic_begin", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"init_keypoint", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"ldo8_vol", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"buck3_status", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"buck3_vol", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {"buck5_status", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"buck5_vol", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {"wr_test_result", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"rint_status[0]", ELEM_OUTPUT_INT, {0x3C}, {0x4}}, \ - {"rint_status[1]", ELEM_OUTPUT_INT, 
{0x40}, {0x4}}, \ - {"rint_status[2]", ELEM_OUTPUT_INT, {0x44}, {0x4}}, \ - {"rint_status[3]", ELEM_OUTPUT_INT, {0x48}, {0x4}}, \ - {"rint_status[4]", ELEM_OUTPUT_INT, {0x4C}, {0x4}}, \ - {"rint_status[5]", ELEM_OUTPUT_INT, {0x50}, {0x4}}, \ - {"rint_status[6]", ELEM_OUTPUT_INT, {0x54}, {0x4}}, \ - {"rint_status[7]", ELEM_OUTPUT_INT, {0x58}, {0x4}}, \ - {"SOC_SCTRL_DDRRETENTION_ADDR", ELEM_OUTPUT_INT, {0x5C}, {0x4}}, \ - {"SOC_SCTRL_DDRRETENTIONCLR_ADDR", ELEM_OUTPUT_INT, {0x60}, {0x4}}, \ - {"SOC_SCTRL_DRAMRETENTION_ADDR", ELEM_OUTPUT_INT, {0x64}, {0x4}}, \ - {"SC_DDRC_0_3_RESET_REQ", ELEM_OUTPUT_INT, {0x68}, {0x4}}, \ - {"SC_DDRC_4_7_RESET_REQ", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"SC_DDRC_0_3_PACK_RESET_REQ", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"SC_DDRC_4_7_PACK_RESET_REQ", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {"SC_DDRC_EXMBIST0_REGS_RESET_REQ", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {"SC_DDRC_EXMBIST1_REGS_RESET_REQ", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {"SOC_SCTRL_DDRC_0_3_AO_RST_ADDR", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {"SOC_SCTRL_DDRC_4_7_AO_RST_ADDR", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {"SOC_PMCTRL_PPLLBYPASS0_ADDR", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"SOC_PMCTRL_PPLLBYPASS1_ADDR", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {"SOC_PMCTRL_PPLL3FCTRL_ADDR", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {"SOC_PMCTRL_PPLL3FCTRL_FRAC_ADDR", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {"SOC_PMCTRL_PPLL4FCTRL_ADDR", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {"SOC_PMCTRL_PPLL4FCTRL_FRAC_ADDR", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {"SOC_PMCTRL_PPLLOCKSTATUS_ADDR", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"SC_DDRC_0_3_BYPASS_MODE_CTRL", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {"SC_DDRC_4_7_BYPASS_MODE_CTRL", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {"SC_PLL_PROF_CFG1", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ -} - -#define DATA_MODEL_HDR_BOOT_TEE MODEL_VECTOR(HDR_BOOT_TEE) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[BOOT FATAL INFO SIZE]", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"[BOOT FATAL INFO]", ELEM_OUTPUT_STR_NL, {0x20}, {0x7E0}}, \ - {"[run point tail]", ELEM_OUTPUT_INT, {0x800}, {0x4}}, \ - {"[boot point info]", ELEM_OUTPUT_HEX, {0x804}, {0x20}}, \ - {"[run point info]", ELEM_OUTPUT_HEX, {0x884}, {0x20}}, \ - {"[last log size]", ELEM_OUTPUT_INT, {0xC00}, {0x4}}, \ - {"[last log data]", ELEM_OUTPUT_STR_NL, {0xC04}, {0x3FC}}, \ -} - -#define DATA_MODEL_HDR_BOOT_ATF MODEL_VECTOR(HDR_BOOT_ATF) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[ATF info]", ELEM_OUTPUT_STR_NL, {0x1C}, {0xFE4}}, \ -} - -#define DATA_MODEL_HDR_BOOT_AREA MODEL_VECTOR(HDR_BOOT_AREA) = { \ - {"BIOS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_BIOS", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_BIOS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - 
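Every DATA_MODEL_* table in this header uses the same four-field entry: a label, an element kind (ELEM_OUTPUT_* for printable fields, ELEM_CTRL_* for compare/jump and sub-table dispatch), an offset into the dump buffer, and a size. The "if" / "is used" pair that opens each module table reads the 4-byte word at offset 0xC and skips the block when it is not 1. The sketch below walks such a table and prints only the integer fields; the entry type, enum values and function are hypothetical stand-ins, not the parser that ships with the bbox toolchain.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical mirror of one table entry: {name, kind, {offset}, {size}}. */
typedef enum { OUT_INT, OUT_STR_NL, OUT_DIVIDE, CTRL_COMPARE, CTRL_CMP_JUMP_NE } elem_kind;

typedef struct {
    const char *name;
    elem_kind   kind;
    uint32_t    arg0;   /* usually an offset into the raw dump buffer */
    uint32_t    arg1;   /* usually a size in bytes; jump distance for CTRL entries */
} elem_desc;

/* Walk a table over a raw dump buffer, honouring the compare/jump guard. */
void dump_int_fields(const elem_desc *tbl, size_t n, const uint8_t *buf, size_t len)
{
    uint64_t cmp = 0;
    for (size_t i = 0; i < n; ++i) {
        const elem_desc *e = &tbl[i];
        switch (e->kind) {
        case CTRL_COMPARE:                 /* latch the value found at arg0 */
            cmp = 0;
            if (e->arg0 + e->arg1 <= len) {
                memcpy(&cmp, buf + e->arg0, e->arg1);  /* little-endian assumed */
            }
            break;
        case CTRL_CMP_JUMP_NE:             /* e.g. {"is used", ..., {0x1}, {0xFF}} */
            if (cmp != e->arg0) {
                i += e->arg1;              /* skip the unused module block */
            }
            break;
        case OUT_INT:
            if (e->arg0 + e->arg1 <= len) {
                uint64_t v = 0;
                memcpy(&v, buf + e->arg0, e->arg1);
                printf("%-24s = 0x%llx\n", e->name, (unsigned long long)v);
            }
            break;
        default:                           /* strings and dividers omitted here */
            break;
        }
    }
}
```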
{"DDR INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_DDR", ELEM_CTRL_TABLE_GOTO, {0x3000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_DDR}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"TEE INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_TEE", ELEM_CTRL_TABLE_GOTO, {0x4000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_TEE}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"ATF INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_BOOT_ATF", ELEM_CTRL_TABLE_GOTO, {0x5000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_ATF}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_RUN_OS MODEL_VECTOR(HDR_RUN_OS) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[OS info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {"event_flag", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"dump_flag", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"err num", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {"[OS log]", ELEM_OUTPUT_STR_NL, {0x100}, {0xF00}}, \ -} - -#define DATA_MODEL_HDR_RUN_LPM MODEL_VECTOR(HDR_RUN_LPM) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0x200}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[LPM log]", ELEM_OUTPUT_STR_NL, {0x40}, {0x400}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[LPM data]:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {"reset_reason", ELEM_OUTPUT_INT, {0x440}, {0x4}}, \ - {"slice", ELEM_OUTPUT_INT, {0x444}, {0x4}}, \ - {"rtc", ELEM_OUTPUT_INT, {0x448}, {0x4}}, \ - {"r13", ELEM_OUTPUT_INT, {0x44C}, {0x4}}, \ - {"lr1", ELEM_OUTPUT_INT, {0x450}, {0x4}}, \ - {"pc", ELEM_OUTPUT_INT, {0x454}, {0x4}}, \ - {"xpsr", ELEM_OUTPUT_INT, {0x458}, {0x4}}, \ - {"cfsr", ELEM_OUTPUT_INT, {0x45C}, {0x4}}, \ - {"hfsr", ELEM_OUTPUT_INT, {0x460}, {0x4}}, \ - {"bfar", ELEM_OUTPUT_INT, {0x464}, {0x4}}, \ - {"exc_trace", ELEM_OUTPUT_INT, {0x468}, {0x1}}, \ - {"ddr_exc", ELEM_OUTPUT_INT, {0x469}, {0x1}}, \ - {"irq_id", ELEM_OUTPUT_INT, {0x46A}, {0x2}}, \ - {"task_id", ELEM_OUTPUT_INT, {0x46C}, {0x4}}, \ - {"TSENSOR_A55", ELEM_OUTPUT_INT, {0x470}, {0x1}}, \ - {"TSENSOR_PERI", ELEM_OUTPUT_INT, {0x471}, {0x1}}, \ - {"TSENSOR_AIC0", ELEM_OUTPUT_INT, {0x472}, {0x1}}, \ - {"TSENSOR_AIC1", ELEM_OUTPUT_INT, {0x473}, {0x1}}, \ - {"DDR_T_GRADE", ELEM_OUTPUT_INT, {0x474}, {0x1}}, \ - {"EDP_SCALE_0", ELEM_OUTPUT_INT, {0x475}, {0x1}}, \ - {"EDP_SCALE_1", ELEM_OUTPUT_INT, {0x476}, {0x1}}, \ - {"TMP_STATUS", ELEM_OUTPUT_INT, {0x477}, {0x1}}, \ - {"TMP_CTRL_ST", ELEM_OUTPUT_INT, {0x478}, {0x1}}, \ - {"AIC_FREQ_ST", ELEM_OUTPUT_INT, {0x479}, {0x1}}, \ - {"A55_FREQ_ST", ELEM_OUTPUT_INT, {0x47a}, {0x1}}, \ - {"AIC_NUM_ST", ELEM_OUTPUT_INT, {0x47b}, {0x1}}, \ - {"TMP_RST", ELEM_OUTPUT_INT, {0x47c}, {0x1}}, \ - {"TMP_HIGH", ELEM_OUTPUT_INT, {0x47d}, {0x1}}, \ - {"TMP_NOR", ELEM_OUTPUT_INT, {0x47e}, {0x1}}, \ 
- {"TMP_PERIOD", ELEM_OUTPUT_INT, {0x47f}, {0x1}}, \ - {"T_RST_STATUS", ELEM_OUTPUT_INT, {0x48D}, {0x1}}, \ - {"T_ERR_TSENSOR", ELEM_OUTPUT_INT, {0x48e}, {0x1}}, \ - {"T_ERR_EFUSE", ELEM_OUTPUT_INT, {0x48f}, {0x1}}, \ - {"AICORE0_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x490}, {0x4}}, \ - {"AICORE1_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x494}, {0x4}}, \ - {"CPU_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x498}, {0x4}}, \ - {"AIMEMORY_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x49c}, {0x4}}, \ - {"PERI_HIGHTEMP_VOLT", ELEM_OUTPUT_INT, {0x4a0}, {0x4}}, \ - {"AICORE0_CUR_VOLT", ELEM_OUTPUT_INT, {0x4a4}, {0x4}}, \ - {"AICORE1_CUR_VOLT", ELEM_OUTPUT_INT, {0x4a8}, {0x4}}, \ - {"CPU_CUR_VOLT", ELEM_OUTPUT_INT, {0x4ac}, {0x4}}, \ - {"AIMEMORY_CUR_VOLT", ELEM_OUTPUT_INT, {0x4b0}, {0x4}}, \ - {"PERI_CUR_VOLT", ELEM_OUTPUT_INT, {0x4b4}, {0x4}}, \ - {"AICORE0_SVFD_VOLT", ELEM_OUTPUT_INT, {0x4b8}, {0x4}}, \ - {"AICORE1_SVFD_VOLT", ELEM_OUTPUT_INT, {0x4bc}, {0x4}}, \ - {"AICORE0_SVFD_CPM", ELEM_OUTPUT_INT, {0x4c0}, {0x2}}, \ - {"AICORE1_SVFD_CPM", ELEM_OUTPUT_INT, {0x4c2}, {0x2}}, \ - {"AICORE0_NOTIFY_ST", ELEM_OUTPUT_INT, {0x4c4}, {0x1}}, \ - {"AICORE1_NOTIFY_ST", ELEM_OUTPUT_INT, {0x4c5}, {0x1}}, \ - {"CPU_NOTIFY_ST", ELEM_OUTPUT_INT, {0x4c6}, {0x1}}, \ - {"AIMEMORY_NOTIFY_ST", ELEM_OUTPUT_INT, {0x4c7}, {0x1}}, \ - {"PERI_NOTIFY_ST", ELEM_OUTPUT_INT, {0x4c8}, {0x1}}, \ - {"AICORE0_TZONE", ELEM_OUTPUT_INT, {0x4c9}, {0x1}}, \ - {"AICORE1_TZONE", ELEM_OUTPUT_INT, {0x4ca}, {0x1}}, \ - {"CPU_TZONE", ELEM_OUTPUT_INT, {0x4cb}, {0x1}}, \ - {"AIMEMORY_TZONE", ELEM_OUTPUT_INT, {0x4cc}, {0x1}}, \ - {"PERI_TZONE", ELEM_OUTPUT_INT, {0x4cd}, {0x1}}, \ - {"VOLT_RISE_TEMP", ELEM_OUTPUT_INT, {0x4ce}, {0x1}}, \ - {"VOLT_DECREASE_TEMP", ELEM_OUTPUT_INT, {0x4cf}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, /* below for ddr */ \ - {"[DDR data]:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {"magic_begin", ELEM_OUTPUT_INT, {0x800}, {0x4}}, \ - {"slice_time", ELEM_OUTPUT_INT, {0x804}, {0x4}}, \ - {"mod_reason", ELEM_OUTPUT_INT, {0x808}, {0x4}}, \ - {"ddr_freq_id", ELEM_OUTPUT_INT, {0x80C}, {0x4}}, \ - {"ddr_status", ELEM_OUTPUT_INT, {0x810}, {0x4}}, \ - {"uce_exc", ELEM_OUTPUT_INT, {0x814}, {0x4}}, \ - {"SC_TSENSOR_INFO_ADDR", ELEM_OUTPUT_INT, {0x818}, {0x4}}, \ - {"SC_TSENSOR_AICORE_LIMIT", ELEM_OUTPUT_INT, {0x81C}, {0x4}}, \ - {"SC_TSENSOR_MAX_TEMP", ELEM_OUTPUT_INT, {0x820}, {0x4}}, \ - {"channel_mask", ELEM_OUTPUT_INT, {0x824}, {0x4}}, \ - {"channel_num", ELEM_OUTPUT_INT, {0x828}, {0x4}}, \ - {"rank_num", ELEM_OUTPUT_INT, {0x82C}, {0x4}}, \ - {"ddr_size", ELEM_OUTPUT_INT, {0x830}, {0x4}}, \ - {"manufactery_id", ELEM_OUTPUT_INT, {0x834}, {0x4}}, \ - {"iecc_cerr_thresh[0]", ELEM_OUTPUT_INT, {0x838}, {0x4}}, \ - {"iecc_cerr_thresh[1]", ELEM_OUTPUT_INT, {0x83C}, {0x4}}, \ - {"iecc_cerr_thresh[2]", ELEM_OUTPUT_INT, {0x840}, {0x4}}, \ - {"iecc_cerr_thresh[3]", ELEM_OUTPUT_INT, {0x844}, {0x4}}, \ - {"iecc_cerr_thresh[4]", ELEM_OUTPUT_INT, {0x848}, {0x4}}, \ - {"iecc_cerr_thresh[5]", ELEM_OUTPUT_INT, {0x84C}, {0x4}}, \ - {"iecc_cerr_thresh[6]", ELEM_OUTPUT_INT, {0x850}, {0x4}}, \ - {"iecc_cerr_thresh[7]", ELEM_OUTPUT_INT, {0x854}, {0x4}}, \ - {"iecc_ctrl[0]", ELEM_OUTPUT_INT, {0x858}, {0x4}}, \ - {"iecc_ctrl[1]", ELEM_OUTPUT_INT, {0x85C}, {0x4}}, \ - {"iecc_ctrl[2]", ELEM_OUTPUT_INT, {0x860}, {0x4}}, \ - {"iecc_ctrl[3]", ELEM_OUTPUT_INT, {0x864}, {0x4}}, \ - {"iecc_ctrl[4]", ELEM_OUTPUT_INT, {0x868}, {0x4}}, \ - {"iecc_ctrl[5]", ELEM_OUTPUT_INT, {0x86C}, {0x4}}, \ - {"iecc_ctrl[6]", ELEM_OUTPUT_INT, {0x870}, {0x4}}, \ - {"iecc_ctrl[7]", 
ELEM_OUTPUT_INT, {0x874}, {0x4}}, \ - {"iecc_cerr_cnt[0]", ELEM_OUTPUT_INT, {0x878}, {0x4}}, \ - {"iecc_cerr_cnt[1]", ELEM_OUTPUT_INT, {0x87C}, {0x4}}, \ - {"iecc_cerr_cnt[2]", ELEM_OUTPUT_INT, {0x880}, {0x4}}, \ - {"iecc_cerr_cnt[3]", ELEM_OUTPUT_INT, {0x884}, {0x4}}, \ - {"iecc_cerr_cnt[4]", ELEM_OUTPUT_INT, {0x888}, {0x4}}, \ - {"iecc_cerr_cnt[5]", ELEM_OUTPUT_INT, {0x88C}, {0x4}}, \ - {"iecc_cerr_cnt[6]", ELEM_OUTPUT_INT, {0x890}, {0x4}}, \ - {"iecc_cerr_cnt[7]", ELEM_OUTPUT_INT, {0x894}, {0x4}}, \ - {"iecc_uerr_cnt[0]", ELEM_OUTPUT_INT, {0x898}, {0x4}}, \ - {"iecc_uerr_cnt[1]", ELEM_OUTPUT_INT, {0x89C}, {0x4}}, \ - {"iecc_uerr_cnt[2]", ELEM_OUTPUT_INT, {0x8A0}, {0x4}}, \ - {"iecc_uerr_cnt[3]", ELEM_OUTPUT_INT, {0x8A4}, {0x4}}, \ - {"iecc_uerr_cnt[4]", ELEM_OUTPUT_INT, {0x8A8}, {0x4}}, \ - {"iecc_uerr_cnt[5]", ELEM_OUTPUT_INT, {0x8AC}, {0x4}}, \ - {"iecc_uerr_cnt[6]", ELEM_OUTPUT_INT, {0x8B0}, {0x4}}, \ - {"iecc_uerr_cnt[7]", ELEM_OUTPUT_INT, {0x8B4}, {0x4}}, \ - {"magic_byte", ELEM_OUTPUT_INT, {0x900}, {0x1}}, \ - {"err_max", ELEM_OUTPUT_INT, {0x904}, {0x1}}, \ - {"irq_count", ELEM_OUTPUT_INT, {0x908}, {0x1}}, \ - {"index", ELEM_OUTPUT_INT, {0x90C}, {0x1}}, \ - {"rate[0].time", ELEM_OUTPUT_INT, {0x900}, {0x4}}, \ - {"rate[0].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x904}, {0x1}}, \ - {"rate[0].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x905}, {0x1}}, \ - {"rate[0].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x906}, {0x1}}, \ - {"rate[0].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x907}, {0x1}}, \ - {"rate[0].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x908}, {0x1}}, \ - {"rate[0].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x909}, {0x1}}, \ - {"rate[0].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x90A}, {0x1}}, \ - {"rate[0].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x90B}, {0x1}}, \ - {"rate[0].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x90C}, {0x1}}, \ - {"rate[0].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x90D}, {0x1}}, \ - {"rate[0].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x90E}, {0x1}}, \ - {"rate[0].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x90F}, {0x1}}, \ - {"rate[0].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x910}, {0x1}}, \ - {"rate[0].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x911}, {0x1}}, \ - {"rate[0].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x912}, {0x1}}, \ - {"rate[0].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x913}, {0x1}}, \ - {"rate[1].time", ELEM_OUTPUT_INT, {0x914}, {0x4}}, \ - {"rate[1].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x918}, {0x1}}, \ - {"rate[1].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x919}, {0x1}}, \ - {"rate[1].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x91A}, {0x1}}, \ - {"rate[1].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x91B}, {0x1}}, \ - {"rate[1].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x91C}, {0x1}}, \ - {"rate[1].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x91D}, {0x1}}, \ - {"rate[1].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x91E}, {0x1}}, \ - {"rate[1].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x91F}, {0x1}}, \ - {"rate[1].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x920}, {0x1}}, \ - {"rate[1].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x921}, {0x1}}, \ - {"rate[1].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x922}, {0x1}}, \ - {"rate[1].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x923}, {0x1}}, \ - {"rate[1].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x924}, {0x1}}, \ - {"rate[1].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x925}, {0x1}}, \ - {"rate[1].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x926}, {0x1}}, \ - {"rate[1].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x927}, {0x1}}, \ - {"rate[2].time", ELEM_OUTPUT_INT, {0x928}, {0x4}}, \ - {"rate[2].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x92C}, 
{0x1}}, \ - {"rate[2].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x92D}, {0x1}}, \ - {"rate[2].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x92E}, {0x1}}, \ - {"rate[2].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x92F}, {0x1}}, \ - {"rate[2].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x930}, {0x1}}, \ - {"rate[2].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x931}, {0x1}}, \ - {"rate[2].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x932}, {0x1}}, \ - {"rate[2].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x933}, {0x1}}, \ - {"rate[2].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x934}, {0x1}}, \ - {"rate[2].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x935}, {0x1}}, \ - {"rate[2].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x936}, {0x1}}, \ - {"rate[2].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x937}, {0x1}}, \ - {"rate[2].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x938}, {0x1}}, \ - {"rate[2].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x939}, {0x1}}, \ - {"rate[2].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x93A}, {0x1}}, \ - {"rate[2].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x93B}, {0x1}}, \ - {"rate[3].time", ELEM_OUTPUT_INT, {0x93C}, {0x4}}, \ - {"rate[3].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x940}, {0x1}}, \ - {"rate[3].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x941}, {0x1}}, \ - {"rate[3].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x942}, {0x1}}, \ - {"rate[3].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x943}, {0x1}}, \ - {"rate[3].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x944}, {0x1}}, \ - {"rate[3].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x945}, {0x1}}, \ - {"rate[3].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x946}, {0x1}}, \ - {"rate[3].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x947}, {0x1}}, \ - {"rate[3].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x948}, {0x1}}, \ - {"rate[3].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x949}, {0x1}}, \ - {"rate[3].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x94A}, {0x1}}, \ - {"rate[3].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x94B}, {0x1}}, \ - {"rate[3].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x94C}, {0x1}}, \ - {"rate[3].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x94D}, {0x1}}, \ - {"rate[3].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x94E}, {0x1}}, \ - {"rate[3].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x94F}, {0x1}}, \ - {"rate[4].time", ELEM_OUTPUT_INT, {0x950}, {0x4}}, \ - {"rate[4].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x954}, {0x1}}, \ - {"rate[4].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x955}, {0x1}}, \ - {"rate[4].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x956}, {0x1}}, \ - {"rate[4].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x957}, {0x1}}, \ - {"rate[4].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x958}, {0x1}}, \ - {"rate[4].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x959}, {0x1}}, \ - {"rate[4].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x95A}, {0x1}}, \ - {"rate[4].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x95B}, {0x1}}, \ - {"rate[4].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x95C}, {0x1}}, \ - {"rate[4].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x95D}, {0x1}}, \ - {"rate[4].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x95E}, {0x1}}, \ - {"rate[4].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x95F}, {0x1}}, \ - {"rate[4].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x960}, {0x1}}, \ - {"rate[4].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x961}, {0x1}}, \ - {"rate[4].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x962}, {0x1}}, \ - {"rate[4].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x963}, {0x1}}, \ - {"rate[5].time", ELEM_OUTPUT_INT, {0x964}, {0x4}}, \ - {"rate[5].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x968}, {0x1}}, \ - {"rate[5].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x969}, {0x1}}, \ - {"rate[5].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x96A}, {0x1}}, \ - 
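The [DDR data] block inside HDR_RUN_LPM repeats the DDR record layout from the boot-region table at a +0x800 base: iecc_cerr_thresh[0] moves from 0x38 to 0x838 and rate[0].time from 0x100 to 0x900, with the per-rank rate records following at the same 0x14-byte stride. A tiny helper like the one below lets a single table of boot-relative offsets serve both regions; the macro and function names are illustrative only.

```c
#include <stdint.h>

/* Boot-region DDR offsets reappear in the HDR_RUN_LPM [DDR data] block
 * shifted by 0x800 (0x38 -> 0x838, 0x100 -> 0x900, ...). */
#define HDR_RUN_DDR_BASE 0x800u

static inline uint32_t run_ddr_offset(uint32_t boot_relative_offset)
{
    return HDR_RUN_DDR_BASE + boot_relative_offset;
}
/* Example: run_ddr_offset(0x100) == 0x900, the run-region rate[0].time field. */
```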
{"rate[5].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x96B}, {0x1}}, \ - {"rate[5].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x96C}, {0x1}}, \ - {"rate[5].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x96D}, {0x1}}, \ - {"rate[5].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x96E}, {0x1}}, \ - {"rate[5].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x96F}, {0x1}}, \ - {"rate[5].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x970}, {0x1}}, \ - {"rate[5].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x971}, {0x1}}, \ - {"rate[5].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x972}, {0x1}}, \ - {"rate[5].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x973}, {0x1}}, \ - {"rate[5].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x974}, {0x1}}, \ - {"rate[5].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x975}, {0x1}}, \ - {"rate[5].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x976}, {0x1}}, \ - {"rate[5].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x977}, {0x1}}, \ - {"rate[6].time", ELEM_OUTPUT_INT, {0x978}, {0x4}}, \ - {"rate[6].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x97C}, {0x1}}, \ - {"rate[6].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x97D}, {0x1}}, \ - {"rate[6].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x97E}, {0x1}}, \ - {"rate[6].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x97F}, {0x1}}, \ - {"rate[6].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x980}, {0x1}}, \ - {"rate[6].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x981}, {0x1}}, \ - {"rate[6].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x982}, {0x1}}, \ - {"rate[6].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x983}, {0x1}}, \ - {"rate[6].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x984}, {0x1}}, \ - {"rate[6].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x985}, {0x1}}, \ - {"rate[6].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x986}, {0x1}}, \ - {"rate[6].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x987}, {0x1}}, \ - {"rate[6].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x988}, {0x1}}, \ - {"rate[6].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x989}, {0x1}}, \ - {"rate[6].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x98A}, {0x1}}, \ - {"rate[6].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x98B}, {0x1}}, \ - {"rate[7].time", ELEM_OUTPUT_INT, {0x98C}, {0x4}}, \ - {"rate[7].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x990}, {0x1}}, \ - {"rate[7].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x991}, {0x1}}, \ - {"rate[7].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x992}, {0x1}}, \ - {"rate[7].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x993}, {0x1}}, \ - {"rate[7].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x994}, {0x1}}, \ - {"rate[7].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x995}, {0x1}}, \ - {"rate[7].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x996}, {0x1}}, \ - {"rate[7].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x997}, {0x1}}, \ - {"rate[7].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x998}, {0x1}}, \ - {"rate[7].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x999}, {0x1}}, \ - {"rate[7].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x99A}, {0x1}}, \ - {"rate[7].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x99B}, {0x1}}, \ - {"rate[7].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x99C}, {0x1}}, \ - {"rate[7].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x99D}, {0x1}}, \ - {"rate[7].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x99E}, {0x1}}, \ - {"rate[7].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x9AF}, {0x1}}, \ - {"rate[8].time", ELEM_OUTPUT_INT, {0x9B0}, {0x4}}, \ - {"rate[8].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x9B4}, {0x1}}, \ - {"rate[8].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x9B5}, {0x1}}, \ - {"rate[8].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x9B6}, {0x1}}, \ - {"rate[8].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x9B7}, {0x1}}, \ - {"rate[8].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x9B8}, {0x1}}, \ - {"rate[8].rate_per_rank[5]", 
ELEM_OUTPUT_INT, {0x9B9}, {0x1}}, \ - {"rate[8].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x9BA}, {0x1}}, \ - {"rate[8].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x9BB}, {0x1}}, \ - {"rate[8].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x9BC}, {0x1}}, \ - {"rate[8].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x9BD}, {0x1}}, \ - {"rate[8].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x9BE}, {0x1}}, \ - {"rate[8].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x9BF}, {0x1}}, \ - {"rate[8].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x9C0}, {0x1}}, \ - {"rate[8].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x9C1}, {0x1}}, \ - {"rate[8].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x9C2}, {0x1}}, \ - {"rate[8].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x9C3}, {0x1}}, \ - {"rate[9].time", ELEM_OUTPUT_INT, {0x9C4}, {0x4}}, \ - {"rate[9].rate_per_rank[0]", ELEM_OUTPUT_INT, {0x9C8}, {0x1}}, \ - {"rate[9].rate_per_rank[1]", ELEM_OUTPUT_INT, {0x9C9}, {0x1}}, \ - {"rate[9].rate_per_rank[2]", ELEM_OUTPUT_INT, {0x9CA}, {0x1}}, \ - {"rate[9].rate_per_rank[3]", ELEM_OUTPUT_INT, {0x9CB}, {0x1}}, \ - {"rate[9].rate_per_rank[4]", ELEM_OUTPUT_INT, {0x9CC}, {0x1}}, \ - {"rate[9].rate_per_rank[5]", ELEM_OUTPUT_INT, {0x9CD}, {0x1}}, \ - {"rate[9].rate_per_rank[6]", ELEM_OUTPUT_INT, {0x9CE}, {0x1}}, \ - {"rate[9].rate_per_rank[7]", ELEM_OUTPUT_INT, {0x9CF}, {0x1}}, \ - {"rate[9].rate_per_rank[8]", ELEM_OUTPUT_INT, {0x9D0}, {0x1}}, \ - {"rate[9].rate_per_rank[9]", ELEM_OUTPUT_INT, {0x9D1}, {0x1}}, \ - {"rate[9].rate_per_rank[A]", ELEM_OUTPUT_INT, {0x9D2}, {0x1}}, \ - {"rate[9].rate_per_rank[B]", ELEM_OUTPUT_INT, {0x9D3}, {0x1}}, \ - {"rate[9].rate_per_rank[C]", ELEM_OUTPUT_INT, {0x9D4}, {0x1}}, \ - {"rate[9].rate_per_rank[D]", ELEM_OUTPUT_INT, {0x9D5}, {0x1}}, \ - {"rate[9].rate_per_rank[E]", ELEM_OUTPUT_INT, {0x9D6}, {0x1}}, \ - {"rate[9].rate_per_rank[F]", ELEM_OUTPUT_INT, {0x9D7}, {0x1}}, \ - {"mrr4_0.count", ELEM_OUTPUT_INT, {0x9D8}, {0x1}}, \ - {"mrr4_0.happen", ELEM_OUTPUT_INT, {0x9D9}, {0x1}}, \ - {"shake_count.count", ELEM_OUTPUT_INT, {0x9DA}, {0x1}}, \ - {"shake_count.happen", ELEM_OUTPUT_INT, {0x9DB}, {0x1}}, \ - {"sfc_record0", ELEM_OUTPUT_INT, {0x9DC}, {0x1}}, \ - {"sfc_record1", ELEM_OUTPUT_INT, {0x9DD}, {0x1}}, \ - {"sfc_mr5", ELEM_OUTPUT_INT, {0x9DE}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_RUN_TEE MODEL_VECTOR(HDR_RUN_TEE) = { \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[RUN FATAL INFO SIZE]", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {"[RUN FATAL INFO]", ELEM_OUTPUT_STR_NL, {0x20}, {0x7E0}}, \ -} - -#define DATA_MODEL_HDR_RUN_ATF MODEL_VECTOR(HDR_RUN_ATF) = {\ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"module id", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"if", ELEM_CTRL_COMPARE, {0xC}, {0x4}}, \ - {"is used", ELEM_CTRL_CMP_JUMP_NE, {0x1}, {0xFF}}, \ - {"err code", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"reason", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"hot reset index", ELEM_OUTPUT_INT, {0x18}, {0x4}}, \ - {"[ATF info]", ELEM_OUTPUT_STR_NL, {0x1C}, {0x7E4}}, \ -} - -#define DATA_MODEL_HDR_RUN_AREA MODEL_VECTOR(HDR_RUN_AREA) = { \ - {"TEE INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_TEE", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x800}}, \ - {"table_index", 
ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_TEE}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"ATF INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_ATF", ELEM_CTRL_TABLE_GOTO, {0x800}, {0x800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_ATF}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"LPM INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_LPM", ELEM_CTRL_TABLE_GOTO, {0x1000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_LPM}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"OS INFO", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"HDR_RUN_OS", ELEM_CTRL_TABLE_GOTO, {0x2000}, {0x1000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_OS}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ -} - -#define DATA_MODEL_HDR_BOOT MODEL_VECTOR(HDR_BOOT) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x1E000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x25800}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_AREA", ELEM_CTRL_TABLE_GOTO, {0x2D000}, {0x7800}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_RUN MODEL_VECTOR(HDR_RUN) = { \ - {"area 0", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x0}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 1", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x3C00}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 2", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x7800}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 3", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0xB400}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ 
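The HDR_BOOT and HDR_RUN tables lay their areas out at fixed strides: boot areas are 0x7800 bytes apart and run areas 0x3C00 bytes apart, and each area is subdivided into fixed module blocks (boot: BIOS 0x0, DDR 0x3000, TEE 0x4000, ATF 0x5000; run: TEE 0x0, ATF 0x800, LPM 0x1000, OS 0x2000). The helper below just combines those constants; the function name is an assumption for illustration.

```c
#include <stdint.h>

/* Strides taken from the HDR_BOOT / HDR_RUN dispatch tables above. */
#define HDR_BOOT_AREA_STRIDE 0x7800u
#define HDR_RUN_AREA_STRIDE  0x3C00u

static inline uint32_t hdr_block_offset(uint32_t area_stride,
                                        uint32_t area_index,
                                        uint32_t module_offset)
{
    return area_index * area_stride + module_offset;
}
/* Example: the TEE block of boot area 2 sits at
 * hdr_block_offset(HDR_BOOT_AREA_STRIDE, 2, 0x4000) == 0x13000. */
```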
- {"area 4", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0xF000}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 5", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x12C00}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"area 6", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_AREA", ELEM_CTRL_TABLE_GOTO, {0x16800}, {0x3C00}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_AREA}, {0x1}}, \ -} - -#define DATA_MODEL_HDR_BOOT_INFO MODEL_VECTOR(HDR_BOOT_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x80}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x84}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x98}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x9C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xB0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xB4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - 
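Each [area N control info] group in the HDR_BOOT_INFO table is six consecutive 32-bit words (flag, tag, exception type, module id, exception id, reset number), so area n starts at 0x74 + n * 0x18 (area 0 at 0x74, area 1 at 0x8C, and so on); HDR_RUN_INFO farther down prints a subset of the same record at the same stride. A struct sketch with a hypothetical type name:

```c
#include <stdint.h>

/* Per-area control record as printed by HDR_BOOT_INFO; field order follows
 * the table. Six 32-bit words give the 0x18-byte stride between areas. */
typedef struct {
    uint32_t flag;
    uint32_t tag;
    uint32_t exception_type;
    uint32_t module_id;
    uint32_t exception_id;
    uint32_t reset_number;
} hdr_area_ctrl;

_Static_assert(sizeof(hdr_area_ctrl) == 0x18, "areas are 0x18 bytes apart");
```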
{" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xC8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xCC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xE0}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xE4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0xF8}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0xFC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" module id", ELEM_OUTPUT_INT, {0x110}, {0x4}}, \ - {" exception id", ELEM_OUTPUT_INT, {0x114}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR_RUN_INFO MODEL_VECTOR(HDR_RUN_INFO) = { \ - {"region offset", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"region size", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region config", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"total area", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"history area", ELEM_OUTPUT_INT, {0xC}, {0x4}}, \ - {"error area", ELEM_OUTPUT_INT, {0x10}, {0x4}}, \ - {"area config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" used module count", ELEM_OUTPUT_INT, {0x14}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"module config:", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" module 0 offset", ELEM_OUTPUT_INT, {0x1C}, {0x4}}, \ - {" module 0 size", ELEM_OUTPUT_INT, {0x20}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 1 offset", ELEM_OUTPUT_INT, {0x24}, {0x4}}, \ - {" module 1 size", ELEM_OUTPUT_INT, {0x28}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 2 offset", ELEM_OUTPUT_INT, {0x2C}, {0x4}}, \ - {" module 2 size", ELEM_OUTPUT_INT, {0x30}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {" module 3 offset", ELEM_OUTPUT_INT, {0x34}, {0x4}}, \ - {" module 3 size", ELEM_OUTPUT_INT, {0x38}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"region control", ELEM_OUTPUT_DIVIDE, {0x0}, {0x2D}}, \ - {"area index", ELEM_OUTPUT_INT, {0x6C}, {0x4}}, \ - {"error area count", ELEM_OUTPUT_INT, {0x70}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 0 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x74}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x78}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x7C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x88}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 1 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x8C}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x90}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x94}, 
{0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xA0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 2 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xA4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xA8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xAC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xB8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 3 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xBC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xC0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xC4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xD0}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 4 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xD4}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xD8}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xDC}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0xE8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 5 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0xEC}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0xF0}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0xF4}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x100}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"[area 6 control info]", ELEM_OUTPUT_STR_CONST, {0x0}, {0x0}}, \ - {" flag", ELEM_OUTPUT_INT, {0x104}, {0x4}}, \ - {" tag", ELEM_OUTPUT_INT, {0x108}, {0x4}}, \ - {" exception type", ELEM_OUTPUT_INT, {0x10C}, {0x4}}, \ - {" reset number", ELEM_OUTPUT_INT, {0x118}, {0x4}}, \ -} - -#define DATA_MODEL_HDR MODEL_VECTOR(HDR) = { \ - {"head info", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"magic", ELEM_OUTPUT_INT, {0x0}, {0x4}}, \ - {"version", ELEM_OUTPUT_INT, {0x4}, {0x4}}, \ - {"reset count", ELEM_OUTPUT_INT, {0x8}, {0x4}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"boot region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_BOOT_INFO", ELEM_CTRL_TABLE_GOTO, {0XC}, {0x168}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_BOOT", ELEM_CTRL_TABLE_GOTO, {0x400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_BOOT}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"run region", ELEM_OUTPUT_DIVIDE, {0x0}, {0x3D}}, \ - {"HDR_RUN_INFO", ELEM_CTRL_TABLE_GOTO, {0x170}, {0x164}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN_INFO}, {0x1}}, \ - {"NL", ELEM_OUTPUT_NL, {0x0}, {0x0}}, \ - {"HDR_RUN", ELEM_CTRL_TABLE_GOTO, {0x4B400}, {0xA000}}, \ - {"table_index", ELEM_CTRL_TABLE_RANGE, {PLAINTEXT_TABLE_HDR_RUN}, {0x1}}, \ -} - -#endif // BBOX_DDR_DATA_MINI_H diff --git a/inc/toolchain/bbox/bbox_proxy.h b/inc/toolchain/bbox/bbox_proxy.h deleted file mode 100644 index 5b88b64fba57942d100bad63afb5e610093b3314..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_proxy.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BBOX_PROXY_H -#define BBOX_PROXY_H - -#include "bbox_proxy_config.h" - -typedef struct proxy_excep_time_t { - unsigned long long tv_sec; - unsigned long long tv_usec; -} proxy_excep_time; - -enum BBOX_PROXY_DUMP_STATUS { - PROXY_STATUS_INIT = 0, - PROXY_STATUS_DOING = 1, - PROXY_STATUS_DONE = 2, -}; - -struct bbox_proxy_exception_ctrl { - proxy_excep_time e_clock; // 模块触发异常时间 - unsigned int e_main_excepid; // 模块触发的异常id - unsigned int e_sub_excepid; // 模块触发的异常id - unsigned int e_info_offset; // 模块全部异常信息偏移值,基于模块预留内存首地址,从magic开始 - unsigned int e_info_len; // 模块全部异常信息长度 - unsigned short e_dump_status; // 模块将异常信息存预留内存的控制状态 - unsigned short e_save_status; // 代理将异常信息从预留内存导出的控制状态 - unsigned int e_reserved; // 结构对齐预留 -}; - -// 通过共享内存交互 -#define BBOX_PROXY_MAGIC 0x56312e31 -#define BBOX_PROXY_CTRL_RESERV 192 - -struct bbox_proxy_module_ctrl { - unsigned int magic; // 使用宏BBOX_PROXY_MAGIC - struct bbox_proxy_ctrl_info config; // ctrl块配置 - struct bbox_proxy_exception_ctrl block[BBOX_PROXY_CTRL_NUM]; // 模块dump信息控制状态 - unsigned char reserved[BBOX_PROXY_CTRL_RESERV]; // 预留空间,用于后续扩展 -}; - -#define BBOX_PROXY_CTRL_BLOCK_SIZE sizeof(struct bbox_proxy_module_ctrl) // total 512 byte - -#endif // BBOX_PROXY_H diff --git a/inc/toolchain/bbox/bbox_proxy_config.h b/inc/toolchain/bbox/bbox_proxy_config.h deleted file mode 100644 index a9d66c3c36bd2348794a69bfce48116c303ba821..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_proxy_config.h +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef BBOX_PROXY_CONFIG_H -#define BBOX_PROXY_CONFIG_H - -#define BBOX_EXCEPTIONDESC_MAXLEN 48 - -struct bbox_proxy_exception_info { - unsigned int e_excepid; // 异常id - unsigned char e_process_priority; // 异常处理级别 - unsigned char e_reboot_priority; // 异常重启级别 - unsigned char e_excep_type; // 异常类型 - unsigned char e_reentrant; // 异常是否可重入 - unsigned long long e_notify_core_mask; // 异常联动掩码 - unsigned long long e_reset_core_mask; // 异常联动掩码 - unsigned char e_desc[BBOX_EXCEPTIONDESC_MAXLEN]; // 异常描述 -}; - -enum BBOX_PROXY_CAPACITY { - BBOX_PROXY_CAPACITY_REGISTER = 1 << 0, - BBOX_PROXY_CAPACITY_DUMP_DDR = 1 << 1, - BBOX_PROXY_CAPACITY_DUMP_LOG = 1 << 2, - BBOX_PROXY_CAPACITY_TRANS_ID = 1 << 3, -}; - -struct bbox_proxy_module_info { - unsigned char coreid; // 模块id - unsigned long long flag; // dump能力标记位,BBOX_PROXY_CAPACITY - const char *name; // 模块名 - unsigned long long log_addr; // 模块dump起始地址 - unsigned int log_len; // 模块dump长度 - unsigned int wait_timeout; // dump等待超时时间,单位ms - unsigned int e_count; // 异常信息注册数量 -}; - -enum BBOX_PROXY_BLOCK_TYPE { - BLOCK_TYPE_NORMAL = 1 << 0, // 普通数据 - BLOCK_TYPE_STARTUP = 1 << 1, // 启动异常数据 -}; - -enum BBOX_PROXY_CHECK_FLAG { - CHECK_NONE = 0, - CHECK_STARTUP_EXCEPID = 1 << 0, - CHECK_STARTUP_TMSTMP = 1 << 1, - CHECK_RUNTIME_EXCEPID = 1 << 2, - CHECK_RUNTIME_TMSTMP = 1 << 3, - CHECK_HEARTBEAT_EXCEPID = 1 << 4, - CHECK_HEARTBEAT_TMSTMP = 1 << 5, -}; - -struct bbox_proxy_block_info { - unsigned int ctrl_type : 16; - unsigned int ctrl_flag : 16; - unsigned int info_offset; - unsigned int info_block_len; -}; - -#define BBOX_PROXY_CTRL_NUM 6 -#define BBOX_PROXY_CTRL_PAD 3 - -struct bbox_proxy_ctrl_info { - unsigned char e_block_num; // 需要使用的控制块个数,最多BBOX_PROXY_CTRL_NUM - unsigned char padding[BBOX_PROXY_CTRL_PAD]; // padding - struct bbox_proxy_block_info block_info[BBOX_PROXY_CTRL_NUM]; // 控制块配置 -}; - -#define BBOX_PROXY_EXCEPTION_NUM 256 - -struct bbox_proxy_info { - struct bbox_proxy_module_info module; - struct bbox_proxy_exception_info exception[BBOX_PROXY_EXCEPTION_NUM]; - struct bbox_proxy_ctrl_info ctrl; -}; - -#define BBOX_PROXY_INITIALIZER(name) { \ - .module = BBOX_PROXY_MODULE_##name, \ - .exception = BBOX_PROXY_EXCEPTION_##name, \ - .ctrl = BBOX_PROXY_CTRL_##name, \ -} - -#endif // BBOX_PROXY_CONFIG_H diff --git a/inc/toolchain/bbox/bbox_proxy_config_dc.h b/inc/toolchain/bbox/bbox_proxy_config_dc.h deleted file mode 100644 index 3cce48cbff6ddc053e25930e37f07bb6b2f7931a..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_proxy_config_dc.h +++ /dev/null @@ -1,268 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
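The BBOX_PROXY_CAPACITY_* values defined in bbox_proxy_config.h above are single-bit flags that the module definitions OR together in their .flag field (the TS module in the DC configuration farther down, for example, sets REGISTER | DUMP_DDR | TRANS_ID and leaves log_addr/log_len at 0). A consumer checks them with a plain bitwise AND, as in the self-contained sketch below; the enum repeats the header's values so the example compiles on its own.

```c
#include <stdio.h>

/* Stand-ins for the BBOX_PROXY_CAPACITY_* flags (same 1 << n values). */
enum {
    CAP_REGISTER = 1 << 0,
    CAP_DUMP_DDR = 1 << 1,
    CAP_DUMP_LOG = 1 << 2,
    CAP_TRANS_ID = 1 << 3,
};

int main(void)
{
    /* Same combination as the TS module's .flag in the DC configuration. */
    unsigned long long flag = CAP_REGISTER | CAP_DUMP_DDR | CAP_TRANS_ID;

    if (flag & CAP_DUMP_DDR) {
        printf("module dumps a DDR region\n");
    }
    if (!(flag & CAP_DUMP_LOG)) {
        printf("module exports no log region (log_addr/log_len stay 0)\n");
    }
    return 0;
}
```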
- */ - -#ifndef BBOX_PROXY_CONFIG_DC -#define BBOX_PROXY_CONFIG_DC - -#include "bbox_proxy_config.h" -#include "device/bbox_pub.h" - -/* - * 说明:模块代理及异常注册 - * 各模块通过模板宏初始化 struct bbox_proxy_info 结构体,黑匣子引用该头文件并将初始化结构体实例化 - * - * 示例: - * 各模块通过BBOX_PROXY_MODULE_XXX宏定义初始化 struct bbox_proxy_module_info 结构体 - * #define BBOX_PROXY_MODULE_LPM { \ - * .coreid = BBOX_LPM, \ - * .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP | BBOX_PROXY_CAPACITY_LOG, \ - * .name = "lpm", \ - * .log_addr = 0xA00000, \ - * .log_len = 0x400000, \ - * .wait_timeout = 20000, \ // wait timeout will be restricted <= 20s - * .e_count = 3, \ - * } - * 各模块通过BBOX_PROXY_EXCEPTION_XXX宏定义初始化 struct bbox_proxy_exception_info 结构体 - * #define BBOX_PROXY_EXCEPTION_LPM { \ - * {0xA819320F, BBOX_DEAD, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "tsensor error"}, \ - * {0xA619FFFF, BBOX_ERR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm heartbeat lost"}, \ - * {0xA819FFFF, BBOX_DEAD, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm startup error"}, \ - * } - * 各模块通过BBOX_PROXY_CTRL_XXX宏定义初始化 struct bbox_proxy_ctrl_info 结构体 - * #define BBOX_PROXY_CTRL_LPM { \ - * .e_block_num = 2, \ - * .block_info = { \ - * {BLOCK_TYPE_STARTUP, CHECK_NONE, 0, 0x200000}, \ - * {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - * 0x200000, 0x200000}, \ - * } \ - * } - * 各模块通过修改DEFINE_BBOX_PROXY(x)宏定义增加自身模块在黑匣子代码中实例化 struct bbox_proxy_info 结构体 - * #define BBOX_PROXY_INITIALIZER(name) { \ - * .module = BBOX_PROXY_MODULE_##name, \ - * .exception = BBOX_PROXY_EXCEPTION_##name, \ - * .ctrl = BBOX_PROXY_CTRL_##name, \ - * } - * DEFINE_BBOX_PROXY(x) struct bbox_proxy_info x[] = { \ - * BBOX_PROXY_INITIALIZER(TS), \ - * BBOX_PROXY_INITIALIZER(LPM), \ - * } - */ -#define BBOX_PROXY_MODULE_TS { \ - .coreid = BBOX_TS, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_TRANS_ID, \ - .name = "ts", \ - .log_addr = 0, \ - .log_len = 0, \ - .wait_timeout = 10000, \ - .e_count = 21, \ -} - -#define BBOX_PROXY_EXCEPTION_TS { \ - {0xA6060FFF, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts heartbeat lost"}, \ - {0xA8060FFF, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts startup error"}, \ - {0xA6060000, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "data abort"}, \ - {0xA6060001, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "instr abort"}, \ - {0xA6060002, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "pc align fault"}, \ - {0xA6060003, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "sp align fault"}, \ - {0xA6060004, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "infinite loop"}, \ - {0xA6060005, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - 
BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "unknow exception"}, \ - {0xB4060006, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "aicore exception"}, \ - {0xB4060007, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "aicore timeout"}, \ - {0xB6060008, BBOX_MAJOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "sdma init fault"}, \ - {0xB4060009, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "sdma timeout"}, \ - {0xA606000A, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "hwts bus error"}, \ - {0xA606000B, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "hwts sqe error"}, \ - {0xA606000C, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "hwts ecc error"}, \ - {0xA406000D, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts aicpu exception"}, \ - {0xA406000E, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts npu exception"}, \ - {0xA606000F, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "aicore reset timeout"}, \ - {0xA4060010, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts aiv exception"}, \ - {0xA4060011, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts aiv timeout"}, \ - {0xA4060014, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts aicpu timeout"}, \ -} - -#define BBOX_PROXY_CTRL_TS { \ - .e_block_num = 2, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, \ - 0x200, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x19200, 0x1E6E00}, \ - } \ -} - -#define BBOX_PROXY_MODULE_LPM { \ - .coreid = BBOX_LPM, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_DUMP_LOG, \ - .name = "lpm", \ - .log_addr = 0xA00000, \ - .log_len = 0x20000, \ - .wait_timeout = 10000, \ - .e_count = 24, \ -} - -#define BBOX_PROXY_EXCEPTION_LPM { \ - {0xa819320f, BBOX_CRITICAL, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "tsensor error"}, \ - {0xa6192d15, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm heart error"}, \ - {0xa6193206, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "headfault error"}, \ - {0xa4193216, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "edp error"}, \ - {0xa4193217, BBOX_MINOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, 
BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ipc timeout error"}, \ - {0xa4193218, BBOX_MINOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ipc queue error"}, \ - {0xa6193215, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "os heart error"}, \ - {0xa8193234,BBOX_CRITICAL, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon low error"}, \ - {0xa8193235, BBOX_CRITICAL, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon high error"}, \ - {0xa6193236, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr gate error"}, \ - {0xa619323f, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr aref error"}, \ - {0xa6193240, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr rdtimeout error"}, \ - {0xa6193241, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr pll unlock error"}, \ - {0xa6193242, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr retrain error"}, \ - {0xa6193243, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon error"}, \ - {0xa6193244, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dfs error"}, \ - {0xa6193245, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dvalid error"}, \ - {0xa6193246, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dfi sel error"}, \ - {0xa6193247, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr pll unlock lp error"}, \ - {0xa6193248, BBOX_MAJOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr iecc uerr error"}, \ - {0xa419324a, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr unkonwn error"}, \ - {0xa4193250, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr iecc cerr error"}, \ - {0xa4192c1a, BBOX_MINOR, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lp startup error"}, \ - {0xa419321b, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lp tmonitor error"}, \ -} - -#define BBOX_PROXY_CTRL_LPM { \ - .e_block_num = 6, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x0400, 0x12C00}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | 
CHECK_HEARTBEAT_EXCEPID, 0x13000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x2C000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x45000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x5E000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x77000, 0x19000}, \ - } \ -} - -#define BBOX_PROXY_MODULE_HSM { \ - .coreid = BBOX_HSM, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_DUMP_LOG, \ - .name = "hsm", \ - .log_addr = 0x3E00000, \ - .log_len = 0x100000, \ - .wait_timeout = 10000, \ - .e_count = 3, \ -} - -#define BBOX_PROXY_EXCEPTION_HSM { \ - {0xa6360000, BBOX_MAJOR, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM startup exception"}, \ - {0xa6361000, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM heartbeat exception"}, \ - {0xa6362000, BBOX_MAJOR, BBOX_REBOOT_NO, HSM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM resource shortage exception"}, \ -} - -#define BBOX_PROXY_CTRL_HSM { \ - .e_block_num = 5, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x1200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x2200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x3200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x4200, 0x1000}, \ - } \ -} - -#define BBOX_PROXY_MODULE_ATF { \ - .coreid = BBOX_TF, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER, \ - .name = "atf", \ - .log_addr = 0x0, \ - .log_len = 0x0, \ - .wait_timeout = 10000, \ - .e_count = 1, \ -} - -#define BBOX_PROXY_EXCEPTION_ATF { \ - {0xA8340000, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ATF_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TF), BBOX_COREID_MASK(BBOX_TF), "atf panic exception"}, \ -} - -#define BBOX_PROXY_CTRL_ATF { \ - .e_block_num = 1, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x400, 0xF800}, \ - } \ -} - -#define DEFINE_BBOX_PROXY(x) struct bbox_proxy_info x[] = { \ - BBOX_PROXY_INITIALIZER(TS), \ - BBOX_PROXY_INITIALIZER(LPM), \ - BBOX_PROXY_INITIALIZER(HSM), \ - BBOX_PROXY_INITIALIZER(ATF), \ -} - -#endif // BBOX_PROXY_CONFIG_DC diff --git a/inc/toolchain/bbox/bbox_proxy_config_mdc.h b/inc/toolchain/bbox/bbox_proxy_config_mdc.h deleted file mode 100644 index dad0ea81397ae03c0b45365cf6e19a92a6c2d022..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/bbox_proxy_config_mdc.h +++ /dev/null @@ -1,400 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
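DEFINE_BBOX_PROXY(x) above expands to the definition of the struct bbox_proxy_info array that the black-box code instantiates; a sketch of how such a table might be created and walked (the variable name and the loop are hypothetical, only the macro and the struct fields come from the deleted header):

```c
#include <stdio.h>
#include "bbox_proxy_config_dc.h"   /* the DC config removed above */

/* Instantiate the proxy table exactly once in the consuming code. */
DEFINE_BBOX_PROXY(g_bbox_proxy_table);

#define BBOX_PROXY_TABLE_SIZE (sizeof(g_bbox_proxy_table) / sizeof(g_bbox_proxy_table[0]))

int main(void)
{
    /* Walk every proxied module and print its declared dump/exception settings. */
    for (unsigned int i = 0; i < BBOX_PROXY_TABLE_SIZE; i++) {
        const struct bbox_proxy_info *p = &g_bbox_proxy_table[i];
        printf("module %-4s coreid=%u exceptions=%u timeout=%ums\n",
               p->module.name, (unsigned int)p->module.coreid,
               p->module.e_count, p->module.wait_timeout);
    }
    return 0;
}
```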
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BBOX_PROXY_CONFIG_MDC -#define BBOX_PROXY_CONFIG_MDC - -#include "bbox_proxy_config.h" -#include "device/bbox_pub.h" - -/* - * 说明:模块代理及异常注册 - * 各模块通过模板宏初始化 struct bbox_proxy_info 结构体,黑匣子引用该头文件并将初始化结构体实例化 - * - * 示例: - * 各模块通过BBOX_PROXY_MODULE_XXX宏定义初始化 struct bbox_proxy_module_info 结构体 - * #define BBOX_PROXY_MODULE_LPM { \ - * .coreid = BBOX_LPM, \ - * .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP | BBOX_PROXY_CAPACITY_LOG, \ - * .name = "lpm", \ - * .log_addr = 0xA00000, \ - * .log_len = 0x400000, \ - * .wait_timeout = 20000, \ // wait timeout will be restricted <= 20s - * .e_count = 3, \ - * } - * 各模块通过BBOX_PROXY_EXCEPTION_XXX宏定义初始化 struct bbox_proxy_exception_info 结构体 - * #define BBOX_PROXY_EXCEPTION_LPM { \ - * {0xA819320F, BBOX_DEAD, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "tsensor error"}, \ - * {0xA619FFFF, BBOX_ERR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm heartbeat lost"}, \ - * {0xA819FFFF, BBOX_DEAD, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, - * BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm startup error"}, \ - * } - * 各模块通过BBOX_PROXY_CTRL_XXX宏定义初始化 struct bbox_proxy_ctrl_info 结构体 - * #define BBOX_PROXY_CTRL_LPM { \ - * .e_block_num = 2, \ - * .block_info = { \ - * {BLOCK_TYPE_STARTUP, CHECK_NONE, 0, 0x200000}, \ - * {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - * 0x200000, 0x200000}, \ - * } \ - * } - * 各模块通过修改DEFINE_BBOX_PROXY(x)宏定义增加自身模块在黑匣子代码中实例化 struct bbox_proxy_info 结构体 - * #define BBOX_PROXY_INITIALIZER(name) { \ - * .module = BBOX_PROXY_MODULE_##name, \ - * .exception = BBOX_PROXY_EXCEPTION_##name, \ - * .ctrl = BBOX_PROXY_CTRL_##name, \ - * } - * DEFINE_BBOX_PROXY(x) struct bbox_proxy_info x[] = { \ - * BBOX_PROXY_INITIALIZER(TS), \ - * BBOX_PROXY_INITIALIZER(LPM), \ - * } - */ -#define BBOX_PROXY_MODULE_TS { \ - .coreid = BBOX_TS, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_TRANS_ID, \ - .name = "ts", \ - .log_addr = 0, \ - .log_len = 0, \ - .wait_timeout = 10000, \ - .e_count = 42, \ -} - -#define BBOX_PROXY_EXCEPTION_TS { \ - {0xA6060FFF, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 heartbeat lost"}, \ - {0xA8060FFF, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 startup error"}, \ - {0xA6060000, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 data abort"}, \ - {0xA6060001, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 instr abort"}, \ - {0xA6060002, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 
pc align fault"}, \ - {0xA6060003, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 sp align fault"}, \ - {0xA6060004, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 infinite loop"}, \ - {0xA6060005, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 unknow exception"}, \ - {0xB4060006, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aicore exception"}, \ - {0xB4060007, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aicore timeout"}, \ - {0xB6060008, BBOX_MAJOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 sdma init fault"}, \ - {0xB4060009, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 sdma timeout"}, \ - {0xA606000A, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 hwts bus error"}, \ - {0xA606000B, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 hwts sqe error"}, \ - {0xA606000C, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 hwts ecc error"}, \ - {0xA406000D, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aicpu exception"}, \ - {0xA406000E, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 npu exception"}, \ - {0xA606000F, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aicore reset timeout"}, \ - {0xA4060010, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aiv exception"}, \ - {0xA4060011, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aiv timeout"}, \ - {0xA4060014, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts0 aicpu timeout"}, \ - {0xA6061FFF, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 heartbeat lost"}, \ - {0xA8061FFF, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 startup error"}, \ - {0xA6061000, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 data abort"}, \ - {0xA6061001, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 instr abort"}, \ - {0xA6061002, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 pc align fault"}, \ - {0xA6061003, 
BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 sp align fault"}, \ - {0xA6061004, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 infinite loop"}, \ - {0xA6061005, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 unknow exception"}, \ - {0xB4061006, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aicore exception"}, \ - {0xB4061007, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aicore timeout"}, \ - {0xB6061008, BBOX_MAJOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 sdma init fault"}, \ - {0xB4061009, BBOX_MINOR, BBOX_REBOOT_NO, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 sdma timeout"}, \ - {0xA606100A, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 hwts bus error"}, \ - {0xA606100B, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 hwts sqe error"}, \ - {0xA606100C, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 hwts ecc error"}, \ - {0xA406100D, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aicpu exception"}, \ - {0xA406100E, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 npu exception"}, \ - {0xA606100F, BBOX_MAJOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aicore reset timeout"}, \ - {0xA4061010, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aiv exception"}, \ - {0xA4061011, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aiv timeout"}, \ - {0xA4061014, BBOX_MINOR, BBOX_REBOOT_WAIT, TS_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TS), BBOX_COREID_MASK(BBOX_TS), "ts1 aicpu timeout"}, \ -} - -#define BBOX_PROXY_CTRL_TS { \ - .e_block_num = 3, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, \ - 0x0200, 0x32000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x32200, 0xE6F00}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x119100, 0xE6F00}, \ - } \ -} - -#define BBOX_PROXY_MODULE_LPM { \ - .coreid = BBOX_LPM, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_DUMP_LOG, \ - .name = "lpm", \ - .log_addr = 0xA00000, \ - .log_len = 0x20000, \ - .wait_timeout = 10000, \ - .e_count = 24, \ -} - -#define BBOX_PROXY_EXCEPTION_LPM { \ - {0xa819320f, BBOX_CRITICAL, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "tsensor error"}, \ - 
{0xa6192d15, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lpm heart error"}, \ - {0xa6193206, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "headfault error"}, \ - {0xa4193216, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "edp error"}, \ - {0xa4193217, BBOX_MINOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ipc timeout error"}, \ - {0xa4193218, BBOX_MINOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ipc queue error"}, \ - {0xa6193215, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "os heart error"}, \ - {0xa8193234,BBOX_CRITICAL, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon low error"}, \ - {0xa8193235, BBOX_CRITICAL, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon high error"}, \ - {0xa6193236, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr gate error"}, \ - {0xa619323f, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr aref error"}, \ - {0xa6193240, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr rdtimeout error"}, \ - {0xa6193241, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr pll unlock error"}, \ - {0xa6193242, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr retrain error"}, \ - {0xa6193243, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr tmon error"}, \ - {0xa6193244, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dfs error"}, \ - {0xa6193245, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dvalid error"}, \ - {0xa6193246, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr dfi sel error"}, \ - {0xa6193247, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr pll unlock lp error"}, \ - {0xa6193248, BBOX_MAJOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr iecc uerr error"}, \ - {0xa419324a, BBOX_MAJOR, BBOX_REBOOT_WAIT, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr unkonwn error"}, \ - {0xa4193250, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "ddr iecc cerr error"}, \ - {0xa4192c1a, 
BBOX_MINOR, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lp startup error"}, \ - {0xa419321b, BBOX_MINOR, BBOX_REBOOT_NO, LPM_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_LPM), BBOX_COREID_MASK(BBOX_LPM), "lp tmonitor error"}, \ -} - -#define BBOX_PROXY_CTRL_LPM { \ - .e_block_num = 6, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x0400, 0x12C00}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x13000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x2C000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x45000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x5E000, 0x19000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x77000, 0x19000}, \ - } \ -} - -#define BBOX_PROXY_MODULE_HSM { \ - .coreid = BBOX_HSM, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR | BBOX_PROXY_CAPACITY_DUMP_LOG, \ - .name = "hsm", \ - .log_addr = 0x3E00000, \ - .log_len = 0x100000, \ - .wait_timeout = 10000, \ - .e_count = 3, \ -} - -#define BBOX_PROXY_EXCEPTION_HSM { \ - {0xa6360000, BBOX_MAJOR, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM startup exception"}, \ - {0xa6361000, BBOX_MAJOR, BBOX_REBOOT_WAIT, HEARTBEAT_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM heartbeat exception"}, \ - {0xa6362000, BBOX_MAJOR, BBOX_REBOOT_NO, HSM_EXCEPTION, BBOX_REENTRANT_ALLOW, \ - BBOX_COREID_MASK(BBOX_HSM), BBOX_COREID_MASK(BBOX_HSM), "HSM resource shortage exception"}, \ -} - -#define BBOX_PROXY_CTRL_HSM { \ - .e_block_num = 5, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x1200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x2200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x3200, 0x1000}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x4200, 0x1000}, \ - } \ -} - -#define BBOX_PROXY_MODULE_ISP { \ - .coreid = BBOX_ISP, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR, \ - .name = "isp", \ - .log_addr = 0, \ - .log_len = 0, \ - .wait_timeout = 10000, \ - .e_count = 8, \ -} - -/* startup error dump log only, heartbeat and running error dump both bbox and log */ -#define BBOX_PROXY_EXCEPTION_ISP { \ - {0xa8380000, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP0 startup exception"}, \ - {0xa8380001, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP1 startup exception"}, \ - {0xa8380002, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP2 startup exception"}, \ - {0xa8380003, BBOX_CRITICAL, BBOX_REBOOT_WAIT, STARTUP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - 
BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP3 startup exception"}, \ - {0xa8381000, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ISP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP0 running exception"}, \ - {0xa8381001, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ISP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP1 running exception"}, \ - {0xa8381002, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ISP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP2 running exception"}, \ - {0xa8381003, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ISP_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_ISP), BBOX_COREID_MASK(BBOX_ISP), "ISP3 running exception"}, \ -} - -#define BBOX_PROXY_CTRL_ISP { \ - .e_block_num = 4, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_NORMAL | BLOCK_TYPE_STARTUP, \ - CHECK_STARTUP_EXCEPID | CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x200, 0x7FE00}, \ - {BLOCK_TYPE_NORMAL | BLOCK_TYPE_STARTUP, \ - CHECK_STARTUP_EXCEPID | CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x80000, 0x80000}, \ - {BLOCK_TYPE_NORMAL | BLOCK_TYPE_STARTUP, \ - CHECK_STARTUP_EXCEPID | CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x100000, 0x80000}, \ - {BLOCK_TYPE_NORMAL | BLOCK_TYPE_STARTUP, \ - CHECK_STARTUP_EXCEPID | CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, \ - 0x180000, 0x80000}, \ - } \ -} - -#define BBOX_PROXY_MODULE_SAFETYISLAND { \ - .coreid = BBOX_SAFETYISLAND, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER | BBOX_PROXY_CAPACITY_DUMP_DDR, \ - .name = "sil", \ - .log_addr = 0, \ - .log_len = 0, \ - .wait_timeout = 10000, \ - .e_count = 5, \ -} - -#define BBOX_PROXY_EXCEPTION_SAFETYISLAND { \ - {0xA63A0001, BBOX_CRITICAL, BBOX_REBOOT_NO, SAFETYISLAND_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_SAFETYISLAND), BBOX_COREID_MASK(BBOX_SAFETYISLAND), "sil os panic"}, \ - {0xA63A1001, BBOX_MAJOR, BBOX_REBOOT_NO, SAFETYISLAND_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_SAFETYISLAND), BBOX_COREID_MASK(BBOX_SAFETYISLAND), "sil lpm err"}, \ - {0xA43A2001, BBOX_MINOR, BBOX_REBOOT_WAIT, SAFETYISLAND_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_SAFETYISLAND), BBOX_COREID_MASK(BBOX_SAFETYISLAND), "sil ipc or hb err"}, \ - {0xA63A3001, BBOX_MAJOR, BBOX_REBOOT_WAIT, SAFETYISLAND_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_SAFETYISLAND), BBOX_COREID_MASK(BBOX_SAFETYISLAND), "sil heartbeat err"}, \ - {0xA63A4001, BBOX_MAJOR, BBOX_REBOOT_WAIT, SAFETYISLAND_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_SAFETYISLAND), BBOX_COREID_MASK(BBOX_SAFETYISLAND), "sil excep tbl full"}, \ -} - -#define BBOX_PROXY_CTRL_SAFETYISLAND { \ - .e_block_num = 6, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x400, 0xC800}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0xD000, 0xC800}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x19C00, 0xC800}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x26800, 0xC800}, \ - {BLOCK_TYPE_NORMAL, CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x33400, 0xC800}, \ - {BLOCK_TYPE_NORMAL, 
CHECK_RUNTIME_EXCEPID | CHECK_RUNTIME_TMSTMP | CHECK_HEARTBEAT_EXCEPID, 0x40000, 0xC800}, \ - } \ -} - -#define BBOX_PROXY_MODULE_ATF { \ - .coreid = BBOX_TF, \ - .flag = BBOX_PROXY_CAPACITY_REGISTER, \ - .name = "atf", \ - .log_addr = 0x0, \ - .log_len = 0x0, \ - .wait_timeout = 10000, \ - .e_count = 1, \ -} - -#define BBOX_PROXY_EXCEPTION_ATF { \ - {0xA8340000, BBOX_CRITICAL, BBOX_REBOOT_WAIT, ATF_EXCEPTION, BBOX_REENTRANT_DISALLOW, \ - BBOX_COREID_MASK(BBOX_TF), BBOX_COREID_MASK(BBOX_TF), "atf panic exception"}, \ -} - -#define BBOX_PROXY_CTRL_ATF { \ - .e_block_num = 1, \ - .padding = {0}, \ - .block_info = { \ - {BLOCK_TYPE_STARTUP, CHECK_NONE, 0x90400, 0xF800}, \ - } \ -} - - -#define DEFINE_BBOX_PROXY(x) struct bbox_proxy_info x[] = { \ - BBOX_PROXY_INITIALIZER(TS), \ - BBOX_PROXY_INITIALIZER(LPM), \ - BBOX_PROXY_INITIALIZER(HSM), \ - BBOX_PROXY_INITIALIZER(ISP), \ - BBOX_PROXY_INITIALIZER(SAFETYISLAND), \ - BBOX_PROXY_INITIALIZER(ATF), \ -} - -#endif // BBOX_PROXY_CONFIG_MDC diff --git a/inc/toolchain/bbox/device/bbox_pub.h b/inc/toolchain/bbox/device/bbox_pub.h deleted file mode 100644 index a75f51957efeeb701c2d2afc7df394dbf7f1ea35..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/device/bbox_pub.h +++ /dev/null @@ -1,320 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
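Each BBOX_PROXY_MODULE_* block declares e_count (42 for TS in this MDC config, 21 in the DC variant above), which presumably has to match the number of initializers in the matching BBOX_PROXY_EXCEPTION_* list; a hedged sanity-check sketch (the checking function is hypothetical, not part of the deleted headers):

```c
#include <stdio.h>
#include "bbox_proxy_config_mdc.h"   /* the MDC config removed above */

DEFINE_BBOX_PROXY(g_bbox_proxy_table);

/* Count the exception slots that were actually initialized (excepid != 0)
 * and compare against the module's declared e_count. */
static int bbox_proxy_check_counts(const struct bbox_proxy_info *tbl, unsigned int n)
{
    int ok = 1;
    for (unsigned int i = 0; i < n; i++) {
        unsigned int used = 0;
        for (unsigned int j = 0; j < BBOX_PROXY_EXCEPTION_NUM; j++) {
            if (tbl[i].exception[j].e_excepid != 0) {
                used++;
            }
        }
        if (used != tbl[i].module.e_count) {
            printf("module %s: e_count=%u but %u exception entries initialized\n",
                   tbl[i].module.name, tbl[i].module.e_count, used);
            ok = 0;
        }
    }
    return ok;
}

int main(void)
{
    unsigned int n = sizeof(g_bbox_proxy_table) / sizeof(g_bbox_proxy_table[0]);
    return bbox_proxy_check_counts(g_bbox_proxy_table, n) ? 0 : 1;
}
```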
- */ - -#ifndef BBOX_PUB_H -#define BBOX_PUB_H - -#include "bbox_types.h" - -/********************************************************** - * basic type definitions * - **********************************************************/ -// 模块id列表 -enum BBOX_COREID_LIST { - BBOX_UNDEF = 0x0, - BBOX_DRIVER = 0x1, - BBOX_OS = 0x2, - BBOX_TS = 0x3, - BBOX_RUNTIME = 0x4, - BBOX_AICPU = 0x5, - BBOX_CCE = 0x6, - BBOX_TVM = 0x7, - BBOX_FRAMEWORK = 0x8, - BBOX_HIAI = 0x9, - BBOX_DVPP = 0xa, - BBOX_AIPP = 0xb, - BBOX_LPM = 0xc, - BBOX_MDC = 0xd, - BBOX_COMPILER = 0xe, - BBOX_TOOLCHAIN = 0xf, - BBOX_ALGORITHM = 0x10, - BBOX_PROFILING = 0x11, - BBOX_HCCL = 0x12, - BBOX_EMULATE = 0x13, - BBOX_BIOS = 0x14, - BBOX_TEEOS = 0x15, - BBOX_TINY = 0x16, - BBOX_LPFW = 0x17, - BBOX_NETWORK = 0x18, - BBOX_ZIP = 0x19, - BBOX_TF = 0x1A, - BBOX_HSM = 0x1B, - BBOX_ISP = 0x1C, - BBOX_SAFETYISLAND = 0x1D, - BBOX_CLUSTER = 0x1E, - BBOX_COMISOLATOR = 0x1F, - BBOX_SD = 0x20, - BBOX_DP = 0x21, - BBOX_CORE_MAX = 0x22, -}; - -// 异常类型 -enum BBOX_REBOOT_REASON { - BBOX_REBOOT_REASON_LABEL0 = 0x0, // label0:重启相关 - DEVICE_COLDBOOT = BBOX_REBOOT_REASON_LABEL0, // 冷启动,如关机后第一次开机;掉电后第一次开机 - BIOS_EXCEPTION = 0x1, // bios异常重启,前一次启动bios异常 - DEVICE_HOTBOOT = 0x2, // 热复位,如按键复位,芯片硬复位等 - BBOX_REBOOT_REASON_LABEL1 = 0x10, // label1:硬件原因复位 - ABNORMAL_EXCEPTION = BBOX_REBOOT_REASON_LABEL1, // 未检测到的异常 - TSENSOR_EXCEPTION = 0x1f, // soc温保复位 - PMU_EXCEPTION = 0x20, // 过流、欠压、PMU过温引起的硬件复位 - DDR_FATAL_EXCEPTION = 0X22, // ddr fatal异常复位,如:ddr颗粒超温复位 - BBOX_REBOOT_REASON_LABEL2 = 0x24, // label2:os软件原因复位 - OS_PANIC = BBOX_REBOOT_REASON_LABEL2, // os panic,如访问非法地址 - OS_SOFT_LOCKUP = 0x26, // soft lockup - OS_OOM = 0x2a, // OOM 异常 - OS_HDC = 0x2b, // HDC 断连 - BBOX_REBOOT_REASON_LABEL3 = 0x2c, // label3:其他模块复位 - STARTUP_EXCEPTION = 0x2c, // 模块启动异常 - HEARTBEAT_EXCEPTION = 0x2d, // 模块心跳异常 - RUN_EXCEPTION = 0x2e, // 模块运行异常 - LPM_EXCEPTION = 0x32, // LPM子系统检测到的各种异常 - TS_EXCEPTION = 0x33, // TS子系统检测到的各种异常 - DVPP_EXCEPTION = 0x35, // DVPP异常 - DRIVER_EXCEPTION = 0x36, // DRIVER异常 - ZIP_EXCEPTION = 0x37, // ZIP异常 - TEE_EXCEPTION = 0x38, // teeos异常 - LPFW_EXCEPTION = 0x39, // LPFW异常 - NETWORK_EXCEPTION = 0x3A, // NETWORK异常 - HSM_EXCEPTION = 0x3B, // HSM异常 - ATF_EXCEPTION = 0x3C, // ATF异常 - ISP_EXCEPTION = 0x3D, // ISP异常 - SAFETYISLAND_EXCEPTION = 0x3E, // SAFETYISLAND异常 - TOOLCHAIN_EXCEPTION = 0x3F, // TOOLCHAIN异常 - CLUSTER_EXCEPTION = 0x40, // CLUSTER异常 - COMISOLATOR_EXCEPTION = 0x41, // COMISOLATOR异常 - SD_EXCEPTION = 0x42, // SD异常 - DP_EXCEPTION = 0x43, // DP异常 - BBOX_REBOOT_REASON_LABEL4 = 0x50, // label4: - BBOX_REBOOT_REASON_LABEL5 = 0x65, // label5:电源异常 - BBOX_REBOOT_REASON_LABEL6 = 0x6A, // label6:xloader异常 - BBOX_REBOOT_REASON_LABEL7 = 0x74, // label7:fastboot异常 - BBOX_REBOOT_REASON_LABEL8 = 0x89, // label8: host侧异常 - DEVICE_LTO_EXCEPTION = 0x8A, // 设备启动超时: load timeout - DEVICE_HBL_EXCEPTION = 0x8B, // 设备心跳丢失: heart beat lost - DEVICE_USER_RESET = 0x8C, // 用户复位 - DEVICE_AER_EXCEPTION = 0x8D, // 设备AER错误: advanced err report - BBOX_REBOOT_REASON_LABEL9 = 0x90, // label9: - BBOX_REBOOT_REASON_LABEL10 = 0xB0, // label10: - BBOX_EXCEPTION_REASON_INVALID = 0xFF, -}; - -enum BBOX_PROCESS_PRI { - BBOX_OTHER = 0x0, // 不确定 - BBOX_NOTICE = 0x1, // 提示 - BBOX_MINOR = 0x2, // 次要 - BBOX_MAJOR = 0x3, // 重要 - BBOX_CRITICAL = 0x4, // 紧急 - BBOX_PPRI_MAX -}; - -enum BBOX_REBOOT_PRI { - BBOX_REBOOT_NOW = 0x01, // 立即重启 - BBOX_REBOOT_WAIT, // 等待重启 - BBOX_REBOOT_NO, // 不重启 - BBOX_REBOOT_MAX -}; - -enum BBOX_REENTRANT { - BBOX_REENTRANT_ALLOW = 0x01, // 可重复触发的异常 - 
BBOX_REENTRANT_DISALLOW // 不可重复触发的异常 -}; - -#define BBOX_COREID_VALID(coreid) ((((coreid) == BBOX_UNDEF) || ((coreid) >= BBOX_CORE_MAX)) ? BBOX_FALSE : BBOX_TRUE) -#define BBOX_COREID_MASK(coreid) (u64)((BBOX_COREID_VALID(coreid) == BBOX_TRUE) ? (1ull << (u8)((coreid) - 1)) : BBOX_UNDEF) - - -/********************************************************** - * module exception register definitions * - **********************************************************/ -#define BBOX_MODULE_CTRL_NUM 6 - -struct bbox_module_exception_ctrl { - excep_time e_clock; // 模块触发异常时间 - u32 e_excepid; // 模块触发的异常id - u32 e_block_offset; // 模块异常信息划分块起始偏移值,基于模块预留内存首地址,从magic开始 - u32 e_block_len; // 模块异常信息划分块长度 - u32 e_info_len; // 模块异常信息实际长度 -}; - -#define BBOX_MODULE_MAGIC 0x56312e32 -#define BBOX_MODULE_CTRL_PAD 3 -#define BBOX_MODULE_CTRL_NUM 6 -#define BBOX_MODULE_CTRL_RESERV 312 - -struct bbox_module_ctrl { - u32 magic; // 使用宏BBOX_MAGIC - u8 e_block_num; // 需要使用的控制块个数,最多BBOX_PROXY_CTRL_NUM - u8 padding[BBOX_MODULE_CTRL_PAD]; // padding - struct bbox_module_exception_ctrl block[BBOX_MODULE_CTRL_NUM]; // 模块dump信息控制状态 - u8 reserved[BBOX_MODULE_CTRL_RESERV]; // 预留空间,用于后续扩展 -}; - -#define BBOX_MODULE_CTRL_BLOCK_SIZE sizeof(struct bbox_module_ctrl) // total 512 byte - - -/********************************************************** - * bbox interfaces definitions * - **********************************************************/ -#ifndef BBOX_COMMON_STRUCT -#define BBOX_COMMON_STRUCT -typedef void (*bbox_e_callback)(u32, void*); - -typedef struct bbox_exception_info { - u32 e_excepid; // exception id; - u32 e_excepid_end; // can register exception id region. [excepid~excepid_end] - u64 e_notify_core_mask; // need notify other core mask - u64 e_reset_core_mask; // need reset other core mask - u8 e_exce_type; // the type of exception - u8 e_from_core; // the core of happen exception - u8 e_process_priority; // exception process priority - u8 e_reboot_priority; // exception reboot priority, just recommended host operation - u8 e_reentrant; // whether to allow exception reentrant - u8 e_from_module[BBOX_MODULE_NAME_LEN]; // the module of happen excption - u8 e_desc[BBOX_EXCEPTIONDESC_MAXLEN]; // the desc of happen excption - bbox_e_callback e_callback; // will be called when excption has processed. 
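Together with bbox_register_exception() declared further down in this header, the bbox_exception_info structure is the runtime counterpart of the static proxy tables; a minimal registration sketch (the exception id, core id, names and callback are made up for illustration):

```c
#include <stdio.h>
#include <string.h>
#include "device/bbox_pub.h"   /* the header removed in this hunk */

/* Hypothetical callback: invoked once the exception has been processed. */
static void demo_exception_done(u32 excepid, void *arg)
{
    (void)arg;
    printf("exception 0x%x handled\n", excepid);
}

static u32 demo_register(void)
{
    struct bbox_exception_info e;

    memset(&e, 0, sizeof(e));
    e.e_excepid          = 0xA6990001;                 /* made-up id for the sketch */
    e.e_excepid_end      = 0xA6990001;                 /* single id, no range */
    e.e_notify_core_mask = BBOX_COREID_MASK(BBOX_DP);
    e.e_reset_core_mask  = BBOX_COREID_MASK(BBOX_DP);
    e.e_exce_type        = RUN_EXCEPTION;
    e.e_from_core        = BBOX_DP;
    e.e_process_priority = BBOX_MINOR;
    e.e_reboot_priority  = BBOX_REBOOT_NO;
    e.e_reentrant        = BBOX_REENTRANT_DISALLOW;
    strncpy((char *)e.e_from_module, "demo", BBOX_MODULE_NAME_LEN - 1);
    strncpy((char *)e.e_desc, "demo exception", BBOX_EXCEPTIONDESC_MAXLEN - 1);
    e.e_callback         = demo_exception_done;

    return bbox_register_exception(&e);   /* returns the id on success, 0 on failure */
}
```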
-} bbox_exception_info_s; -#endif - -struct bbox_report_info { - u32 devid; // device chip id, may NOT same with device slot id - u32 excepid; // exception id - excep_time time; // exception time - u32 arg; // arg -}; - -struct bbox_dump_done_ops_info { - u32 devid; // device chip id, may NOT same with device slot id - u32 excepid; // exception id - u8 coreid; // which core done - u8 etype; // exception type - excep_time time; // exception time -}; - -/* - * @brief : callback function, tell bbox dump done - * @param [in] : struct bbox_dump_done_ops_info *info dump done info - * @return : NA - */ -typedef void (*bbox_dump_done_ops)(const struct bbox_dump_done_ops_info *info); - -struct bbox_dump_ops_info { - u32 devid; // device chip id, may NOT same with device slot id - u32 excepid; // exception id - u8 coreid; // exception core id - u8 etype; // exception type - excep_time time; // exception time - u32 arg; // arg from exception report -}; - -/* - * @brief : module dump operate, the function over, need call fndone to mark dump over - * @param [in] : struct bbox_dump_ops_info *info module dump info - * @param [in] : bbox_dump_done_ops done dump done function pointer - * @return : NA - */ -typedef void (*bbox_dump_ops)(const struct bbox_dump_ops_info *info, bbox_dump_done_ops done); - -struct bbox_reset_ops_info { - u32 devid; // device chip id, may NOT same with device slot id - u32 excepid; // exception id - u8 coreid; // exception core id - u8 etype; // exception type -}; - -/* - * @brief : module reset operate - * @param [in] : struct bbox_reset_ops_info *info module reset info - * @return : NA - */ -typedef void (*bbox_reset_ops)(const struct bbox_reset_ops_info *info); - -struct bbox_module_info { - u8 coreid; // core id - bbox_dump_ops ops_dump; // dump operate pointer - bbox_reset_ops ops_reset; // reset operate pointer -}; - -struct bbox_module_result { - u64 log_addr; // reserved physical address - u32 log_len; // reserved physical length -}; - -/* - * @brief : register module - * @param [in] : struct bbox_module_info *info module info - * @param [in] : struct bbox_module_result *result register result - * @return : <0 failure; ==0 success - */ -int bbox_register_module(const struct bbox_module_info *info, struct bbox_module_result *result); - -/* - * @brief : unregister module - * @param [in] : u8 core_id core id - * @return : <0 failure; ==0 success - */ -int bbox_unregister_module(u8 coreid); - -/* - * @brief : report exception - * @param [in] : struct bbox_report_ops_info *info report info - * @return : =1: disallow reentrant - * =0: success - * <0: failure - */ -int bbox_exception_report(const struct bbox_report_info *info); - -#ifndef BBOX_COMMON_INTERFACE -#define BBOX_COMMON_INTERFACE -/* - * @brief : register exception - * @param [in] : struct bbox_exception_info *e exception info - * @return : e_excepid - * == 0 fail; >0 success - */ -u32 bbox_register_exception(const struct bbox_exception_info *e); - -/* - * func name: bbox_unregister_exception - * func args: u32 excepid, exception id; - * return : < 0 fail - * >=0 success - */ -int bbox_unregister_exception(u32 excepid); - -/* - * @brief : get device error code - * @param [in] : dev_id device chip id, may NOT same with device slot id - * @param [out] : u32 *e_code exception code array; - * @param [in] : u32 e_capacity array num, max value is 128 - * @return : >0: error of num; - * =0: none of error; - * <0: failure; - */ -int bbox_get_device_errorcode(u32 dev_id, u32 *e_code, u32 e_capacity); - -/* - * @brief : get 
the exception description - * @param [in] : u32 ecode exception id - * @param [out] : u8 *desc string array, exception description - * @param [in] : u32 length string array length - * @return : <0 failure; ==0 success - */ -int bbox_get_device_ecode_info(u32 ecode, u8 *desc, u32 length); -#endif - -#endif - diff --git a/inc/toolchain/bbox/device/bbox_pub_cloud.h b/inc/toolchain/bbox/device/bbox_pub_cloud.h deleted file mode 100644 index 61af75a097cfe70012206e2f071cd37bb9d4754d..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/device/bbox_pub_cloud.h +++ /dev/null @@ -1,288 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BB_PUB_CLOUD_H -#define BB_PUB_CLOUD_H - -#include "bbox_types.h" - -#ifndef RDR_BASIC_TYPE -#define RDR_BASIC_TYPE -/* 模块id列表 */ -typedef enum CORE_LIST { - RDR_UNDEF = 0x0, - RDR_DRIVER = 0x1, - RDR_AP = 0x2, - RDR_TS = 0x3, - RDR_DVPP = 0xa, - RDR_BIOS = 0x14, - RDR_TEEOS = 0x15, - RDR_LPFW = 0x17, - RDR_NETWORK = 0x18, - RDR_TF = 0x1A, - RDR_CORE_MAX = 0x1B, -} rdr_coreid; - -#define RDR_COREID_VALID(coreid) ((((coreid) == 0) || ((coreid) >= RDR_CORE_MAX)) ? BBOX_FALSE : BBOX_TRUE) -#define RDR_COREID_MASK(coreid) (u64)(unsigned)(1 << ((coreid) - 1)) - -/* 异常类型 */ -typedef enum { - REBOOT_REASON_LABEL0 = 0x0, /* label0:重启相关 */ - AP_S_COLDBOOT = REBOOT_REASON_LABEL0, /* 冷启动,如关机后第一次开机;掉电后第一次开机 */ - BIOS_S_EXCEPTION = 0x1, /* bios异常重启,前一次启动bios异常 */ - AP_S_HOTBOOT = 0x2, /* 热复位,如按键复位,芯片硬复位等 */ - REBOOT_REASON_LABEL1 = 0x10, /* label1:硬件原因复位 */ - AP_S_ABNORMAL = REBOOT_REASON_LABEL1, /* 未检测到的异常 */ - AP_S_TSENSOR = 0x1f, /* soc温保复位 */ - AP_S_PMU = 0x20, /* 过流、欠压、PMU过温引起的硬件复位 */ - AP_S_DDR_FATAL = 0X22, /* ddr fatal异常复位,如:ddr颗粒超温复位 */ - REBOOT_REASON_LABEL2 = 0x24, /* label2:ap软件原因复位 */ - AP_S_PANIC = REBOOT_REASON_LABEL2, /* A核panic,如访问非法地址 */ - AP_S_OOM = 0x2a, /* OOM 异常 */ - AP_S_HDC = 0x2b, /* HDC 断连 */ - REBOOT_REASON_LABEL3 = 0x2c, /* label3:其他模块复位 */ - STARTUP_S_EXCEPTION = 0x2c, /* 模块启动异常 */ - HEARTBEAT_S_EXCEPTION = 0x2d, /* 模块心跳异常 */ - TS_S_EXCEPTION = 0x33, /* TS子系统检测到的各种异常 */ - DVPP_S_EXCEPTION = 0x35, /* DVPP异常 */ - DRIVER_S_EXCEPTION = 0x36, /* DRIVER异常 */ - TEE_S_EXCEPTION = 0x38, /* teeos异常 */ - LPFW_S_EXCEPTION = 0x39, /* LPFW异常 */ - NETWORK_S_EXCEPTION = 0x3A, /* NETWORK异常 */ - REBOOT_REASON_LABEL4 = 0x40, /* label4: */ - REBOOT_REASON_LABEL5 = 0x65, /* label5:电源异常 */ - REBOOT_REASON_LABEL6 = 0x6A, /* label6:xloader异常 */ - REBOOT_REASON_LABEL7 = 0x74, /* label7:fastboot异常 */ - REBOOT_REASON_LABEL8 = 0x89, /* label8: host侧异常 */ - DEVICE_LOAD_TIMEOUT = 0x8A, /* 设备启动超时 */ - DEVICE_HEAT_BEAT_LOST = 0x8B, /* 设备心跳丢失 */ - DEVICE_RESET_INFORM = 0x8C, /* 用户复位 */ - DEVICE_ADVANCED_ERR_REPORT = 0x8D, /* 设备AER错误 */ - REBOOT_REASON_LABEL9 = 0x90, /* label9: */ - REBOOT_REASON_LABEL10 = 0xB0, /* label10: */ - RDR_EXCEPTION_REASON_INVALID = 0xFF, -} EXCH_SOURCE; - -enum PROCESS_PRI { - RDR_OTHER = 0x0, /* 不确定 */ - RDR_NOTICE = 0x1, /* 提示 */ - RDR_MINOR = 0x2, /* 次要 */ 
- RDR_WARN = RDR_MINOR, - RDR_MAJOR = 0x3, /* 重要 */ - RDR_ERR = RDR_MAJOR, - RDR_CRITICAL = 0x4, /* 紧急 */ - RDR_DEAD = RDR_CRITICAL, - RDR_PPRI_MAX -}; - -enum REBOOT_PRI { - RDR_REBOOT_NOW = 0x01, /* 立即重启 */ - RDR_REBOOT_WAIT, /* 等待重启 */ - RDR_REBOOT_NO, /* 不重启 */ - RDR_REBOOT_MAX -}; - -enum REENTRANT { - RDR_REENTRANT_ALLOW = 0x01, /* 可重复触发的异常 */ - RDR_REENTRANT_DISALLOW /* 不可重复触发的异常 */ -}; - -#define MODULE_MAGIC 0xbaba0514 -#define MODULE_VALID 1 -#define MODULE_EXCEPTION_REGISTER_MAXNUM 512 - -struct exc_description_s { - u32 e_excepid; /* 异常id */ - u8 e_process_level; /* 异常处理级别:BBOX_PROCESS_PRI */ - u8 e_reboot_priority; /* 异常重启级别:BBOX_REBOOT_PRI */ - u8 e_excep_type; /* 异常类型 */ - u8 e_reentrant; /* 异常是否可重入 */ - u64 e_notify_core_mask; /* 异常联动掩码 */ - u8 e_desc[BBOX_EXCEPTIONDESC_MAXLEN]; /* 异常描述 */ -}; - -struct exc_info_s { - excep_time e_clock; /* 模块触发异常时间 */ - u32 e_excepid; /* 模块触发的异常id */ - u16 e_dump_status; /* 模块将异常信息存预留内存的控制状态 */ - u16 e_save_status; /* 代理将异常信息从预留内存导出的控制状态 */ -}; - -/* 通过共享内存注册异常 */ -struct exc_module_info_s { - u32 magic; /* 使用宏MODULE_MAGIC */ - u16 e_excep_valid; /* 模块写完注册的异常,则设置MODULE_VALID */ - u16 e_excep_num; /* 模块注册异常个数 */ - u8 e_from_module[BBOX_MODULE_NAME_LEN]; /* 模块名 */ - struct exc_info_s cur_info; /* 模块dump信息控制状态 */ - u32 e_mini_offset; /* 模块最小集异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_mini_len; /* 模块最小集异常信息长度 */ - u32 e_info_offset; /* 模块全部异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_info_len; /* 模块全部异常信息长度 */ - struct exc_description_s e_description[1]; /* 模块异常注册信息 */ -}; - -/* 通过注册函数注册异常 */ -struct rdr_ddr_module_info_s { - u32 magic; /* 使用宏MODULE_MAGIC */ - u32 e_mini_offset; /* 模块最小集异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_mini_len; /* 模块最小集异常信息长度 */ - u32 e_info_offset; /* 模块全部异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_info_len; /* 模块全部异常信息长度 */ -}; - -enum MODULE_DUMP_STATUS { - STATUS_INIT = 0, - STATUS_DOING = 1, - STATUS_DONE = 2, -}; -#endif - -#ifndef BBOX_COMMON_STRUCT -#define BBOX_COMMON_STRUCT -typedef void (*bbox_e_callback)(u32, void*); - -typedef struct bbox_exception_info { - u32 e_excepid; // exception id; - u32 e_excepid_end; // can register exception id region. [excepid~excepid_end] - u64 e_notify_core_mask; // need notify other core mask - u64 e_reset_core_mask; // need reset other core mask - u8 e_exce_type; // the type of exception - u8 e_from_core; // the core of happen exception - u8 e_process_priority; // exception process priority - u8 e_reboot_priority; // exception reboot priority, just recommended host operation - u8 e_reentrant; // whether to allow exception reentrant - u8 e_from_module[BBOX_MODULE_NAME_LEN]; // the module of happen excption - u8 e_desc[BBOX_EXCEPTIONDESC_MAXLEN]; // the desc of happen excption - bbox_e_callback e_callback; // will be called when excption has processed. 
-} bbox_exception_info_s; -#endif - -/* - * @brief : bbox dump done - * @param [in] : u32 dev_id device chip id, may NOT same with device slot id - * @param [in] : u8 core_id exception core - * @param [in] : u32 excep_id exception id - * @param [in] : u8 etype exception type - * @param [in] : excep_time *time exception time - * @param [in] : bbox_cb_dump_done fndone callback function - * @return : NA - */ -typedef void (*bbox_cb_dump_done)(u32 dev_id, u8 core_id, u32 excep_id, u8 etype, const excep_time *time); - -/* - * @brief : bbox dump, the function over, need call fndone to mark dump over - * @param [in] : u32 dev_id device chip id, may NOT same with device slot id - * @param [in] : u8 core_id exception core - * @param [in] : u32 excep_id exception id - * @param [in] : u8 etype exception type - * @param [in] : excep_time *time exception time - * @param [in] : u32 arg arg - * @param [in] : bbox_cb_dump_done fndone callback function - * @return : NA - */ -typedef void (*bbox_dump)(u32 dev_id, u8 core_id, u32 excep_id, u8 etype, - const excep_time *time, u32 arg, bbox_cb_dump_done fndone); - -/* - * @brief : bbox reset - * @param [in] : u32 dev_id device chip id, may NOT same with device slot id - * @param [in] : u8 core_id exception core - * @param [in] : u32 excep_id exception id - * @param [in] : u8 etype exception type - * @return : NA - */ -typedef void (*bbox_reset)(u32 dev_id, u8 core_id, u32 excep_id, u8 etype); - -struct bbox_module_ops { - bbox_dump ops_dump; - bbox_reset ops_reset; -}; - -struct bbox_register_module_result { - u64 log_addr; // reserved physical address - u32 log_len; // reserved physical length -}; - -/* - * @brief : register module - * @param [in] : u8 core_id core id - * @param [in] : struct bbox_module_ops* ops module ops - * @param [in] : struct bbox_register_module_result *retinfo register result info - * @return : <0 failure; ==0 success - */ -int bbox_register_module_ops(u8 core_id, const struct bbox_module_ops *ops, - struct bbox_register_module_result *retinfo); - -/* - * @brief : unregister module - * @param [in] : u8 core_id core id - * @return : <0 failure; ==0 success - */ -int bbox_unregister_module_ops(u8 core_id); - -/* - * @brief : unregister module - * @param [in] : dev_id device chip id, may NOT same with device slot id - * @param [in] : u32 excep_id exception id - * @param [in] : excep_time *timestamp exception time - * @param [in] : u32 arg arg - * @return : NA - */ -void bbox_system_error(u32 dev_id, u32 excep_id, const excep_time *timestamp, u32 arg); - -#ifndef BBOX_COMMON_INTERFACE -#define BBOX_COMMON_INTERFACE -/* - * @brief : register exception - * @param [in] : struct bbox_exception_info *e exception info - * @return : e_excepid - * == 0 fail; >0 success - */ -u32 bbox_register_exception(const struct bbox_exception_info *e); - -/* - * func name: bbox_unregister_exception - * func args: u32 excepid, exception id; - * return : < 0 fail - * >=0 success - */ -int bbox_unregister_exception(u32 excepid); - -/* - * @brief : get device error code - * @param [in] : dev_id device chip id, may NOT same with device slot id - * @param [out] : u32 *e_code exception code array; - * @param [in] : u32 e_capacity array num, max value is 128 - * @return : >0: error of num; - * =0: none of error; - * <0: failure; - */ -int bbox_get_device_errorcode(u32 dev_id, u32 *e_code, u32 e_capacity); - -/* - * @brief : get the exception description - * @param [in] : u32 ecode exception id - * @param [out] : u8 *desc string array, exception description - * @param 
[in] : u32 length string array length - * @return : <0 failure; ==0 success - */ -int bbox_get_device_ecode_info(u32 ecode, u8 *desc, u32 length); -#endif - -#endif // BB_PUB_CLOUD_H - diff --git a/inc/toolchain/bbox/device/bbox_pub_mini.h b/inc/toolchain/bbox/device/bbox_pub_mini.h deleted file mode 100644 index 03e9fa3a8b664bd840136727a78ce4a36c0200c1..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/device/bbox_pub_mini.h +++ /dev/null @@ -1,288 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef BB_PUB_MINI_H -#define BB_PUB_MINI_H - -#include "bbox_types.h" - -#ifndef RDR_BASIC_TYPE -#define RDR_BASIC_TYPE -/* 模块id列表 */ -typedef enum CORE_LIST { - RDR_UNDEF = 0x0, - RDR_DRIVER = 0x1, - RDR_AP = 0x2, - RDR_TS = 0x3, - RDR_AICPU = 0x5, - RDR_DVPP = 0xa, - RDR_LPM3 = 0xc, - RDR_BIOS = 0x14, - RDR_TEEOS = 0x15, - RDR_TF = 0x1A, - RDR_CORE_MAX = 0x1B, -} rdr_coreid; - -#define RDR_COREID_VALID(coreid) ((((coreid) == 0) || ((coreid) >= RDR_CORE_MAX)) ? BBOX_FALSE : BBOX_TRUE) -#define RDR_COREID_MASK(coreid) (u64)(unsigned)(1 << ((coreid) - 1)) - -/* 异常类型 */ -typedef enum { - REBOOT_REASON_LABEL0 = 0x0, /* label0:重启相关 */ - AP_S_COLDBOOT = REBOOT_REASON_LABEL0, /* 冷启动,如关机后第一次开机;掉电后第一次开机 */ - BIOS_S_EXCEPTION = 0x1, /* bios异常重启,前一次启动bios异常 */ - AP_S_HOTBOOT = 0x2, /* 热复位,如按键复位,芯片硬复位等 */ - REBOOT_REASON_LABEL1 = 0x10, /* label1:硬件原因复位 */ - AP_S_ABNORMAL = REBOOT_REASON_LABEL1, /* 未检测到的异常 */ - AP_S_TSENSOR = 0x1f, /* soc温保复位 */ - AP_S_PMU = 0x20, /* 过流、欠压、PMU过温引起的硬件复位 */ - AP_S_DDR_FATAL = 0X22, /* ddr fatal异常复位,如:ddr颗粒超温复位 */ - REBOOT_REASON_LABEL2 = 0x24, /* label2:ap软件原因复位 */ - AP_S_PANIC = REBOOT_REASON_LABEL2, /* A核panic,如访问非法地址 */ - AP_S_OOM = 0x2a, /* OOM 异常 */ - AP_S_HDC = 0x2b, /* HDC 断连 */ - REBOOT_REASON_LABEL3 = 0x2c, /* label3:其他模块复位 */ - STARTUP_S_EXCEPTION = 0x2c, /* 模块启动异常 */ - HEARTBEAT_S_EXCEPTION = 0x2d, /* 模块心跳异常 */ - LPM3_S_EXCEPTION = 0x32, /* LPM3子系统检测到的各种异常 */ - TS_S_EXCEPTION = 0x33, /* TS子系统检测到的各种异常 */ - DVPP_S_EXCEPTION = 0x35, /* DVPP异常 */ - DRIVER_S_EXCEPTION = 0x36, /* DRIVER异常 */ - TEE_S_EXCEPTION = 0x38, /* teeos异常 */ - REBOOT_REASON_LABEL4 = 0x40, /* label4: */ - REBOOT_REASON_LABEL5 = 0x65, /* label5:电源异常 */ - REBOOT_REASON_LABEL6 = 0x6A, /* label6:xloader异常 */ - REBOOT_REASON_LABEL7 = 0x74, /* label7:fastboot异常 */ - REBOOT_REASON_LABEL8 = 0x89, /* label8: host侧异常 */ - DEVICE_LOAD_TIMEOUT = 0x8A, /* 设备启动超时 */ - DEVICE_HEAT_BEAT_LOST = 0x8B, /* 设备心跳丢失 */ - DEVICE_RESET_INFORM = 0x8C, /* 用户复位 */ - DEVICE_ADVANCED_ERR_REPORT = 0x8D, /* 设备AER错误 */ - REBOOT_REASON_LABEL9 = 0x90, /* label9: */ - REBOOT_REASON_LABEL10 = 0xB0, /* label10: */ - RDR_EXCEPTION_REASON_INVALID = 0xFF, -} EXCH_SOURCE; - -enum PROCESS_PRI { - RDR_OTHER = 0x0, /* 不确定 */ - RDR_NOTICE = 0x1, /* 提示 */ - RDR_MINOR = 0x2, /* 次要 */ - RDR_WARN = RDR_MINOR, - RDR_MAJOR = 0x3, /* 重要 */ - RDR_ERR = RDR_MAJOR, - RDR_CRITICAL = 0x4, /* 紧急 */ - RDR_DEAD = RDR_CRITICAL, - RDR_PPRI_MAX -}; - -enum 
REBOOT_PRI { - RDR_REBOOT_NOW = 0x01, /* 立即重启 */ - RDR_REBOOT_WAIT, /* 等待重启 */ - RDR_REBOOT_NO, /* 不重启 */ - RDR_REBOOT_MAX -}; - -enum REENTRANT { - RDR_REENTRANT_ALLOW = 0x01, /* 可重复触发的异常 */ - RDR_REENTRANT_DISALLOW /* 不可重复触发的异常 */ -}; - -#define MODULE_MAGIC 0xbaba0514 -#define MODULE_VALID 1 -#define MODULE_EXCEPTION_REGISTER_MAXNUM 512 - -struct exc_description_s { - u32 e_excepid; /* 异常id */ - u8 e_process_level; /* 异常处理级别:BBOX_PROCESS_PRI */ - u8 e_reboot_priority; /* 异常重启级别:BBOX_REBOOT_PRI */ - u8 e_excep_type; /* 异常类型 */ - u8 e_reentrant; /* 异常是否可重入 */ - u64 e_notify_core_mask; /* 异常联动掩码 */ - u8 e_desc[BBOX_EXCEPTIONDESC_MAXLEN]; /* 异常描述 */ -}; - -struct exc_info_s { - excep_time e_clock; /* 模块触发异常时间 */ - u32 e_excepid; /* 模块触发的异常id */ - u16 e_dump_status; /* 模块将异常信息存预留内存的控制状态 */ - u16 e_save_status; /* 代理将异常信息从预留内存导出的控制状态 */ -}; - -/* 通过共享内存注册异常 */ -struct exc_module_info_s { - u32 magic; /* 使用宏MODULE_MAGIC */ - u16 e_excep_valid; /* 模块写完注册的异常,则设置MODULE_VALID */ - u16 e_excep_num; /* 模块注册异常个数 */ - u8 e_from_module[BBOX_MODULE_NAME_LEN]; /* 模块名 */ - struct exc_info_s cur_info; /* 模块dump信息控制状态 */ - u32 e_mini_offset; /* 模块最小集异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_mini_len; /* 模块最小集异常信息长度 */ - u32 e_info_offset; /* 模块全部异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_info_len; /* 模块全部异常信息长度 */ - struct exc_description_s e_description[1]; /* 模块异常注册信息 */ -}; - -/* 通过注册函数注册异常 */ -struct rdr_ddr_module_info_s { - u32 magic; /* 使用宏MODULE_MAGIC */ - u32 e_mini_offset; /* 模块最小集异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_mini_len; /* 模块最小集异常信息长度 */ - u32 e_info_offset; /* 模块全部异常信息偏移值,基于模块预留内存首地址,从magic开始 */ - u32 e_info_len; /* 模块全部异常信息长度 */ -}; - -enum MODULE_DUMP_STATUS { - STATUS_INIT = 0, - STATUS_DOING = 1, - STATUS_DONE = 2, -}; -#endif - -#define RDR_MODULE_NAME_LEN 16 -#define RDR_EXCEPTIONDESC_MAXLEN 48 - -typedef void (*rdr_e_callback)(u32, void*); - -/* - * struct list_head e_list; - * u32 excepid, exception id; - * if excepid equal 0, will auto generation excepid, and return it. - * u32 excepid_end, can register exception id region. [excepid~excepid_end]; - need excepid_end >= excepid, - * if excepid_end equal 0, will be register excepid only, - but excepid & excepid_end cant equal 0 at the same time. - * u64 notify_core_mask, need notify other core mask - * u64 reset_core_mask, need reset other core mask - * u8 exce_type, the type of exception - * u8 from_core, the core of happen exception - * u8 process_priority, exception process priority - * u8 reboot_priority, exception reboot priority - * u8 reentrant, whether to allow exception reentrant - * char* from_module, the module of happen excption - * char* desc, the desc of happen excption - * rdr_e_callback callback, will be called when excption has processed. 
- */ -struct rdr_exception_info_s { - struct list_head e_list; - u32 e_excepid; - u32 e_excepid_end; - u64 e_notify_core_mask; - u64 e_reset_core_mask; - u8 e_exce_type; - u8 e_from_core; - u8 e_process_priority; - u8 e_reboot_priority; - u8 e_reentrant; - u8 e_from_module[RDR_MODULE_NAME_LEN]; - u8 e_desc[RDR_EXCEPTIONDESC_MAXLEN]; - rdr_e_callback e_callback; -}; - -/* - * @brief : module dump done, callback it, tell bbox dump done - * @param [in] : u32 excep_id exception id - * @param [in] : u8 core_id which core done - * @param [in] : u8 etype exception type - * @param [in] : excep_time *time exception time - * @return : NA - */ -typedef void (*pfn_cb_dump_done)(u32 excepid, u8 coreid, u8 etype, excep_time time); - -/* - * @brief : call module dump exception info - * the function over, need call fndone to mark dump over - * @param [in] : u32 excepid exception id - * @param [in] : u8 coreid exception core - * @param [in] : u8 etype exception type - * @param [in] : excep_time time exception time - * @param [in] : char* logpath exception log path - * @param [in] : pfn_cb_dump_done fndone - * @return : NA - */ -typedef void (*pfn_dump)(u32 excepid, u8 etype, u8 coreid, excep_time time, - char* logpath, pfn_cb_dump_done fndone); - -/* - * @brief : call module reset - * @param [in] : u32 excepid exception id - * @param [in] : u8 core_id exception core - * @param [in] : u8 etype exception type - * @return : NA - */ -typedef void (*pfn_reset)(u32 excepid, u8 etype, u8 coreid); - -struct rdr_module_ops { - pfn_dump ops_dump; - pfn_reset ops_reset; -}; - -struct rdr_register_module_result { - u64 log_vaddr; // reserved physical address - u32 log_len; // reserved physical address length -}; - -/* - * @brief : register exception - * @param [in] : struct rdr_exception_info_s* e exception info - * @return : excepid - * == 0 error; > 0 success - */ -u32 rdr_register_exception(struct rdr_exception_info_s *e); - -/* - * @brief : unregister exception - * @param [in] : u32 excep_id, exception id; - * @return : < 0 fail; >=0 success - */ -int rdr_unregister_exception(u32 excepid); - -/* - * @brief : register module - * @param [in] : u8 core_id core id - * @param [in] : struct rdr_module_ops* ops ops info - * @param [out] : struct rdr_register_module_result* retinfo return info - * @return : < 0 error; >=0 success - */ -int rdr_register_module_ops(u8 coreid, const struct rdr_module_ops* ops, struct rdr_register_module_result* retinfo); - -/* - * @brief : unregister module - * @param [in] :u8 core_id core id - * @return : < 0 fail; >=0 success - */ -int rdr_unregister_module_ops(u8 coreid); - -/* - * @brief : report exception - * @param [in] : u32 excep_id exception id - * @param [in] : excep_time timestamp exception time - * @param [in] : u32 arg arg - * @return : NA - */ -void mntn_system_error(u32 excep_id, excep_time timestamp, u32 arg); - -/* - * @brief : get arg value - * @param [in] : u32 excepid exception id - * @param [in] : u8 coreid exception core id - * @param [in] : excep_time timestamp exception time - * @param [out] : u32 *arg return arg value - * @return : NA - */ -int rdr_module_dump_get_arg(u32 excepid, u8 coreid, excep_time time, u32 *arg); - -#endif // BB_PUB_MINI_H - diff --git a/inc/toolchain/bbox/device/bbox_types.h b/inc/toolchain/bbox/device/bbox_types.h deleted file mode 100644 index b7d862f454b56b6877988c59735743ac8e02ccae..0000000000000000000000000000000000000000 --- a/inc/toolchain/bbox/device/bbox_types.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2019-2020 Huawei 
Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef RDR_TYPES_H -#define RDR_TYPES_H -#include - -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -typedef signed long long s64; -typedef unsigned long long u64; - -typedef struct excep_time_t { - u64 tv_sec; - u64 tv_usec; -} excep_time; - -#define BBOX_TRUE 1 -#define BBOX_FALSE 0 - -#define BBOX_SUCCESS 0 -#define BBOX_FAILURE (-1) -#define BBOX_COMM_INVAL (-2) // communication failure -#define BBOX_COMM_TIMEOUT (-3) // communication timeout -#define BBOX_MSG_INVAL (-4) // message invalid -#define BBOX_MSG_NONE (-5) // no have data -#define BBOX_NO_SUPPORT (-10) // not support - -#define BBOX_DISALLOW_REETRANT 1 // return value - -#define BBOX_EOK 0 -#define BBOX_ENXIO (-6) // No such device or address -#define BBOX_ENODEV (-19) // No such device -#define BBOX_EINVAL (-22) // Invalid argument -#define BBOX_ENOSPC (-28) // No space left on device - -#define BBOX_NOTIFY_DONE (NOTIFY_DONE) -#define BBOX_NOTIFY_OK (NOTIFY_OK) - -#define BBOX_UCHAR_INVALID 0xFF -#define BBOX_UINT_INVALID 0xFFFFFFFF -#define BBOX_MODULE_NAME_LEN 16 -#define BBOX_EXCEPTIONDESC_MAXLEN 48 - -#endif // RDR_TYPES_H - diff --git a/inc/toolchain/ide_daemon_api.h b/inc/toolchain/ide_daemon_api.h deleted file mode 100644 index 929f5f782a5968aff93e0a6b8a830258dbf80e5a..0000000000000000000000000000000000000000 --- a/inc/toolchain/ide_daemon_api.h +++ /dev/null @@ -1,177 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** @defgroup dump dump接口 */ -#ifndef IDE_DAEMON_API_H -#define IDE_DAEMON_API_H - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * @ingroup dump。 - * - * dump ip信息缓冲区长度 - */ -typedef void *IDE_SESSION; - -/** - * @ingroup dump。 - * - * dump ip信息缓冲区长度 - */ -#ifndef uint32_t -typedef unsigned int uint32_t; -#endif - -/** - * @ingroup dump。 - * - * dump ip信息缓冲区长度 - */ -#define IDE_DAEMON_IP_LEN (16) - -/** - * @ingroup dump。 - * - * dump 连接信息 - */ -typedef struct tagConnInfo { - char ip[IDE_DAEMON_IP_LEN]; /**< IP地址 */ - int port; /**< 端口号 */ - int deviceID; /**< 设备ID号 */ -} connInfo_t; - -/** - * @ingroup dump。 - * - * dump 错误信息 - */ -typedef enum tagIdeError { - IDE_DAEMON_NONE_ERROR = 0, /**< 无错误 */ - IDE_DAEMON_UNKNOW_ERROR = 1, /**< 未知错误 */ - IDE_DAEMON_WRITE_ERROR = 2, /**< 写入失败 */ - IDE_DAEMON_NO_SPACE_ERROR = 3, /**< 磁盘已满 */ - IDE_DAEMON_INVALID_PATH_ERROR = 4, /**< 无效路径 */ - IDE_DAEMON_INVALID_PARAM_ERROR = 5, /**< 无效参数 */ - IDE_DAEMON_TCP_CONNECT_ERROR = 6, /**< TCP连接失败 */ - IDE_DAEMON_TCP_CHANNEL_ERROR = 7, /**< TCP通道异常 */ - IDE_DAEMON_MALLOC_ERROR = 8, /**< 申请堆内存失败 */ - IDE_DAEMON_HDC_CHANNEL_ERROR = 9, /**< HDC通路异常 */ - IDE_DAEMON_CHANNEL_ERROR = 10, /**< 通路异常 */ - IDE_DAEMON_MKDIR_ERROR = 11, /**< 创建目录失败 */ - IDE_DAEMON_MEMCPY_ERROR = 12, /**< 内存拷贝失败 */ - IDE_DAEMON_MEMSET_ERROR = 13, /**< 内存清零失败 */ - IDE_DAEMON_INVALID_IP_ERROR = 14, /**< 无效的IP地址 */ - IDE_DAEMON_INTERGER_REVERSED_ERROR = 15, /**< 整形溢出 */ - IDE_DAEMON_DUMP_QUEUE_FULL = 16, /**< dump队列已满 */ - NR_IDE_DAEMON_ERROR, /**< 枚举最大值 */ -}ideError_t; - -/** - * @ingroup dump。 - * - * dump 错误信息 - */ -typedef ideError_t IdeErrorT; - -/** - * @ingroup dump。 - * - * dump回传数据块标识信息 - */ -enum IdeDumpFlag { - IDE_DUMP_NONE_FLAG = 0, /**< 无标志位 */ -}; - -/** - * @ingroup dump。 - * - * dump回传数据块 - */ -struct IdeDumpChunk { - char *fileName; /**< 文件名,绝对路径 */ - unsigned char *dataBuf; /**< 写入的数据Buffer */ - unsigned int bufLen; /**< 写入的数据Buffer长度 */ - unsigned int isLastChunk; /**< 是否最后一块数据 0:非最后一块数据;1:最后一块数据 */ - long long offset; /**< 文件写入的偏移位 -1为追加形式写入 */ - enum IdeDumpFlag flag; /**< 标志位 */ -}; - -/** - * @ingroup dump - * @par 描述: 创建Dump通路。 - * - * @attention 无 - * @param privInfo [IN] 启动Dump通路数据(格式host:port;device_id(HDC), local;device_id(Local)) - * @retval #非空 创建会话成功 - * @retval #NULL 创建会话失败 - * @par 依赖: - * @li ide_daemon_api.cpp:该接口所属的开发包。 - * @li ide_daemon_api.h:该接口声明所在的头文件。 - * @see 无 - * @since - */ -extern IDE_SESSION IdeDumpStart(const char *privInfo); - -/** - * @ingroup dump - * @par 描述: 进行数据Dump,Dump完成数据落盘后返回。 - * - * @attention 无 - * @param session [IN] 会话句柄 - * @param dumpChunk [IN] Dump的数据结构体 - * @retval #IDE_DAEMON_NONE_ERROR 写数据成功 - * @retval #IDE_DAEMON_INVALID_PARAM_ERROR 非法参数 - * @retval #IDE_DAEMON_UNKNOW_ERROR 写数据失败 - * @par 依赖: - * @li ide_daemon_api.cpp:该接口所属的开发包。 - * @li ide_daemon_api.h:该接口声明所在的头文件。 - * @see 无 - * @since - */ -extern IdeErrorT IdeDumpData(IDE_SESSION session, const struct IdeDumpChunk *dumpChunk); - -/** - * @ingroup dump - * @par 描述: 关闭Dump通路。 - * - * @attention 无 - * @param session [IN] 会话句柄 - * @retval #IDE_DAEMON_NONE_ERROR 关闭会话成功 - * @retval #IDE_DAEMON_INVALID_PARAM_ERROR 非法参数 - * @retval #IDE_DAEMON_UNKNOW_ERROR 关闭会话失败 - * @par 依赖: - * @li ide_daemon_api.cpp:该接口所属的开发包。 - * @li ide_daemon_api.h:该接口声明所在的头文件。 - * @see 无 - * @since - */ -extern IdeErrorT IdeDumpEnd(IDE_SESSION session); - -#ifdef __cplusplus -} -#endif - -#endif -/* - * History: \n - * 2018-10-10, huawei, 初始化该文件。 \n - * 2020-02-10, huawei, 更改API规范化。 \n - * - * vi: set expandtab ts=4 sw=4 tw=120: - */ 
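Note on the removal above: the deleted ide_daemon_api.h exposed a start/write/end dump-session API (IdeDumpStart, IdeDumpData, IdeDumpEnd with an IdeDumpChunk payload). For reference, a minimal sketch of how that removed API was meant to be driven, using only the declarations quoted above; the connection string, file path, and helper name are made-up placeholders, not code from this repository:

```
#include <cstring>
#include "ide_daemon_api.h"  // the header deleted by this patch

// Illustrative only: push one buffer through the (now removed) dump channel.
static int DumpOneBuffer(const unsigned char *buf, unsigned int len) {
  // "host:port;device_id" format as documented for IdeDumpStart; values are placeholders.
  IDE_SESSION session = IdeDumpStart("127.0.0.1:22118;0");
  if (session == nullptr) {
    return -1;  // failed to open the dump channel
  }

  struct IdeDumpChunk chunk;
  (void)std::memset(&chunk, 0, sizeof(chunk));
  chunk.fileName    = const_cast<char *>("/tmp/example_dump.bin");  // absolute path, placeholder
  chunk.dataBuf     = const_cast<unsigned char *>(buf);
  chunk.bufLen      = len;
  chunk.isLastChunk = 1;                   // single-shot write, so this is the last chunk
  chunk.offset      = -1;                  // -1 appends, per the header comments
  chunk.flag        = IDE_DUMP_NONE_FLAG;

  IdeErrorT ret = IdeDumpData(session, &chunk);  // returns after data hits disk
  (void)IdeDumpEnd(session);
  return (ret == IDE_DAEMON_NONE_ERROR) ? 0 : -1;
}
```

The caller owns the buffer for the duration of the call; the header gives no ownership-transfer semantics, so the sketch keeps the session strictly scoped to one write.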
diff --git a/inc/toolchain/ide_tlv.h b/inc/toolchain/ide_tlv.h deleted file mode 100644 index 044adcb50a90748c46f4c90f4d262312b59e38f7..0000000000000000000000000000000000000000 --- a/inc/toolchain/ide_tlv.h +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** @defgroup adx ADX */ -#ifndef IDE_TLV_H -#define IDE_TLV_H - -/** - * @ingroup adx - * - * adx 命令请求列表 - */ -enum cmd_class { - IDE_EXEC_COMMAND_REQ = 0, /**< 执行device命令请求\n */ - IDE_SEND_FILE_REQ, /**< 发送文件到device命令请求\n */ - IDE_DEBUG_REQ, /**< Debug命令请求\n */ - IDE_BBOX_REQ, /**< Bbox命令请求\n */ - IDE_LOG_REQ, /**< Log命令请求\n */ - IDE_PROFILING_REQ, /**< Profiling命令请求\n */ - IDE_OME_DUMP_REQ, /**< Ome dump命令请求\n */ - IDE_FILE_SYNC_REQ, /**< 发送文件到AiHost 命令请求\n */ - IDE_EXEC_API_REQ, /**< 执行AiHost Api命令请求\n */ - IDE_EXEC_HOSTCMD_REQ, /**< 执行AiHost 命令命令请求\n */ - IDE_DETECT_REQ, /**< 执行AiHost 通路命令请求\n */ - IDE_FILE_GET_REQ, /**< 获取AiHost侧文件命令请求\n */ - IDE_NV_REQ, /**< 执行AiHost Nv命令请求\n */ - IDE_DUMP_REQ, /**< Dump命令请求\n */ - IDE_FILE_GETD_REQ, /**< 获取Device侧文件命令请求\n */ - IDE_INVALID_REQ, /**< 无效命令请求\n */ - NR_IDE_CMD_CLASS, /**< 标识命令请求最大值\n */ -}; - -/** - * @ingroup adx - * - * adx 命令请求列表 - */ -typedef enum cmd_class CmdClassT; - -/** - * @ingroup adx - * - * adx 数据交互格式 - */ -struct tlv_req { - enum cmd_class type; /**< 数据包命令类型 */ - int dev_id; /**< 设备 ID */ - int len; /**< 数据包数据长度 */ - char value[0]; /**< 数据包数据 */ -}; - -/** - * @ingroup adx - * - * adx 数据交互格式 - */ -typedef struct tlv_req TlvReqT; - -#endif -/* - * History: \n - * 2018-10-10, huawei, 初始化该文件。 \n - * 2020-02-10, huawei, 更改API规范化。 \n - * - * vi: set expandtab ts=4 sw=4 tw=120: - */ diff --git a/inc/toolchain/prof_acl_api.h b/inc/toolchain/prof_acl_api.h deleted file mode 100644 index 74f6606508b055099b4ea6f8c72780e0f130a31a..0000000000000000000000000000000000000000 --- a/inc/toolchain/prof_acl_api.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MSPROF_ENGINE_PROF_ACL_API_H_ -#define MSPROF_ENGINE_PROF_ACL_API_H_ - -#define MSVP_PROF_API __attribute__((visibility("default"))) - -#include -#include - -/** - * @name ProrErrorCode - * @brief error code of prof_acl_apis - */ -enum ProfErrorCode { - PROF_ERROR_NONE = 0, // ok - PROF_ERROR_PARAM_INVALID, // param invalid, for example nullptr - PROF_ERROR_REPEAT_INIT, // profiling has already been inited - PROF_ERROR_CONFIG_INVALID, // config invalid, for example invalid json string - PROF_ERROR_DIR_NO_ACCESS, // dir is not accessable - PROF_ERROR_FAILURE, // failed to init or start profiling - PROF_ERROR_NOT_INITED, // profiling has not been inited - PROF_ERROR_DEVICE_INVALID, // device id invalid - PROF_ERROR_UNSUPPORTED, // unsupported data type or ai core metrics - PROF_ERROR_REPEAT_START, // profiilng has already been started - PROF_ERROR_NOT_STARTED, // profiling has not been started -}; - -/** - * @brief transfer profiling config in acl.json to sample config - * @param aclCfg [IN] profiling json string from acl.json as {"switch":"on", "result_path":"/home",...} - * @param sampleCfg [OUT] json string for GE as {"startCfg":[{"deviceID":"all","jobID":"1234",...}]} - * @return ProfErrorCode - */ -MSVP_PROF_API int32_t ProfAclCfgToSampleCfg(const std::string &aclCfg, std::string &sampleCfg); - -#endif // MSPROF_ENGINE_PROF_ACL_API_H_ diff --git a/inc/toolchain/profiler_client.h b/inc/toolchain/profiler_client.h deleted file mode 100644 index 0176b10e6d3fd60d1619d107082e6c5aa8ff0ffe..0000000000000000000000000000000000000000 --- a/inc/toolchain/profiler_client.h +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef PROFILER_CLIENT_H_INCLUDED -#define PROFILER_CLIENT_H_INCLUDED - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -#ifdef WIN32 -#ifdef PROFILERCLIENT_EXPORTS -#define MSVP_PROF_CLN_API __declspec(dllexport) -#else -#define MSVP_PROF_CLN_API __declspec(dllimport) -#endif -#else -#define MSVP_PROF_CLN_API -#endif -/** - * the data structure to write - */ -struct data_chunk { - char* relative_file_name;// from subpath begin; For example: subA/subB/example.txt; Note: the begin don't has '/'; - unsigned char* data_buf;// the pointer to the data - unsigned int buf_len;// the len of data_buf - unsigned int is_last_chunk;// = 1, the last chunk of the file; != 1, not the last chunk of the file - long long offset;// the begin location of the file to write; if the offset is -1, directly append data. -}; - -struct collect_dev_info_s { - int dev_id; -}; - -/** \brief use it to connect the server. - * - * \param const unsigned char* target: The engine gets it from ENV. Don't need care about it. - * \param const unsigned char* engine_name: For example OME;CCE; Runtime;Matrix... 
- * \return the return value void* point the client handle - * - */ - -MSVP_PROF_CLN_API extern void* create_collect_client(const char* target, const char* engine_name); - -/** \brief notify profiling the device list - * - * \param void* handle: the return value of the create_collect_client function - * \param const char* job_ctx: identifies profiling job - * \param const collect_dev_info_s* dev_list: pointer to the device list - * \param int dev_num: the device number - * \return 0 on success - */ -MSVP_PROF_CLN_API extern int collect_host_sync_dev_list(void* handle, const char* job_ctx, const collect_dev_info_s* dev_list, int dev_num); - -/** \brief write data by this function to transfer - * - * \param void* handle: the return value of the create_collect_client function - * \param struct data_chunk* data: record the value to restore the sampling data - * \param const unsigned char* job_ctx: The engine gets it from ENV. Don't need care about it. Represent the context about profiling job. - * \return On success, the number of bytes written is returned(zero indicates nothing was written); On error, <0 is returned, and the value is set appropriately. - * - */ - -MSVP_PROF_CLN_API extern int collect_write(void* handle, const char* job_ctx, struct data_chunk* data); - -/** \brief release the handle - * - * \param void* handle: the return value of the create_collect_client function - * \return - * - */ - -MSVP_PROF_CLN_API extern void release_collect_client(void* handle); - -/** \brief update job ctx for specific device, colllect_update_job_ctx uses malloc() to allocate a buffer to hold the - * new job_ctx and return a pointer to the buffer. The caller should deallocate this buffer using free() - * - * \param const char* job_ctx: pointer to current job_ctx - * \param collect_ctx_info* info: update job_ctx with the info - * \return pointer to buffer which holds the new job_ctx - */ -MSVP_PROF_CLN_API extern char* collect_dev_update_job_ctx(const char* job_ctx, const collect_dev_info_s * info); -#ifdef __cplusplus -} -#endif // __cplusplus - -#endif // PROFILER_CLIENT_H_INCLUDED - - diff --git a/tf_adapter/kernels/amct_ascend_anti_quant.cc b/tf_adapter/kernels/amct_ascend_anti_quant.cc new file mode 100644 index 0000000000000000000000000000000000000000..a731b9d76597c49506172ccd55cff2cd21ea4202 --- /dev/null +++ b/tf_adapter/kernels/amct_ascend_anti_quant.cc @@ -0,0 +1,46 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" + +using namespace tensorflow; + +template +class AscendAntiQuantOp : public OpKernel { +public: + explicit AscendAntiQuantOp(OpKernelConstruction* context) : OpKernel(context){} + + ~AscendAntiQuantOp(){} + + void Compute(OpKernelContext* context) override{} +}; + +REGISTER_KERNEL_BUILDER( + Name("AscendAntiQuant").Device(tensorflow::DEVICE_CPU).TypeConstraint("T"), + AscendAntiQuantOp); diff --git a/tf_adapter/kernels/amct_ascend_dequant.cc b/tf_adapter/kernels/amct_ascend_dequant.cc new file mode 100644 index 0000000000000000000000000000000000000000..f9713593181a467059faa8181fff90190c569707 --- /dev/null +++ b/tf_adapter/kernels/amct_ascend_dequant.cc @@ -0,0 +1,46 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" + +using namespace tensorflow; + +template +class AscendDequantOp : public OpKernel { +public: + explicit AscendDequantOp(OpKernelConstruction* context) : OpKernel(context){} + + ~AscendDequantOp(){} + + void Compute(OpKernelContext* context) override{} +}; + +REGISTER_KERNEL_BUILDER( + Name("AscendDequant").Device(tensorflow::DEVICE_CPU).TypeConstraint("T"), + AscendDequantOp); diff --git a/tf_adapter/kernels/amct_ascend_quant.cc b/tf_adapter/kernels/amct_ascend_quant.cc new file mode 100644 index 0000000000000000000000000000000000000000..95ea17b63b5eca97b3715961949dd93fbce35b5d --- /dev/null +++ b/tf_adapter/kernels/amct_ascend_quant.cc @@ -0,0 +1,46 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" + +using namespace tensorflow; + +template +class AscendQuantOp : public OpKernel { +public: + explicit AscendQuantOp(OpKernelConstruction* context) : OpKernel(context){} + + ~AscendQuantOp(){} + + void Compute(OpKernelContext* context) override {} +}; + +REGISTER_KERNEL_BUILDER( + Name("AscendQuant").Device(tensorflow::DEVICE_CPU).TypeConstraint("T"), + AscendQuantOp); diff --git a/tf_adapter/kernels/amct_ascend_weight_quant.cc b/tf_adapter/kernels/amct_ascend_weight_quant.cc new file mode 100644 index 0000000000000000000000000000000000000000..f81533e5091c05705bc8c891fcb40ea674c5595d --- /dev/null +++ b/tf_adapter/kernels/amct_ascend_weight_quant.cc @@ -0,0 +1,46 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" + +using namespace tensorflow; + +template +class AscendWeightQuantOp : public OpKernel { +public: + explicit AscendWeightQuantOp(OpKernelConstruction* context) : OpKernel(context){} + + ~AscendWeightQuantOp(){} + + void Compute(OpKernelContext* context) override{} +}; + +REGISTER_KERNEL_BUILDER( + Name("AscendWeightQuant").Device(tensorflow::DEVICE_CPU).TypeConstraint("T"), + AscendWeightQuantOp); diff --git a/tf_adapter/kernels/decode_image_ops.cc b/tf_adapter/kernels/decode_image_ops.cc index 57e998be269c00dfe70903b94ef62e9475dc51a3..b7d033e36d136f097090fcf30c2ee577267a09e4 100644 --- a/tf_adapter/kernels/decode_image_ops.cc +++ b/tf_adapter/kernels/decode_image_ops.cc @@ -25,7 +25,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "ExternalSoftDp.h" +#include "soft_dp/ExternalSoftDp.h" #include "tensorflow/core/framework/op_kernel.h" #include "tf_adapter/util/plugin_load_manager.h" #include diff --git a/tf_adapter/kernels/geop_dataset_op.cc b/tf_adapter/kernels/geop_dataset_op.cc index 5092362493e9b64009cf89b126bcdd4e5ff2f9ff..f1c06ef6eebb83e330d4e7ae27bfc071759f96e8 100644 --- a/tf_adapter/kernels/geop_dataset_op.cc +++ b/tf_adapter/kernels/geop_dataset_op.cc @@ -34,7 +34,10 @@ namespace data { namespace { class GEOPDatasetOp : public DatasetOpKernel { public: - explicit GEOPDatasetOp(OpKernelConstruction *ctx) : DatasetOpKernel(ctx), f_handle_(kInvalidHandle) { + explicit GEOPDatasetOp(OpKernelConstruction *ctx) + : DatasetOpKernel(ctx), + f_handle_(kInvalidHandle), + lib_(nullptr) { FunctionMetadata::Params params; OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, "f", params, &func_metadata_)); } @@ -84,6 +87,9 @@ class GEOPDatasetOp : public DatasetOpKernel { string DebugString() const override { return "GEOPDatasetOp::Dataset"; } + GEOPDatasetOp *op_kernel_; + std::string tf_session_; + protected: Status AsGraphDefInternal(SerializationContext *ctx, DatasetGraphDefBuilder *b, Node **output) const override { return Status::OK(); @@ -168,8 +174,6 @@ class GEOPDatasetOp : public DatasetOpKernel { private: mutex mu_; }; - GEOPDatasetOp *op_kernel_; - std::string tf_session_; }; std::shared_ptr func_metadata_ = nullptr; FunctionLibraryRuntime::Handle f_handle_; diff --git a/tf_adapter/kernels/geop_npu.cc b/tf_adapter/kernels/geop_npu.cc index 17ac5fef214e9479c5efd836b066cd36266a1814..84861b25696bc9fecd4266904c4f6968b95265ce 100644 --- a/tf_adapter/kernels/geop_npu.cc +++ b/tf_adapter/kernels/geop_npu.cc @@ -116,8 +116,8 @@ Status BuildOutputTensorInfo(OpKernelContext *ctx, std::vector(static_cast(dst_ptr) + SECUREC_MEM_MAX_LEN); src_ptr = static_cast(static_cast(src_ptr) + SECUREC_MEM_MAX_LEN); } + REQUIRES_NOT_NULL(dst_ptr); + REQUIRES_NOT_NULL(src_ptr); auto err = memcpy_s(dst_ptr, left_size, src_ptr, left_size); if (err != EOK) { LOG(ERROR) << "[GEOP] Outputs mem copy failed, index:" << i << ", errret:" << err - << ", dst_ptr:" << (int64_t) dst_ptr << ", dst_size:" << left_size - << ", src_ptr:" << (int64_t) src_ptr << ", src_size:" << left_size; + << ", dst_ptr:" << (uintptr_t)dst_ptr << ", dst_size:" << left_size + << ", src_ptr:" << (uintptr_t)src_ptr << ", src_size:" << left_size; return 
errors::InvalidArgument("Outputs mem copy failed, index:", i); } } @@ -160,8 +162,8 @@ const int kMaxCacheNum = 10; const int kFatalSleepTime = 3000; GeOp::GeOp(OpKernelConstruction *ctx) - : AsyncOpKernel(ctx), init_flag_(false), build_flag_(false), shape_flag_(false), add_graph_flag_(false), - sess_init_flag_(false), compute_graph_empty_(false), data_format_(""), graph_id_(0), cache_graph_id_(1), + : AsyncOpKernel(ctx), init_flag_(false), build_flag_(false), add_graph_flag_(false), + sess_init_flag_(false), compute_graph_empty_(false), data_format_(""), graph_id_(0), is_initialized_graph_(false), need_iteration_(false), tf_session_(""), ge_session_(nullptr), job_type_("") { Initialize(ctx); } @@ -192,6 +194,7 @@ void GeOp::Initialize(OpKernelConstruction *ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("_NpuOptimizer", &sess_config)); std::map init_options = NpuAttrs::GetInitOptions(ctx); std::map pass_options = NpuAttrs::GetPassOptions(ctx); + iteration_per_loop_ = std::atoi(pass_options["iterations_per_loop"].c_str()); job_type_ = pass_options["job"]; if (GePlugin::GetInstance()->IsGlobal()) { LOG(INFO) << "[GEOP] GePlugin global, skip GePlugin init"; @@ -227,15 +230,14 @@ void GeOp::Finalize() { if (!SessionManager::GetInstance().IsGeSessionExist()) { if (!GePlugin::GetInstance()->IsGlobal()) { - Status save_ret = GenerateReport::GetInstance()->SaveUnsupportedInfo(); - if (save_ret != Status::OK()) { - LOG(WARNING) << "[GEOP] Save check report failed."; - } GePlugin::GetInstance()->Finalize(); LOG(INFO) << "[GEOP] GePlugin Finalize success"; } else { LOG(INFO) << "[GEOP] GePlugin global, skip GePlugin Finalize"; } + if (!GenerateReport::GetInstance()->SaveUnsupportedInfo().ok()) { + LOG(WARNING) << "[GEOP] Save check report failed."; + } } } } @@ -244,9 +246,9 @@ void GeOp::Finalize() { return; } -int GeOp::InitRebuildFlag() { +int GeOp::InitRebuildFlag(uint32_t cache_graph_id) { if (!build_flag_) { - LOG(INFO) << "[GEOP] tf session " << tf_session_ << ", graph id: " << cache_graph_id_ + LOG(INFO) << "[GEOP] tf session " << tf_session_ << ", graph id: " << cache_graph_id << " does not build yet, no need to check rebuild"; return 0; } @@ -254,18 +256,18 @@ int GeOp::InitRebuildFlag() { LOG(ERROR) << "[GEOP] GE session is nullptr"; return -1; } - if (!ge_session_->IsGraphNeedRebuild(cache_graph_id_)) { - LOG(INFO) << "[GEOP] tf session " << tf_session_ << ", graph id: " << cache_graph_id_ << " no need to rebuild"; + if (!ge_session_->IsGraphNeedRebuild(cache_graph_id)) { + LOG(INFO) << "[GEOP] tf session " << tf_session_ << ", graph id: " << cache_graph_id << " no need to rebuild"; return 0; } - LOG(INFO) << "[GEOP] The graph need rebuild, graph id " << cache_graph_id_; + LOG(INFO) << "[GEOP] The graph need rebuild, graph id " << cache_graph_id; // The graph need to rebuild, remove it from GE first. 
- LOG(INFO) << "[GEOP] tf session: " << tf_session_ << ", graph id: " << cache_graph_id_; - auto ret = ge_session_->RemoveGraph(cache_graph_id_); + LOG(INFO) << "[GEOP] tf session: " << tf_session_ << ", graph id: " << cache_graph_id; + auto ret = ge_session_->RemoveGraph(cache_graph_id); if (ret != ge::SUCCESS) { - LOG(ERROR) << "[GEOP] Failed to remove graph " << cache_graph_id_ << " from ge, error code " << ret; + LOG(ERROR) << "[GEOP] Failed to remove graph " << cache_graph_id << " from ge, error code " << ret; return -1; } @@ -316,39 +318,33 @@ void GeOp::ClearGraphIdCount(std::string &tf_session) { if (it != session_and_graph_id_map_.end()) { session_and_graph_id_map_.erase(it); } } -void GeOp::CacheShapeChangeGraphs() { +void GeOp::GetExecGraphId(OpKernelContext *ctx, uint32_t &cache_graph_id, + std::vector input_shapes) { size_t num = cache_graphs_.size(); - std::pair, uint32_t>::iterator, bool> ret; - uint32_t tmp_graph_id = 0; - if (num >= kMaxCacheNum) { - LOG(INFO) << "[GEOP] the cache vector size is : " << num << " , begin erase the least uesed"; - std::sort(graph_counts_.begin(), graph_counts_.end(), CmpValue); - uint32_t erased_graph_id = cache_graphs_[graph_counts_[0].first]; - cache_graphs_.erase(graph_counts_[0].first); - graph_counts_.erase(graph_counts_.begin()); - ge::Status status = ge_session_->RemoveGraph(erased_graph_id); - if (status != ge::SUCCESS) { LOG(WARNING) << "[GEOP] GE Remove Graph failed, ret : " << ToString(status); } - ret = cache_graphs_.insert(std::make_pair(inputs_shape_string_, erased_graph_id)); - tmp_graph_id = erased_graph_id; - } else { - ret = cache_graphs_.insert(std::make_pair(inputs_shape_string_, graph_id_ + num)); - tmp_graph_id = graph_id_ + num; - } - if (ret.second) { - build_flag_ = false; - compute_graph_empty_ = false; - graph_counts_.push_back(std::make_pair(inputs_shape_string_, 1)); - cache_graph_id_ = tmp_graph_id; - } else { + if (cache_graphs_.find(input_shapes) != cache_graphs_.end()) { for (auto &graph_count : graph_counts_) { - if (graph_count.first == inputs_shape_string_) { + if (graph_count.first == input_shapes) { graph_count.second += 1; break; } } - cache_graph_id_ = cache_graphs_[inputs_shape_string_]; + cache_graph_id = cache_graphs_[input_shapes]; build_flag_ = true; - shape_flag_ = false; + } else { + if (num >= kMaxCacheNum) { + LOG(INFO) << "[GEOP] the cache vector size is : " << num << " , begin erase the least uesed"; + std::sort(graph_counts_.begin(), graph_counts_.end(), CmpValue); + uint32_t erased_graph_id = cache_graphs_[graph_counts_[0].first]; + cache_graphs_.erase(graph_counts_[0].first); + graph_counts_.erase(graph_counts_.begin()); + ge::Status status = ge_session_->RemoveGraph(erased_graph_id); + if (status != ge::SUCCESS) { LOG(WARNING) << "[GEOP] GE Remove Graph failed, ret : " << ToString(status); } + cache_graph_id = erased_graph_id; + } else { + cache_graph_id = graph_id_ + num; + } + build_flag_ = false; + compute_graph_empty_ = false; } } @@ -371,7 +367,6 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { { mutex_lock lock{mu_}; bool res = IncrementGraphIdCount(tf_session_, graph_id_); - cache_graph_id_ = graph_id_; if (!res || graph_id_ < kInvalidGraphId) { OP_REQUIRES_ASYNC(ctx, false, errors::Unavailable("Get ge session failed."), done); return; @@ -393,33 +388,14 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { LOG(INFO) << "[GEOP] Begin GeOp::ComputeAsync" << ", kernel_name:" << geop_name << ", num_inputs:" << num_inputs << ", num_outputs:" << 
ctx->num_outputs(); int64 startTime = InferShapeUtil::GetCurrentTimestap(); - - if (!build_flag_) { - // record input shape - inputs_shape_string_.clear(); - for (uint32_t i = 0; i < num_inputs; i++) { - Tensor input(ctx->input(i)); - inputs_shape_string_.push_back(input.shape().DebugString()); - } - cache_graphs_.insert(std::make_pair(inputs_shape_string_, cache_graph_id_)); - graph_counts_.push_back(std::make_pair(inputs_shape_string_, 1)); - } else if (inputs_shape_string_.size() == num_inputs) { - for (uint32_t i = 0; i < num_inputs; i++) { - if (inputs_shape_string_.at(i) == ctx->input(i).shape().DebugString()) { - continue; - } else { - // input_shape change, build GEOP for one more time - inputs_shape_string_.at(i) = ctx->input(i).shape().DebugString(); - shape_flag_ = true; - } - } - if (shape_flag_) { CacheShapeChangeGraphs(); } - } else { - build_flag_ = false; - compute_graph_empty_ = false; + std::vector input_shapes; + for (int i = 0; i < ctx->num_inputs(); i++) { + input_shapes.push_back(ctx->input(i).shape().DebugString()); } - - auto ret = InitRebuildFlag(); + // if input shapes changed, cache graphs + uint32_t cache_graph_id; + GetExecGraphId(ctx, cache_graph_id, input_shapes); + auto ret = InitRebuildFlag(cache_graph_id); if (ret != 0) { OP_REQUIRES_ASYNC(ctx, false, errors::Unavailable("Failed to check rebuild flag"), done); return; @@ -464,7 +440,7 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { LOG(INFO) << "[GEOP] In GEOP computeAsync, kernel_name:" << geop_name << " ,TFadapter cost time: [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; LOG(INFO) << "[GEOP] TFadpter process graph success, GE parser begin, kernel_name:" << geop_name - << " ,tf session: " << tf_session_ << " ,graph id :" << cache_graph_id_; + << " ,tf session: " << tf_session_ << " ,graph id :" << cache_graph_id; // parser, tensorflow graph to ge graph std::shared_ptr model_parser = domi::ModelParserFactory::Instance()->CreateModelParser(domi::FrameworkType::TENSORFLOW); @@ -486,8 +462,8 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { return nullptr; } // get infershape - Graph subgraph(OpRegistry::Global()); - Status status = InferShapeUtil::getSubGraphFromFunctionDef(*func_def, &subgraph); + Graph subgraph(flib_def); + Status status = InferShapeUtil::GetSubGraphFromFunctionDef(*flib_def, *func_def, &subgraph); if (status != Status::OK()) { LOG(ERROR) << "[GEOP] Get subgraph from functiondef fail."; return nullptr; @@ -512,9 +488,6 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { } subgraph.ToGraphDef(sub_graph_def.get()); - // change function op to subgraph type - ChangeFunctionOpToSubgraph(*sub_graph_def.get(), *flib_def); - unique_ptr graph_def_out(std::move(sub_graph_def)); char *need_print = getenv("PRINT_MODEL"); @@ -533,7 +506,7 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { done); LOG(INFO) << "[GEOP] Tensorflow graph parse to ge graph success, kernel_name:" << geop_name - << " ,tf session: " << tf_session_ << " ,graph id: " << cache_graph_id_; + << " ,tf session: " << tf_session_ << " ,graph id: " << cache_graph_id; size_t nodes = compute_graph->GetAllNodesSize(); if (nodes == 0) { @@ -542,36 +515,40 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { int64 endTime = InferShapeUtil::GetCurrentTimestap(); LOG(INFO) << "[GEOP] End GeOp::ComputeAsync, compute_graph is empty, kernel_name:" << geop_name << ", ret_status:" << ToString(ge::SUCCESS) << " , tf session: " << tf_session_ - 
<< " ,graph id: " << cache_graph_id_ << " [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; + << " ,graph id: " << cache_graph_id << " [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; done(); return; } // convert to ge::graph ge::Graph ge_graph = ge::GraphUtils::CreateGraphFromComputeGraph(compute_graph); - ge_graph.SetNeedIteration(this->need_iteration_); + if (iteration_per_loop_ > 1) { + ge_graph.SetNeedIteration(this->need_iteration_); + } // call ge session addGraph api - status = ge_session_->AddGraph(cache_graph_id_, ge_graph); + status = ge_session_->AddGraph(cache_graph_id, ge_graph); if (status != ge::SUCCESS) { std::this_thread::sleep_for(std::chrono::milliseconds(kFatalSleepTime)); LOG(FATAL) << "[GEOP] call ge session add graph failed, kernel: " << geop_name << " ,tf session: " << tf_session_ - << ", graph id: " << cache_graph_id_; + << ", graph id: " << cache_graph_id; OP_REQUIRES_ASYNC(ctx, status == ge::SUCCESS, errors::Unavailable("[GEOP] GE session add graph failed, domi_ret : ", ToString(status)), done); } else { add_graph_flag_ = true; LOG(INFO) << "[GEOP] Add graph to ge session success, kernel_name:" << geop_name - << " ,tf session: " << tf_session_ << " ,graph id:" << cache_graph_id_; + << " ,tf session: " << tf_session_ << " ,graph id:" << cache_graph_id; } build_flag_ = true; + cache_graphs_.insert(std::make_pair(input_shapes, cache_graph_id)); + graph_counts_.push_back(std::make_pair(input_shapes, 1)); } else { if (compute_graph_empty_) { int64 endTime = InferShapeUtil::GetCurrentTimestap(); LOG(INFO) << "[GEOP] End GeOp::ComputeAsync, compute_graph is empty, kernel_name:" << geop_name << ", ret_status:" << ToString(ge::SUCCESS) << " , tf session: " << tf_session_ - << " ,graph id: " << cache_graph_id_ << " [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; + << " ,graph id: " << cache_graph_id << " [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; done(); return; } @@ -601,36 +578,24 @@ void GeOp::ComputeAsync(OpKernelContext *ctx, DoneCallback done) { OP_REQUIRES_OK_ASYNC(ctx, (BuildInputTensorInfo(ctx, inputs)), done); LOG(INFO) << "[GEOP] Call ge session RunGraphAsync, kernel_name:" << geop_name << " ,tf session: " << tf_session_ - << " ,graph id: " << cache_graph_id_; + << " ,graph id: " << cache_graph_id; // call ge session runGraphAsync api - ge::Status status = ge_session_->RunGraphAsync(cache_graph_id_, inputs, callback); + ge::Status status = ge_session_->RunGraphAsync(cache_graph_id, inputs, callback); if (status != ge::SUCCESS) { std::this_thread::sleep_for(std::chrono::milliseconds(kFatalSleepTime)); LOG(FATAL) << "[GEOP] call ge session RunGraphAsync Failed, kernel:" << geop_name << " ,tf session: " << tf_session_ - << " ,graph id: " << cache_graph_id_; + << " ,graph id: " << cache_graph_id; } OP_REQUIRES_ASYNC(ctx, status == ge::SUCCESS, errors::Unavailable("ge session run graph failed, ret_status:", ToString(status)), done); int64 endTime = InferShapeUtil::GetCurrentTimestap(); LOG(INFO) << "[GEOP] End GeOp::ComputeAsync, kernel_name:" << geop_name << ", ret_status:" << ToString(status) - << " ,tf session: " << tf_session_ << " ,graph id: " << cache_graph_id_ << " [" + << " ,tf session: " << tf_session_ << " ,graph id: " << cache_graph_id << " [" << ((endTime - startTime) / kMicrosToMillis) << " ms]"; return; } -void GeOp::ChangeFunctionOpToSubgraph(GraphDef &sub_graph_def, const FunctionLibraryDefinition &flib_def) { - std::vector function_names = flib_def.ListFunctionNames(); - for (NodeDef &node_def : 
*sub_graph_def.mutable_node()) { - for (string func_name : function_names) { - if (node_def.op() == func_name) { - node_def.set_op(SubGraph); - LOG(INFO) << "Node " << node_def.name() << " change op type from " << func_name << " to " << SubGraph; - } - } - } -} - void GeOp::AddNodeAttrs(Node *node, bool &is_initialize) { // Add dp custom kernel label if (node->type_string() == "IteratorGetNext") { node->AddAttr("_kernel", "dp"); } diff --git a/tf_adapter/kernels/geop_npu.h b/tf_adapter/kernels/geop_npu.h index 6cfb6f339558d52152db2f4a56d8b01bdcb2e041..518e69217b52f4a4243ca7d0a92f40399d8949bf 100644 --- a/tf_adapter/kernels/geop_npu.h +++ b/tf_adapter/kernels/geop_npu.h @@ -60,9 +60,6 @@ class GeOp : public AsyncOpKernel { const FunctionDef &func_def, const std::vector &input_vec, GraphDef &graph_def, bool &is_initialize); - // Find and change op type to SubGraph - void ChangeFunctionOpToSubgraph(GraphDef &graph_def, const FunctionLibraryDefinition &flib_def); - // prepare input tensor Status BuildInputTensorInfo(OpKernelContext *ctx, std::vector &inputs); @@ -74,7 +71,7 @@ class GeOp : public AsyncOpKernel { private: void AddNodeAttrs(Node *node, bool &is_initialize); - int InitRebuildFlag(); + int InitRebuildFlag(uint32_t cache_graph_id); bool IncrementGraphIdCount(std::string &tf_session, uint32_t &graph_id); @@ -82,7 +79,8 @@ class GeOp : public AsyncOpKernel { void ClearGraphIdCount(std::string &tf_session); - void CacheShapeChangeGraphs(); + void GetExecGraphId(OpKernelContext *ctx, uint32_t &cache_graph_id, + std::vector input_shapes); private: static const std::string INPUT_DESC; @@ -96,7 +94,6 @@ class GeOp : public AsyncOpKernel { bool init_flag_; bool build_flag_; - bool shape_flag_; bool add_graph_flag_; bool sess_init_flag_; bool compute_graph_empty_; @@ -104,17 +101,16 @@ class GeOp : public AsyncOpKernel { NameAttrList function_; std::string data_format_; uint32_t graph_id_; - uint32_t cache_graph_id_; bool is_initialized_graph_; bool need_iteration_; std::string tf_session_; ge::Session *ge_session_; std::string job_type_; - std::vector inputs_shape_string_; std::map, uint32_t> cache_graphs_; std::vector, uint32_t>> graph_counts_; std::map sess_options_; static std::unordered_map session_and_graph_id_map_; + uint32_t iteration_per_loop_; }; } // namespace tensorflow #endif // TENSORFLOW_KERNELS_GEOP_NPU_H_ diff --git a/tf_adapter/kernels/host_queue_dataset_op.cc b/tf_adapter/kernels/host_queue_dataset_op.cc index 16a442699d12111583d721e5299bc95cb742bfa0..798b59024ac4c72a3a478635c949848639b9fc5b 100644 --- a/tf_adapter/kernels/host_queue_dataset_op.cc +++ b/tf_adapter/kernels/host_queue_dataset_op.cc @@ -37,6 +37,7 @@ limitations under the License. 
#include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/util/env_var.h" #include "tf_adapter/common/common.h" +#include "tf_adapter/util/npu_attrs.h" #include #include #include @@ -49,7 +50,6 @@ namespace { using namespace std; using namespace tdt; -const static int kMaxDeviceId = 7; const static uint32_t kMaxValue = 128; // total memory usage controlled below 2G const uint64_t kTotalBytes = 2147483648; @@ -67,27 +67,16 @@ class HostQueueDatasetOp : public DatasetOpKernel { OP_REQUIRES_OK(ctx, ctx->GetAttr("output_types", &output_types_)); OP_REQUIRES_OK(ctx, ctx->GetAttr("output_shapes", &output_shapes_)); LOG(INFO) << "Start to init tdt."; - string lib_path = "libdatatransfer.so"; - handle_ = dlopen(lib_path.c_str(), RTLD_NOW); - OP_REQUIRES(ctx, handle_ != nullptr, errors::InvalidArgument("libdatatransfer.so dlopen failed.")); - init_api_ = (InitFunc) dlsym(handle_, "TdtHostInit"); - push_api_ = (PushDataFunc) dlsym(handle_, "TdtHostPushData"); - destroy_api_ = (DestroyFunc) dlsym(handle_, "TdtHostDestroy"); - OP_REQUIRES(ctx, init_api_ != nullptr && push_api_ != nullptr && destroy_api_ != nullptr, - errors::InvalidArgument("dlsym tdt API failed.")); - int64 id = -1; - OP_REQUIRES_OK(ctx, ReadInt64FromEnvVar("DEVICE_ID", -1, &id)); - - OP_REQUIRES(ctx, id >= 0 && id <= kMaxDeviceId, errors::InvalidArgument("device_id should be in [0, 7].")); - uint32_t u_id = (uint32_t) id; - int32_t tdt_status = (*init_api_)(u_id); + uint32_t device_id = 0; + OP_REQUIRES_OK(ctx, GetEnvDeviceID(device_id)); + int32_t tdt_status = TdtHostInit(device_id); OP_REQUIRES(ctx, tdt_status == 0, errors::InvalidArgument("Tdt client init failed.")); tdt_release = false; } ~HostQueueDatasetOp() { LOG(INFO) << "Start to destroy tdt."; if (!tdt_release) { - int32_t tdt_status = (*destroy_api_)(); + int32_t tdt_status = TdtHostDestroy(); if (tdt_status != 0) { LOG(ERROR) << "Tdt client close failed."; } else { @@ -95,12 +84,6 @@ class HostQueueDatasetOp : public DatasetOpKernel { tdt_release = true; } } - if (handle_ != nullptr) { - dlclose(handle_); - LOG(INFO) << "dlclose handle finish."; - } else { - LOG(INFO) << "handle is null."; - } } void MakeDataset(OpKernelContext *ctx, DatasetBase **output) override { std::vector inputs; @@ -110,7 +93,7 @@ class HostQueueDatasetOp : public DatasetOpKernel { OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input)); inputs.push_back(input); } - *output = new (nothrow) Dataset(ctx, this, inputs, channel_name_, output_types_, output_shapes_); + *output = new (nothrow) Dataset(ctx, inputs, channel_name_, output_types_, output_shapes_); OP_REQUIRES(ctx, *output != nullptr, errors::InvalidArgument("Data process host queue dataset op: new dataset failed.")); } @@ -118,11 +101,10 @@ class HostQueueDatasetOp : public DatasetOpKernel { private: class Dataset : public DatasetBase { public: - Dataset(OpKernelContext *ctx, HostQueueDatasetOp *op_kernel, const std::vector &inputs, - const string &channelName, const DataTypeVector &outputTypes, - const vector &outputShapes) - : DatasetBase(DatasetContext(ctx)), op_kernel_(op_kernel), inputs_(inputs), channel_name_(channelName), - output_types_(outputTypes), output_shapes_(outputShapes) { + Dataset(OpKernelContext *ctx, const std::vector &inputs, const string &channelName, + const DataTypeVector &outputTypes, const vector &outputShapes) + : DatasetBase(DatasetContext(ctx)), inputs_(inputs), channel_name_(channelName), output_types_(outputTypes), + output_shapes_(outputShapes) { for (const auto &input : 
inputs_) { input->Ref(); } } @@ -130,8 +112,6 @@ class HostQueueDatasetOp : public DatasetOpKernel { for (const auto &input : inputs_) { input->Unref(); } } - HostQueueDatasetOp *kernel() const { return op_kernel_; } - unique_ptr MakeIteratorInternal(const string &prefix) const override { return unique_ptr(new (nothrow) Iterator({this, strings::StrCat(prefix, "::HostQueue")})); } @@ -221,18 +201,20 @@ class HostQueueDatasetOp : public DatasetOpKernel { } void SendDataThread(const std::shared_ptr &ctx) { vector args; - while (!cancelled_) { + while (true) { { mutex_lock lck(mu_); - if (buffer_.empty()) { + while (!cancelled_ && !finish_send_ && buffer_.empty()) { RecordStop(ctx.get()); cond_var_.wait(lck); RecordStart(ctx.get()); } - } - - { - mutex_lock l(mu_); + if (cancelled_ || finish_send_) { + LOG(INFO) << "Host queue " << dataset()->channel_name_ + << " push data thread exit with cancelled: " << cancelled_ << ", finished:" << finish_send_ + << " when wait data."; + return; + } if (buffer_.front().host_thread_finished) { std::vector items; DataItem end_item; @@ -241,16 +223,17 @@ class HostQueueDatasetOp : public DatasetOpKernel { LOG(INFO) << "Push data finish, end_of_sequence_ is true."; } else { end_item.dataType_ = TDT_ABNORMAL; - LOG(ERROR) << "Get data failed."; + LOG(ERROR) << "Get data failed " << buffer_.front().status.ToString(); } items.emplace_back(end_item); - int32_t tdt_status = (*(dataset()->kernel()->push_api_))(dataset()->channel_name_, items); - if (tdt_status != 0) { LOG(ERROR) << "Push the end data to tdt failed."; } + int32_t tdt_status = TdtHostPushData(dataset()->channel_name_, items); + if (tdt_status != 0) { LOG(INFO) << "End training as tdt host push end data failed " << tdt_status; } cancelled_ = true; cond_var_.notify_all(); return; } else { args = buffer_.front().value; + buffer_.pop_front(); } } @@ -269,7 +252,8 @@ class HostQueueDatasetOp : public DatasetOpKernel { std::shared_ptr(const_cast(tensor.tensor_data().data()), [](void *elem) {}); } else if (tensor.dtype() == DT_STRING) { if (tensor.dims() != 0) { - LOG(ERROR) << "Input of DT_STRING type should be scalar, current dims:" + LOG(ERROR) << "input of DT_STRING type should be scalar," + " current dims:" << tensor.dims(); mutex_lock lck(mu_); cancelled_ = true; @@ -280,30 +264,29 @@ class HostQueueDatasetOp : public DatasetOpKernel { data_item.dataLen_ = value.size(); data_item.dataPtr_ = std::shared_ptr(const_cast(value.data()), [](void *elem) {}); } else { - LOG(ERROR) << "Unexpected data type."; + LOG(ERROR) << "Unexpected data type " << DataTypeString(tensor.dtype()); mutex_lock lck(mu_); cancelled_ = true; cond_var_.notify_all(); return; } items.push_back(data_item); - // total_bytes is smaller than total_bytes_ total_bytes += tensor.TotalBytes(); } // call tdt interface - int32_t tdt_status = (*(dataset()->kernel()->push_api_))(dataset()->channel_name_, items); - if (tdt_status != 0 || cancelled_ || finish_send_) { + int32_t tdt_status = TdtHostPushData(dataset()->channel_name_, items); + if (tdt_status != 0) { + LOG(INFO) << "End training as tdt host push data failed " << tdt_status; mutex_lock lck(mu_); cancelled_ = true; - LOG(INFO) << "End training."; cond_var_.notify_all(); return; } - mutex_lock lck(mu_); - buffer_.pop_front(); - // total_bytes is smaller than total_bytes_ - total_bytes_ -= total_bytes; - cond_var_.notify_all(); + { + mutex_lock lck(mu_); + total_bytes_ -= total_bytes; + cond_var_.notify_all(); + } } } @@ -383,14 +366,16 @@ class HostQueueDatasetOp : public 
DatasetOpKernel { condition_variable cond_var_; string prefix_end_; std::deque buffer_ GUARDED_BY(mu_); - std::unique_ptr receive_thread_ GUARDED_BY(mu_); - std::unique_ptr send_thread_ GUARDED_BY(mu_); bool cancelled_ GUARDED_BY(mu_) = false; bool finish_send_ GUARDED_BY(mu_) = false; bool host_thread_finished_ GUARDED_BY(mu_) = false; uint64_t total_bytes_ GUARDED_BY(mu_) = 0; + // The following two thread must be the first member to be destructed, because tensorflow::Thread does not provide + // an explicit join function. If the thread is destructed after other members, such as buffer_, when the thread + // joins, it will access the already destructed buffer_ , Resulting in an unknown error. + std::unique_ptr receive_thread_ GUARDED_BY(mu_); + std::unique_ptr send_thread_ GUARDED_BY(mu_); }; - HostQueueDatasetOp *op_kernel_; const std::vector inputs_; std::string channel_name_; const DataTypeVector output_types_; @@ -399,10 +384,6 @@ class HostQueueDatasetOp : public DatasetOpKernel { std::string channel_name_; DataTypeVector output_types_; vector output_shapes_; - void *handle_; - InitFunc init_api_; - PushDataFunc push_api_; - DestroyFunc destroy_api_; }; REGISTER_KERNEL_BUILDER(Name("HostQueueDataset").Device(DEVICE_CPU), HostQueueDatasetOp); diff --git a/tf_adapter/kernels/npu_aicore_ops.cc b/tf_adapter/kernels/npu_aicore_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..4589608dc49bf4cb59fdccc2cdbc962443194c6d --- /dev/null +++ b/tf_adapter/kernels/npu_aicore_ops.cc @@ -0,0 +1,123 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. foss@huawei.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "tensorflow/core/framework/op_kernel.h" +#include "tensorflow/core/framework/register_types.h" +#include "tensorflow/core/framework/tensor.h" +#include "tensorflow/core/framework/tensor_shape.h" +#include "tensorflow/core/framework/bounds_check.h" +#include "tensorflow/core/lib/core/status.h" +#include "tensorflow/core/lib/strings/str_util.h" +#include "tensorflow/core/platform/logging.h" +#include "tf_adapter/common/common.h" + +namespace tensorflow { +template +class FastGeluOp : public tensorflow::OpKernel { + public: + explicit FastGeluOp(tensorflow::OpKernelConstruction *context) + : OpKernel(context) {} + ~FastGeluOp() {} + void Compute(tensorflow::OpKernelContext *context) override { + // Grab the input tensor + CHECK_NOT_NULL(context); + const Tensor &input_tensor = context->input(0); + auto input = input_tensor.flat(); + + // Create an output tensor + Tensor *output_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(), + &output_tensor)); + // handle any data type for input and output + auto output_flat = output_tensor->flat(); + + } +}; + +REGISTER_KERNEL_BUILDER( + Name("FastGelu") +. 
+Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluOp); + +REGISTER_KERNEL_BUILDER( + Name("FastGelu") +. +Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluOp); + +REGISTER_KERNEL_BUILDER( + Name("FastGelu") +. +Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluOp); + +template +class FastGeluGradOp : public tensorflow::OpKernel { + public: + explicit FastGeluGradOp(tensorflow::OpKernelConstruction *context) + : OpKernel(context) {} + ~FastGeluGradOp() {} + void Compute(tensorflow::OpKernelContext *context) override { + // Grab the grad input tensor + CHECK_NOT_NULL(context); + const Tensor &grad_input_tensor = context->input(0); + auto grad_input = grad_input_tensor.flat(); + + // Grab the input tensor + const Tensor &input_tensor = context->input(1); + auto input = input_tensor.flat(); + + OP_REQUIRES( + context, grad_input.size() == input.size(), + errors::InvalidArgument("grad_input size is not equal input size")); + + // Create an output tensor + Tensor *grad_output_tensor = nullptr; + OP_REQUIRES_OK(context, context->allocate_output(0, grad_input_tensor.shape(), + &grad_output_tensor)); + // handle any data type for input and output + auto grad_output_flat = grad_output_tensor->flat(); + + } +}; + +REGISTER_KERNEL_BUILDER( + Name("FastGeluGrad") +. +Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluGradOp); + +REGISTER_KERNEL_BUILDER( + Name("FastGeluGrad") +. +Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluGradOp); + +REGISTER_KERNEL_BUILDER( + Name("FastGeluGrad") +. +Device(tensorflow::DEVICE_CPU) +.TypeConstraint("T"), +FastGeluGradOp); +} // namespace tensorflow + + diff --git a/tf_adapter/ops/amct_ops.cc b/tf_adapter/ops/amct_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..31fffb3f8f2c8784e3cd1b5cd712ef8d80248dec --- /dev/null +++ b/tf_adapter/ops/amct_ops.cc @@ -0,0 +1,77 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" + +namespace tensorflow { +REGISTER_OP("AscendQuant") + .Attr("T: {float16, float32, float64}") + .Attr("quant_bits: int = 8") + .Attr("scale: float") + .Attr("offset: float") + .Input("x: T") + .Output("y: T") + .SetIsStateful() + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); }); + +REGISTER_OP("AscendWeightQuant") + .Attr("T: {float16, float32, float64}") + .Input("x: int8") + .Input("offset_w: int8") + .Output("y: T") + .SetIsStateful() + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); }); + +REGISTER_OP("AscendDequant") + .Attr("T: {float16, float32, float64}") + .Attr("ksize: list(int)") + .Attr("data_format: string = 'NHWC'") + .Input("x: T") + .Input("deq_scale: uint64") + .Output("y: T") + .SetIsStateful() + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); }); + +REGISTER_OP("AscendAntiQuant") + .Attr("T: {float16, float32, float64}") + .Attr("scale: float") + .Attr("offset: float") + .Input("x: T") + .Output("y: T") + .SetIsStateful() + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, c->input(0)); + return Status::OK(); }); +} // namespace tensorflow diff --git a/tf_adapter/ops/hccl_ops.cc b/tf_adapter/ops/hccl_ops.cc index ae09c65ea48beef17d593ac33d5244f6bf4829b5..6f8e40bbdda961676bbf8783e702bbd645ce5227 100644 --- a/tf_adapter/ops/hccl_ops.cc +++ b/tf_adapter/ops/hccl_ops.cc @@ -61,7 +61,7 @@ group: all devices of the group participating in this reduction. REGISTER_OP("HcomAllGather") .Input("input: T") .Output("output: T") - .Attr("T: {int8, int16, int32, float16, float32}") + .Attr("T: {int8, int16, int32, float16, float32, int64, uint64}") .Attr("group: string") .Attr("rank_size: int") .SetIsStateful() diff --git a/tf_adapter/ops/npu_aicore_ops.cc b/tf_adapter/ops/npu_aicore_ops.cc new file mode 100644 index 0000000000000000000000000000000000000000..2023c2456be391afcbf7519ce509639b6408f8b7 --- /dev/null +++ b/tf_adapter/ops/npu_aicore_ops.cc @@ -0,0 +1,34 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. foss@huawei.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
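The AscendQuant / AscendDequant family registered in tf_adapter/ops/amct_ops.cc above is shape-preserving (each shape function forwards input 0). A hypothetical construction sketch, assuming the ops are exposed through the same generated-ops module as the other adapter ops; the ascend_quant wrapper name and the scale/offset values are placeholders, not confirmed by this diff.

```python
import tensorflow as tf
from npu_bridge.helper import helper

gen_ops = helper.get_gen_ops()

x = tf.random.uniform([1, 32, 32, 16], dtype=tf.float32)
# quant_bits defaults to 8; scale and offset are required attrs.
y = gen_ops.ascend_quant(x=x, scale=0.1, offset=0.0)   # y.shape == x.shape
```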
+==============================================================================*/ + +#include "tensorflow/core/framework/common_shape_fns.h" +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" + +namespace tensorflow { +REGISTER_OP("FastGelu") + .Input("features: T") + .Output("activations: T") + .Attr("T: realnumbertype") + .SetShapeFn(tensorflow::shape_inference::UnchangedShape); + +REGISTER_OP("FastGeluGrad") + .Input("gradients: T") + .Input("features: T") + .Output("backprops: T") + .Attr("T: realnumbertype") + .SetShapeFn(tensorflow::shape_inference::MergeBothInputsShapeFn); +} // namespace tensorflow diff --git a/tf_adapter/optimizers/om_partition_subgraphs_pass.cc b/tf_adapter/optimizers/om_partition_subgraphs_pass.cc index c013267ef434af7cf2e9f738ebdde6a5127b640c..f07f91d3a76f2ce896f52b72d56eacf35b8a4fdc 100644 --- a/tf_adapter/optimizers/om_partition_subgraphs_pass.cc +++ b/tf_adapter/optimizers/om_partition_subgraphs_pass.cc @@ -47,7 +47,6 @@ limitations under the License. #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/public/session_options.h" #include "tf_adapter/common/common.h" -#include "tf_adapter/util/generate_report.h" #include "tf_adapter/util/infershape_util.h" #include "tf_adapter/util/npu_attrs.h" #include "tf_adapter/util/npu_ops_identifier.h" @@ -220,15 +219,10 @@ bool IsWhiteListSupport(const string &op_name, bool mix_compile_mode, const stri auto identifier = NpuOpsIdentifier::GetInstance(mix_compile_mode); - bool ans = (identifier->IsNpuSupported(op_name, node_name)) && !EndsWith(op_name, suffix_op) - && !EndsWith(op_name, suffix_op_v2) && !(op_name == "Const") && !(op_name == "_Arg") && !(op_name == "_Retval") - && !(op_name == "StringJoin"); + bool ans = (identifier->IsNpuSupported(op_name, node_name)) && !EndsWith(op_name, suffix_op) && + !EndsWith(op_name, suffix_op_v2) && !(op_name == "Const") && !(op_name == "_Arg") && + !(op_name == "_Retval") && !(op_name == "StringJoin"); if (!ans) { - GenerateReport::Details infos; - static const std::string message = "This op can only excute on host"; - infos.code = GenerateReport::NotSupport; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node_name, op_name, infos); auto ret = not_support_nodes.insert(op_name); if (ret.second) { LOG(INFO) << "node: " << op_name << " is not in white list, " @@ -265,25 +259,20 @@ Status SetIteratorShardName(Node *node) { return Status::OK(); } -bool IsWithoutNpuScope(Node *node) { +bool IsWithoutNpuScope(const NodeDef &node_def) { if (!compile_mode) { return false; } - bool is_npu_compile = false; - Status status = GetNodeAttr(node->attrs(), ATTR_VALUE_SCOPE_NAME, &is_npu_compile); - if (status.ok() && is_npu_compile) { return true; } + if (node_def.attr().count(ATTR_VALUE_SCOPE_NAME)) { return node_def.attr().at(ATTR_VALUE_SCOPE_NAME).b(); } return false; } -bool IsWithoutNpuScope(NodeDef &node_def) { - if (!compile_mode) { return false; } - if (node_def.attr().count(ATTR_VALUE_SCOPE_NAME)) { return node_def.attr().at(ATTR_VALUE_SCOPE_NAME).b(); } - return false; +bool IsWithoutNpuScope(Node *node) { + return IsWithoutNpuScope(node->def()); } // Make sure we don't recurse infinitely on recursive functions. 
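IsWithoutNpuScope above now reads the scope attribute straight from the NodeDef; on the Python side that attribute is set by npu_scope.without_npu_compile_scope (updated later in this diff). A minimal sketch of the user-facing side, assuming an installed npu_bridge and TF 1.15 graph mode:

```python
import tensorflow as tf
from npu_bridge.estimator.npu import npu_scope

x = tf.constant([1.0, 2.0, 3.0])
with npu_scope.without_npu_compile_scope():
    y = tf.reduce_sum(x)   # tagged to stay off the NPU

print(y.op.get_attr("_without_npu_compile"))   # True
```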
const int kMaxRecursionDepth = 10; bool IsNpuSupportingFunc(const string &func_name, FunctionLibraryDefinition *func_lib, int depth) { - LOG(INFO) << "function name is " << func_name << ", depth is " << depth; if (func_lib == nullptr) { LOG(ERROR) << "func lib is nullptr, function name is " << func_name; return false; @@ -294,13 +283,12 @@ bool IsNpuSupportingFunc(const string &func_name, FunctionLibraryDefinition *fun } const FunctionDef *func_def = func_lib->Find(func_name); if (func_def == nullptr) { - LOG(ERROR) << "func def is nullptr, function name is " << func_name; return false; } for (NodeDef node_def : func_def->node_def()) { if (node_def.op() == "Const") { LOG(INFO) << "Const in func can dump"; - } else if (!IsWhiteListSupport(node_def.op(), compile_mode, node_def.name()) || IsWithoutNpuScope(node_def)) { + } else if (!IsNpuSupportingNode(node_def, compile_mode, func_lib)) { return false; } for (const auto &item : node_def.attr()) { @@ -322,6 +310,17 @@ bool IsNpuSupportingFunc(Node *node, FunctionLibraryDefinition *func_lib, int de return true; } +bool IsNpuSupportingNode(const NodeDef &node_def, bool mix_compile_mode, FunctionLibraryDefinition *func_lib) { + if (IsWithoutNpuScope(node_def)) { return false; } + if (IsWhiteListSupport(node_def.op(), mix_compile_mode, node_def.name())) { return true; } + if (IsNpuSupportingFunc(node_def.op(), func_lib, 0)) { return true; } + return false; +} + +bool IsNpuSupportingNode(Node *node, bool mix_compile_mode, FunctionLibraryDefinition *func_lib) { + return IsNpuSupportingNode(node->def(), mix_compile_mode, func_lib); +} + Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, FunctionLibraryDefinition *func_lib, bool enableDP, bool mix_compile_mode) { int64 startTime = InferShapeUtil::GetCurrentTimestap(); @@ -364,54 +363,27 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, OrderedNodeSet outSet; for (Node *node : sortedNodes) { // 0 is function depth - if (!IsNpuSupportingFunc(node, func_lib, 0)) { - GenerateReport::Details infos; - static const std::string message = "This function node is not supported in npu."; - infos.code = GenerateReport::NotSupport; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node, infos); - continue; - } + if (!IsNpuSupportingFunc(node, func_lib, 0)) { continue; } if (!node->IsOp()) { // Ship Sink/Source nodes. 
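In the FindNpuSupportCandidates hunk that follows, data pre-processing (enableDP) sinks Iterator/IteratorV2 nodes only when they are wired to an IteratorGetNext consumer. That is the ordinary TF 1.15 input-pipeline pattern; a plain-TensorFlow sketch for reference:

```python
import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices(tf.zeros([128, 32])).batch(16).repeat()
iterator = dataset.make_initializable_iterator()   # Iterator / IteratorV2 node
next_batch = iterator.get_next()                   # IteratorGetNext node fed by it
```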
- GenerateReport::Details infos; - static const std::string message = "Sink/Source is not compute node."; - infos.code = GenerateReport::NotSupport; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node, infos); continue; } if (enableDP && (node->type_string() == "Iterator" || node->type_string() == "IteratorV2" || node->type_string() == "IteratorGetNext")) { - bool is_sink = false; if (node->type_string() == "IteratorGetNext") { for (Node *n : node->in_nodes()) { REQUIRES_NOT_NULL(n); LOG(INFO) << node->name() << " has in nodes " << n->name(); - if (n->type_string() == "Iterator" || n->type_string() == "IteratorV2") { - is_sink = true; - candidates->insert(node); - } + if (n->type_string() == "Iterator" || n->type_string() == "IteratorV2") { candidates->insert(node); } } } if (node->type_string() == "Iterator" || node->type_string() == "IteratorV2") { for (Node *n : node->out_nodes()) { REQUIRES_NOT_NULL(n); LOG(INFO) << node->name() << " has in nodes " << n->name(); - if (n->type_string() == "IteratorGetNext") { - is_sink = true; - candidates->insert(node); - } + if (n->type_string() == "IteratorGetNext") { candidates->insert(node); } } } - if (!is_sink) { - GenerateReport::Details infos; - static const std::string message = - "Only if Iterator/IteratorV2 connect to IteratorGetNext, will them be excuted on npu."; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node, infos); - } } else { // Const down when it need down if (node->type_string() == "Const") { @@ -419,31 +391,20 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, for (auto edge : node->in_edges()) { REQUIRES_NOT_NULL(edge); REQUIRES_NOT_NULL(edge->src()); - if (edge->IsControlEdge() && edge->src()->name() != "_SOURCE" - && IsWhiteListSupport(edge->src()->type_string(), mix_compile_mode, edge->src()->name()) - && !IsWithoutNpuScope(edge->src())) { + if (edge->IsControlEdge() && edge->src()->name() != "_SOURCE" && + IsNpuSupportingNode(edge->src(), compile_mode, func_lib)) { candidates->insert(node); ctrlEdgeNum++; break; } } - GenerateReport::Details infos; - static const std::string message = "This node is not satisfy the needs of Const excuted on npu."; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node, infos); if (ctrlEdgeNum >= 1) { continue; } } // normal needed down op - if (IsWhiteListSupport(node->type_string(), mix_compile_mode, node->name()) && !IsWithoutNpuScope(node)) { + if (IsNpuSupportingNode(node, compile_mode, func_lib)) { candidates->insert(node); } else { outSet.insert(node); - GenerateReport::Details infos; - static const std::string message = "This node is not supported on npu"; - infos.code = GenerateReport::NotSupport; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(node, infos); } } } @@ -465,11 +426,6 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, if (unsupportedFrames.find(cfInfo.frame_name) != unsupportedFrames.end()) { outSet.insert(*it); it = candidates->erase(it); - GenerateReport::Details infos; - static const std::string message = "This node is will not be excuted on npu in mix_compile_mode"; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(*it, infos); } else { ++it; } @@ -482,10 +438,7 @@ Status FindNpuSupportCandidates(const 
Graph &graph, OrderedNodeSet *candidates, auto node = *iter; if (mix_compile_mode && (node->type_string() == "Where")) { bool isInitializedGraph = InferShapeUtil::IsInitializedGraph(node); - if (isInitializedGraph) { - candidates->insert(node); - GenerateReport::GetInstance()->DeleteUnSupportedInfo(node); - } + if (isInitializedGraph) { candidates->insert(node); } } outSet.erase(iter); @@ -497,11 +450,6 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, if (IsRefType(dtypeDst) && candidates->count(edge->dst()) > 0) { candidates->erase(edge->dst()); outSet.insert(edge->dst()); - GenerateReport::Details infos; - static const std::string message = "This node is will not be excuted on npu because of REF input"; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(edge->dst(), infos); LOG(INFO) << "Remove node : " << edge->dst()->name() << " from candidates, because of node : " << node->name() << " REF input."; continue; @@ -509,14 +457,7 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, if (dtypeDst == DT_STRING || dtypeDst == DT_RESOURCE) { if (edge->dst()->type_string() == "Assert") { continue; } if (node->type_string() == "Const") { continue; } - if (candidates->erase(edge->dst()) > 0) { - outSet.insert(edge->dst()); - GenerateReport::Details infos; - static const std::string message = "An unsinked node link to this node by DT_STRING/DT_RESOURCE edge"; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(edge->dst(), infos); - } + if (candidates->erase(edge->dst()) > 0) { outSet.insert(edge->dst()); } } } } @@ -529,24 +470,12 @@ Status FindNpuSupportCandidates(const Graph &graph, OrderedNodeSet *candidates, if (IsRefType(dtypeDst) && candidates->count(edge->src()) > 0) { candidates->erase(edge->src()); outSet.insert(edge->src()); - GenerateReport::Details infos; - static const std::string message = "This node is will not be excuted on npu because of REF output"; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(edge->dst(), infos); LOG(INFO) << "Remove node : " << edge->dst()->name() << " from candidates, because of node : " << node->name() << " REF Output."; continue; } if (dtypeDst == DT_STRING || dtypeDst == DT_RESOURCE) { - if (candidates->erase(edge->src()) > 0) { - outSet.insert(edge->src()); - GenerateReport::Details infos; - static const std::string message = "This node link to an unsinked node by DT_STRING/DT_RESOURCE edge"; - infos.code = GenerateReport::ScenarioProblems; - infos.message = message; - GenerateReport::GetInstance()->AddUnSupportedInfo(edge->dst(), infos); - } + if (candidates->erase(edge->src()) > 0) { outSet.insert(edge->src()); } } } } @@ -1135,6 +1064,11 @@ class OMSplitter { Status SetOptions(std::map npu_optimizer_options, std::map pass_options); + // GEOp node(s) in the output graph. Not owned. + // both point to the function call node. + Node *GEOpNodeInputs_; + Node *GEOpNodeOutputs_; + private: // The subgraph extracted from the input graph, suitable for being turned // into a FunctionDef. Inputs are fed by _Arg nodes, and outputs are @@ -1150,11 +1084,6 @@ class OMSplitter { // Name that is used for the GEOp node. string functionDefName_; - // GEOp node(s) in the output graph. Not owned. - // both point to the function call node. 
- Node *GEOpNodeInputs_; - Node *GEOpNodeOutputs_; - // Maps from source (producer node/slot) and destination // (consumer node/slot) tensors in the input graph to _Arg numbers in // the subgraph. @@ -1830,6 +1759,8 @@ Status OMPartitionSubgraphsPass::ProcessGraph(std::unique_ptr *graph, Fun } else { return Status::OK(); } + + LOG(INFO) << "OMPartition subgraph_" << std::to_string(graph_num) << " begin."; LOG(INFO) << "mix_compile_mode is " << (mix_compile_mode ? "True" : "False"); LOG(INFO) << "iterations_per_loop is " << iterations_per_loop; @@ -2009,7 +1940,7 @@ Status OMPartitionSubgraphsPass::ProcessGraph(std::unique_ptr *graph, Fun } TF_RETURN_IF_ERROR(OMSplitter::OMPartitionSubgraphsInFunctions( OMSplitter::PARTITION_SUB_GRAPH_ATTR, graph, graph_format_value, func_lib, all_options, pass_options)); - LOG(INFO) << "OMPartition subgraph_" << std::to_string(graph_num) << "SubgraphsInFunctions success."; + LOG(INFO) << "OMPartition subgraph_" << std::to_string(graph_num) << " SubgraphsInFunctions success."; FixupSourceAndSinkEdges(graph->get()); if (need_print != nullptr && strcmp("1", need_print) == 0) { diff --git a/tf_adapter/optimizers/om_partition_subgraphs_pass.h b/tf_adapter/optimizers/om_partition_subgraphs_pass.h index 890592028e637c085f06c4dbe980ebbd46f9073a..ca4e55b4caa7e80ac11e7a008737f3157e74d010 100644 --- a/tf_adapter/optimizers/om_partition_subgraphs_pass.h +++ b/tf_adapter/optimizers/om_partition_subgraphs_pass.h @@ -46,6 +46,9 @@ Status MarkForPartition(const GraphOptimizationPassOptions &options, int &cluste // functions to introduce. Status OMPartitionSubgraphsInFunctions(string groupAttribute, const GraphOptimizationPassOptions &options, string graph_format); + +bool IsNpuSupportingNode(const NodeDef &node_def, bool mix_compile_mode, FunctionLibraryDefinition *func_lib); +bool IsNpuSupportingNode(Node *node, bool mix_compile_mode, FunctionLibraryDefinition *func_lib); } // namespace OMSplitter class OMPartitionSubgraphsPass : public GraphOptimizationPass { diff --git a/tf_adapter/python/npu_bridge/__init__.py b/tf_adapter/python/npu_bridge/__init__.py index c3b77fe3540a03e3da26c9a1665300aee3988e60..d6c2996b3028cb67ede8ec6e2cee6454b699db9e 100644 --- a/tf_adapter/python/npu_bridge/__init__.py +++ b/tf_adapter/python/npu_bridge/__init__.py @@ -1,20 +1,6 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ from npu_bridge.helper.helper import npu_bridge_handle from npu_bridge.helper.helper import version as __version__ from npu_bridge.helper import helper from npu_bridge.estimator.npu import npu_estimator from npu_bridge.hccl import hccl_ops -__all__ = [_s for _s in dir() if not _s.startswith('_')] +__all__ = [_s for _s in dir() if not _s.startswith('_')] \ No newline at end of file diff --git a/tf_adapter/python/npu_bridge/estimator/npu/keras_to_npu.py b/tf_adapter/python/npu_bridge/estimator/npu/keras_to_npu.py index 8a139a4108fb6a243b0c5a4597db24922bfd39be..9bafe38bca52f028dbd43f3eba13b6bace6c79d4 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/keras_to_npu.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/keras_to_npu.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/tf_adapter/python/npu_bridge/estimator/npu/mnist_softmax_npu.py b/tf_adapter/python/npu_bridge/estimator/npu/mnist_softmax_npu.py index dc28cdb709dd24436926ff29b00f39fad9e23d04..e30f42f81c4f265a8869d1effba5df5898782cb9 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/mnist_softmax_npu.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/mnist_softmax_npu.py @@ -1,17 +1,4 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ + """Simple MNIST classifier example with npu and timelines. Note: Please see further comments in the document. diff --git a/tf_adapter/python/npu_bridge/estimator/npu/mnist_with_estimator.py b/tf_adapter/python/npu_bridge/estimator/npu/mnist_with_estimator.py index 6b87c0a5d767eed57c4847f375ddfac7edc5fc5f..fd13a7163156fee1ba99c2efdcc1ded087b10cc0 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/mnist_with_estimator.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/mnist_with_estimator.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_common.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_common.py index a4da6a612404d1cf5a12023a4110015dc61f3fe7..1ffb1a96104566ee2736829728dd4586b60623f5 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_common.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_common.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ """Inter-process communication using HCOM.""" from __future__ import absolute_import @@ -172,12 +158,6 @@ class NPUBasics(object): index = device_info.get('Index', None) util_lib.check_nonnegative_integer(index, 'Index') - dev_index = device_info.get('dev_index', None) - util_lib.check_nonnegative_integer(dev_index, 'dev_index') - - server_id = device_info.get('server_id', None) - util_lib.check_not_none(device_info, 'server_id') - # 2. Get the rank_table_file and check it. rank_table_file = data.get('rank_table_file', None) util_lib.check_not_none(rank_table_file, 'rank_table_file') @@ -190,7 +170,7 @@ class NPUBasics(object): local_checkpoint_dir = data.get('local_checkpoint_dir', None) # 5. Init the JobInfo. 
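With dev_index and server_id dropped from DeviceInfo above, per-device settings now come from environment variables (DEVICE_ID is validated on the C++ side via GetEnvDeviceID later in this diff). An illustrative single-device setup; the values are placeholders:

```python
import os

os.environ["DEVICE_ID"] = "0"                      # checked by GePlugin / GetEnvDeviceID
os.environ["RANK_ID"] = "0"
os.environ["JOB_ID"] = "10086"                     # hypothetical job id
os.environ["LOCAL_CHECKPOINT_DIR"] = "/tmp/ckpt"   # hypothetical path
```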
- device_info = DeviceInfo(index=str(index), server_id=server_id, dev_index=dev_index) + device_info = DeviceInfo(index=str(index)) job_info = JobInfo(device_info=device_info, rank_table_file=rank_table_file, local_checkpoint_dir=local_checkpoint_dir, rank_size=rank_size) return job_info @@ -210,12 +190,6 @@ class NPUBasics(object): if(identity == ""): identity = os.getenv('RANK_ID', "") - dev_index = os.getenv('DEVICE_ID') - if dev_index != None and dev_index.isdigit() and int(dev_index) <=7 and int(dev_index) >= 0: - dev_index = int(dev_index) - else: - raise RuntimeError("DEVICE_ID environment variable should in [0, 7]") - checkpoint_dir = os.getenv('LOCAL_CHECKPOINT_DIR', "") # cann't get rank_size from env, set to default 1 @@ -224,7 +198,7 @@ class NPUBasics(object): print("set rank_size to default 1") rank_size = 1 - device_info = DeviceInfo(index=str(identity), server_id="192.168.1.1", dev_index=int(dev_index)) + device_info = DeviceInfo(index=str(identity)) job_info = JobInfo(job_id=job_id, heartbeat_time=heartbeat, device_info=device_info, diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_config.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_config.py index d3784439e11e57d0b5a925b3d55d1965a3643316..657bdeef2085453713744d54dcecd2ce11c9038b 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_config.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_config.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -21,6 +7,7 @@ from tensorflow.python.platform import gfile import os import re import json +from enum import Enum from npu_bridge.estimator.npu import util from tensorflow.python.estimator import run_config as run_config_lib from tensorflow.distribute.experimental import ParameterServerStrategy @@ -57,10 +44,13 @@ class NPURunConfig(run_config_lib.RunConfig): dump_config=None, stream_max_parallel_num=None, is_tailing_optimization=False, - horovod_mode = False, - graph_run_mode = 1, - op_debug_level = 0, - enable_scope_fusion_passes = None + horovod_mode=False, + graph_run_mode=1, + op_debug_level=0, + enable_scope_fusion_passes=None, + enable_exception_dump=0, + op_select_implmode=None, + optypelist_for_implmode=None ): """ Constructs a NPUConfig. @@ -95,7 +85,7 @@ class NPURunConfig(run_config_lib.RunConfig): log_step_count_steps: The frequency, in number of global steps, that the global step/sec and the loss will be logged during training. enabel_data_pre_proc: This is the switch of data preprocess. - precision_mode: enable or disable mix precision. + precision_mode: if train, default is: allow_fp32_to_fp16; if inference, default is: force_fp16. variable_format_optimize: enable or disable variable format optimize while graph engineer optimize process. 
mix_compile_mode: This is the swith of mix_compile_mode. When the value is @@ -109,6 +99,9 @@ class NPURunConfig(run_config_lib.RunConfig): dump_config: The dump configuration. stream_max_parallel_num: Specify the degree of parallelism of the AICPU / AICORE engine to achieve parallel execution between AICPU / AICORE operators. + op_select_implmode: Selecting whether the operator is implemented with high precision + or high performance. + optypelist_for_implmode: Operator list. """ # Check iterations_per_loop. @@ -164,6 +157,10 @@ class NPURunConfig(run_config_lib.RunConfig): experimental_distribute = None if tmp_cluster_spec and isinstance(distribute, ParameterServerStrategy): experimental_distribute = DistributeConfig(distribute, distribute, None) + util.check_nonnegative_integer(enable_exception_dump, "enable_exception_dump") + self.enable_exception_dump = enable_exception_dump + self._op_select_implmode = op_select_implmode + self._optypelist_for_implmode = optypelist_for_implmode super(NPURunConfig, self).__init__( model_dir=model_dir, @@ -182,18 +179,23 @@ class ProfilingConfig(): def __init__(self, enable_profiling=False, - enable_options=[]): + enable_options=[], + fp_point=None, + bp_point=None): """ Constructs a ProfilingConfig. Args: enable_profiling: Enable profiling, default is False. enable_options: Profiling options, list of `training_trace` or `task_trace` or `op_trace`. + fp_point: Forward propagation first node name. + bp_point: back propagation last node name. """ self._enable_profiling = enable_profiling self._enable_options = enable_options - + self._fp_point = fp_point + self._bp_point = bp_point class DumpConfig(): """Dump Config with NPU support.""" @@ -221,3 +223,12 @@ class DumpConfig(): self._dump_mode = dump_mode self._enable_dump_debug = enable_dump_debug self._dump_debug_mode = dump_debug_mode + +class NpuExecutePlacement(Enum): + """npu execute place option. """ + ALL = "all" + CUBE = "cube" + VECTOR = "vector" + TAISHAN = "taishan" + DVPP = "dvpp" + HOST = "host" diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_estimator.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_estimator.py index fb66d8b5ac4896b217d19525b12864d8d09679ca..d3df23f6d914abbfe206c9e3c72bf2b2a54d1c40 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_estimator.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_estimator.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
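A configuration sketch exercising the options added above: enable_exception_dump and op_select_implmode / optypelist_for_implmode on NPURunConfig, and fp_point / bp_point on ProfilingConfig. The profiling_config keyword, the node names, and the op_select_implmode value are assumptions for illustration; an Ascend environment with npu_bridge installed is required.

```python
from npu_bridge.estimator.npu.npu_config import NPURunConfig, ProfilingConfig

profiling = ProfilingConfig(
    enable_profiling=True,
    enable_options=["training_trace"],
    fp_point="conv2d/Conv2D",                                      # hypothetical forward-start node
    bp_point="gradients/conv2d/Conv2D_grad/Conv2DBackpropFilter")  # hypothetical backward-end node

config = NPURunConfig(
    model_dir="/tmp/npu_model",              # hypothetical path
    iterations_per_loop=100,
    profiling_config=profiling,              # keyword assumed from config._profiling_config usage
    enable_exception_dump=1,                 # must be a non-negative integer
    op_select_implmode="high_performance",   # placeholder value (high precision vs. high performance)
    optypelist_for_implmode=None)
```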
-# ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -566,6 +552,12 @@ class NPUEstimator(estimator_lib.Estimator): # check profiling ,and get valid options profiling_options = self.__check_profiling_options(config._profiling_config._enable_options) custom_op.parameter_map["profiling_options"].s = tf.compat.as_bytes(profiling_options) + if "task_trace" in profiling_options or "training_trace" in profiling_options: + if config._profiling_config._fp_point is None or config._profiling_config._bp_point is None: + logging.warning("profiling training_trace option should use with bp_point and fp_point") + else: + custom_op.parameter_map["bp_point"].s = tf.compat.as_bytes(config._profiling_config._bp_point) + custom_op.parameter_map["fp_point"].s = tf.compat.as_bytes(config._profiling_config._fp_point) else: # User disable profiling, custom_op.parameter_map["profiling_mode"].b = False @@ -580,6 +572,12 @@ class NPUEstimator(estimator_lib.Estimator): """ if config._precision_mode is not None: custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes(config._precision_mode) + else: + if config.graph_run_mode: + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_fp32_to_fp16") + else: + custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("force_fp16") + custom_op.parameter_map["enable_reduce_precision"].b = config._enable_reduce_precision def __load__variable_format_optimize(self, config, custom_op): @@ -656,6 +654,18 @@ class NPUEstimator(estimator_lib.Estimator): custom_op.parameter_map["job"].s = tf.compat.as_bytes('localhost') custom_op.parameter_map["task_index"].i = 0 + def _load_op_performance_config(self, config, custom_op): + """Load _load_op_performance_config ,and add to custom_optimizers + Args: + config: NPURunConfig. + custom_op: Customer optimizers. 
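For sessions built without NPUEstimator, the same switches that __load_profiling_options and _load_op_performance_config write above can be set directly on the NPU custom optimizer's parameter_map. The optimizer name "NpuOptimizer" and the node names below are assumptions not shown in this diff:

```python
import tensorflow as tf

config = tf.ConfigProto()
custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
custom_op.name = "NpuOptimizer"   # assumed optimizer name
custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_fp32_to_fp16")
custom_op.parameter_map["fp_point"].s = tf.compat.as_bytes("conv2d/Conv2D")   # hypothetical node
custom_op.parameter_map["bp_point"].s = tf.compat.as_bytes("gradients/conv2d/Conv2D_grad/Conv2DBackpropFilter")
custom_op.parameter_map["enable_exception_dump"].i = 1
custom_op.parameter_map["op_select_implmode"].s = tf.compat.as_bytes("high_performance")
```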
+ """ + if config._op_select_implmode is not None: + custom_op.parameter_map["op_select_implmode"].s = tf.compat.as_bytes(config._op_select_implmode) + if config._optypelist_for_implmode is not None: + custom_op.parameter_map["optypelist_for_implmode"].s = tf.compat.as_bytes(config._optypelist_for_implmode) + + def __load_graph_optimizers(self, config): """Change the session config and load the graph optimizers: GradFusionOptimizer and OMPartitionSubgraphsPass.""" @@ -691,6 +701,7 @@ class NPUEstimator(estimator_lib.Estimator): custom_op.parameter_map["op_debug_level"].i = config.op_debug_level if config.enable_scope_fusion_passes is not None: custom_op.parameter_map["enable_scope_fusion_passes"].s = tf.compat.as_bytes(config.enable_scope_fusion_passes) + custom_op.parameter_map["enable_exception_dump"].i = config.enable_exception_dump # add profiling options to custom_op self.__load_profiling_options(config, custom_op) @@ -712,6 +723,8 @@ class NPUEstimator(estimator_lib.Estimator): self.__load_ps_mode_config(config, custom_op) + self._load_op_performance_config(config, custom_op) + return config diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_hook.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_hook.py index 881532c2fc915a2175bf46706dcabeb2669e35a2..f1fc8bb697af5790fa4bb312a1d54524d2c2d26c 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_hook.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_hook.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ import tensorflow as tf from six.moves import queue as Queue import threading @@ -159,12 +145,11 @@ def broadcast_global_variables(root_rank, index): op_list = [] for var in tf.global_variables(): # the input and out tensor of HCOMBroadcast interface are list - if "float" in var.dtype.name: - inputs = [var] - outputs=hccl_ops.broadcast(tensor=inputs,root_rank=root_rank) - if outputs is not None: - op_list.append(outputs[0].op) - op_list.append(tf.assign(var, outputs[0])) + inputs = [var] + outputs=hccl_ops.broadcast(tensor=inputs,root_rank=root_rank) + if outputs is not None: + op_list.append(outputs[0].op) + op_list.append(tf.assign(var, outputs[0])) return tf.group(op_list) diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_manager.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_manager.py index 75a3efdda170afc315436d0304dda2dd21633a6b..4f5f46cb4803781a75bca0abf0e09062caeb12ab 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_manager.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_manager.py @@ -12,20 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -# -# Copyright 2019-2020 Huawei Technologies Co., Ltd. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. """LossScaleManager classes for mixed precision training.""" from __future__ import absolute_import from __future__ import division diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_optimizer.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_optimizer.py index 9a23778da231ee6c3f5d0a8860160e12280f44cb..0356f7f8f30cd167694e60880aef12da5cd6742e 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_optimizer.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_loss_scale_optimizer.py @@ -1,17 +1,4 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ + # Optimizer for mixed precision training for Davinci NPU. """Loss scaling optimizer.""" diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_optimizer.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_optimizer.py index 23ea950bf1e4abaa1171b1b1272cd2269aaa1aa5..8e89daca98a451e20286c5a71dd6bc86734ea0c3 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_optimizer.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_optimizer.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ """ Optimizer that implements distributed gradient reduction for NPU. 
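The broadcast_global_variables change above removes the float-only guard, so integer variables are broadcast as well. A usage sketch; it assumes an initialized HCCL/NPU multi-device environment:

```python
import tensorflow as tf
from npu_bridge.estimator.npu import npu_hook

w = tf.Variable(tf.zeros([4, 4], dtype=tf.int32))
bcast = npu_hook.broadcast_global_variables(root_rank=0, index=0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(bcast)   # rank-0 values pushed to every device, regardless of dtype
```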
""" diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_plugin.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_plugin.py index c2bb1ae22d42f709b5151333067f7840122d6214..e97623f9e9aa9421577e2d91f590b75fc23a3a7c 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_plugin.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_plugin.py @@ -1,19 +1,6 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from npu_bridge import tf_adapter from npu_bridge.estimator.npu import util +from tensorflow.python.platform import tf_logging as logging import json import os @@ -24,37 +11,46 @@ __option_exec_profiling_mode = str(tf_adapter.OPTION_EXEC_PROFILING_MODE) __option_exec_profiling_options = str(tf_adapter.OPTION_EXEC_PROFILING_OPTIONS) __option_graph_run_mode = str(tf_adapter.OPTION_GRAPH_RUN_MODE) __option_exec_option_exec_hccl_flag = str(tf_adapter.OPTION_EXEC_HCCL_FLAG) +__option_exec_fp_point = str(tf_adapter.OPTION_EXEC_PROFILING_FPPONIT_OPTIONS) +__option_exec_bp_point = str(tf_adapter.OPTION_EXEC_PROFILING_BPPONIT_OPTIONS) def npu_global_init(graph_run_mode = 1, op_debug_level = 0, is_tailing_optimization = False, enable_profiling = False, - enable_options = "training_trace", + enable_options = ["training_trace"], auto_tune_mode = None, precision_mode = None, enable_scope_fusion_passes = None, - ): + enable_exception_dump = 0, + fp_point = None, + bp_point = None): util.check_nonnegative_integer(graph_run_mode, "graph_run_mode") if graph_run_mode > 1: raise ValueError('"graph_run_mode" value must be 0 or 1') - + util.check_nonnegative_integer(enable_exception_dump, "enable_exception_dump") util.check_nonnegative_integer(op_debug_level, "op_debug_level") util.check_bool_type(is_tailing_optimization, "is_tailing_optimization") util.check_bool_type(enable_profiling, "enable_profiling") - graph_run_mode = str(graph_run_mode) - op_debug_level = str(op_debug_level) is_tailing_optimization = str(util.convert_bool_to_int(is_tailing_optimization)) - enable_profiling = str(util.convert_bool_to_int(enable_profiling)) + enable_profiling = util.convert_bool_to_int(enable_profiling) init={} - init[__option_graph_run_mode] = graph_run_mode - init[__op_debug_level] = op_debug_level + init[__option_graph_run_mode] = str(graph_run_mode) + init[__op_debug_level] = str(op_debug_level) init["ge.exec.isTailingOptimization"] = is_tailing_optimization - init[__option_exec_profiling_mode] = enable_profiling + init[__option_exec_profiling_mode] = str(enable_profiling) - if enable_profiling is True: - init[__option_exec_profiling_options] = str(util.check_profiling_options(enable_options)) + if enable_profiling: + enable_options = str(util.check_profiling_options(enable_options)) + init[__option_exec_profiling_options] = enable_options + if "task_trace" in enable_options or "training_trace" in enable_options: + if fp_point is None or bp_point is None: + 
logging.warning("profiling training_trace option should use with bp_point and fp_point") + else: + init[__option_exec_fp_point] = str(fp_point) + init[__option_exec_bp_point] = str(bp_point) else: init[__option_exec_profiling_options] = str("training_trace") @@ -63,17 +59,16 @@ def npu_global_init(graph_run_mode = 1, if precision_mode is not None: init["ge.exec.precision_mode"] = str(precision_mode) + else: + if graph_run_mode: + init["ge.exec.precision_mode"] = str("allow_fp32_to_fp16") + else: + init["ge.exec.precision_mode"] = str("force_fp16") if enable_scope_fusion_passes is not None: init[__option_exec_enable_scope_fusion_passes] = str(enable_scope_fusion_passes) - config_info = json.loads(os.environ.get('TF_CONFIG') or '{}') - task_env = config_info.get('task', {}) - task_type = task_env.get('type', None) - exec_hccl_flag = 1 - if task_type == 'evaluator': - exec_hccl_flag = 0 - init[__option_exec_option_exec_hccl_flag] = str(exec_hccl_flag) + init["ge.exec.enable_exception_dump"] = str(enable_exception_dump) init_options=tf_adapter.map_string_string(init) tf_adapter.PluginInit(init_options) diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_rnn.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_rnn.py index dae7d36a81c5f8218892601b9d4fa608737dbda0..dbd60742070c635dc2b343b67cafe6daac0161e7 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_rnn.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_rnn.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ import tensorflow as tf def npu_dynamic_rnn(cell, diff --git a/tf_adapter/python/npu_bridge/estimator/npu/npu_scope.py b/tf_adapter/python/npu_bridge/estimator/npu/npu_scope.py index 9747ce7a8e7811d718784b65ec75d3184c32322b..72702e146f32e0bc4a1f3931e186b4a28ce30399 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/npu_scope.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/npu_scope.py @@ -1,32 +1,34 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ """ Config the non npu compilation scope for NPU in mix compute mode. 
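A call sketch for the updated npu_global_init above: when training_trace profiling is enabled, fp_point and bp_point should be supplied together, otherwise only a warning is logged. Node names are placeholders, and an Ascend runtime is assumed.

```python
from npu_bridge.estimator.npu.npu_plugin import npu_global_init

npu_global_init(
    graph_run_mode=1,                      # training: precision_mode defaults to allow_fp32_to_fp16
    enable_profiling=True,
    enable_options=["training_trace"],
    fp_point="conv2d/Conv2D",              # hypothetical node names
    bp_point="gradients/conv2d/Conv2D_grad/Conv2DBackpropFilter",
    enable_exception_dump=1)
```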
""" import contextlib from tensorflow.core.framework import attr_value_pb2 from tensorflow.python.framework import ops +from tensorflow.python.util import compat +from npu_bridge.estimator.npu.npu_config import NpuExecutePlacement @contextlib.contextmanager def without_npu_compile_scope(): - ''' - Enable the non npu compilation of operators within the scope. - ''' - attrs = { - "_without_npu_compile" : attr_value_pb2.AttrValue(b=True) - } + ''' + Enable the non npu compilation of operators within the scope. + ''' + attrs = { + "_without_npu_compile" : attr_value_pb2.AttrValue(b=True) + } - with ops.get_default_graph()._attr_scope(attrs): - yield + with ops.get_default_graph()._attr_scope(attrs): + yield + +@contextlib.contextmanager +def npu_variable_scope(placement=NpuExecutePlacement.ALL): + ''' + Enable the node in the scope adding _variable_placement attr. + ''' + if placement not in NpuExecutePlacement: + raise ValueError("placement vaule must be in NpuExecutePlacement's vaule") + + attrs = { + "_variable_placement" : attr_value_pb2.AttrValue(s=compat.as_bytes(placement.value)) + } + with ops.get_default_graph()._attr_scope(attrs): + yield \ No newline at end of file diff --git a/tf_adapter/python/npu_bridge/estimator/npu/util.py b/tf_adapter/python/npu_bridge/estimator/npu/util.py index a00df761b93116f28f949ca0ba2624f2ad477e84..8082154b4ab97192da18d767ca00240f2937a55c 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu/util.py +++ b/tf_adapter/python/npu_bridge/estimator/npu/util.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -86,7 +72,7 @@ def format_string(value, name): return str(value) -def check_profiling_options(self, profiling_options=[]): +def check_profiling_options(profiling_options=[]): """Check profiling options . Args: profiling_options: Profiling options. diff --git a/tf_adapter/python/npu_bridge/estimator/npu_aicore_ops.py b/tf_adapter/python/npu_bridge/estimator/npu_aicore_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..ab1a6372a2164be27c61377bd54df003d2898917 --- /dev/null +++ b/tf_adapter/python/npu_bridge/estimator/npu_aicore_ops.py @@ -0,0 +1,46 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""All bert ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensorflow.contrib.util import loader +from tensorflow.python.platform import resource_loader +from tensorflow.python.framework import ops + + +from npu_bridge.helper import helper +npu_aicore_ops = helper.get_gen_ops(); + +@ops.RegisterGradient("FastGelu") +def _fast_gelu_grad(op, grad): + """The gradient for `fast_gelu`. + + Args: + op: The `fast_gelu` `Operation` that we are differentiating, which we can use + to find the inputs and outputs of the original op. + grad: Gradient with respect to the output of the `fast_gelu` op. + + Returns: + Gradients with respect to the input of `fast_gelu`. + """ + return [npu_aicore_ops.fast_gelu_grad(grad, op.inputs[0])] # List of one Tensor, since we have one input + +# go/tf-wildcard-import +#from tensorflow.python.util.tf_export import tf_export + diff --git a/tf_adapter/python/npu_bridge/estimator/npu_ops.py b/tf_adapter/python/npu_bridge/estimator/npu_ops.py index 83f3697c97ec896bbc2459a1d88bf23287cc349e..ab88bf56c4b1af5617839f3bdb6e4ec82030fc57 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu_ops.py +++ b/tf_adapter/python/npu_bridge/estimator/npu_ops.py @@ -12,20 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -# -# Copyright 2019-2020 Huawei Technologies Co., Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. """Ops for collective operations implemented using hccl.""" from __future__ import absolute_import from __future__ import division diff --git a/tf_adapter/python/npu_bridge/estimator/npu_unary_ops.py b/tf_adapter/python/npu_bridge/estimator/npu_unary_ops.py index dd8b2e3cd923fec2796e58480f73ab8245f20fe8..a9b6733067146b6aeb844747025567f1d7c55583 100644 --- a/tf_adapter/python/npu_bridge/estimator/npu_unary_ops.py +++ b/tf_adapter/python/npu_bridge/estimator/npu_unary_ops.py @@ -12,20 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -# -# Copyright 2019-2020 Huawei Technologies Co., Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
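The npu_variable_scope context manager added to npu_scope.py above tags every op created inside it with a _variable_placement attribute taken from the NpuExecutePlacement enum, and rejects values outside the enum. A minimal sketch, assuming npu_bridge is installed (TF 1.15 graph mode):

```python
import tensorflow as tf
from npu_bridge.estimator.npu import npu_scope
from npu_bridge.estimator.npu.npu_config import NpuExecutePlacement

with npu_scope.npu_variable_scope(placement=NpuExecutePlacement.HOST):
    w = tf.get_variable("w", shape=[4, 4])

print(w.op.get_attr("_variable_placement"))   # b'host'
```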
"""All bert ops.""" diff --git a/tf_adapter/python/npu_bridge/hccl/hccl_ops.py b/tf_adapter/python/npu_bridge/hccl/hccl_ops.py index a25d1c69134f48f0d45dc6ad3ac2d82e6df99259..e15a4243b31e315ba05f4f9cb263e9a070844dc6 100644 --- a/tf_adapter/python/npu_bridge/hccl/hccl_ops.py +++ b/tf_adapter/python/npu_bridge/hccl/hccl_ops.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ ## @file hccl_ops.py # HCCL 算子API diff --git a/tf_adapter/python/npu_bridge/helper/helper.py b/tf_adapter/python/npu_bridge/helper/helper.py index 66dd0e27f748d32f1965759e4922768ca36b8467..64774fbd27ef56f51119ffff2c37b489631c9a15 100644 --- a/tf_adapter/python/npu_bridge/helper/helper.py +++ b/tf_adapter/python/npu_bridge/helper/helper.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ import tensorflow import npu_bridge import os diff --git a/tf_adapter/python/npu_bridge/image/image_ops.py b/tf_adapter/python/npu_bridge/image/image_ops.py index 8f3d8f372a4df35fad68a0185c770cd862a23b96..ecb7c6ccd786699fca46f046ee109d6dfbf3780f 100644 --- a/tf_adapter/python/npu_bridge/image/image_ops.py +++ b/tf_adapter/python/npu_bridge/image/image_ops.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ from tensorflow.contrib.util import loader from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader diff --git a/tf_adapter/python/npu_bridge/npu_cpu/npu_cpu_ops.py b/tf_adapter/python/npu_bridge/npu_cpu/npu_cpu_ops.py index ae7c199d550d671584837bc154c108f0834065a4..391241c46a0cb836a98ee1367e7044e5db070c3d 100644 --- a/tf_adapter/python/npu_bridge/npu_cpu/npu_cpu_ops.py +++ b/tf_adapter/python/npu_bridge/npu_cpu/npu_cpu_ops.py @@ -1,17 +1,3 @@ -# Copyright 2019-2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ from tensorflow.contrib.util import loader from tensorflow.python.framework import load_library from tensorflow.python.framework import ops diff --git a/tf_adapter/python/setup.py b/tf_adapter/python/setup.py index 82fb00bd498ac6463683d71d05bd7ba2a8a3f88f..6c76feae7c09421843fd1edbe2cc0974fe2d0c49 100644 --- a/tf_adapter/python/setup.py +++ b/tf_adapter/python/setup.py @@ -12,20 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -# -# Copyright 2019-2020 Huawei Technologies Co., Ltd. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. """npu bridge for tensorflow v1.15.0. diff --git a/tf_adapter/swig/ge_plugin.i b/tf_adapter/swig/ge_plugin.i index 02931535c784bf10a2c3178dcb1c489a93f775c1..50e65da5f76417a7b5de34971c2c69b5618c11e7 100644 --- a/tf_adapter/swig/ge_plugin.i +++ b/tf_adapter/swig/ge_plugin.i @@ -17,6 +17,9 @@ extern const char* const OPTION_EXEC_PROFILING_MODE; extern const char* const OPTION_EXEC_PROFILING_OPTIONS; extern const char* const OPTION_GRAPH_RUN_MODE; extern const char* const OPTION_EXEC_HCCL_FLAG; +extern const char* const OPTION_EXEC_PROFILING_FPPONIT_OPTIONS; +extern const char* const OPTION_EXEC_PROFILING_BPPONIT_OPTIONS; + extern void PluginInit(std::map& init_options); diff --git a/tf_adapter/util/ge_plugin.cc b/tf_adapter/util/ge_plugin.cc index 1f09948d253f5fe21548f8fb2a7f9956865b76a0..7864487b1e2cd14699b41ff338cabe9b64032279 100644 --- a/tf_adapter/util/ge_plugin.cc +++ b/tf_adapter/util/ge_plugin.cc @@ -27,18 +27,17 @@ limitations under the License. 
#include "framework/common/ge_inner_error_codes.h" #include "framework/common/types.h" -#include "framework/omg/parser/model_parser.h" #include "framework/omg/parser/parser_api.h" -#include "framework/omg/parser/parser_factory.h" #include "ge/ge_api.h" #include "ge/ge_api_types.h" #include "tdt/tdt_host_interface.h" -#include "tdt/tsd_client.h" #include "tensorflow/core/util/env_var.h" #include "tf_adapter/common/common.h" #include "tf_adapter/util/npu_attrs.h" #include "tf_adapter/util/npu_plugin.h" #include +#include "nlohmann/json.hpp" +using json = nlohmann::json; using namespace tensorflow; using namespace tdt; @@ -71,15 +70,33 @@ void GePlugin::Init(std::map &init_options, bool is_gl return; } + const char *tf_config = std::getenv("TF_CONFIG"); + int exec_hccl_flag = 1; + if (tf_config != nullptr) { + json config_info; + try { + config_info = json::parse(tf_config); + } catch (json::exception &e) { + LOG(WARNING) << "[GePlugin] Failed to convert TF_CONFIG info from string to json ,reason: " << e.what(); + } + if (config_info.is_object()) { + if (config_info["task"]["type"] == "ps") { + LOG(INFO) << "The ps process does not need to be initialized"; + return; + } + if (config_info["task"]["type"] == "evaluator") { + exec_hccl_flag = 0; + } + } + } + init_options[OPTION_EXEC_HCCL_FLAG] = std::to_string(exec_hccl_flag); + LOG(INFO) << "[GePlugin] graph run mode : " << init_options[ge::OPTION_GRAPH_RUN_MODE]; - // prepare options for ge Initialize - const int64 kMaxDeviceID = 7; - (void) ReadInt64FromEnvVar("DEVICE_ID", 0, &device_id_); - if (device_id_ < 0 || device_id_ > kMaxDeviceID) { - LOG(WARNING) << "[GePlugin] device_id should in [0, 7]. use default device id : 0."; - } + Status s = GetEnvDeviceID(device_id_); + if (!s.ok()) { LOG(FATAL) << s.error_message(); } init_options[ge::OPTION_EXEC_DEVICE_ID] = std::to_string(device_id_); + LOG(INFO) << "[GePlugin] device id : " << init_options[ge::OPTION_EXEC_DEVICE_ID]; const char *env_job_id = std::getenv("JOB_ID"); if (env_job_id != nullptr) { @@ -97,22 +114,24 @@ void GePlugin::Init(std::map &init_options, bool is_gl bool is_use_hcom = false; bool deploy_mode = false; - char *env_rank_id = std::getenv("RANK_ID"); - char *env_pod_name = std::getenv("POD_NAME"); char *env_rank_table_file = std::getenv("RANK_TABLE_FILE"); if ((env_rank_table_file != nullptr) && (rankSizeNum > 0)) { LOG(INFO) << "[GePlugin] env RANK_TABLE_FILE:" << env_rank_table_file; is_use_hcom = true; init_options[ge::OPTION_EXEC_RANK_TABLE_FILE] = env_rank_table_file; + char *env_pod_name = std::getenv("POD_NAME"); if (env_pod_name != nullptr) { deploy_mode = true; init_options[ge::OPTION_EXEC_POD_NAME] = env_pod_name; - } else if (env_rank_id != nullptr) { - LOG(INFO) << "[GePlugin] env RANK_ID:" << env_rank_id; - deploy_mode = false; - init_options[ge::OPTION_EXEC_RANK_ID] = env_rank_id; } else { - LOG(ERROR) << "[GePlugin] Can't find rank_id or pod_name in env."; + char *env_rank_id = std::getenv("RANK_ID"); + if (env_rank_id != nullptr) { + LOG(INFO) << "[GePlugin] env RANK_ID:" << env_rank_id; + deploy_mode = false; + init_options[ge::OPTION_EXEC_RANK_ID] = env_rank_id; + } else { + LOG(ERROR) << "[GePlugin] Can't find rank_id or pod_name in env."; + } } } @@ -124,7 +143,9 @@ void GePlugin::Init(std::map &init_options, bool is_gl // profiling configuration LOG(INFO) << "[GePlugin] profiling_mode : " << init_options[ge::OPTION_EXEC_PROFILING_MODE] - << ", profiling_options:" << init_options[ge::OPTION_EXEC_PROFILING_OPTIONS]; + << ", profiling_options:" << 
init_options[ge::OPTION_EXEC_PROFILING_OPTIONS] + << ", fp_point: " << init_options[ge::OPTION_EXEC_PROFILING_FPPONIT_OPTIONS] + << ", bp_point: " << init_options[ge::OPTION_EXEC_PROFILING_BPPONIT_OPTIONS]; // mix precision configuration LOG(INFO) << "[GePlugin] precision_mode : " << init_options[ge::PRECISION_MODE]; @@ -138,21 +159,17 @@ void GePlugin::Init(std::map &init_options, bool is_gl // scope fusion configuration LOG(INFO) << "[GePlugin] enable_scope_fusion_passes : " << init_options[ge::OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES]; + // exception dump configuration + LOG(INFO) << "[GePlugin] enable_exception_dump : " << init_options["ge.exec.enable_exception_dump"]; + // Open TsdClient first, then call GEInitialize - LOG(INFO) << "[GePlugin] Open TsdClient and Init tdt host."; + LOG(INFO) << "[GePlugin] Start Init tdt host."; int32_t ret = tdt::TdtHostInit(static_cast(device_id_)); if (ret != 0) { std::this_thread::sleep_for(std::chrono::milliseconds(kFatalSleepTime)); LOG(FATAL) << "[GePlugin] Tdt host init failed, tdt error code : " << ret; } - TDT_StatusT tdt_status = TsdOpen(static_cast(device_id_), static_cast(rankSizeNum)); - if (tdt_status != TDT_OK) { - std::this_thread::sleep_for(std::chrono::milliseconds(kFatalSleepTime)); - LOG(FATAL) << "[GePlugin] Open TsdClient failed, tdt error code : " << tdt_status - << ", error message : " << TDT_GET_ERROR_STR(tdt_status); - } - LOG(INFO) << "[GePlugin] Open TsdClient success and tdt host init success."; - + LOG(INFO) << "[GePlugin] Tdt host init succeed."; // ge Initialize ge::Status status = ge::GEInitialize(init_options); if (status != ge::SUCCESS) { @@ -187,15 +204,14 @@ void GePlugin::Finalize() { ge::Status status_parser = ge::ParserFinalize(); if (status_parser != ge::SUCCESS) { LOG(ERROR) << "[GePlugin] Parser finalize failed, ret : " << ToString(status); } - LOG(INFO) << "[GePlugin] Close TsdClient and destroy tdt."; + LOG(INFO) << "[GePlugin] Start destroy tdt host."; int32_t ret = tdt::TdtHostDestroy(); - if (ret != 0) { LOG(ERROR) << "[GePlugin] Close tdt failed, tdt_ret : " << ret; } - TDT_StatusT tdt_status = TsdClose(device_id_); - if (tdt_status != TDT_OK) { - LOG(ERROR) << "[GePlugin] Close TsdClient failed, tdt_ret : " << tdt_status; + if (ret != 0) { + LOG(ERROR) << "[GePlugin] Tdt host destroy failed, ret : " << ret; } else { - LOG(INFO) << "[GePlugin] Close TsdClient success."; + LOG(INFO) << "[GePlugin] Tdt host destroy succeed."; } + isInit_ = false; } diff --git a/tf_adapter/util/ge_plugin.h b/tf_adapter/util/ge_plugin.h index 5f7b3483f9196276e8be5acd6815dd1db0209a59..e83d73cb7ec049973d215007770443cc067fb28d 100644 --- a/tf_adapter/util/ge_plugin.h +++ b/tf_adapter/util/ge_plugin.h @@ -51,7 +51,7 @@ class GePlugin { ~GePlugin(); - int64 device_id_; + uint32_t device_id_; bool isInit_; bool isGlobal_; std::map init_options_; diff --git a/tf_adapter/util/generate_report.cc b/tf_adapter/util/generate_report.cc index 9ee6d53450faa1c3bf8296a4a0e3b06d76b75915..30b19cfc25dd04c3d971933001aab77e58c61c70 100644 --- a/tf_adapter/util/generate_report.cc +++ b/tf_adapter/util/generate_report.cc @@ -1,5 +1,17 @@ -/* Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Copyright (C) 2019-2020. Huawei Technologies Co., Ltd. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -10,16 +22,20 @@ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and -limitations under the License.*/ +limitations under the License. +==============================================================================*/ +#include "tf_adapter/util/generate_report.h" + +#include +#include #include "nlohmann/json.hpp" #include "tensorflow/core/platform/env.h" -#include "tf_adapter/util/generate_report.h" namespace tensorflow { using Json = nlohmann::json; const static uint32_t kInterval = 2; -const static std::string kUnsupportedInfoPath = "checkresult.tf.json"; +const static std::string kUnsupportedInfoPath = "check_result.tf.json"; // json file keys const static std::string kKeyName = "name"; @@ -31,9 +47,15 @@ const static std::string kKeyIsSupport = "is_support"; const static std::string kKeyMessage = "message"; GenerateReport::GenerateReport() { - char *need_save = std::getenv("ENABLE_NETWORK_ANALYSIS"); - if (need_save != nullptr && strcmp("1", need_save) == 0) { - save_report_ = true; + char current_path[PATH_MAX]; + if (getcwd(current_path, PATH_MAX) != nullptr){ + string path = current_path; + path = path + "/" + kUnsupportedInfoPath; + if (remove(path.c_str()) == -1){ + LOG(WARNING) << "[GenerateReport] Remove check report failed. path:" << path; + } else { + LOG(INFO) << "[GenerateReport] Remove check report success. 
path:" << path; + } } } @@ -45,11 +67,8 @@ GenerateReport *GenerateReport::GetInstance() { Status GenerateReport::AddUnSupportedInfo(Node *node, Details &infos) { return GenerateReport::AddUnSupportedInfo(node->name(), node->type_string(), infos); } -Status GenerateReport::AddUnSupportedInfo(const std::string &name, const std::string &type, Details &infos) { - if (!save_report_) { - return Status::OK(); - } +Status GenerateReport::AddUnSupportedInfo(const std::string &name, const std::string &type, Details &infos) { if (check_info_map_.find(name) != check_info_map_.end()) { return Status::OK(); } else { @@ -63,9 +82,6 @@ Status GenerateReport::AddUnSupportedInfo(const std::string &name, const std::st } Status GenerateReport::DeleteUnSupportedInfo(Node *node) { - if (!save_report_) { - return Status::OK(); - } auto info_iter = check_info_map_.find(node->name()); if (info_iter == check_info_map_.end()) { return Status::OK(); @@ -76,20 +92,21 @@ Status GenerateReport::DeleteUnSupportedInfo(Node *node) { } Status GenerateReport::SaveUnsupportedInfo() { - if (!save_report_) { + if (check_info_map_.empty()){ + LOG(INFO) << "[GenerateReport] All nodes are supported, no need to save report."; return Status::OK(); } Json graph_info; - for (auto info : check_info_map_) { - Json reason = {{kKeyCode, info.second.info_details.code}, {kKeyMessage, info.second.info_details.message}}; - Json op = {{kKeyName, info.second.name}, - {kKeyType, info.second.type}, - {kKeyIsSupport, info.second.is_support}, - {kKeyReason, reason}}; - graph_info[kKeyOp].push_back(op); - } std::string info_str; try { + for (auto info : check_info_map_) { + Json reason = {{kKeyCode, info.second.info_details.code}, {kKeyMessage, info.second.info_details.message}}; + Json op = {{kKeyName, info.second.name}, + {kKeyType, info.second.type}, + {kKeyIsSupport, info.second.is_support}, + {kKeyReason, reason}}; + graph_info[kKeyOp].push_back(op); + } info_str = graph_info.dump(kInterval, ' ', false, Json::error_handler_t::ignore); } catch (std::exception &e) { return errors::Internal("Failed to convert json to string ,reason:", e.what()); @@ -98,4 +115,6 @@ Status GenerateReport::SaveUnsupportedInfo() { } return tensorflow::WriteStringToFile(Env::Default(), kUnsupportedInfoPath, info_str); } + +GenerateReport::~GenerateReport(){}; } // namespace tensorflow diff --git a/tf_adapter/util/generate_report.h b/tf_adapter/util/generate_report.h index e4f35ad4d4923ef6009eb22fd0bba0ed0a48a0bc..705483ad892d87e6797486ad0a2e4637db8d2327 100644 --- a/tf_adapter/util/generate_report.h +++ b/tf_adapter/util/generate_report.h @@ -32,7 +32,7 @@ limitations under the License. // Op will be written to json if it can not sink to device during one excute. 
namespace tensorflow { class GenerateReport { - public: +public: struct Details { int code; std::string message; @@ -40,12 +40,18 @@ class GenerateReport { enum ReasonCode { TypeNoDefine = 1, TypeGray = 2, ScenarioProblems = 3, NotSupport = 4 }; static GenerateReport *GetInstance(); + Status AddUnSupportedInfo(const std::string &name, const std::string &type, Details &infos); + Status AddUnSupportedInfo(Node *node, Details &infos); + Status DeleteUnSupportedInfo(Node *node); + Status SaveUnsupportedInfo(); - private: + ~GenerateReport(); + +private: GenerateReport(); struct UnSupportedInfo { std::string name; @@ -54,7 +60,6 @@ class GenerateReport { Details info_details; }; std::map check_info_map_; - bool save_report_ = false; }; } // namespace tensorflow diff --git a/tf_adapter/util/infershape_util.cc b/tf_adapter/util/infershape_util.cc index 6aa07d1d981b9e00c437ee36ae12a86c5386de9d..090433ad61c125ac40973560e1631950cc5f8f36 100644 --- a/tf_adapter/util/infershape_util.cc +++ b/tf_adapter/util/infershape_util.cc @@ -79,18 +79,26 @@ Status InferShapeUtil::setArgShapeFromTensorShape(std::vector vecTensor, return Status::OK(); } -Status InferShapeUtil::getSubGraphFromFunctionDef(const FunctionDef &func_def, Graph *graph) { +Status InferShapeUtil::GetSubGraphFromFunctionDef(const FunctionLibraryDefinition &flib_def, + const FunctionDef &func_def, Graph *graph) { LOG(INFO) << "The signature name of FunctionDef is " << func_def.signature().name() << "."; InstantiationResult result; AttrSlice attrs(&func_def.attr()); TF_RETURN_IF_ERROR(InstantiateFunction( - func_def, attrs, [](const string &op, const OpDef **sig) { return OpRegistry::Global()->LookUpOpDef(op, sig); }, - &result)); + func_def, attrs, [&flib_def](const string &op, const OpDef **sig) { + Status s = OpRegistry::Global()->LookUpOpDef(op, sig); + if (!s.ok()) { + return flib_def.LookUpOpDef(op, sig); + } + return s; + }, &result)); + LOG(INFO) << "InstantiateFunction " << func_def.signature().name() << " success."; GraphConstructorOptions opts; opts.allow_internal_ops = true; opts.expect_device_spec = false; TF_RETURN_IF_ERROR(ConvertNodeDefsToGraph(opts, result.nodes, graph)); + LOG(INFO) << "ConvertNodeDefsToGraph " << func_def.signature().name() << " success."; return Status::OK(); } @@ -343,7 +351,7 @@ Status InferShapeUtil::InferShape(const std::vector &vecTensor, const Fu return errors::Internal("Input tensor num ", iTensorNums, " is less than arg num ", iInputArgNums, "."); } - TF_RETURN_IF_ERROR(getSubGraphFromFunctionDef(*func_def, graph)); + TF_RETURN_IF_ERROR(GetSubGraphFromFunctionDef(*flib_def, *func_def, graph)); // Control flow loops in the graph; we have to break them. 
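
The infershape_util.cc hunk above threads the FunctionLibraryDefinition into GetSubGraphFromFunctionDef so that ops missing from the global registry can still be instantiated. The sketch below shows that two-level lookup as a free function; it assumes the TensorFlow 1.15 headers the adapter already builds against, and the helper name LookUpOpWithFallback is illustrative rather than part of the patch.

```cpp
#include <string>

#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {

// Consult the global op registry first; if the op is not globally registered
// (for example, a function defined only in this graph's library), fall back
// to the FunctionLibraryDefinition, matching the lambda in the hunk above.
Status LookUpOpWithFallback(const FunctionLibraryDefinition &flib_def,
                            const string &op, const OpDef **sig) {
  Status s = OpRegistry::Global()->LookUpOpDef(op, sig);
  if (!s.ok()) {
    return flib_def.LookUpOpDef(op, sig);
  }
  return s;
}

}  // namespace tensorflow
```
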
std::vector NextIterationEdges; diff --git a/tf_adapter/util/infershape_util.h b/tf_adapter/util/infershape_util.h index b47ad53c6054d4a4650e9c6581c8e5230bb4036e..5e89bda0ff15410016f5e112a4a6fc777560671a 100644 --- a/tf_adapter/util/infershape_util.h +++ b/tf_adapter/util/infershape_util.h @@ -50,7 +50,8 @@ class InferShapeUtil { const FunctionLibraryDefinition *flib_def, const FunctionDef *func_def, Graph *graph); - static Status getSubGraphFromFunctionDef(const FunctionDef &func_def, Graph *graph); + static Status GetSubGraphFromFunctionDef(const FunctionLibraryDefinition &flib_def, + const FunctionDef &func_def, Graph *graph); static int64 GetCurrentTimestap(); static bool IsInitializedGraph(Node *node); diff --git a/tf_adapter/util/npu_attrs.cc b/tf_adapter/util/npu_attrs.cc index af853c43ab68b3b3b0a28ffb7f04f961acd21dd3..de110dea1e919f0b53902e685c707ab718e6881b 100644 --- a/tf_adapter/util/npu_attrs.cc +++ b/tf_adapter/util/npu_attrs.cc @@ -25,13 +25,45 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include "tdt/index_transform.h" #include "tf_adapter/util/npu_attrs.h" +#include "tensorflow/core/lib/strings/str_util.h" #include "securec.h" +#include "mmpa/mmpa_api.h" #include #include namespace tensorflow { - +Status GetEnvDeviceID(uint32_t &device_id) { + int64 phy_device_id = -1; + int64 logic_device_id = -1; + const char* tmp_ascend_device_id = std::getenv("ASCEND_DEVICE_ID"); + string env_ascend_device_id(tmp_ascend_device_id == nullptr ? "" : tmp_ascend_device_id); + const char* tmp_device_id = std::getenv("DEVICE_ID"); + string env_device_id(tmp_device_id == nullptr ? "" : tmp_device_id); + if (env_ascend_device_id.empty() && env_device_id.empty()) { + LOG(WARNING) << "[GePlugin] DEVICE_ID and ASCEND_DEVICE_ID is none, use default device id : 0"; + } else if (!env_ascend_device_id.empty()) { + if (!strings::safe_strto64(env_ascend_device_id, &logic_device_id)) { + return errors::InvalidArgument("ASCEND_DEVICE_ID is valid, not digit."); + } + if (logic_device_id < 0) { + return errors::InvalidArgument("ASCEND_DEVICE_ID should be >= 0."); + } + device_id = static_cast(logic_device_id); + } else { + if (!strings::safe_strto64(env_device_id, &phy_device_id)) { + return errors::InvalidArgument("DEVICE_ID is valid, not digit."); + } + if (phy_device_id < 0) { + return errors::InvalidArgument("DEVICE_ID should be >= 0."); + } + if (IndexTransform(static_cast(phy_device_id), device_id) != 0) { + return errors::InvalidArgument("get logic device id by DEVICE_ID failed."); + } + } + return Status::OK(); +} inline void split(const std::string &s, std::vector &result, const char *delchar = " ") { if (s.empty()) { return; } result.clear(); @@ -44,7 +76,9 @@ inline void split(const std::string &s, std::vector &result, const } char *p_tmp = nullptr; char *p = strtok_s(buffer, delchar, &p_tmp); - do { result.emplace_back(p); } while ((p = strtok_s(nullptr, delchar, &p_tmp))); + if (p != nullptr) { + do { result.emplace_back(p); } while ((p = strtok_s(nullptr, delchar, &p_tmp))); + } delete[] buffer; } @@ -112,6 +146,31 @@ inline Status checkDumpDebugMode(const string &dump_debug_mode) { } } +inline Status CheckPath(const string &input, string &output) { + if (mmIsDir(input.c_str()) != EN_OK) { + return errors::InvalidArgument("the path ", input.c_str(), " is not directory."); + } + char trusted_path[MMPA_MAX_PATH] = { "\0" }; + if (mmRealPath(input.c_str(), 
trusted_path, MMPA_MAX_PATH) != EN_OK) { + return errors::InvalidArgument("the path ", input.c_str(), " is invalid."); + } + if (mmAccess2(trusted_path, R_OK | W_OK) != EN_OK) { + return errors::InvalidArgument("the path ", input.c_str(), " does't have read, write permissions."); + } + output = trusted_path; + return Status::OK(); +} + +inline Status CheckOpImplMode(const string &op_select_implmode) { + std::set op_impl_mode_list = {"high_precision", "high_performance"}; + + if (op_impl_mode_list.find(op_select_implmode) != op_impl_mode_list.end()) { + return Status::OK(); + } else { + return errors::InvalidArgument("op select impl mode should be one of the list:[high_precision, high_performance]"); + } +} + std::map NpuAttrs::GetSessOptions(OpKernelConstruction *ctx) { std::map sess_options; std::string variable_format_optimize = std::to_string(true); @@ -126,6 +185,8 @@ std::map NpuAttrs::GetSessOptions(OpKernelConstruction std::string dump_debug_mode = "all"; std::string stream_max_parallel_num; string npuOptimizer; + std::string op_select_implmode; + std::string optypelist_for_implmode; if (ctx != nullptr && ctx->GetAttr("_NpuOptimizer", &npuOptimizer) == Status::OK()) { ctx->GetAttr("_variable_format_optimize", &variable_format_optimize); @@ -154,6 +215,8 @@ std::map NpuAttrs::GetSessOptions(OpKernelConstruction } } ctx->GetAttr("_stream_max_parallel_num", &stream_max_parallel_num); + ctx->GetAttr("_op_select_implmode", &op_select_implmode); + ctx->GetAttr("_optypelist_for_implmode", &optypelist_for_implmode); } // session options @@ -168,6 +231,8 @@ std::map NpuAttrs::GetSessOptions(OpKernelConstruction sess_options[ge::OPTION_EXEC_DUMP_MODE] = dump_mode; sess_options[ge::OPTION_EXEC_ENABLE_DUMP_DEBUG] = enable_dump_debug; sess_options[ge::OPTION_EXEC_DUMP_DEBUG_MODE] = dump_debug_mode; + sess_options[ge::OP_SELECT_IMPL_MODE] = op_select_implmode; + sess_options[ge::OPTYPELIST_FOR_IMPLMODE] = optypelist_for_implmode; return sess_options; } @@ -175,13 +240,15 @@ std::map NpuAttrs::GetSessOptions(OpKernelConstruction std::map NpuAttrs::GetDefaultInitOptions() { std::map init_options; init_options["ge.exec.isTailingOptimization"] = std::to_string(false); - init_options["ge.exec.precision_mode"] = ""; + init_options["ge.exec.precision_mode"] = "allow_fp32_to_fp16"; init_options[ge::OPTION_EXEC_PROFILING_MODE] = std::to_string(false); init_options[ge::OPTION_EXEC_PROFILING_OPTIONS] = "training_trace"; init_options[ge::AUTO_TUNE_MODE] = ""; init_options[ge::OPTION_GRAPH_RUN_MODE] = "1"; init_options[ge::OP_DEBUG_LEVEL] = "0"; init_options[ge::OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES] = ""; + init_options[ge::OPTION_EXEC_PROFILING_FPPONIT_OPTIONS] = ""; + init_options[ge::OPTION_EXEC_PROFILING_BPPONIT_OPTIONS] = ""; return init_options; } @@ -195,7 +262,10 @@ std::map NpuAttrs::GetInitOptions(OpKernelConstruction std::string graph_run_mode = "1"; std::string op_debug_level = "0"; std::string enable_scope_fusion_passes; + std::string enable_exception_dump; string npuOptimizer; + string bp_point; + string fp_point; if (ctx != nullptr && ctx->GetAttr("_NpuOptimizer", &npuOptimizer) == Status::OK()) { ctx->GetAttr("_is_tailing_optimization", &is_tailing_optimization); @@ -206,6 +276,9 @@ std::map NpuAttrs::GetInitOptions(OpKernelConstruction ctx->GetAttr("_graph_run_mode", &graph_run_mode); ctx->GetAttr("_op_debug_level", &op_debug_level); ctx->GetAttr("_enable_scope_fusion_passes", &enable_scope_fusion_passes); + ctx->GetAttr("_bp_point", &bp_point); + ctx->GetAttr("_fp_point", &fp_point); + 
ctx->GetAttr("_enable_exception_dump", &enable_exception_dump); } init_options["ge.exec.isTailingOptimization"] = is_tailing_optimization; @@ -219,6 +292,9 @@ std::map NpuAttrs::GetInitOptions(OpKernelConstruction init_options[ge::OPTION_GRAPH_RUN_MODE] = graph_run_mode; init_options[ge::OP_DEBUG_LEVEL] = op_debug_level; init_options[ge::OPTION_EXEC_ENABLE_SCOPE_FUSION_PASSES] = enable_scope_fusion_passes; + init_options[ge::OPTION_EXEC_PROFILING_BPPONIT_OPTIONS] = bp_point; + init_options[ge::OPTION_EXEC_PROFILING_FPPONIT_OPTIONS] = fp_point; + init_options["ge.exec.enable_exception_dump"] = enable_exception_dump; return init_options; } @@ -380,7 +456,12 @@ std::map NpuAttrs::GetAllAttrOptions(AttrSlice attrs) std::string graph_run_mode = "1"; std::string op_debug_level = "0"; std::string enable_scope_fusion_passes; + std::string enable_exception_dump; string npuOptimizer; + string bp_point; + string fp_point; + std::string op_select_implmode; + std::string optypelist_for_implmode; if (attrs.Find("_NpuOptimizer") != nullptr) { do_npu_optimizer = std::to_string(true); @@ -454,6 +535,17 @@ std::map NpuAttrs::GetAllAttrOptions(AttrSlice attrs) if (attrs.Find("_enable_scope_fusion_passes") != nullptr) { enable_scope_fusion_passes = attrs.Find("_enable_scope_fusion_passes")->s(); } + if (attrs.Find("_fp_point") != nullptr) { fp_point = attrs.Find("_fp_point")->s(); } + if (attrs.Find("_bp_point") != nullptr) { bp_point = attrs.Find("_bp_point")->s(); } + if (attrs.Find("_enable_exception_dump") != nullptr) { + enable_exception_dump = attrs.Find("_enable_exception_dump")->s(); + } + if (attrs.Find("_op_select_implmode") != nullptr) { + op_select_implmode = attrs.Find("_op_select_implmode")->s(); + } + if (attrs.Find("_optypelist_for_implmode") != nullptr) { + optypelist_for_implmode = attrs.Find("_optypelist_for_implmode")->s(); + } } all_options["variable_format_optimize"] = variable_format_optimize; @@ -480,6 +572,7 @@ std::map NpuAttrs::GetAllAttrOptions(AttrSlice attrs) all_options["graph_run_mode"] = graph_run_mode; all_options["op_debug_level"] = op_debug_level; all_options["enable_scope_fusion_passes"] = enable_scope_fusion_passes; + all_options["enable_exception_dump"] = enable_exception_dump; all_options["do_npu_optimizer"] = do_npu_optimizer; all_options["enable_data_pre_proc"] = enable_dp; @@ -489,6 +582,10 @@ std::map NpuAttrs::GetAllAttrOptions(AttrSlice attrs) all_options["lower_functional_ops"] = lower_functional_ops; all_options["job"] = job; all_options["task_index"] = task_index; + all_options["fp_point"] = fp_point; + all_options["bp_point"] = bp_point; + all_options["op_select_implmode"] = op_select_implmode; + all_options["optypelist_for_implmode"] = optypelist_for_implmode; return all_options; } @@ -539,6 +636,11 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options bool lower_functional_ops = false; string job = "default"; int task_index = 0; + string bp_point; + string fp_point; + int enable_exception_dump = 0; + string op_select_implmode; + string optypelist_for_implmode; const RewriterConfig &rewrite_options = options.session_options->config.graph_options().rewrite_options(); for (const auto &custom_optimizer : rewrite_options.custom_optimizers()) { @@ -555,7 +657,13 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options if (params.count("enable_dump")) { enable_dump = params.at("enable_dump").b(); } if (params.count("enable_dump_debug")) { enable_dump_debug = params.at("enable_dump_debug").b(); } if 
(enable_dump || enable_dump_debug) { - if (params.count("dump_path")) { dump_path = params.at("dump_path").s(); } + if (params.count("dump_path")) { + string tmp_path = params.at("dump_path").s(); + Status s = CheckPath(tmp_path, dump_path); + if (!s.ok()) { LOG(FATAL) << s.error_message(); } + } else { + LOG(FATAL) << "if use dump function, dump_path must be set."; + } } if (enable_dump) { if (params.count("dump_step")) { @@ -583,17 +691,30 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options if (params.count("is_tailing_optimization")) { is_tailing_optimization = params.at("is_tailing_optimization").b(); } - if (params.count("precision_mode")) { precision_mode = params.at("precision_mode").s(); } if (params.count("profiling_mode")) { profiling_mode = params.at("profiling_mode").b(); } if (params.count("profiling_options") && profiling_mode) { profiling_options = params.at("profiling_options").s(); } + if (params.count("fp_point")) { fp_point = params.at("fp_point").s(); } + if (params.count("bp_point")) { bp_point = params.at("bp_point").s(); } if (params.count("auto_tune_mode")) { auto_tune_mode = params.at("auto_tune_mode").s(); } - if (params.count("graph_run_mode")) { graph_run_mode = params.at("graph_run_mode").i(); } + if (params.count("graph_run_mode")) { + graph_run_mode = params.at("graph_run_mode").i(); + if (graph_run_mode > 1) { LOG(FATAL) << "graph_run_mode value must be 0 or 1"; } + } if (params.count("op_debug_level")) { op_debug_level = params.at("op_debug_level").i(); } if (params.count("enable_scope_fusion_passes")) { enable_scope_fusion_passes = params.at("enable_scope_fusion_passes").s(); } + if (params.count("precision_mode")) { + precision_mode = params.at("precision_mode").s(); + } else { + if (graph_run_mode) { + precision_mode = "allow_fp32_to_fp16"; + } else { + precision_mode = "force_fp16"; + } + } do_npu_optimizer = true; if (params.count("enable_data_pre_proc")) { enable_dp = params.at("enable_data_pre_proc").b(); } @@ -609,6 +730,21 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options } if (params.count("task_index")) { task_index = params.at("task_index").i(); } } + if (params.count("enable_exception_dump")) { enable_exception_dump = params.at("enable_exception_dump").i(); } + if (!params.count("op_select_implmode") && !params.count("optypelist_for_implmode")) { + op_select_implmode = "high_performance"; + } else if (params.count("op_select_implmode") && !params.count("optypelist_for_implmode")) { + op_select_implmode = params.at("op_select_implmode").s(); + Status s = CheckOpImplMode(op_select_implmode); + if (!s.ok()) { LOG(FATAL) << s.error_message(); } + } else if (params.count("optypelist_for_implmode") && !params.count("op_select_implmode")) { + LOG(FATAL) << "when use optypelist_for_implmode, op_select_implmode must be set."; + } else { + op_select_implmode = params.at("op_select_implmode").s(); + Status s = CheckOpImplMode(op_select_implmode); + if (!s.ok()) { LOG(FATAL) << s.error_message(); } + optypelist_for_implmode = params.at("optypelist_for_implmode").s(); + } } } @@ -625,6 +761,8 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options sess_options["dump_mode"] = dump_mode; sess_options["enable_dump_debug"] = std::to_string(enable_dump_debug); sess_options["dump_debug_mode"] = dump_debug_mode; + sess_options["op_select_implmode"] = op_select_implmode; + sess_options["optypelist_for_implmode"] = optypelist_for_implmode; 
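
The SetNpuOptimizerAttr hunk above also adds validation for the new implementation-mode options: op_select_implmode falls back to high_performance when neither option is given, optypelist_for_implmode is rejected unless op_select_implmode is set, and only high_precision or high_performance are accepted. A self-contained sketch of those rules follows; ValidateImplMode and the sample main() are illustrative, not the adapter's actual functions.

```cpp
#include <iostream>
#include <set>
#include <string>

// Returns an empty string and fills effective_mode when the combination is
// valid; otherwise returns an error message describing the violation.
std::string ValidateImplMode(bool has_mode, const std::string &mode,
                             bool has_typelist, std::string &effective_mode) {
  static const std::set<std::string> kAllowed = {"high_precision", "high_performance"};
  if (has_typelist && !has_mode) {
    return "when optypelist_for_implmode is used, op_select_implmode must be set";
  }
  effective_mode = has_mode ? mode : "high_performance";  // default when unset
  if (kAllowed.count(effective_mode) == 0) {
    return "op_select_implmode must be one of [high_precision, high_performance]";
  }
  return "";
}

int main() {
  std::string effective;
  // Neither option given: falls back to high_performance.
  std::string err = ValidateImplMode(false, "", false, effective);
  std::cout << (err.empty() ? "effective mode: " + effective : err) << std::endl;
  return 0;
}
```
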
init_options["is_tailing_optimization"] = std::to_string(is_tailing_optimization); init_options["precision_mode"] = precision_mode; @@ -632,11 +770,21 @@ Status NpuAttrs::SetNpuOptimizerAttr(const GraphOptimizationPassOptions &options if (profiling_mode && !checkProfilingOptions(profiling_options)) { LOG(FATAL) << "profiling options must be in 'training_trace', 'task_trace' or 'op_trace'"; } + if (profiling_mode && (profiling_options.find("task_trace") != string::npos || + profiling_options.find("training_trace") != string::npos)) { + if (bp_point == "" || fp_point == "") { + LOG(WARNING) << "profiling training_trace options should use with bp_point and fp_point"; + } else { + init_options["bp_point"] = bp_point; + init_options["fp_point"] = fp_point; + } + } init_options["profiling_options"] = profiling_options; init_options["auto_tune_mode"] = auto_tune_mode; init_options["graph_run_mode"] = std::to_string(graph_run_mode); init_options["op_debug_level"] = std::to_string(op_debug_level); init_options["enable_scope_fusion_passes"] = enable_scope_fusion_passes; + init_options["enable_exception_dump"] = std::to_string(enable_exception_dump); pass_options["do_npu_optimizer"] = std::to_string(do_npu_optimizer); pass_options["enable_data_pre_proc"] = std::to_string(enable_dp); diff --git a/tf_adapter/util/npu_attrs.h b/tf_adapter/util/npu_attrs.h index 1f6526ad1d48573dd6f516ace7f196fe86fea024..9e37c0beb60a423b915da6aa3f4bca8a3f6428af 100644 --- a/tf_adapter/util/npu_attrs.h +++ b/tf_adapter/util/npu_attrs.h @@ -39,6 +39,7 @@ limitations under the License. // single load all npu mode namespace tensorflow { +Status GetEnvDeviceID(uint32_t &device_id); class NpuAttrs { public: // This method returns instance Pointers diff --git a/tf_adapter/util/npu_ops_identifier.cc b/tf_adapter/util/npu_ops_identifier.cc index 6072ff7abeede8ed4799cf6d8345bd22991f5ec2..76b3e1f20dd4432822e8081dd4b2b0030028d01e 100644 --- a/tf_adapter/util/npu_ops_identifier.cc +++ b/tf_adapter/util/npu_ops_identifier.cc @@ -104,14 +104,7 @@ bool NpuOpsIdentifier::IsNpuSupported(const std::string &op_name, const std::str tensorflow::GenerateReport::GetInstance()->AddUnSupportedInfo(node_name, op_name, infos); return false; } - if (is_mix_ && ops_info_[op_name][kGray].is_boolean()) { - tensorflow::GenerateReport::Details infos; - static const std::string message = "This op will not be excuted on npu in mix_compile_mode."; - infos.code = tensorflow::GenerateReport::TypeGray; - infos.message = message; - tensorflow::GenerateReport::GetInstance()->AddUnSupportedInfo(node_name, op_name, infos); - return !ops_info_[op_name][kGray]; - } + if (is_mix_ && ops_info_[op_name][kGray].is_boolean()) { return !ops_info_[op_name][kGray]; } return true; } // Determine if the node is performance-sensitive on NPU, this should diff --git a/tf_adapter/util/npu_plugin.h b/tf_adapter/util/npu_plugin.h index fe483d7a9f0e0f199c64b6877866c64d125ce140..d432947d3208a48aecabd2ab417b4613f0c21a53 100644 --- a/tf_adapter/util/npu_plugin.h +++ b/tf_adapter/util/npu_plugin.h @@ -40,6 +40,8 @@ const char *const OPTION_EXEC_PROFILING_MODE = ge::OPTION_EXEC_PROFILING_MODE; const char *const OPTION_EXEC_PROFILING_OPTIONS = ge::OPTION_EXEC_PROFILING_OPTIONS; const char *const OPTION_GRAPH_RUN_MODE = ge::OPTION_GRAPH_RUN_MODE; const char* const OPTION_EXEC_HCCL_FLAG = ge::OPTION_EXEC_HCCL_FLAG; +const char* const OPTION_EXEC_PROFILING_FPPONIT_OPTIONS = ge::OPTION_EXEC_PROFILING_FPPONIT_OPTIONS; +const char* const OPTION_EXEC_PROFILING_BPPONIT_OPTIONS = 
ge::OPTION_EXEC_PROFILING_BPPONIT_OPTIONS; void PluginInit(std::map &init_options); diff --git a/tf_adapter/util/session_manager.cc b/tf_adapter/util/session_manager.cc index 8faffdf598363c2568daf31655cebe7a285624a3..91b27c3326b91f19bfc9c1e89d99eebe76b6f397 100644 --- a/tf_adapter/util/session_manager.cc +++ b/tf_adapter/util/session_manager.cc @@ -118,6 +118,10 @@ void SessionManager::PrintGeSessionOptions(std::map &s sess_options.erase(ge::VARIABLE_MEMORY_MAX_SIZE); } + LOG(INFO) << "[GEOP] op_select_implmode : " << sess_options[ge::OP_SELECT_IMPL_MODE]; + + LOG(INFO) << "[GEOP] optypelist_for_implmode : " << sess_options[ge::OPTYPELIST_FOR_IMPLMODE]; + // reuse memory env const char *disable_reuse_memory = std::getenv("DISABLE_REUSE_MEMORY"); if (disable_reuse_memory == nullptr) { @@ -132,6 +136,6 @@ void SessionManager::PrintGeSessionOptions(std::map &s << ", dump_path :" << sess_options[ge::OPTION_EXEC_DUMP_PATH] << ", dump_step :" << (dump_step.empty() ? "NA" : dump_step) << ", dump_mode :" << sess_options[ge::OPTION_EXEC_DUMP_MODE] - << ", enable_dump_enable :" << sess_options[ge::OPTION_EXEC_ENABLE_DUMP_DEBUG] + << ", enable_dump_debug :" << sess_options[ge::OPTION_EXEC_ENABLE_DUMP_DEBUG] << ", dump_debug_mode :" << sess_options[ge::OPTION_EXEC_DUMP_DEBUG_MODE]; } \ No newline at end of file
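
The GePlugin::Init hunk earlier in this patch parses TF_CONFIG so that a "ps" task skips GE initialization entirely and an "evaluator" task initializes with the HCCL flag turned off, while a malformed TF_CONFIG is only warned about. Below is a standalone sketch of that decision logic, assuming only nlohmann::json; ShouldInitGe and main() are illustrative names, not the patch's API.

```cpp
#include <cstdlib>
#include <iostream>
#include <string>

#include "nlohmann/json.hpp"

using json = nlohmann::json;

// Decide whether GE should be initialized for this process and whether the
// HCCL flag stays enabled, based on the TF_CONFIG environment variable.
// Returns false when initialization should be skipped (a "ps" task).
bool ShouldInitGe(int &exec_hccl_flag) {
  exec_hccl_flag = 1;
  const char *tf_config = std::getenv("TF_CONFIG");
  if (tf_config == nullptr) { return true; }
  json config_info;
  try {
    config_info = json::parse(tf_config);
  } catch (json::exception &e) {
    // A malformed TF_CONFIG is warned about and otherwise ignored.
    std::cerr << "Failed to parse TF_CONFIG as json, reason: " << e.what() << std::endl;
    return true;
  }
  if (config_info.is_object()) {
    if (config_info["task"]["type"] == "ps") { return false; }
    if (config_info["task"]["type"] == "evaluator") { exec_hccl_flag = 0; }
  }
  return true;
}

int main() {
  int exec_hccl_flag = 1;
  if (!ShouldInitGe(exec_hccl_flag)) {
    std::cout << "ps task: skip GE initialization" << std::endl;
  } else {
    std::cout << "initialize GE with exec_hccl_flag=" << exec_hccl_flag << std::endl;
  }
  return 0;
}
```
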