diff --git a/samples/contribute/ACT/NNN/CMakeLists.txt b/samples/contribute/ACT/NNN/CMakeLists.txt
new file mode 100755
index 0000000000000000000000000000000000000000..63f9d9ab8eabb06c7c974f7a82aab40951b8bb2d
--- /dev/null
+++ b/samples/contribute/ACT/NNN/CMakeLists.txt
@@ -0,0 +1,7 @@
+# CMake lowest version requirement
+cmake_minimum_required(VERSION 3.5.1)
+
+# project information
+project(ACL_CLASSIFICATION_RESNET50)
+
+add_subdirectory("./src")
diff --git a/samples/contribute/ACT/NNN/README.md b/samples/contribute/ACT/NNN/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6d4b39d4f25d6a270c47e8e9ac372808c134a320
--- /dev/null
+++ b/samples/contribute/ACT/NNN/README.md
@@ -0,0 +1,302 @@
+# 基于ACT网络实现模仿学习
+## 概述
+ACT(Action Chunking with Transformers)是面向机器人学习场景的高性能端到端动作控制模型。相比传统模块化机器人控制模型,ACT采用轻量化Transformer架构作为核心骨干进行动作表征学习,结合多模态感知融合模块和时序动作优化网络,在控制精度和实时响应速度上均有显著提升。
+
+- 参考实现:
+ ```
+ https://gitcode.com/openeuler/lerobot_ros2/tree/master/src/lerobot/policies/act
+ ```
+
+- 输入数据
+ | 输入数据 | 数据类型 | 大小 | 数据排布格式 |
+ | -------- | -------- | ---------------- | ------------ |
+ | observation.state | FP32 | 1 x 6 | - |
+ | observation.images.top | RGB_FP32 | 1 x 3 x 240 x 320 | NCHW |
+ | observation.images.wrist | RGB_FP32 | 1 x 3 x 240 x 320 | NCHW |
+
+- 输出数据
+ | 输出数据 | 数据类型 | 大小 | 数据排布格式 |
+ | -------- | -------- | ----------- | ----------- |
+ | feature_map_1 | FP32 | 1x100x6 | [N, action_n, motor_n] |
+
+## 原理介绍
+本样例涉及的关键功能点如下:
+- **初始化**
+ - 调用`aclInit`接口初始化ACL配置;
+ - 调用`aclFinalize`接口实现ACL去初始化。
+- **Device管理**
+ - 调用`aclrtSetDevice`接口指定运算Device;
+ - 调用`aclrtGetRunMode`接口获取运行模式,按模式差异化处理流程;
+ - 调用`aclrtResetDevice`接口复位Device,回收资源。
+- **Context管理**
+ - 调用`aclrtCreateContext`接口创建Context;
+ - 调用`aclrtDestroyContext`接口销毁Context。
+- **Stream管理**
+ - 调用`aclrtCreateStream`接口创建Stream;
+ - 调用`aclrtDestroyStream`接口销毁Stream。
+- **内存管理**
+ - 调用`aclrtMalloc`接口申请Device内存;
+ - 调用`aclrtFree`接口释放Device内存。
+- **数据传输**
+ - 调用`aclrtMemcpy`接口通过内存复制实现数据传输。
+- **模型推理**
+ - 调用`aclmdlLoadFromMem`接口从`*.om`文件加载模型;
+ - 调用`aclmdlExecute`接口执行同步模型推理;
+ - 调用`aclmdlUnload`接口卸载模型。
+
+## 目录结构
+样例代码结构如下:
+```
+├── ACT // 项目根目录(基于ACT网络的机械臂模仿学习)
+│
+├── build // 编译构建目录(存放中间文件、可执行程序)
+│
+├── data // 输入数据与推理结果目录
+│ ├── observation.images.top_240_320.bin // 顶部摄像头图像数据(二进制)
+│ ├── observation.images.wrist_240_320.bin // 腕部摄像头图像数据(二进制)
+│ ├── observation.state_240_320.bin // 机械臂状态数据(关节角度、位姿等,二进制)
+│
+├── inc // 头文件目录(存放CPP头文件)
+├── model // 模型文件目录(存放OM离线模型、配置文件)
+├── out // 输出目录(存放推理结果、日志)
+│
+├── script // 辅助脚本目录
+│ ├── model_test.py // ACT模型推理测试脚本(验证功能、调试输入输出)
+│
+├── src // 核心代码目录(CPP实现推理、数据处理逻辑)
+│ ├── acl.json // ACL系统初始化配置文件(指定设备、上下文等)
+│ ├── CMakeLists.txt // src目录编译脚本(定义编译规则)
+│ ├── main.cpp // 主函数入口(调度:数据加载→模型推理→结果处理)
+│ ├── model_process.cpp // 模型处理模块(OM加载、异步推理提交)
+│ ├── result.txt // 推理结果临时存储文件(记录机械臂动作输出)
+│ ├── sample_process.cpp // 样本处理模块(二进制数据加载、解析、预处理)
+│
+├── utils // 工具模块目录(通用辅助功能)
+│ ├── CMakeLists.txt // utils目录编译脚本
+│
+├── config.yaml // 全局配置文件(定义数据路径、模型参数、推理参数)
+├── README.md // 项目说明文档(环境依赖、编译运行步骤、目录解释)
+```
+
+## 推理环境准备
+1. 查看芯片名称:
+ ```bash
+ cat /proc/umap/sys
+ # 示例回显(芯片名为SS928V100,需自行替换实际芯片名)
+ [SYS] Version: [SS928V100XXXXXXXXX]
+ ```
+
+2. 环境版本配套要求:
+ | 芯片型号 | 算力引擎 | soc_version | 环境准备指导 | CANN包版本 | 编译工具链 | 板端OS | SDK |
+ | -------- | ------- | ----------- | ------------ | ---------- | ---------- | --- | ---- |
+ | Hi3403V100 | NNN | SS928V100 | [推理环境准备](https://gitee.com/HiSpark/modelzoo/blob/master/docs/Hi3403V100%E5%BC%80%E5%8F%91%E7%8E%AF%E5%A2%83%E6%90%AD%E5%BB%BA.md) | [5.20.t6.2.b060] | aarch64-openeuler-linux-gnu-g++ | [openEuler](https://pages.openeuler.openatom.cn/embedded/docs/build/html/master/bsp/arm64/hisilicon/hieulerpi/update.html) | SS928 V100R001C02SPC022 |
+
+ 系统驱动安装,参考:https://gitee.com/HiSpark/ss928v100_gcc/tree/Beta-v0.9.2
+
+ SDK编译工具链的安装,参考:https://pages.openeuler.openatom.cn/embedded/docs/build/html/master/getting_started/index.html#install-openeuler-embedded-sdk
+
+
+## 快速上手
+### 获取源码
+1. 克隆参考代码仓:
+ ```bash
+ git clone https://gitcode.com/openeuler/lerobot_ros2.git
+ cd lerobot_ros2
+ ```
+2. 安装依赖:参考代码仓README文档完成环境依赖安装。
+3. 训练数据集下载:https://huggingface.co/datasets/lwh2017/grab_banana/tree/main/banana_grasp_100_320x240
+
+### 模型转化
+通过PyTorch将多`.safetensors`权重文件夹转为`.onnx`文件,再用ATC工具转为`.om`离线推理模型:
+1. 准备权重文件:
+ 模型下载链接:https://huggingface.co/datasets/lwh2017/grab_banana
+ ```bash
+ mkdir model # 创建模型目录
+ # 将下载的模型权重文件夹(含.safetensors文件)放入model目录
+ ```
+2. 导出ONNX文件:
+ ```bash
+ cd lerobot_ros2
+ # pretrained_model:权重文件夹路径;act:模型类型
+ python src/lerobot/oee/export_onnx.py ../model/pretrained_model/ act
+ # 移动生成的ONNX模型到model目录
+ mv ./model/pretrained_model/act_ros2_simplified.onnx ../model/act_ros2_simplified.onnx
+ cd ..
+ ```
+ **export_onnx.py参数说明**:
+ | 参数/位置参数 | 说明 |
+ |---------------|------|
+ | 位置参数1(pretrained_model) | 必选,ACT预训练权重文件夹路径(内含.safetensors文件) |
+ | 位置参数2(model_type) | 必选,模型类型,固定为"act" |
+3. ATC工具转OM模型(Hi3403V100 SVP_NNN平台):
+ ```bash
+ # 若无数值校准bin文件,需先通过preprocess.py生成;多文件用;分隔
+ atc --model="./act_ros2_simplified.onnx" \
+ --framework="5" \
+ --input_format="NCHW" \
+ --save_original_model="true" \
+ --output="./act_ros2_simplified" \
+ --soc_version=OPTG \
+ --release=0
+ ```
+ 成功后生成`act_ros2_simplified.om`文件,通过以下命令,将模型进行重命名,供main文件加载。
+ ```
+ mv ./model/act_ros2_simplified.om ./model/act_distill_fp32_for_mindcmd_simp_release.om
+ ```
+ **ATC命令核心参数说明**:
+ | 参数 | 说明 |
+ |------|------|
+ | --model | 必选,待转换的ONNX模型文件路径 |
+ | --framework | 必选,原始框架类型(5=ONNX) |
+ | --output | 必选,输出OM模型的路径(无需后缀) |
+  | --image_list | 可选,量化校准数据路径,格式为“输入名:文件路径;输入名:文件路径”(本示例ATC命令未使用该参数) |
+ | --soc_version | 必选,处理器型号(如SS928V100) |
+|
+ 注意:若找不到atc命令,参考“推理环境准备”配置环境。
+
+### 模型推理
+#### 步骤1:编译代码
+1. 创建编译目录:
+ ```bash
+ mkdir -p build
+ cd build
+ ```
+2. 生成编译文件(交叉编译示例:X86→ARM):
+ ```bash
+ cmake ../src -Dtarget=board -DCMAKE_BUILD_TYPE=Release -DCMAKE_CXX_COMPILER=aarch64-mix210-linux-g++ -DCMAKE_C_COMPILER=/usr/bin/cc -DCMAKE_SKIP_RPATH=TRUE -DCMAKE_CXX_FLAGS="-I/home/Ascend/ascend-toolkit/5.20.t6.2.b060/arm64-lmixlinux200/aarch64-linux/include" -DCMAKE_CXX_LINK_FLAGS="-L/home/Ascend/ascend-toolkit/5.20.t6.2.b060/arm64-lmixlinux200/aarch64-linux/devlib -lascendcl -lpthread -ldl" -DCMAKE_CXX_COMPILER_WORKS=1
+ ```
+ | 参数 | 说明 |
+ |------|------|
+ |-Dtarget=board |必选,指定编译目标为板端运行
+ |-DCMAKE_BUILD_TYPE=Release |可选,编译模式(Release = 生产模式,Debug = 调试模式)
+  |-DCMAKE_CXX_COMPILER=aarch64-mix210-linux-g++ | 必选,指定 C++ 交叉编译工具链为 aarch64-mix210-linux-g++(与上方cmake命令一致)
+ |-DCMAKE_C_COMPILER=/usr/bin/cc | 必选,指定 C 语言编译器路径为系统默认的 /usr/bin/cc
+ |-DCMAKE_SKIP_RPATH=TRUE |可选,禁用运行时库路径(RPATH)的生成,避免编译产物依赖特定库路径
+ |-DCMAKE_CXX_FLAGS="-I/home/Ascend/ascend-toolkit/5.20.t6.2.b060/arm64-lmixlinux200/aarch64-linux/include" |必选,C++ 编译选项:添加 Ascend(昇腾)工具链的头文件搜索路径
+ |-DCMAKE_CXX_LINK_FLAGS="-L/home/Ascend/ascend-toolkit/5.20.t6.2.b060/arm64-lmixlinux200/aarch64-linux/devlib -lascendcl -lpthread -ldl" | 必选,C++ 链接
+ |-DCMAKE_CXX_COMPILER_WORKS=1 |必选,强制指定 C++ 编译器可用
+
+3. 编译生成可执行文件:
+ ```bash
+ make # 生成的main在./out目录
+ ```
+
+#### 步骤2:运行推理应用
+在运行环境(板端)通过`model_test.py`调用C++可执行文件,完成数据预处理、推理、结果解析:
+1. 部署文件:将样例目录上传至运行环境(Host),如`$HOME/ACT`;
+2. 授权可执行文件:
+ ```bash
+ cd $HOME/ACT/out
+ chmod +x main
+ ```
+3. 配置Python环境:
+ ```bash
+ # 创建并激活conda虚拟环境
+ conda create -n act_om python=3.8 -y
+ conda activate act_om
+ # 安装依赖
+ pip install numpy==1.24.3
+ ```
+4. 准备推理数据:将输入`.bin`文件放入`./data`目录;
+5. 运行推理脚本:
+ ```bash
+ python model_test.py --image_list "bin文件路径1;bin文件路径2;..."
+ ```
+ **model_test.py参数说明**:
+ | 参数 | 类型 | 必选 | 说明 |
+ |------|------|------|------|
+ | --image_list | str | 是 | 以分号分隔的bin文件路径列表,顺序需与模型输入(state/top/wrist)对应 |
+6. 推理结果:保存于`./result.txt`。
+
+#### 步骤3:验证精度和性能
+1. 精度验证:
+ - 测试方法
+ 1. 数据准备:选取20个有效测试样本,构建标准化输入批次(batch_path: ../ACT/data/batches.json);
+ 2. 基准推理:通过PyTorch原生框架加载ACT模型,完成20个样本的推理,保存首个1×6 action向量作为基准值;
+ 3. OM推理:通过昇腾OM模型加载器执行相同20个样本的推理,提取首个1×6 action向量;
+ 4. 误差计算:逐样本计算OM输出与PyTorch输出的L1 Loss,统计平均值、极值等关键指标。
+ - 开发环境生成目标动作:
+ ```bash
+ cd lerobot_ros2
+ python ./src/lerobot/oee/ascend/utils/loss_compare.py \
+ --device cpu --generate-target \
+ --batch_path ../ACT/data/batches.json \
+ --target_path ../ACT/data/target.json \
+ --policy_path_act model/pretrained_model/
+ ```
+ - 运行环境对比误差:
+ ```bash
+ cd lerobot_ros2
+ python ./src/lerobot/oee/ascend/utils/loss_compare.py \
+ --device cpu \
+ --batch_path ../ACT/data/batches.json \
+ --target_path ../ACT/data/target.json \
+ --policy_path_act model/pretrained_model/
+ ```
+ **loss_compare.py核心参数说明**:
+ | 参数 | 类型 | 必选 | 说明 |
+ |------|------|------|------|
+ | --device | str | 是 | 运行设备,可选"cpu"/"cuda"/"npu" |
+ | --generate-target | 开关 | 否 | 若指定,基于PyTorch模型生成目标动作并保存至target_path |
+ | --batch_path | str | 是 | 输入batches.json文件路径(机械臂观测数据) |
+ | --target_path | str | 是 | 目标动作文件(target.json)路径,用于精度对比 |
+ | --policy_path_act | str | 是 | ACT预训练模型权重文件夹路径 |
+
+  NNN平台精度结果:
+
+ 1. 测试对象
+ - 模型类型:ACT动作预测模型,推理输出维度为`1×100×6`的动作矩阵(对应100个动作步,每个步为6维动作向量);
+ - 对比维度:取模型输出矩阵中首个`1×6`的action向量作为核心对比对象(实际部署中优先执行该向量);
+ - 测试样本:共20个独立测试样本,覆盖不同观测输入场景。
+
+ 2. 测试指标
+ 采用**L1 Loss(平均绝对误差)** 作为精度差异量化指标,计算公式为:
+ $$L1_{loss} = \sum_{i=1}^6 |a_{OM,i} - a_{PyTorch,i}|$$
+ 其中,$a_{OM,i}$ 为OM模型输出的1×6向量第$i$维值,$a_{PyTorch,i}$ 为PyTorch原生模型输出的1×6向量第$i$维值。
+
+ 3. 测试结果
+ - 单样本L1 Loss明细
+
+ | 样本序号 | L1 Loss值 | 样本序号 | L1 Loss值 |
+ |----------|-----------------|----------|-----------------|
+ | 0 | 0.000008265178 | 10 | 0.000013351440 |
+ | 1 | 0.000010410945 | 11 | 0.000003337860 |
+ | 2 | 0.000002702077 | 12 | 0.000007390976 |
+ | 3 | 0.000007867813 | 13 | 0.000007947286 |
+ | 4 | 0.000006357829 | 14 | 0.000004688899 |
+ | 5 | 0.000006675720 | 15 | 0.000004847845 |
+ | 6 | 0.000008344650 | 16 | 0.000008821487 |
+ | 7 | 0.000011364619 | 17 | 0.000007947286 |
+ | 8 | 0.000008583069 | 18 | 0.000002940496 |
+ | 9 | 0.000006198883 | 19 | 0.000000715256 |
+
+ - 统计指标
+
+ | 统计项 | 数值 |
+ |--------------|-----------------|
+ | 平均L1 Loss | 0.000006937981 |
+ | 最小L1 Loss | 0.000000715256(样本 19)|
+ | 最大L1 Loss | 0.000013351440(样本 10)|
+ | 中位数L1 Loss| 0.000007029395 |
+ | 标准差 | 0.0000028943|
+
+2. 性能验证
+ ```bash
+ cd lerobot_ros2
+ python ./src/lerobot/oee/ascend/utils/loss_compare.py \
+ --device cpu --generate-target \
+ --batch_path ../ACT/data/batches.json \
+ --target_path ../ACT/data/target.json \
+ --policy_path_act model/pretrained_model/
+ ```
+
+ **性能验证说明**
+ 脚本运行时会输出多维度性能指标和数据规格信息,用于全面评估ACT模型推理性能,核心信息如下:
+ 1. **输入数据规格**(单次推理输入):
+ - 共3路输入数据,具体维度/类型/大小:
+ - 输入0:形状`(1, 6)`,float32类型,元素数6,字节数24;
+ - 输入1:形状`(1, 3, 240, 320)`(3通道240×320图像),float32类型,元素数230400,字节数921600;
+ - 输入2:形状`(1, 3, 240, 320)`(3通道240×320图像),float32类型,元素数230400,字节数921600;
+    2. **推理耗时指标**:
+ - 模型核心推理时间:单次推理约**2.69秒**(仅模型前向计算耗时,不含数据传输/解析);
+ - 端到端推理时间:单次推理约**8秒**(含数据打包、C++进程通信、输出解析、张量转换全流程耗时),可通过流水线设计增加推理吞吐;
diff --git a/samples/contribute/ACT/NNN/inc/model_process.h b/samples/contribute/ACT/NNN/inc/model_process.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f7ffcc56596e0111c7b0e9404a5368cfe0c62a3
--- /dev/null
+++ b/samples/contribute/ACT/NNN/inc/model_process.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2026 Huawei Technologies Co., Ltd
+ * This file is part of [Hispark/modelzoo].
+ *
+ * [Hispark/modelzoo] is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, version 3 of the License only.
+ *
+ * [Hispark/modelzoo] is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with [Hispark/modelzoo]. If not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef MODEL_PROCESS_H
+#define MODEL_PROCESS_H
+
+#include <string>
+#include <vector>
+#include "utils.h"
+#include "acl/acl.h"
+
+class ModelProcess {
+public:
+ /**
+ * Function: Class constructor
+ * Description: Initialize a ModelProcess instance with default resource states
+ */
+ ModelProcess();
+
+ /**
+ * Function: Class destructor
+ * Description: Release all allocated resources and destroy the ModelProcess instance
+ */
+ ~ModelProcess();
+
+ /**
+ * Function: Load model file into memory and initialize model resources
+ * Input: modelPath - full file path of the target model file (including file name and extension)
+ * Return: Result status code (success if the code indicates normal execution, failure otherwise)
+ */
+ Result LoadModelFromFileWithMem(const std::string& modelPath);
+
+ /**
+ * Function: Unload loaded model and release associated memory
+ * Description: Clear model-related resources without returning any status
+ */
+ void Unload();
+
+ /**
+ * Function: Initialize input dataset for model inference
+ * Return: Result status code (success/failure of initialization)
+ */
+ Result InitInput();
+
+ /**
+ * Function: Create model description structure
+ * Description: Build and initialize the model descriptor to store model metadata
+ * Return: Result status code (success/failure of descriptor creation)
+ */
+ Result CreateDesc();
+
+ /**
+ * Function: Destroy model description structure
+ * Description: Release memory occupied by the model descriptor and clear related data
+ */
+ void DestroyDesc();
+
+ /**
+ * Function: Create input buffer for model inference
+ * Input: inputDataBuffer - pointer to the host/device buffer containing input data
+ * Input: bufferSize - total byte size of the inputDataBuffer
+ * Input: stride - alignment step size for input data processing
+ * Return: Result status code (success/failure of input buffer creation)
+ */
+ Result CreateInput(void *inputDataBuffer, size_t bufferSize, int stride);
+
+ /**
+ * Function: Create input buffer from a specified file
+ * Input: filePath - full path of the file containing input data
+ * Return: Result status code (success/failure of buffer creation)
+ */
+ Result CreateInputBuf(const std::string& filePath);
+
+ /**
+ * Function: Create task buffer and work buffer for model execution
+ * Return: Result status code (success/failure of buffer allocation)
+ */
+ Result CreateTaskBufAndWorkBuf();
+
+ /**
+ * Function: Release all resources related to model input
+ * Description: Free input buffers and clear input dataset configuration
+ */
+ void DestroyInput();
+
+ /**
+ * Function: Allocate output buffer to store model inference results
+ * Return: Result status code (success/failure of output buffer creation)
+ */
+ Result CreateOutput();
+
+ /**
+ * Function: Release all resources related to model output
+ * Description: Free output buffers and clear inference result data
+ */
+ void DestroyOutput();
+
+ /**
+ * Function: Execute model inference with prepared input data
+ * Return: Result status code (success/failure of inference execution)
+ */
+ Result Execute();
+
+ /**
+ * Function: Export model inference output results to a file
+ * Description: Write output buffer data to a local file (read-only operation, no state change)
+ */
+ void DumpModelOutputResult() const;
+
+ /**
+ * Function: Retrieve and display model inference output results
+ * Description: Extract data from output buffer and present it (read-only operation, no state change)
+ */
+ void OutputModelResult() const;
+
+ /**
+ * Function: Create dedicated buffer for specified input/output index
+ * Input: index - target index of the input/output buffer (starting from 0)
+ * Return: Result status code (success/failure of buffer creation)
+ */
+ Result CreateBuf(int index);
+
+ // Result GetInputStrideParam(int index, size_t& bufSize, size_t& stride, aclmdlIODims& dims) const;
+
+ /**
+ * Function: Get stride parameters of specified output index
+ * Input: index - target index of the output buffer (starting from 0)
+ * Output: bufSize - byte size of the output buffer (output parameter)
+ * Output: stride - alignment step size of the output data (output parameter)
+ * Output: dims - dimension information of the output model I/O (output parameter)
+ * Return: Result status code (success/failure of parameter retrieval)
+ */
+ Result GetOutputStrideParam(int index, size_t& bufSize, size_t& stride, aclmdlIODims& dims) const;
+
+ /**
+ * Function: Get data size of specified input index
+ * Input: index - target index of the input buffer (starting from 0)
+ * Return: Total byte size of the input buffer at the specified index
+ */
+ size_t GetInputDataSize(int index) const;
+
+ /**
+ * Function: Get data size of specified output index
+ * Input: index - target index of the output buffer (starting from 0)
+ * Return: Total byte size of the output buffer at the specified index
+ */
+ size_t GetOutputDataSize(int index) const;
+
+ /**
+ * Function: Get the total number of model input nodes
+ * Return: Count of input nodes (non-negative integer)
+ */
+ size_t GetInputNum() const;
+
+ /**
+ * Function: Get stride parameters of specified input index
+ * Input: index - target index of the input buffer (starting from 0)
+ * Output: buf_size - byte size of the input buffer (output parameter)
+ * Output: stride - alignment step size of the input data (output parameter)
+ * Output: dims - dimension information of the input model I/O (output parameter)
+ * Return: Result status code (success/failure of parameter retrieval)
+ */
+ Result GetInputStrideParam(int index, size_t& buf_size, size_t& stride, aclmdlIODims& dims) const;
+
+ /**
+ * Function: Create model input buffer from raw data
+ * Input: data - pointer to the raw input data buffer
+ * Input: data_size - total byte size of the raw data buffer
+ * Return: Result status code (success/failure of input buffer creation)
+ */
+ Result CreateInputFromData(const void* data, size_t data_size);
+
+ /**
+ * Function: Create model input buffers from multiple raw data sources
+ * Input: input_datas - vector of pointers to multiple raw input data buffers
+ * Input: input_sizes - vector of byte sizes corresponding to each input data buffer
+ * Return: Result status code (success/failure of multi-input buffer creation)
+ */
+    Result CreateInputFromData(const std::vector<const void*>& input_datas,
+                               const std::vector<size_t>& input_sizes);
+
+ /**
+ * Function: Release all model-related resources
+ * Description: Unified release of input/output buffers, model descriptor, task/work buffers, etc.
+ */
+ void DestroyResource();
+
+private:
+ void WriteOutput(const std::string& outputFileName, size_t index) const;
+
+ Result ClearOutputStrideInvalidBuf(std::vector& buffer, size_t index) const;
+
+ uint32_t executeNum_ { 0 };
+ uint32_t modelId_ { 0 };
+ size_t modelMemSize_ { 0 };
+ size_t modelWeightSize_ { 0 };
+ void *modelMemPtr_ { nullptr };
+ void *modelWeightPtr_ { nullptr };
+ bool loadFlag_ { false };
+ aclmdlDesc *modelDesc_ { nullptr };
+ aclmdlDataset *input_ { nullptr };
+ aclmdlDataset *output_ { nullptr };
+};
+
+#endif // MODEL_PROCESS_H
diff --git a/samples/contribute/ACT/NNN/inc/sample_process.h b/samples/contribute/ACT/NNN/inc/sample_process.h
new file mode 100644
index 0000000000000000000000000000000000000000..fc5b549b59b58d0d22650e241267711afbea35b8
--- /dev/null
+++ b/samples/contribute/ACT/NNN/inc/sample_process.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2026 Huawei Technologies Co., Ltd
+ * This file is part of [Hispark/modelzoo].
+ *
+ * [Hispark/modelzoo] is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, version 3 of the License only.
+ *
+ * [Hispark/modelzoo] is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with [Hispark/modelzoo]. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifndef SAMPLE_PROCESS_H
+#define SAMPLE_PROCESS_H
+
+#include <string>
+#include <vector>
+#include "utils.h"
+#include "acl/acl.h"
+#include "model_process.h"
+
+/**
+* Class: SampleProcess
+* Description: Core processing class for model inference sample, manages input data, model loading and resource lifecycle
+*/
+class SampleProcess {
+public:
+ /**
+ * Function: Class constructor
+ * Description: Initialize a SampleProcess instance with default values for all member variables
+ */
+ SampleProcess();
+
+ /**
+ * Function: Class destructor
+ * Description: Release allocated resources and reset member variable states when instance is destroyed
+ */
+ ~SampleProcess();
+
+ /**
+ * Function: Initialize system and model inference related resources
+ * Description: Set up device context, stream and basic runtime environment
+ * Return: Result status code (success if the code indicates normal execution, failure otherwise)
+ */
+ Result InitResource();
+
+ /**
+ * Function: Execute the full process of sample inference
+ * Description: Orchestrate input data preparation, model loading and inference execution
+ * Return: Result status code (success/failure of the entire inference process)
+ */
+ Result Process();
+ // void DestroyResource();
+
+ /**
+ * Function: Set the file path of the input data source
+ * Input: path - full file path of the input data file (including file name and extension)
+ */
+ void SetInputPath(const std::string& path);
+ std::string input_path_; // Member variable: Stores the full file path of the input data for inference
+
+ const char* input_data_ = nullptr; // Member variable: Pointer to the binary input data buffer (nullptr by default)
+ size_t input_data_size_ = 0; // Member variable: Total byte size of the binary input data buffer (0 by default)
+
+ /**
+ * Function: Set multiple input data sources for multi-input model inference
+ * Input: input_datas - Vector of pointers to multiple binary input data buffers
+ * Input: input_sizes - Vector of byte sizes corresponding to each input data buffer
+ */
+    void SetInputDatas(const std::vector<const void*>& input_datas,
+                       const std::vector<size_t>& input_sizes);
+    std::vector<const void*> input_datas_; // Member variable: Stores pointers to multiple input data buffers
+    std::vector<size_t> input_sizes_; // Member variable: Stores byte sizes of corresponding multiple input data buffers
+
+ /**
+ * Function: Load pre-configured model file into memory
+ * Description: Initialize model resources and prepare for inference execution
+ * Return: Result status code (success/failure of model loading)
+ */
+ Result LoadModel();
+
+ /**
+ * Function: Release all allocated resources
+ * Description: Unified release of device context, stream, model resources and input/output buffers
+ */
+ void DestroyResource();
+
+private:
+ int32_t deviceId_ { 0 }; // Member variable: ID of the target computing device (default to 0)
+ aclrtContext context_ { nullptr }; // Member variable: Runtime context of the computing device (nullptr by default)
+ aclrtStream stream_ { nullptr }; // Member variable: Runtime stream for asynchronous execution (nullptr by default)
+
+ bool isInited_ = false; // Member variable: Flag indicating if resource initialization is completed (false by default)
+ ModelProcess modelProcess_; // Member variable: Instance of ModelProcess class (used for model management, not local variable)
+ bool isModelLoaded_ = false; // Member variable: Flag indicating if model loading is completed (false by default)
+};
+
+/**
+* Function: Friend function to set input file path for SampleProcess instance
+* Description: Directly access the input_path_ member variable of SampleProcess (no need to add new Set method)
+* Input: sample - Reference to the target SampleProcess instance
+* Input: path - Full file path to be set for input data
+*/
+inline void set_input_path(SampleProcess& sample, const std::string& path) {
+ sample.input_path_ = path; // Assign input file path to the member variable of the SampleProcess instance
+}
+
+#endif // SAMPLE_PROCESS_H
diff --git a/samples/contribute/ACT/NNN/inc/utils.h b/samples/contribute/ACT/NNN/inc/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff58c760a46c60549db6263c4c66987188bd3add
--- /dev/null
+++ b/samples/contribute/ACT/NNN/inc/utils.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2026 Huawei Technologies Co., Ltd
+ * This file is part of [Hispark/modelzoo].
+ *
+ * [Hispark/modelzoo] is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, version 3 of the License only.
+ *
+ * [Hispark/modelzoo] is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with [Hispark/modelzoo]. If not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef UTILS_H
+#define UTILS_H
+
+#include <string>
+#include <cstdint>
+#include "acl/acl.h"
+#include "acl/acl_mdl.h"
+
+#define INFO_LOG(fmt, ...) fprintf(stdout, "[INFO] " fmt "\n", ##__VA_ARGS__)
+#define WARN_LOG(fmt, ...) fprintf(stdout, "[WARN] " fmt "\n", ##__VA_ARGS__)
+#define ERROR_LOG(fmt, ...) fprintf(stdout, "[ERROR] " fmt "\n", ##__VA_ARGS__)
+
+#ifdef _WIN32
+#define S_ISREG(m) (((m) & 0170000) == (0100000))
+#endif
+
+typedef enum Result {
+ SUCCESS = 0,
+ FAILED = 1
+} Result;
+
+class Utils {
+public:
+ /**
+ * Function: Allocate and initialize device-side buffer using data from a specified file
+ * Input: fileName - full path/name of the input file
+ * Input: dims - dimension information of the model I/O data
+ * Input: stride - step size for data alignment
+ * Input: dataSize - total size of the data to be read
+ * Return: Pointer to the allocated device buffer (nullptr if failed)
+ */
+ static void* GetDeviceBufferOfFile(const std::string& fileName, const aclmdlIODims& dims,
+ size_t stride, size_t dataSize);
+
+ /**
+ * Function: Read binary file content into a memory buffer
+ * Input: fileName - path and name of the binary file to read
+ * Output: fileSize - total byte size of the read file (output parameter)
+ * Return: Pointer to the host-side buffer containing file data (nullptr if failed)
+ */
+ static void* ReadBinFile(const std::string& fileName, uint32_t& fileSize);
+
+ /**
+ * Function: Get the total byte size of a specified file
+ * Input: fileName - path and name of the target file
+ * Output: fileSize - byte size of the file (output parameter)
+ * Return: Result code (success/failure status)
+ */
+ static Result GetFileSize(const std::string& fileName, uint32_t& fileSize);
+
+ /**
+ * Function: Read binary file with stride alignment and fill to specified dimension
+ * Input: fileName - path and name of the binary file
+ * Input: dims - dimension configuration of the model I/O
+ * Input: stride - alignment step size for data processing
+ * Input: dataSize - expected total size of the output buffer
+ * Return: Pointer to the aligned data buffer (nullptr if failed)
+ */
+ static void* ReadBinFileWithStride(const std::string& fileName, const aclmdlIODims& dims,
+ size_t stride, size_t dataSize);
+
+ /**
+ * Function: Initialize int8_t type data buffer with default values
+ * Input: data - pointer to the int8_t data buffer to initialize
+ * Input: dataSize - total size of the data buffer in bytes
+ * Note: This function will overwrite all existing data in the buffer
+ */
+ static void InitData(int8_t* data, size_t dataSize);
+};
+
+#endif
diff --git a/samples/contribute/ACT/NNN/script/model_test.py b/samples/contribute/ACT/NNN/script/model_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..272ad47bd00a2b9580c378be63323f2865ffdc78
--- /dev/null
+++ b/samples/contribute/ACT/NNN/script/model_test.py
@@ -0,0 +1,137 @@
+"""
+ACTWrapper.py
+
+加载 act om 模型并推理
+"""
+
+import numpy as np
+import subprocess
+import os
+import re
+import struct
+import json
+import sys
+import time
+import argparse
+
+
+class ACT3403Policy:
+ def __init__(self, cpp_executable):
+ super().__init__()
+ self.cpp_executable = cpp_executable
+ self.cpp_dir = os.path.dirname(cpp_executable) # 获取二进制所在目录
+ # self.cpp_process = self.model_init()
+
+ def predict(self, batch) -> tuple:
+ input_arr = batch
+ start_time = time.perf_counter()
+ cpp_outputs = self.run_cpp_and_get_float_output(input_arr)
+
+ if cpp_outputs is not None:
+ data = cpp_outputs.get(2)
+ if data is not None:
+ action = []
+ for i in range(0, len(data), 8):
+ action.append(data[i:i+6])
+ action = np.array(action, dtype=np.float32)
+ action = action.reshape([1, 100, 6])
+ return action
+ return None
+
+ def run_cpp_and_get_float_output(self, input_arrays):
+ # 启动C++进程,通过管道通信
+ process = subprocess.Popen(
+ self.cpp_executable,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=False, # 二进制模式通信
+ cwd=self.cpp_dir
+ )
+
+ try:
+ # 发送输入数据
+ for i, arr in enumerate(input_arrays):
+ data_bytes = arr.tobytes() # 获取原始二进制数据
+                data_size = struct.pack('<I', len(data_bytes))  # 4字节小端无符号长度,与C++侧uint32_t读取一致
+#include <iostream>
+#include "sample_process.h"
+#include "utils.h"
+#include <vector>
+#include <cstdint>
+using namespace std;
+
+int main() {
+ // 初始化推理环境(只执行一次)
+ SampleProcess sample;
+ if (sample.InitResource() != SUCCESS) {
+ cerr << "Init resource failed" << endl;
+ return -1;
+ }
+
+ // 加载模型(只执行一次)
+ if (sample.LoadModel() != SUCCESS) {
+ cerr << "Load model failed" << endl;
+ sample.DestroyResource();
+ return -1;
+ }
+
+ // 循环处理多次输入
+ while (true) {
+        vector<const void*> input_datas;
+        vector<size_t> input_sizes;
+ const int INPUT_COUNT = 3;
+
+ // 读取输入数据(保持原有逻辑)
+ bool readSuccess = true;
+ for (int i = 0; i < INPUT_COUNT; ++i) {
+ uint32_t data_size;
+            cin.read(reinterpret_cast<char*>(&data_size), sizeof(data_size));
+ if (!cin.good()) {
+ cerr << "Read input " << i << " size failed" << endl;
+ readSuccess = false;
+ break;
+ }
+
+ void* data = nullptr;
+ aclError ret = aclrtMalloc(&data, data_size, ACL_MEM_MALLOC_NORMAL_ONLY);
+ if (ret != ACL_SUCCESS || data == nullptr) {
+ cerr << "Malloc buffer for input " << i << " failed" << endl;
+ readSuccess = false;
+ break;
+ }
+
+            cin.read(reinterpret_cast<char*>(data), data_size);
+ if (!cin.good()) {
+ cerr << "Read input " << i << " data failed" << endl;
+ aclrtFree(data);
+ readSuccess = false;
+ break;
+ }
+
+ input_datas.push_back(data);
+ input_sizes.push_back(data_size);
+ }
+
+ // 检查是否读取失败(比如到达输入末尾)
+ if (!readSuccess) {
+ // 释放已分配的内存
+            for (auto ptr : input_datas) aclrtFree(const_cast<void*>(ptr));
+ break;
+ }
+
+ // 设置输入并执行推理
+ sample.SetInputDatas(input_datas, input_sizes);
+ if (sample.Process() != SUCCESS) {
+ cerr << "Inference failed" << endl;
+ } else {
+ cout << "3-input inference success" << endl; // 注意这里修正了原代码的数字错误(5->3)
+ }
+
+ // 释放当前批次的输入内存
+        for (auto data : input_datas) aclrtFree(const_cast<void*>(data));
+ }
+
+ // 最后释放所有资源
+ sample.DestroyResource();
+ return 0;
+}
diff --git a/samples/contribute/ACT/NNN/src/model_porcess.cpp b/samples/contribute/ACT/NNN/src/model_porcess.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c850fe5785f6cd1f60de1894c42581bff2f490ba
--- /dev/null
+++ b/samples/contribute/ACT/NNN/src/model_porcess.cpp
@@ -0,0 +1,744 @@
+/**
+* Copyright (c) Huawei Technologies Co., Ltd. 2022-2022. All rights reserved.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+
+* http://www.apache.org/licenses/LICENSE-2.0
+
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "model_process.h"
+
+#include