diff --git a/bundle.json b/bundle.json
index 89327cdb8977be8bec2702454e684bf220bf04c0..8146cfc588a22589d34c113472cac102423cb3ef 100644
--- a/bundle.json
+++ b/bundle.json
@@ -32,7 +32,8 @@
       "mindspore",
       "init",
       "json",
-      "jsoncpp"
+      "jsoncpp",
+      "eventhandler"
     ],
     "third_party": []
   },
diff --git a/frameworks/native/neural_network_core/executor.h b/frameworks/native/neural_network_core/executor.h
index d63ad3994d3b04d6c75c404b94e2750261136e84..2d74f64c5e82e7375d898c2a5eb30d3b3417825e 100644
--- a/frameworks/native/neural_network_core/executor.h
+++ b/frameworks/native/neural_network_core/executor.h
@@ -17,7 +17,6 @@
 #define NEURAL_NETWORK_RUNTIME_EXECUTOR_H
 
 #include
-#include
 #include
 
 #include "compiler.h"
@@ -60,6 +59,18 @@ public:
     virtual size_t GetBackendID() = 0;
     virtual OH_NN_ReturnCode SetExtensionConfig(const std::unordered_map<std::string, std::vector<char>>& configs) = 0;
     virtual ExecutorConfig* GetExecutorConfig() const = 0;
+    virtual bool DeinitModel(std::string mode)
+    {
+        return true;
+    }
+    virtual OH_NN_ReturnCode SetDeinitModelCallBack()
+    {
+        return OH_NN_SUCCESS;
+    }
+    virtual OH_NN_ReturnCode UnSetDeinitModelCallBack()
+    {
+        return OH_NN_SUCCESS;
+    }
 };
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
diff --git a/frameworks/native/neural_network_core/executor_config.h b/frameworks/native/neural_network_core/executor_config.h
index 00a59d4fbc50a8a83e7d312744db5c1484e1fe41..45df37a42d4d4b26d1033854e417757c9fc14baf 100644
--- a/frameworks/native/neural_network_core/executor_config.h
+++ b/frameworks/native/neural_network_core/executor_config.h
@@ -16,8 +16,6 @@
 #ifndef NEURAL_NETWORK_RUNTIME_EXECUTOR_CONFIG_H
 #define NEURAL_NETWORK_RUNTIME_EXECUTOR_CONFIG_H
 
-#include "nnrt_client.h"
-
 namespace OHOS {
 namespace NeuralNetworkRuntime {
 struct ExecutorConfig {
diff --git a/frameworks/native/neural_network_core/neural_network_core.cpp b/frameworks/native/neural_network_core/neural_network_core.cpp
index b55a08be15c8272871f1709a6b546946b7e82ca1..8a8e87c5fb92fe4275113aecbfa5ed54684b62d5 100644
--- a/frameworks/native/neural_network_core/neural_network_core.cpp
+++ b/frameworks/native/neural_network_core/neural_network_core.cpp
@@ -1390,6 +1390,12 @@ OH_NN_ReturnCode ExecutorPrepare(Executor** executor, Compilation** compilation)
         return ret;
     }
 
+    ret = executorImpl->SetDeinitModelCallBack();
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("SetDeinitModelCallBack failed, failed to set DeinitModelCallBack to client.");
+        return ret;
+    }
+
     return OH_NN_SUCCESS;
 }
 
diff --git a/frameworks/native/neural_network_core/nnrt_client.cpp b/frameworks/native/neural_network_core/nnrt_client.cpp
index 1dcf43f737202955022de4dc7a54ad1a2c34417d..94a1338d28ba52ddbe52cd75d747767e884ab7d8 100644
--- a/frameworks/native/neural_network_core/nnrt_client.cpp
+++ b/frameworks/native/neural_network_core/nnrt_client.cpp
@@ -73,6 +73,11 @@ NNRtServiceApi& NNRtServiceApi::GetInstance()
     LoadFunction(libNNRtService, "UpdateModelLatency", &nnrtService.UpdateModelLatency);
     LoadFunction(libNNRtService, "Unload", &nnrtService.Unload);
     LoadFunction(libNNRtService, "PullUpDlliteService", &nnrtService.PullUpDlliteService);
+    LoadFunction(libNNRtService, "AutoReinitSetModelID", &nnrtService.AutoReinitSetModelID);
+    LoadFunction(libNNRtService, "AutoReinitScheduling", &nnrtService.AutoReinitScheduling);
+    LoadFunction(libNNRtService, "AutoUnload", &nnrtService.AutoUnload);
+    LoadFunction(libNNRtService, "SetDeinitModelCallBack", &nnrtService.SetDeinitModelCallBack);
+    LoadFunction(libNNRtService, "UnSetDeinitModelCallBack", &nnrtService.UnSetDeinitModelCallBack);
 
     nnrtService.m_serviceAvailable = true;
     return nnrtService;
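For context on the block above: LoadFunction resolves each service symbol from the NNRt service library at runtime. A minimal sketch of such a helper, assuming it wraps dlsym() the way the call sites suggest (the real helper already exists in nnrt_client.cpp and may differ):

```cpp
// Hypothetical sketch of a dlsym-based LoadFunction; the name and error
// handling are assumptions, not the actual implementation.
#include <dlfcn.h>

template <typename FuncPtr>
static void LoadFunction(void* libHandle, const char* symbolName, FuncPtr* target)
{
    if (libHandle == nullptr || symbolName == nullptr || target == nullptr) {
        return; // leave *target untouched so callers can null-check before use
    }
    void* symbol = dlsym(libHandle, symbolName);
    if (symbol == nullptr) {
        return; // missing symbol: the corresponding feature stays disabled
    }
    *target = reinterpret_cast<FuncPtr>(symbol);
}
```

This is why every new call site in this patch null-checks the member (e.g. `nnrtService.AutoUnload == nullptr`) before dispatching: a missing symbol in an older service library must degrade gracefully rather than crash.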
diff --git a/frameworks/native/neural_network_core/nnrt_client.h b/frameworks/native/neural_network_core/nnrt_client.h
index 03fb8b03041f657b2ef7ee102553039270597236..0850601c3735497f652d4682cf4d092d5871aa83 100644
--- a/frameworks/native/neural_network_core/nnrt_client.h
+++ b/frameworks/native/neural_network_core/nnrt_client.h
@@ -17,7 +17,7 @@
 #define NEURAL_NETWORK_RUNTIME_CLIENT_H
 
 #include
-#include
+#include "executor.h"
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
@@ -42,7 +42,12 @@ public:
     int (*UpdateModelLatency)(uint32_t hiaiModelId, int modelLatency) = nullptr;
     int (*Unload)(uint32_t hiaiModelId) = nullptr;
     bool (*PullUpDlliteService)() = nullptr;
-
+    int (*AutoReinitSetModelID)(uint32_t hiaimodelID, size_t nnrtModelID) = nullptr;
+    int (*AutoReinitScheduling)(uint32_t originHiaimodelID, uint32_t hiaiModelId,
+        bool* needModelLatency, const char* cachePath) = nullptr;
+    int (*AutoUnload)(uint32_t originHiaimodelID, uint32_t hiaiModelId) = nullptr;
+    int (*SetDeinitModelCallBack)(uint32_t hiaiModelId, OHOS::NeuralNetworkRuntime::Executor* callback) = nullptr;
+    int (*UnSetDeinitModelCallBack)(uint32_t hiaiModelId) = nullptr;
+
 private:
     bool m_serviceAvailable = false;
     NNRtServiceApi() = default;
diff --git a/frameworks/native/neural_network_runtime/BUILD.gn b/frameworks/native/neural_network_runtime/BUILD.gn
index 5ef450f5a81f243770636c8a0a9dd305c958fa17..aa6b389532cce43d698a81a975b9fc27fde1e87c 100644
--- a/frameworks/native/neural_network_runtime/BUILD.gn
+++ b/frameworks/native/neural_network_runtime/BUILD.gn
@@ -197,6 +197,7 @@ ohos_shared_library("libneural_network_runtime") {
     "ipc:ipc_core",
     "json:nlohmann_json_static",
     "mindspore:mindir_lib",
+    "eventhandler:libeventhandler",
   ]
 
   deps = [ "../neural_network_core:libneural_network_core" ]
diff --git a/frameworks/native/neural_network_runtime/nncompiler.cpp b/frameworks/native/neural_network_runtime/nncompiler.cpp
index 0b66abf1c8c73c758dc1add9797c0228fcd3be07..0f3b5af16c2c43bd3c966d3f1a83590417e469ba 100644
--- a/frameworks/native/neural_network_runtime/nncompiler.cpp
+++ b/frameworks/native/neural_network_runtime/nncompiler.cpp
@@ -777,7 +777,8 @@ NNExecutor* NNCompiler::CreateExecutor()
     }
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs,
+        m_cachePath, m_cacheVersion, m_extensionConfig, m_enableFp16, m_performance, m_priority);
     if (nnExecutor == nullptr) {
         LOGE("[NNCompiler] CreateExecutor failed, error happend when allocating NN Executor.");
         return nullptr;
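The nnexecutor.cpp changes below lean on the OHOS EventHandler delayed-task API for the 15-minute auto-unload timer. A self-contained sketch of the pattern, assuming only the public EventRunner/EventHandler API (runner and task names here are illustrative):

```cpp
#include <memory>
#include <string>
#include "event_handler.h"
#include "event_runner.h"

// Post a delayed task on a named runner, then cancel and re-arm it, mirroring
// what the executor does around each RunSync call. The 15-minute constant
// matches the patch; the task body is a placeholder.
constexpr int AUTOUNLOAD_TIME = 15 * 60 * 1000; // milliseconds

void DemoAutoUnloadTimer(uint64_t executorId)
{
    const std::string taskName = "nnexecutor_autounload" + std::to_string(executorId);
    auto runner = OHOS::AppExecFwk::EventRunner::Create(taskName);
    auto handler = std::make_shared<OHOS::AppExecFwk::EventHandler>(runner);

    // Arm the timer: fires once after AUTOUNLOAD_TIME unless removed first.
    handler->PostTask([]() { /* DeinitModel("DelayUnload") in the real code */ },
        taskName, AUTOUNLOAD_TIME);

    // On the next inference the pending unload is cancelled and re-armed:
    handler->RemoveTask(taskName);
    handler->PostTask([]() { /* DeinitModel("DelayUnload") */ }, taskName, AUTOUNLOAD_TIME);
}
```

Naming the task after the executor id keeps timers of different executors in the same process from cancelling each other.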
diff --git a/frameworks/native/neural_network_runtime/nnexecutor.cpp b/frameworks/native/neural_network_runtime/nnexecutor.cpp
index d8f1e6df9bed244b380c8f8e62d1054d8e190c76..08e760b7438d60f266f4157dee17d1c0826383da 100644
--- a/frameworks/native/neural_network_runtime/nnexecutor.cpp
+++ b/frameworks/native/neural_network_runtime/nnexecutor.cpp
@@ -16,8 +16,14 @@
 
 #include "nnexecutor.h"
 #include "nntensor.h"
-#include "log.h"
+#include "nncompiled_cache.h"
 #include "cpp_type.h"
+#include "neural_network_runtime_inner.h"
+#include "nnrt_client.h"
+#include "log.h"
+#include <cstring>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include "securec.h"
 #include "utils.h"
@@ -26,16 +32,132 @@
 namespace OHOS {
 constexpr size_t EXTENSION_MAX_SIZE = 200;
+constexpr int AUTOUNLOAD_TIME = 15 * 60 * 1000;
 
 namespace NeuralNetworkRuntime {
+constexpr int CACHE_INPUT_TENSORDESC_OFFSET = 2;
+constexpr int CACHE_OUTPUT_TENSORDESC_OFFSET = 1;
+struct SerializedTensorDesc {
+public:
+    SerializedTensorDesc() = default;
+    ~SerializedTensorDesc() = default;
+
+    OH_NN_ReturnCode CopyFromTensorDesc(const std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>& tensorDesc)
+    {
+        if (tensorDesc.first == nullptr) {
+            LOGE("CopyFromTensorDesc failed, tensor desc is nullptr.");
+            return OH_NN_NULL_PTR;
+        }
+        OH_NN_ReturnCode ret = tensorDesc.first->GetDataType(&m_dataType);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyFromTensorDesc failed, error happened when getting data type from tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.first->GetFormat(&m_format);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyFromTensorDesc failed, error happened when getting format from tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.first->GetShape(&m_shape, &m_shapeNum);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyFromTensorDesc failed, error happened when getting shape from tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.first->GetName(&m_name);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyFromTensorDesc failed, error happened when getting name from tensor desc.");
+            return ret;
+        }
+
+        m_tensorType = tensorDesc.second;
+
+        return ret;
+    }
+
+    OH_NN_ReturnCode CopyToTensorDesc(TensorDesc& tensorDesc) const
+    {
+        OH_NN_ReturnCode ret = tensorDesc.SetDataType(m_dataType);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyToTensorDesc failed, error happened when setting data type to tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.SetFormat(m_format);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyToTensorDesc failed, error happened when setting format to tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.SetShape(m_shape, m_shapeNum);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyToTensorDesc failed, error happened when setting shape to tensor desc.");
+            return ret;
+        }
+
+        ret = tensorDesc.SetName(m_name);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("CopyToTensorDesc failed, error happened when setting name to tensor desc.");
+        }
+
+        return ret;
+    }
+
+public:
+    OH_NN_DataType m_dataType{OH_NN_UNKNOWN};
+    OH_NN_Format m_format{OH_NN_FORMAT_NONE};
+    OH_NN_TensorType m_tensorType{OH_NN_TENSOR};
+    size_t m_shapeNum{0};
+    int32_t* m_shape{nullptr};
+    const char* m_name{nullptr}; // null-terminated
+};
+const size_t SIZE_OF_DATATYPE = sizeof(SerializedTensorDesc::m_dataType);
+const size_t SIZE_OF_FORMAT = sizeof(SerializedTensorDesc::m_format);
+const size_t SIZE_OF_TENSOR_TYPE = sizeof(SerializedTensorDesc::m_tensorType);
+const size_t SIZE_OF_SHAPE_NUM = sizeof(SerializedTensorDesc::m_shapeNum);
+
+uint64_t GenRandom(void)
+{
+    uint64_t random = 0;
+    int fd = open("/dev/random", O_RDONLY);
+    if (fd >= 0) {
+        if (read(fd, &random, sizeof(random)) != sizeof(random)) {
+            random = 0;
+        }
+        close(fd);
+    }
+    return random;
+}
+
 NNExecutor::NNExecutor(size_t backendID, std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
     const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& inputTensorDescs,
-    const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputTensorDescs)
+    const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputTensorDescs,
+    std::string cachePath, uint32_t cacheVersion, ExtensionConfig extensionConfig, bool enableFp16,
+    OH_NN_PerformanceMode performance, OH_NN_Priority priority)
     : m_backendID(backendID),
       m_device(device),
       m_preparedModel(preparedModel),
       m_inputTensorDescs(inputTensorDescs),
-      m_outputTensorDescs(outputTensorDescs) {}
+      m_outputTensorDescs(outputTensorDescs),
+      m_cachePath(cachePath),
+      m_cacheVersion(cacheVersion),
+      m_extensionConfig(extensionConfig),
+      m_enableFp16(enableFp16),
+      m_performance(performance),
+      m_priority(priority),
+      m_loadtime(std::chrono::steady_clock::now())
+{
+    m_executorid = GenRandom();
+    m_autoUnloadRunner = OHOS::AppExecFwk::EventRunner::Create("nnexecutor_autounload" + std::to_string(m_executorid));
+    m_autoUnloadHandler = std::make_shared<OHOS::AppExecFwk::EventHandler>(m_autoUnloadRunner);
+    auto AutoUnloadTask = [this]() {
+        DeinitModel("DelayUnload");
+    };
+    m_autoUnloadHandler->PostTask(AutoUnloadTask,
+        "nnexecutor_autounload" + std::to_string(m_executorid), AUTOUNLOAD_TIME);
+}
 
 OH_NN_ReturnCode NNExecutor::GetInputDimVec() const
 {
@@ -248,81 +370,281 @@ OH_NN_ReturnCode NNExecutor::SetOnServiceDied(NN_OnServiceDied onServiceDied)
     return OH_NN_OPERATION_FORBIDDEN;
 }
 
-OH_NN_ReturnCode NNExecutor::RunSync(NN_Tensor* inputTensors[], size_t inputSize,
-    NN_Tensor* outputTensors[], size_t outputSize)
-{
-    if (m_inputTensorDescs.size() != inputSize) {
-        LOGE("NNExecutor::RunSync failed, inputSize:%{public}zu is not equal to model input size:%{public}zu",
-            inputSize, m_inputTensorDescs.size());
-        return OH_NN_INVALID_PARAMETER;
-    }
-    if (m_outputTensorDescs.size() != outputSize) {
-        LOGE("NNExecutor::RunSync failed, outputSize:%{public}zu is not equal to model output size:%{public}zu",
-            outputSize, m_outputTensorDescs.size());
-        return OH_NN_INVALID_PARAMETER;
-    }
-
-    OH_NN_ReturnCode ret {OH_NN_FAILED};
-    ret = CheckInputDimRanges(inputTensors, inputSize);
-    if (ret != OH_NN_OPERATION_FORBIDDEN && ret != OH_NN_SUCCESS) {
-        LOGE("NNExecutor::RunSync failed, failed to check input dim ranges.");
-        return ret;
-    }
-
-    OHOS::NeuralNetworkRuntime::IOTensor tensor;
-    std::vector<NN_Tensor*> inputTensorsVec;
-    for (size_t i = 0; i < inputSize; ++i) {
-        if (inputTensors[i] == nullptr) {
-            LOGE("NNExecutor::RunSync failed, input[%{public}zu] is nullptr.", i);
-            return OH_NN_INVALID_PARAMETER;
-        }
-        inputTensorsVec.emplace_back(inputTensors[i]);
-    }
-
-    std::vector<NN_Tensor*> outputTensorsVec;
-    for (size_t i = 0; i < outputSize; ++i) {
-        if (outputTensors[i] == nullptr) {
-            LOGE("NNExecutor::RunSync failed, output[%{public}zu] is nullptr.", i);
-            return OH_NN_INVALID_PARAMETER;
-        }
-        outputTensorsVec.emplace_back(outputTensors[i]);
-    }
-
-    std::vector<std::vector<int32_t>> outputsDims;
-    std::vector<bool> isSufficientDataBuffer;
-
-    ret = m_preparedModel->Run(inputTensorsVec, outputTensorsVec, outputsDims, isSufficientDataBuffer);
-    if (ret != OH_NN_SUCCESS) {
-        LOGE("NNExecutor::RunSync failed, failed to run in prepared model.");
-        return ret;
-    }
-
-    // Set the output NNTensor2_0's dimensions from output IOTensor if it is dynamic.
-    // NNTensor2_0::SetDimensions will check if the tensor buffer is enough for the new dimensions.
-    if (outputsDims.size() != outputSize) {
-        LOGE("NNExecutor::RunSync failed, size of outputsDims is not equal to outputTensors.");
-        return OH_NN_INVALID_PARAMETER;
-    }
-    for (size_t i = 0; i < outputSize; ++i) {
-        NNTensor2_0* nnTensor = reinterpret_cast<NNTensor2_0*>(outputTensors[i]);
-        TensorDesc* nnTensorDesc = nnTensor->GetTensorDesc();
-        if (nnTensorDesc == nullptr) {
-            LOGE("NNExecutor::RunSync failed, failed to get desc from tensor.");
-            return OH_NN_NULL_PTR;
-        }
-        ret = nnTensorDesc->SetShape(outputsDims[i].data(), outputsDims[i].size());
-        if (ret != OH_NN_SUCCESS) {
-            LOGE("NNExecutor::RunSync failed, error happened when setting output tensor's dimensions,"
-                " output id: %zu.", i);
-            return ret;
-        }
-        ret = m_outputTensorDescs[i].first->SetShape(outputsDims[i].data(), outputsDims[i].size());
-        if (ret != OH_NN_SUCCESS) {
-            LOGE("NNExecutor::RunSync failed, error happened when setting inner output tensor's dimensions,"
-                " output id: %zu.", i);
-            return ret;
-        }
-    }
+void ReleaseDescShape(std::vector<SerializedTensorDesc>& immediateTensorDescs)
+{
+    for (auto desc : immediateTensorDescs) {
+        delete[] desc.m_shape;
+    }
+    immediateTensorDescs.clear();
+}
+
+OH_NN_ReturnCode NNExecutor::DeserializedTensorsFromBuffer(
+    const Buffer& buffer, std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& tensorDescs)
+{
+    std::vector<SerializedTensorDesc> immediateTensorDescs;
+    const char* ptr = static_cast<const char*>(buffer.data);
+    const char* end = ptr + buffer.length;
+    while (ptr < end) {
+        SerializedTensorDesc desc;
+
+        auto memRet = memcpy_s(&desc.m_dataType, SIZE_OF_DATATYPE, ptr, sizeof(desc.m_dataType));
+        if (memRet != EOK) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s data type.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_MEMORY_ERROR;
+        }
+        ptr += sizeof(desc.m_dataType);
+
+        memRet = memcpy_s(&desc.m_format, SIZE_OF_FORMAT, ptr, sizeof(desc.m_format));
+        if (memRet != EOK) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s format.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_MEMORY_ERROR;
+        }
+        ptr += sizeof(desc.m_format);
+
+        memRet = memcpy_s(&desc.m_tensorType, SIZE_OF_TENSOR_TYPE, ptr, sizeof(desc.m_tensorType));
+        if (memRet != EOK) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s tensor type.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_MEMORY_ERROR;
+        }
+        ptr += sizeof(desc.m_tensorType);
+
+        memRet = memcpy_s(&desc.m_shapeNum, SIZE_OF_SHAPE_NUM, ptr, sizeof(desc.m_shapeNum));
+        if (memRet != EOK) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s shape num.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_MEMORY_ERROR;
+        }
+        ptr += sizeof(desc.m_shapeNum);
+
+        desc.m_shape = new (std::nothrow) int32_t[desc.m_shapeNum];
+        if (desc.m_shape == nullptr) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to create shape buffer.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_NULL_PTR;
+        }
+        memRet = memcpy_s(desc.m_shape, desc.m_shapeNum * sizeof(int32_t), ptr, desc.m_shapeNum * sizeof(int32_t));
+        if (memRet != EOK) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to memcpy_s shape.");
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_MEMORY_ERROR;
+        }
+        ptr += desc.m_shapeNum * sizeof(int32_t);
+
+        desc.m_name = ptr;
+        ptr += std::strlen(desc.m_name) + 1; // +1 for null terminator
+
+        immediateTensorDescs.push_back(desc);
+    }
+
+    OH_NN_ReturnCode ret {OH_NN_SUCCESS};
+    for (const auto& immediateTensorDesc : immediateTensorDescs) {
+        std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> tensorDescPair;
+        tensorDescPair.first = CreateSharedPtr<TensorDesc>();
+        if (tensorDescPair.first == nullptr) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, failed to create tensor desc.");
+            tensorDescs.clear();
+            ReleaseDescShape(immediateTensorDescs);
+            return OH_NN_NULL_PTR;
+        }
+        ret = immediateTensorDesc.CopyToTensorDesc(*(tensorDescPair.first.get()));
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("[NNExecutor] DeserializedTensorsFromBuffer failed, error happened when copying "
+                 "SerializedTensorDesc to TensorDesc.");
+            tensorDescs.clear();
+            ReleaseDescShape(immediateTensorDescs);
+            return ret;
+        }
+        tensorDescPair.second = immediateTensorDesc.m_tensorType;
+
+        tensorDescs.emplace_back(tensorDescPair);
+    }
+
+    ReleaseDescShape(immediateTensorDescs);
+    return ret;
+}
+
+OH_NN_ReturnCode NNExecutor::Reload()
+{
+    if (m_cachePath.empty()) {
+        LOGE("[NNExecutor] Reload failed, path is empty.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (m_cacheVersion == INVALID_CAHCE_VERSION) {
+        LOGE("[NNExecutor] Reload failed, cache version is invalid. Please set a valid cache version.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (m_preparedModel != nullptr) {
+        LOGE("[NNExecutor] Reload failed, m_preparedModel is not nullptr.");
+        return OH_NN_FAILED;
+    }
+
+    NNCompiledCache compiledCache;
+    OH_NN_ReturnCode ret = compiledCache.SetBackend(m_backendID);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[NNExecutor] Reload failed, failed to set backend.");
+        return ret;
+    }
+
+    std::vector<Buffer> caches;
+    compiledCache.SetModelName(m_extensionConfig.modelName);
+    ret = compiledCache.Restore(m_cachePath, m_cacheVersion, caches);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[NNExecutor] Reload failed, error happened when restoring model cache.");
+        compiledCache.ReleaseCacheBuffer(caches);
+        return ret;
+    }
+
+    size_t cacheNum = caches.size();
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> inputTensorDescs;
+    ret = DeserializedTensorsFromBuffer(caches[cacheNum - CACHE_INPUT_TENSORDESC_OFFSET], inputTensorDescs);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[NNExecutor] Reload failed, error happened when deserializing input tensor desc.");
+        compiledCache.ReleaseCacheBuffer(caches);
+        return ret;
+    }
+
+    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> outputTensorDescs;
+    ret = DeserializedTensorsFromBuffer(caches[cacheNum - CACHE_OUTPUT_TENSORDESC_OFFSET], outputTensorDescs);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[NNExecutor] Reload failed, error happened when deserializing output tensor desc.");
+        compiledCache.ReleaseCacheBuffer(caches);
+        return ret;
+    }
+
+    ModelConfig config;
+    config.enableFloat16 = m_enableFp16;
+    config.mode = m_performance;
+    config.priority = m_priority;
+    config.extensionConfig.isNpuFmShared = m_extensionConfig.isNpuFmShared;
+    std::vector<Buffer> modelOnlyCaches(caches.begin(), caches.end() - CACHE_INPUT_TENSORDESC_OFFSET);
+    bool isUpdatable = false;
+    ret = m_device->PrepareModelFromModelCache(modelOnlyCaches, config, m_preparedModel, isUpdatable);
+    if (ret != OH_NN_SUCCESS) {
+        LOGE("[NNExecutor] Reload failed, error happened when preparing model from cache.");
+        compiledCache.ReleaseCacheBuffer(caches);
+        return ret;
+    }
+
+    compiledCache.ReleaseCacheBuffer(caches);
+
+    m_inputTensorDescs = inputTensorDescs;
+    m_outputTensorDescs = outputTensorDescs;
+    LOGI("[NNExecutor] Restore model cache successfully.");
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode NNExecutor::RunSync(NN_Tensor* inputTensors[], size_t inputSize,
+    NN_Tensor* outputTensors[], size_t outputSize)
+{
+    std::lock_guard<std::mutex> lock(m_mutex);
+    {
+        uint32_t modelId {0};
+        GetModelID(modelId);
+        LOGI("NNExecutor::RunSync pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d",
+            getpid(), originHiaiModelId_, modelId);
+        m_autoUnloadHandler->RemoveTask("nnexecutor_autounload" + std::to_string(m_executorid));
+        if (m_inputTensorDescs.size() != inputSize) {
+            LOGE("NNExecutor::RunSync failed, inputSize:%{public}zu is not equal to model input size:%{public}zu",
+                inputSize, m_inputTensorDescs.size());
+            return OH_NN_INVALID_PARAMETER;
+        }
+        if (m_outputTensorDescs.size() != outputSize) {
+            LOGE("NNExecutor::RunSync failed, outputSize:%{public}zu is not equal to model output size:%{public}zu",
+                outputSize, m_outputTensorDescs.size());
+            return OH_NN_INVALID_PARAMETER;
+        }
+
+        if (m_preparedModel == nullptr) {
+            if (Reload() != OH_NN_SUCCESS) {
+                return OH_NN_INVALID_PARAMETER;
+            }
+
+            auto _ret = GetModelID(modelId);
+            LOGI("AutoReload pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d",
+                getpid(), originHiaiModelId_, modelId);
+            if (_ret != OH_NN_SUCCESS) {
+                LOGW("GetModelID failed, some error happened when getting model id from device.");
+            }
+            _ret = ReinitScheduling(modelId, &m_executorConfig->isNeedModelLatency, m_cachePath.c_str());
+            if (_ret != OH_NN_SUCCESS) {
+                LOGW("ReinitScheduling failed, some error happened when reinit scheduling the model.");
+            }
+        }
+
+        OH_NN_ReturnCode ret {OH_NN_FAILED};
+        ret = CheckInputDimRanges(inputTensors, inputSize);
+        if (ret != OH_NN_OPERATION_FORBIDDEN && ret != OH_NN_SUCCESS) {
+            LOGE("NNExecutor::RunSync failed, failed to check input dim ranges.");
+            return ret;
+        }
+
+        OHOS::NeuralNetworkRuntime::IOTensor tensor;
+        std::vector<NN_Tensor*> inputTensorsVec;
+        for (size_t i = 0; i < inputSize; ++i) {
+            if (inputTensors[i] == nullptr) {
+                LOGE("NNExecutor::RunSync failed, input[%{public}zu] is nullptr.", i);
+                return OH_NN_INVALID_PARAMETER;
+            }
+            inputTensorsVec.emplace_back(inputTensors[i]);
+        }
+
+        std::vector<NN_Tensor*> outputTensorsVec;
+        for (size_t i = 0; i < outputSize; ++i) {
+            if (outputTensors[i] == nullptr) {
+                LOGE("NNExecutor::RunSync failed, output[%{public}zu] is nullptr.", i);
+                return OH_NN_INVALID_PARAMETER;
+            }
+            outputTensorsVec.emplace_back(outputTensors[i]);
+        }
+
+        std::vector<std::vector<int32_t>> outputsDims;
+        std::vector<bool> isSufficientDataBuffer;
+
+        ret = m_preparedModel->Run(inputTensorsVec, outputTensorsVec, outputsDims, isSufficientDataBuffer);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("NNExecutor::RunSync failed, failed to run in prepared model.");
+            return ret;
+        }
+
+        // Set the output NNTensor2_0's dimensions from output IOTensor if it is dynamic.
+        // NNTensor2_0::SetDimensions will check if the tensor buffer is enough for the new dimensions.
+        if (outputsDims.size() != outputSize) {
+            LOGE("NNExecutor::RunSync failed, size of outputsDims is not equal to outputTensors.");
+            return OH_NN_INVALID_PARAMETER;
+        }
+        for (size_t i = 0; i < outputSize; ++i) {
+            NNTensor2_0* nnTensor = reinterpret_cast<NNTensor2_0*>(outputTensors[i]);
+            TensorDesc* nnTensorDesc = nnTensor->GetTensorDesc();
+            if (nnTensorDesc == nullptr) {
+                LOGE("NNExecutor::RunSync failed, failed to get desc from tensor.");
+                return OH_NN_NULL_PTR;
+            }
+            ret = nnTensorDesc->SetShape(outputsDims[i].data(), outputsDims[i].size());
+            if (ret != OH_NN_SUCCESS) {
+                LOGE("NNExecutor::RunSync failed, error happened when setting output tensor's dimensions,"
+                    " output id: %zu.", i);
+                return ret;
+            }
+            ret = m_outputTensorDescs[i].first->SetShape(outputsDims[i].data(), outputsDims[i].size());
+            if (ret != OH_NN_SUCCESS) {
+                LOGE("NNExecutor::RunSync failed, error happened when setting inner output tensor's dimensions,"
+                    " output id: %zu.", i);
+                return ret;
+            }
+        }
+    }
+
+    auto AutoUnloadTask = [this]() {
+        DeinitModel("DelayUnload");
+    };
+    m_loadtime = std::chrono::steady_clock::now();
+    m_autoUnloadHandler->PostTask(AutoUnloadTask,
+        "nnexecutor_autounload" + std::to_string(m_executorid), AUTOUNLOAD_TIME);
+
     return OH_NN_SUCCESS;
 }
 
@@ -335,13 +657,16 @@ OH_NN_ReturnCode NNExecutor::RunAsync(NN_Tensor* inputTensors[], size_t inputSiz
 
 OH_NN_ReturnCode NNExecutor::GetModelID(uint32_t& modelId) const
 {
-    OH_NN_ReturnCode ret = m_preparedModel->GetModelID(modelId);
-    if (ret != OH_NN_SUCCESS) {
-        LOGE("GetModelID failed, some error happen when get model id for device.");
-        return ret;
+    if (m_preparedModel != nullptr) {
+        OH_NN_ReturnCode ret = m_preparedModel->GetModelID(modelId);
+        if (ret != OH_NN_SUCCESS) {
+            LOGE("GetModelID failed, some error happened when getting model id from device.");
+            return ret;
+        }
+        return OH_NN_SUCCESS;
     }
 
-    return OH_NN_SUCCESS;
+    return OH_NN_OPERATION_FORBIDDEN;
 }
 
 size_t NNExecutor::GetBackendID()
@@ -1123,6 +1448,180 @@ NNExecutor::~NNExecutor()
         delete m_executorConfig;
         m_executorConfig = nullptr;
     }
+
+    UnSetDeinitModelCallBack();
+
+    uint32_t modelId {0};
+    GetModelID(modelId);
+    LOGI("manualUnload pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d",
+        getpid(), originHiaiModelId_, modelId);
+}
+
+OH_NN_ReturnCode NNExecutor::SetDeinitModelCallBack()
+{
+    NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance();
+    if (!nnrtService.IsServiceAvaliable()) {
+        LOGW("SetDeinitModelCallBack failed, failed to get nnrt service, skip SetDeinitModelCallBack.");
+        return OH_NN_SUCCESS;
+    }
+
+    if (nnrtService.SetDeinitModelCallBack == nullptr) {
+        LOGE("SetDeinitModelCallBack failed, nnrtService SetDeinitModelCallBack func is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    if (m_preparedModel == nullptr) {
+        LOGE("SetDeinitModelCallBack failed, m_preparedModel is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    int ret = nnrtService.SetDeinitModelCallBack(m_executorid, reinterpret_cast<OHOS::NeuralNetworkRuntime::Executor*>(this));
+    if (ret != static_cast<int>(OH_NN_SUCCESS)) {
+        LOGE("SetDeinitModelCallBack failed, some error happened when SetDeinitModelCallBack.");
+        return static_cast<OH_NN_ReturnCode>(ret);
+    }
+
+    auto _ret = GetModelID(originHiaiModelId_);
+    if (_ret != OH_NN_SUCCESS) {
+        LOGW("GetModelID failed, some error happened when getting model id from device.");
+    }
+
+    uint32_t modelId {0};
+    GetModelID(modelId);
+    LOGI("manualload pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d",
+        getpid(), originHiaiModelId_, modelId);
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode NNExecutor::UnSetDeinitModelCallBack()
+{
+    NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance();
+    if (!nnrtService.IsServiceAvaliable()) {
+        LOGW("UnSetDeinitModelCallBack failed, failed to get nnrt service, skip UnSetDeinitModelCallBack.");
+        return OH_NN_SUCCESS;
+    }
+
+    if (nnrtService.UnSetDeinitModelCallBack == nullptr) {
+        LOGE("UnSetDeinitModelCallBack failed, nnrtService UnSetDeinitModelCallBack func is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    int ret = nnrtService.UnSetDeinitModelCallBack(m_executorid);
+    if (ret != static_cast<int>(OH_NN_SUCCESS)) {
+        LOGE("UnSetDeinitModelCallBack failed, some error happened when UnSetDeinitModelCallBack.");
+        return static_cast<OH_NN_ReturnCode>(ret);
+    }
+
+    return OH_NN_SUCCESS;
+}
+
+OH_NN_ReturnCode NNExecutor::ReinitScheduling(uint32_t hiaimodelID, bool* needModelLatency, const char* cachePath)
+{
+    NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance();
+    if (!nnrtService.IsServiceAvaliable()) {
+        LOGW("[HiaiExecutorImpl] ReinitScheduling failed, failed to get nnrt service, skip ReinitScheduling.");
+        return OH_NN_SUCCESS;
+    }
+
+    if (nnrtService.AutoReinitSetModelID == nullptr) {
+        LOGE("[HiaiExecutorImpl] ReinitScheduling failed, nnrtService AutoReinitSetModelID func is nullptr.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    size_t nnrtmodelID = nnrtService.GetNNRtModelIDFromCache(m_cachePath.c_str(), m_extensionConfig.modelName.c_str());
+    if (nnrtmodelID == 0) {
+        LOGE("[HiaiExecutorImpl] ReinitScheduling failed, failed to get NNRt model id from cache.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
+    int ret = nnrtService.AutoReinitSetModelID(hiaimodelID, nnrtmodelID);
+    if (ret != static_cast<int>(OH_NN_SUCCESS)) {
+        LOGE("[HiaiExecutorImpl] ReinitScheduling failed, some error happened when AutoReinitSetModelID.");
+        return OH_NN_INVALID_PARAMETER;
+    }
+
IsSupportScheduling func is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + bool supportStat = false; + ret = nnrtService.IsSupportScheduling(&supportStat); + if (ret != static_cast(OH_NN_SUCCESS)) { + LOGE("ReinitScheduling failed, some error happened when judge if support scheduling."); + return OH_NN_INVALID_PARAMETER; + } + if (!supportStat) { + LOGW("device not support scheduling, jumper over scheduling."); + return OH_NN_SUCCESS; + } + + if (nnrtService.AutoReinitScheduling == nullptr) { + LOGE("ReinitScheduling failed, nnrtService IsSupportScheduling func is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + ret = nnrtService.AutoReinitScheduling(originHiaiModelId_, hiaimodelID, needModelLatency, cachePath); + if (ret != static_cast(OH_NN_SUCCESS)) { + LOGE("ReinitScheduling failed, some error happened when scheduling."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + +OH_NN_ReturnCode NNExecutor::DeinitScheduling(uint32_t hiaimodelID) +{ + NNRtServiceApi& nnrtService = NNRtServiceApi::GetInstance(); + if (nnrtService.AutoUnload == nullptr) { + LOGE("[HiaiExecutorImpl] AutoUnload failed, nnrtService AutoUnload func is nullptr."); + return OH_NN_INVALID_PARAMETER; + } + + int ret = nnrtService.AutoUnload(originHiaiModelId_, hiaimodelID); + if (ret != static_cast(OH_NN_SUCCESS)) { + LOGE("[HiaiExecutorImpl] AutoUnload failed, some error happen when AutoUnload hiaiModelId."); + return OH_NN_INVALID_PARAMETER; + } + + return OH_NN_SUCCESS; +} + +bool NNExecutor::DeinitModel(std::string mode) +{ + if (m_preparedModel == nullptr) { + return false; + } + + std::lock_guard lock(m_mutex); + + if (m_preparedModel != nullptr && + OH_NNModel_HasCache(m_cachePath.c_str(), + m_extensionConfig.modelName.c_str(), + m_cacheVersion)) { + uint32_t modelId; + auto _ret = GetModelID(modelId); + if (_ret != OH_NN_SUCCESS) { + LOGW("GetModelID failed, some error happen when get model id for device."); + } + + _ret = DeinitScheduling(modelId); + if (_ret != OH_NN_SUCCESS) { + LOGW("DeinitScheduling failed, some error happen when DeinitScheduling model."); + } + m_preparedModel.reset(); + std::chrono::duration duration = std::chrono::steady_clock::now() - m_loadtime; + if (mode == "FrozenDeinit") { + m_autoUnloadHandler->RemoveTask("nnexecutor_autounload" + std::to_string(m_executorid)); + LOGI("FrozenDeinit pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d time=%{public}f", + getpid(), originHiaiModelId_, modelId, duration.count()); + } else { + LOGI("AutoUnload pid=%{public}d originHiaiModelId=%{public}d hiaiModelId=%{public}d time=%{public}f", + getpid(), originHiaiModelId_, modelId, duration.count()); + } + } + + return true; } } // namespace NeuralNetworkRuntime } // namespace OHOS diff --git a/frameworks/native/neural_network_runtime/nnexecutor.h b/frameworks/native/neural_network_runtime/nnexecutor.h index 6b46fe0ceab42c19106e2d2363dee150d96e84b2..72d6ff1fe29867eb8efb3dfd95bf0cb04c3ba9fb 100644 --- a/frameworks/native/neural_network_runtime/nnexecutor.h +++ b/frameworks/native/neural_network_runtime/nnexecutor.h @@ -16,11 +16,17 @@ #ifndef NEURAL_NETWORK_RUNTIME_NNEXECUTOR_H #define NEURAL_NETWORK_RUNTIME_NNEXECUTOR_H +#include #include "executor.h" #include "device.h" #include "prepared_model.h" #include "nn_tensor.h" +#include "log.h" +#include "event_handler.h" +#include "event_runner.h" + +#include namespace OHOS { namespace NeuralNetworkRuntime { class NNExecutor : public Executor { @@ -29,7 +35,9 @@ public: std::shared_ptr device, std::shared_ptr 
diff --git a/frameworks/native/neural_network_runtime/nnexecutor.h b/frameworks/native/neural_network_runtime/nnexecutor.h
index 6b46fe0ceab42c19106e2d2363dee150d96e84b2..72d6ff1fe29867eb8efb3dfd95bf0cb04c3ba9fb 100644
--- a/frameworks/native/neural_network_runtime/nnexecutor.h
+++ b/frameworks/native/neural_network_runtime/nnexecutor.h
@@ -16,11 +16,17 @@
 #ifndef NEURAL_NETWORK_RUNTIME_NNEXECUTOR_H
 #define NEURAL_NETWORK_RUNTIME_NNEXECUTOR_H
 
+#include <chrono>
 #include "executor.h"
 #include "device.h"
 #include "prepared_model.h"
 #include "nn_tensor.h"
+#include "log.h"
+#include "event_handler.h"
+#include "event_runner.h"
+
+#include <mutex>
 
 namespace OHOS {
 namespace NeuralNetworkRuntime {
 class NNExecutor : public Executor {
@@ -29,7 +35,9 @@ public:
     NNExecutor(size_t backendID, std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
         const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& inputTensorDescs,
-        const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputTensorDescs);
+        const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputTensorDescs,
+        std::string cachePath, uint32_t cacheVersion, ExtensionConfig extensionConfig, bool enableFp16,
+        OH_NN_PerformanceMode performance, OH_NN_Priority priority);
     ~NNExecutor() override;
 
     OH_NN_ReturnCode GetInputDimRange(size_t inputIndex,
@@ -73,6 +81,10 @@ public:
 
     OH_NN_ReturnCode Run();
 
+    bool DeinitModel(std::string mode) override;
+    OH_NN_ReturnCode SetDeinitModelCallBack() override;
+    OH_NN_ReturnCode UnSetDeinitModelCallBack() override;
+
 private:
     OH_NN_ReturnCode GetInputDimVec() const;
     OH_NN_ReturnCode CheckInputDimRanges(NN_Tensor* inputTensors[], size_t inputSize);
@@ -91,6 +103,11 @@ private:
     void SetInputTensorWithNewBuffer(uint32_t index,
         std::shared_ptr<NNTensor> inputTensor, const void* inputBuffer, size_t length, bool isInnerMem);
     OH_NN_ReturnCode CheckInputDimRanges(uint32_t index, const OH_NN_Tensor& nnTensor) const;
+    OH_NN_ReturnCode DeserializedTensorsFromBuffer(
+        const Buffer& buffer, std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& tensorDescs);
+    OH_NN_ReturnCode Reload();
+    OH_NN_ReturnCode ReinitScheduling(uint32_t hiaimodelID, bool* needModelLatency, const char* cachePath);
+    OH_NN_ReturnCode DeinitScheduling(uint32_t hiaimodelID);
 
 private:
     size_t m_backendID {0};
@@ -98,6 +115,13 @@ private:
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    std::string m_cachePath;
+    uint32_t m_cacheVersion {0};
+    ExtensionConfig m_extensionConfig;
+    bool m_enableFp16 {false};
+    OH_NN_PerformanceMode m_performance {OH_NN_PERFORMANCE_NONE};
+    OH_NN_Priority m_priority {OH_NN_PRIORITY_NONE};
+    uint32_t originHiaiModelId_ {0};
 
     // The following parameters are provided for compatibility with older versions
     struct ExeTensor {
@@ -114,6 +138,12 @@ private:
     std::unordered_map> m_outputCreatedMem;
     mutable std::vector<std::vector<uint32_t>> m_minInputDimsVec;
     mutable std::vector<std::vector<uint32_t>> m_maxInputDimsVec;
+
+    std::shared_ptr<OHOS::AppExecFwk::EventRunner> m_autoUnloadRunner;
+    std::shared_ptr<OHOS::AppExecFwk::EventHandler> m_autoUnloadHandler;
+    uint64_t m_executorid {0};
+    std::mutex m_mutex;
+    std::chrono::time_point<std::chrono::steady_clock> m_loadtime;
 };
 } // namespace NeuralNetworkRuntime
 } // namespace OHOS
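With the widened constructor declared above, call sites now pass the cache and scheduling context explicitly. A minimal construction sketch (path and version values illustrative; `extensionConfig.modelName` mirrors the field the implementation reads):

```cpp
ExtensionConfig extensionConfig;
extensionConfig.modelName = "demo_model";

NNExecutor* executor = new (std::nothrow) NNExecutor(
    backendID, device, preparedModel, inputTensorDescs, outputTensorDescs,
    "/data/local/tmp/nncache",   // cachePath: enables Reload() after auto-unload
    1,                           // cacheVersion: must match the on-disk cache
    extensionConfig,
    false,                       // enableFp16
    OH_NN_PERFORMANCE_NONE,
    OH_NN_PRIORITY_NONE);
```

An empty cachePath is still accepted (as the tests below do), but then Reload() fails by design and the executor cannot be restored once auto-unloaded.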
"neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -140,6 +145,7 @@ ohos_unittest("HDIPreparedModelV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -151,6 +157,7 @@ ohos_unittest("MemoryManagerTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -159,6 +166,7 @@ ohos_unittest("MemoryManagerTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -179,6 +187,7 @@ ohos_unittest("NeuralNetworkCoreV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -189,6 +198,7 @@ ohos_unittest("QuantParamsTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -197,6 +207,7 @@ ohos_unittest("QuantParamsTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -207,6 +218,7 @@ ohos_unittest("NNBackendTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -215,6 +227,7 @@ ohos_unittest("NNBackendTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -225,6 +238,7 @@ ohos_unittest("NNCompiledCacheTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -234,6 +248,7 @@ ohos_unittest("NNCompiledCacheTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -244,6 +259,7 @@ ohos_unittest("NNCompilerTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -252,6 +268,7 @@ ohos_unittest("NNCompilerTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -262,6 +279,7 @@ ohos_unittest("NNExecutorTest") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -270,6 +288,7 @@ ohos_unittest("NNExecutorTest") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -289,6 +308,7 @@ ohos_unittest("NNTensor2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -299,6 +319,7 @@ ohos_unittest("TransformV1_0Test") { configs = [ 
":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_1.0", "googletest:gmock_main", "googletest:gtest_main", @@ -307,6 +328,7 @@ ohos_unittest("TransformV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -328,6 +350,7 @@ ohos_unittest("InnerModelV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -348,6 +371,7 @@ ohos_unittest("NnTensorV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -368,6 +392,7 @@ ohos_unittest("NnTensorDescV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -388,6 +413,7 @@ ohos_unittest("NnValidationV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -408,6 +434,7 @@ ohos_unittest("OpsRegistryV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -431,6 +458,7 @@ ohos_unittest("NeuralNetworkRuntimeV1_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -451,6 +479,7 @@ ohos_unittest("CompilationV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -471,6 +500,7 @@ ohos_unittest("ExecutorV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -491,6 +521,7 @@ ohos_unittest("DeviceManagerV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -511,6 +542,7 @@ ohos_unittest("DeviceRegistrarV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -532,6 +564,7 @@ ohos_unittest("HDIDeviceV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -553,6 +586,7 @@ ohos_unittest("HDIPreparedModelV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -574,6 +608,7 @@ ohos_unittest("HDIPreparedModelV2_1Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -584,6 +619,7 @@ ohos_unittest("TransformV2_0Test") { configs = [ ":module_private_config" ] external_deps = [ + "c_utils:utils", "drivers_interface_nnrt:libnnrt_proxy_2.0", "googletest:gmock_main", "googletest:gtest_main", @@ -592,6 +628,7 @@ 
ohos_unittest("TransformV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -613,6 +650,7 @@ ohos_unittest("InnerModelV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -633,6 +671,7 @@ ohos_unittest("NnTensorV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -653,6 +692,7 @@ ohos_unittest("NnValidationV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -673,6 +713,7 @@ ohos_unittest("OpsRegistryV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } @@ -696,6 +737,7 @@ ohos_unittest("NeuralNetworkRuntimeV2_0Test") { "mindspore:mindir_lib", "neural_network_runtime:libneural_network_core", "neural_network_runtime:libneural_network_runtime", + "eventhandler:libeventhandler", ] } diff --git a/test/unittest/components/nn_executor/nn_executor_test.cpp b/test/unittest/components/nn_executor/nn_executor_test.cpp index f5380a660eccef8fe1f6e64f4fbb22794cbd8a57..711ff5e8258f0fb3f1cd60f106e1b119f6887378 100644 --- a/test/unittest/components/nn_executor/nn_executor_test.cpp +++ b/test/unittest/components/nn_executor/nn_executor_test.cpp @@ -160,8 +160,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0) EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first)) .WillRepeatedly(::testing::Return(reinterpret_cast(0x1000))); + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); EXPECT_NE(nullptr, nnExecutor); OH_NN_Memory** memory = nullptr; @@ -198,8 +203,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0) .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t index = 0; size_t min = 1; @@ -229,8 +239,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0) .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority 
diff --git a/test/unittest/components/nn_executor/nn_executor_test.cpp b/test/unittest/components/nn_executor/nn_executor_test.cpp
index f5380a660eccef8fe1f6e64f4fbb22794cbd8a57..711ff5e8258f0fb3f1cd60f106e1b119f6887378 100644
--- a/test/unittest/components/nn_executor/nn_executor_test.cpp
+++ b/test/unittest/components/nn_executor/nn_executor_test.cpp
@@ -160,8 +160,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_construct_001, TestSize.Level0)
     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
 
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     EXPECT_NE(nullptr, nnExecutor);
 
     OH_NN_Memory** memory = nullptr;
@@ -198,8 +203,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_001, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -229,8 +239,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_002, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t max = 10;
@@ -258,8 +273,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_003, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -287,8 +307,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_004, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -317,8 +342,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_005, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_SUCCESS));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -357,8 +387,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_006, TestSize.Level0)
         }));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -397,8 +432,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_007, TestSize.Level0)
         }));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -437,8 +477,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputdimrange_008, TestSize.Level0)
         }));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     size_t min = 1;
@@ -465,8 +510,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     int32_t expectDim[2] = {3, 3};
     int32_t* ptr = expectDim;
@@ -494,8 +544,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     int32_t expectDim[2] = {3, 3};
     int32_t* ptr = expectDim;
@@ -521,12 +576,17 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_003, TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair1;
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     std::shared_ptr<TensorDesc> tensorDesr = std::make_shared<TensorDesc>();
     pair1.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     int32_t expectDim[2] = {3, 3};
     int32_t* ptr = expectDim;
@@ -562,8 +622,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getoutputshape_004, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     int32_t expectDim2[2] = {3, 3};
     int32_t* ptr2 = expectDim2;
@@ -587,8 +652,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getinputnum_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t ret = nnExecutor->GetInputNum();
     EXPECT_EQ(0, ret);
@@ -607,8 +677,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getoutputnum_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t ret = nnExecutor->GetOutputNum();
 
@@ -628,8 +703,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_001, TestSize.Leve
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 1;
     NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
@@ -655,8 +735,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_002, TestSize.Leve
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
 
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 1;
     NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
@@ -688,9 +773,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputtensordesc_003, TestSize.Leve
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 0;
     NN_TensorDesc* ret = nnExecutor->CreateInputTensorDesc(index);
@@ -710,8 +799,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_001, TestSize.Lev
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 1;
     NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
@@ -737,8 +831,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_002, TestSize.Lev
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
 
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 1;
     NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
@@ -771,8 +870,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputtensordesc_003, TestSize.Lev
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
 
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t index = 1;
     NN_TensorDesc* ret = nnExecutor->CreateOutputTensorDesc(index);
@@ -806,8 +910,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setonrundone_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_ReturnCode ret = nnExecutor->SetOnRunDone(MyOnRunDone);
     EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret);
@@ -831,8 +940,13 @@ HWTEST_F(NNExecutorTest,
nnexecutortest_setonservicedied_001, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NN_ReturnCode ret = nnExecutor->SetOnServiceDied(MyOnServiceDied); EXPECT_EQ(OH_NN_OPERATION_FORBIDDEN, ret); @@ -851,8 +965,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runsync_001, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t inputSize = 1; size_t outputSize = 1; @@ -873,8 +992,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runsync_002, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t inputSize = 0; size_t outputSize = 1; @@ -923,8 +1047,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runsync_003, TestSize.Level0) m_outputTensorDescs.emplace_back(pair1); m_outputTensorDescs.emplace_back(pair2); + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t backendID = 1; std::shared_ptr device = std::make_shared(); @@ -984,8 +1113,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runsync_004, TestSize.Level0) m_outputTensorDescs.emplace_back(pair1); m_outputTensorDescs.emplace_back(pair2); + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t backendID = 1; std::shared_ptr device = std::make_shared(); @@ -1044,8 +1178,13 @@ 
HWTEST_F(NNExecutorTest, nnexecutortest_runsync_005, TestSize.Level0) m_outputTensorDescs.emplace_back(pair1); m_outputTensorDescs.emplace_back(pair2); + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t backendID = 1; std::shared_ptr device = std::make_shared(); @@ -1076,8 +1215,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runasync_001, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); void* buffer = m_dataArry; size_t inputSize = 1; @@ -1100,8 +1244,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runasync_002, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); void* buffer = m_dataArry; size_t inputSize = 0; @@ -1151,8 +1300,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_runasync_003, TestSize.Level0) m_inputTensorDescs.emplace_back(pair2); m_outputTensorDescs.emplace_back(pair1); m_outputTensorDescs.emplace_back(pair2); + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t backendID = 1; std::shared_ptr device = std::make_shared(); @@ -1185,8 +1339,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_getbackendid_001, TestSize.Level0) std::shared_ptr m_preparedModel {nullptr}; std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* nnExecutor = new (std::nothrow) NNExecutor( - m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); size_t ret = nnExecutor->GetBackendID(); EXPECT_EQ(0, ret); 
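Every hunk above makes the same mechanical change, so it is worth stating once what the tests are now calling. A sketch of the widened constructor implied by these call sites follows; the parameter names and reference qualifiers are inferred from the calls here, not quoted from nnexecutor.h:

    // Inferred shape of the widened NNExecutor constructor; only the argument
    // order and the test-supplied values are taken from the hunks above.
    NNExecutor(size_t backendID,
               std::shared_ptr<Device> device,
               std::shared_ptr<PreparedModel> preparedModel,
               const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& inputTensorDescs,
               const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputTensorDescs,
               std::string cachePath,            // "" in these tests: no compiled-model cache on disk
               uint32_t cacheVersion,            // 0 in these tests
               const ExtensionConfig& extensionConfig,
               bool enableFp16,                  // false in these tests
               OH_NN_PerformanceMode performance,
               OH_NN_Priority priority);

The tests pass OH_NN_PERFORMANCE_EXTREME and OH_NN_PRIORITY_HIGH uniformly, so none of the assertions depend on the new arguments; the extra locals only keep the fixtures compiling against the new signature.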
@@ -1207,8 +1366,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_001, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* buffer = m_dataArry;
@@ -1240,9 +1404,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* buffer = m_dataArry;
@@ -1281,9 +1449,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinput_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* buffer = m_dataArry;
@@ -1310,8 +1482,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_001, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* const data = m_dataArry;
@@ -1343,8 +1520,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* const data = m_dataArry;
@@ -1383,8 +1565,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setinputfrommemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
     void* const data = m_dataArry;
@@ -1409,8 +1596,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     void* buffer = m_dataArry;
@@ -1437,8 +1629,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     void* buffer = m_dataArry;
@@ -1472,8 +1669,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutput_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     void* buffer = m_dataArry;
@@ -1495,8 +1697,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     void* const data = m_dataArry;
     OH_NN_Memory memory = {data, 9 * sizeof(float)};
@@ -1523,8 +1730,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     void* const data = m_dataArry;
     OH_NN_Memory memory = {data, 9 * sizeof(float)};
@@ -1558,8 +1770,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setoutputfrommemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     void* const data = m_dataArry;
     OH_NN_Memory memory = {data, 9 * sizeof(float)};
@@ -1581,8 +1798,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
    std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
@@ -1614,8 +1836,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
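The create/destroy-memory hunks that follow all lean on the same gMock stub: AllocateTensorBuffer is made to return either a fake non-null address (success path) or nullptr (failure path). A minimal sketch of the pattern, with MockIDevice and the matcher taken from the visible EXPECT_CALL lines and only the comments added here:

    // Success path: hand back a fake address that the test never dereferences.
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[0].first))
        .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
    // Failure path: a nullptr buffer drives the error branch of CreateInputMemory.
    EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[0].first))
        .WillRepeatedly(::testing::Return(nullptr));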
@@ -1655,6 +1882,9 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
@@ -1662,7 +1892,8 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_003, TestSize.Level0)
         .WillRepeatedly(::testing::Return(nullptr));
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -1702,6 +1933,9 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
@@ -1709,7 +1943,8 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createinputmemory_004, TestSize.Level0)
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -1736,8 +1971,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Memory** memory = nullptr;
@@ -1770,8 +2010,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Memory** memory = nullptr;
@@ -1811,13 +2056,17 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyinputmemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[m_index].first))
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -1845,8 +2094,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Memory** memory = nullptr;
@@ -1878,8 +2132,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
@@ -1919,6 +2178,9 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
@@ -1926,7 +2188,8 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_003, TestSize.Level0)
         .WillRepeatedly(::testing::Return(nullptr));
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -1966,6 +2229,9 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
@@ -1973,7 +2239,8 @@ HWTEST_F(NNExecutorTest, nnexecutortest_createoutputmemory_004, TestSize.Level0)
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
 
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -2000,8 +2267,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_001, TestSize.Level0)
     std::shared_ptr<PreparedModel> m_preparedModel {nullptr};
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Memory** memory = nullptr;
@@ -2034,8 +2306,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Memory** memory = nullptr;
@@ -2075,13 +2352,17 @@ HWTEST_F(NNExecutorTest, nnexecutortest_destroyoutputmemory_003, TestSize.Level0)
     pair2.first = tensorDesr;
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     float dataArry[9] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
     size_t length = 9 * sizeof(float);
     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[m_index].first))
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     OH_NN_Memory** memory = nullptr;
     void* const data = dataArry;
@@ -2111,8 +2392,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_run_001, TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
@@ -2146,8 +2432,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_run_002, TestSize.Level0)
     std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType> pair2;
     m_inputTensorDescs.emplace_back(pair1);
     m_inputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
@@ -2183,8 +2474,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_run_003, TestSize.Level0)
     m_inputTensorDescs.emplace_back(pair2);
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     size_t length = 9 * sizeof(float);
     OH_NN_Tensor tensor = SetTensor(OH_NN_FLOAT32, m_dimensionCount, m_dimArry, nullptr, OH_NN_TENSOR);
@@ -2220,8 +2516,13 @@ HWTEST_F(NNExecutorTest, nnexecutortest_setextensionconfig_001, TestSize.Level0)
     m_inputTensorDescs.emplace_back(pair2);
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* nnExecutor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
 
     std::unordered_map<std::string, std::vector<char>> configMap;
     std::string callingPidStr = "callingPid";
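That closes the nnexecutor_test.cpp changes: several dozen call sites, each repeating the same three local declarations and the same six extra constructor arguments. A small factory helper in the test fixture would have kept this diff to a few lines; a hypothetical sketch (CreateTestExecutor and TensorDescVec are not in the patch):

    using TensorDescVec = std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>;

    // Hypothetical helper collapsing the repeated test boilerplate.
    static NNExecutor* CreateTestExecutor(size_t backendID, std::shared_ptr<Device> device,
        std::shared_ptr<PreparedModel> preparedModel, const TensorDescVec& inputs, const TensorDescVec& outputs)
    {
        ExtensionConfig extensionConfig;
        OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
        OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
        return new (std::nothrow) NNExecutor(
            backendID, device, preparedModel, inputs, outputs, "", 0, extensionConfig,
            false, performance, priority);
    }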
diff --git a/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp b/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp
index ee68a83d7003a1524b115cc6209c00f913c24002..378a9ccfa3f107600ac0e3227a1be8038dcf0bc3 100644
--- a/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp
+++ b/test/unittest/components/v1_0/neural_network_core_test/neural_network_core_test.cpp
@@ -1751,8 +1751,13 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_getputputshape_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     uint32_t outputIndex = 0;
@@ -1789,8 +1794,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_getinputcount_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     size_t* inputCount = nullptr;
@@ -1825,8 +1834,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_getoutputcount_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     size_t* outputCount = nullptr;
@@ -1894,8 +1907,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_getinputdimRange_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
    NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     OH_NN_ReturnCode ret = OH_NNExecutor_GetInputDimRange(nnExecutor, index,
@@ -1922,8 +1939,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_getinputdimRange_003, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     OH_NN_ReturnCode ret = OH_NNExecutor_GetInputDimRange(nnExecutor, index,
@@ -1958,8 +1979,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_setonrundone_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_OnRunDone rundone = nullptr;
@@ -1993,8 +2018,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_nnexecutor_setonservicedied_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_OnServiceDied servicedied = nullptr;
@@ -2031,8 +2060,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runsync_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[] = {nullptr};
@@ -2056,8 +2089,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runsync_003, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2081,8 +2118,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runsync_004, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2106,8 +2147,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runsync_005, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2150,8 +2195,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_002, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[] = {nullptr};
@@ -2178,8 +2227,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_003, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2206,8 +2259,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_004, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2234,8 +2291,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_005, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2262,8 +2323,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_006, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
@@ -2290,8 +2355,12 @@ HWTEST_F(NeuralNetworkCoreTest, nnt_executor_runasync_007, testing::ext::TestSize.Level0)
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
     std::shared_ptr<Device> device = std::make_shared<MockIDevice>();
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);
 
     NN_Tensor* inputTensor[sizetensor];
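The neural_network_core_test.cpp hunks above all funnel the C++ object through the C API's opaque handle before invoking the OH_NNExecutor_* entry points, which is why each fixture ends in a reinterpret_cast. The pattern, reduced to its core (variable names mirror the tests; note these particular tests cast the address of the local pointer, as shown in the hunks):

    NNExecutor* executor = new (std::nothrow) NNExecutor(/* widened argument list as above */);
    OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(&executor);

    // Exercising a C entry point through the opaque handle, e.g. the
    // output-shape query used by nnt_nnexecutor_getputputshape_002:
    int32_t* shape = nullptr;
    uint32_t shapeLength = 0;
    OH_NN_ReturnCode ret = OH_NNExecutor_GetOutputShape(nnExecutor, 0, &shape, &shapeLength);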
diff --git a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
index a58ace084458acd3d1f5b656c53d277f6a136747..d3b8c76a5812c03a5c4515ceb48faacd217546c7 100644
--- a/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
+++ b/test/unittest/components/v1_0/neural_network_runtime_test/neural_network_runtime_test.cpp
@@ -1250,8 +1254,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_006, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
@@ -1282,8 +1286,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_007, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
@@ -1312,8 +1320,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_008, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
@@ -1342,8 +1354,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setinput_009, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
@@ -1450,8 +1466,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_005, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1477,8 +1497,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_006, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1503,8 +1527,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_setoutput_007, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1664,8 +1692,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, excutor_run_003, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     int32_t inputDims[2] = {3, 4};
@@ -1769,8 +1801,12 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_005, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1810,13 +1846,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_006, testing::ext::TestSize.Level0)
     m_inputTensorDescs.emplace_back(pair2);
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     size_t length = 9 * sizeof(float);
     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_inputTensorDescs[0].first))
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
 
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     EXPECT_NE(nullptr, executor);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
@@ -1843,8 +1883,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_input_memory_007, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1949,8 +1994,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_005, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -1990,13 +2040,17 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_006, testing::ext::TestSize.Level0)
     m_inputTensorDescs.emplace_back(pair2);
     m_outputTensorDescs.emplace_back(pair1);
     m_outputTensorDescs.emplace_back(pair2);
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
 
     size_t length = 9 * sizeof(float);
     EXPECT_CALL(*((MockIDevice *) device.get()), AllocateTensorBuffer(length, m_outputTensorDescs[0].first))
         .WillRepeatedly(::testing::Return(reinterpret_cast<void*>(0x1000)));
 
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, device, m_preparedModel, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     EXPECT_NE(nullptr, executor);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
@@ -2023,8 +2077,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_allocate_output_memory_007, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -2071,8 +2130,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_002, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 0;
@@ -2098,8 +2162,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_003, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     EXPECT_NE(executor, nullptr);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
@@ -2126,8 +2195,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_input_memory_004, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t inputIndex = 6;
@@ -2173,8 +2247,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_002, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
 
     uint32_t outputIndex = 0;
@@ -2200,8 +2279,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_003, testing::ext::TestSize.Level0)
         .WillRepeatedly(::testing::Return(OH_NN_FAILED));
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_inputTensorDescs;
     std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>> m_outputTensorDescs;
+    ExtensionConfig extensionConfig;
+    OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
+    OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
+
     NNExecutor* executor = new (std::nothrow) NNExecutor(
-        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs);
+        m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig,
+        false, performance, priority);
     EXPECT_NE(executor, nullptr);
     OH_NNExecutor* nnExecutor = reinterpret_cast<OH_NNExecutor*>(executor);
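One detail these destroy-memory tests rely on: operator new with std::nothrow returns nullptr on allocation failure instead of throwing, which is why some hunks assert the pointer before use while others dereference it directly. A two-line reminder of the pattern:

    NNExecutor* executor = new (std::nothrow) NNExecutor(/* ... */);  // nullptr on failure, no exception
    EXPECT_NE(executor, nullptr);  // as in executor_destroy_input_memory_003 above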
executor_destroy_output_memory_004, testing:: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); uint32_t outputIndex = 6; @@ -2258,8 +2347,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_destroy_output_memory_005, testing:: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); float dataArry[9] {0, 1, 2, 3, 4, 5, 6, 7, 8}; @@ -2308,8 +2402,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_002, testing:: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); OH_NN_Tensor* operand = nullptr; @@ -2340,8 +2439,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_003, testing:: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); SetTensor(); @@ -2369,8 +2473,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_input_with_memory_004, testing:: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, 
mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); uint32_t inputIndex = 0; @@ -2419,8 +2528,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_002, testing: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); uint32_t outputIndex = 0; @@ -2446,8 +2560,13 @@ HWTEST_F(NeuralNetworkRuntimeTest, executor_set_output_with_memory_003, testing: .WillRepeatedly(::testing::Return(OH_NN_FAILED)); std::vector, OH_NN_TensorType>> m_inputTensorDescs; std::vector, OH_NN_TensorType>> m_outputTensorDescs; + ExtensionConfig extensionConfig; + OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME}; + OH_NN_Priority priority {OH_NN_PRIORITY_HIGH}; + NNExecutor* executor = new (std::nothrow) NNExecutor( - m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs); + m_backendID, m_device, mockIPreparedMode, m_inputTensorDescs, m_outputTensorDescs, "", 0, extensionConfig, + false, performance, priority); OH_NNExecutor* nnExecutor = reinterpret_cast(executor); uint32_t outputIndex = 0;
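
Every test case above repeats the same construction boilerplate. For reference, below is a minimal sketch of a fixture-level helper that captures the extended constructor call; the helper name and parameter types are hypothetical (not part of this change), while the argument order and defaults (cachePath "", cacheVersion 0, extensionConfig, enableFp16 false, performance, priority) are exactly those used throughout these hunks.

    // Hypothetical test helper (sketch only): wraps the extended NNExecutor
    // constructor so each HWTEST_F case does not repeat the default
    // cache/extension/performance setup introduced by this change.
    NNExecutor* CreateTestExecutor(size_t backendID,
                                   std::shared_ptr<Device> device,
                                   std::shared_ptr<PreparedModel> preparedModel,
                                   const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& inputDescs,
                                   const std::vector<std::pair<std::shared_ptr<TensorDesc>, OH_NN_TensorType>>& outputDescs)
    {
        ExtensionConfig extensionConfig;  // default-constructed, as in the cases above
        OH_NN_PerformanceMode performance {OH_NN_PERFORMANCE_EXTREME};
        OH_NN_Priority priority {OH_NN_PRIORITY_HIGH};
        // cachePath = "", cacheVersion = 0, enableFp16 = false match the literals in the tests.
        return new (std::nothrow) NNExecutor(
            backendID, device, preparedModel, inputDescs, outputDescs, "", 0, extensionConfig,
            false, performance, priority);
    }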