diff --git a/test/fuzztest/BUILD.gn b/test/fuzztest/BUILD.gn index a95ded5e099e37bfa170cc93aa1fb949c8029591..895d24b4aeb5404ff03fac7fde20adafc93c44b0 100644 --- a/test/fuzztest/BUILD.gn +++ b/test/fuzztest/BUILD.gn @@ -18,6 +18,6 @@ group("fuzztest") { deps += [ "hdinnrtdevice_fuzzer:HdiNnrtDeviceFuzzTest" ] deps += [ "hdinnrtpreparedmodel_fuzzer:HdiNnrtPreparedModelFuzzTest" ] - deps += [ "neural_network_core_fuzzer:NNCoreFuzzTest" ] + deps += [ "nncore_fuzzer:NNCoreFuzzTest" ] deps += [ "nnrtops_fuzzer:NnrtOpsFuzzTest" ] } diff --git a/test/fuzztest/neural_network_core_fuzzer/BUILD.gn b/test/fuzztest/nncore_fuzzer/BUILD.gn similarity index 96% rename from test/fuzztest/neural_network_core_fuzzer/BUILD.gn rename to test/fuzztest/nncore_fuzzer/BUILD.gn index 6a49eebfa3da7faae1ec9ffeaf85a04eb3532736..bac4cff434cc8e8b08add924aeaf8e9bb195f019 100644 --- a/test/fuzztest/neural_network_core_fuzzer/BUILD.gn +++ b/test/fuzztest/nncore_fuzzer/BUILD.gn @@ -20,7 +20,7 @@ module_output_path = "neural_network_runtime/" ##############################fuzztest########################################## ohos_fuzztest("NNCoreFuzzTest") { module_out_path = module_output_path - fuzz_config_file = "../neural_network_core_fuzzer" + fuzz_config_file = "../nncore_fuzzer" include_dirs = [ "../../..", diff --git a/test/fuzztest/neural_network_core_fuzzer/corpus/init b/test/fuzztest/nncore_fuzzer/corpus/init similarity index 100% rename from test/fuzztest/neural_network_core_fuzzer/corpus/init rename to test/fuzztest/nncore_fuzzer/corpus/init diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp b/test/fuzztest/nncore_fuzzer/nncore_fuzzer.cpp similarity index 50% rename from test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp rename to test/fuzztest/nncore_fuzzer/nncore_fuzzer.cpp index f01dba5f27d8bba394fd900ca513e5092b55e647..fdfe5eb13ebd64cdf8388b892491d7b771f0a6b3 100644 --- a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.cpp +++ 
b/test/fuzztest/nncore_fuzzer/nncore_fuzzer.cpp @@ -25,117 +25,163 @@ namespace NeuralNetworkRuntime { const size_t SIZE_ONE = 1; const size_t CACHE_VERSION = 1; const size_t BUFFER_SIZE = 32; -const size_t TENSOR_TWO = 2; -const size_t TENSOR_THREE = 3; const size_t SHAPE_LENTH = 4; -// 返回值检查宏 -#define CHECKNEQ(realRet, expectRet, retValue, ...) \ - do { \ - if ((realRet) != (expectRet)) { \ - printf(__VA_ARGS__); \ - return (retValue); \ - } \ - } while (0) - -#define CHECKEQ(realRet, expectRet, retValue, ...) \ - do { \ - if ((realRet) == (expectRet)) { \ - printf(__VA_ARGS__); \ - return (retValue); \ - } \ - } while (0) - -OH_NN_ReturnCode AddTensorDescToModel(OH_NNModel* model, int32_t* inputDims, size_t shapeLength, size_t inputIndex) +struct OHNNOperandTest { + OH_NN_DataType dataType; + OH_NN_TensorType type; + std::vector<int32_t> shape; + void *data {nullptr}; + int32_t length {0}; + OH_NN_Format format = OH_NN_FORMAT_NONE; + const OH_NN_QuantParam *quantParam = nullptr; +}; + +struct OHNNGraphArgs { + OH_NN_OperationType operationType; + std::vector<OHNNOperandTest> operands; + std::vector<uint32_t> paramIndices; + std::vector<uint32_t> inputIndices; + std::vector<uint32_t> outputIndices; + bool build = true; + bool specifyIO = true; + bool addOperation = true; +}; + +struct Model0 { + float value = 1; + OHNNOperandTest input = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, &value, sizeof(float)}; + OHNNOperandTest output = {OH_NN_FLOAT32, OH_NN_TENSOR, {1}, &value, sizeof(float)}; + OHNNGraphArgs graphArgs = {.operationType = OH_NN_OPS_ADD, + .operands = {input, output}, + .paramIndices = {}, + .inputIndices = {0}, + .outputIndices = {1}}; +}; + +OH_NN_UInt32Array TransformUInt32Array(const std::vector<uint32_t>& vector) { - NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); - CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); - - auto returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, shapeLength); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); - - 
returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); - - returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); - - returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add TensorDesc to model failed."); - - returnCode = OH_NNModel_SetTensorType(model, inputIndex, OH_NN_TENSOR); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); - - return OH_NN_SUCCESS; + uint32_t* data = (vector.empty()) ? nullptr : const_cast<uint32_t*>(vector.data()); + return {data, vector.size()}; } -OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel) +NN_TensorDesc* createTensorDesc(const int32_t* shape, size_t shapeNum, OH_NN_DataType dataType, OH_NN_Format format) { - // 创建模型实例model,进行模型构造 - OH_NNModel* model = OH_NNModel_Construct(); - CHECKEQ(model, nullptr, OH_NN_NULL_PTR, "Create model failed."); - - // 添加Add算子的第一个输入张量,类型为float32,张量形状为[1, 2, 2, 3] - int32_t inputDims[4] = {1, 2, 2, 3}; - auto returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 0); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add first TensorDesc to model failed."); - - // 添加Add算子的第二个输入张量,类型为float32,张量形状为[1, 2, 2, 3] - returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, 1); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); - - // 添加Add算子的参数张量,该参数张量用于指定激活函数的类型,张量的数据类型为int8。 - NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); - CHECKEQ(tensorDesc, nullptr, OH_NN_NULL_PTR, "Create TensorDesc failed."); - - int32_t activationDims = 1; - returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc shape failed."); + NN_TensorDesc* tensorDescTmp = OH_NNTensorDesc_Create(); + if (tensorDescTmp == 
nullptr) { + LOGE("[NncoreFuzzTest]OH_NNTensorDesc_Create failed!"); + return nullptr; + } - returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_INT8); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc data type failed."); + OH_NN_ReturnCode ret = OH_NNTensorDesc_SetDataType(tensorDescTmp, dataType); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest]OH_NNTensorDesc_SetDataType failed!ret = %d\n", ret); + return nullptr; + } - returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set TensorDesc format failed."); + if (shape != nullptr) { + ret = OH_NNTensorDesc_SetShape(tensorDescTmp, shape, shapeNum); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest]OH_NNTensorDesc_SetShape failed!ret = %d\n", ret); + return nullptr; + } + } - returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add second TensorDesc to model failed."); + ret = OH_NNTensorDesc_SetFormat(tensorDescTmp, format); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest]OH_NNTensorDesc_SetFormat failed!ret = %d\n", ret); + return nullptr; + } - returnCode = OH_NNModel_SetTensorType(model, TENSOR_TWO, OH_NN_ADD_ACTIVATIONTYPE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor type failed."); + return tensorDescTmp; +} - // 将激活函数类型设置为OH_NNBACKEND_FUSED_NONE,表示该算子不添加激活函数。 - int8_t activationValue = OH_NN_FUSED_NONE; - returnCode = OH_NNModel_SetTensorData(model, TENSOR_TWO, &activationValue, sizeof(int8_t)); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Set model tensor data failed."); +int SingleModelBuildEndStep(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + auto paramIndices = TransformUInt32Array(graphArgs.paramIndices); + auto inputIndices = TransformUInt32Array(graphArgs.inputIndices); + auto outputIndices = TransformUInt32Array(graphArgs.outputIndices); + + if (graphArgs.addOperation) { + ret = 
OH_NNModel_AddOperation(model, graphArgs.operationType, &paramIndices, &inputIndices, + &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_AddOperation failed! ret=%{public}d\n", ret); + return ret; + } + } - // 设置Add算子的输出张量,类型为float32,张量形状为[1, 2, 2, 3] - returnCode = AddTensorDescToModel(model, inputDims, SHAPE_LENTH, TENSOR_THREE); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add third TensorDesc to model failed."); + if (graphArgs.specifyIO) { + ret = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_SpecifyInputsAndOutputs failed! ret=%{public}d\n", ret); + return ret; + } + } - // 指定Add算子的输入张量、参数张量和输出张量的索引 - uint32_t inputIndicesValues[2] = {0, 1}; - uint32_t paramIndicesValues = 2; - uint32_t outputIndicesValues = 3; - OH_NN_UInt32Array paramIndices = {&paramIndicesValues, 1 * 4}; - OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2 * 4}; - OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1 * 4}; + if (graphArgs.build) { + ret = OH_NNModel_Finish(model); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_Finish failed! ret=%d\n", ret); + return ret; + } + } + return ret; +} - // 向模型实例添加Add算子 - returnCode = OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Add operation to model failed."); +int BuildSingleOpGraph(OH_NNModel *model, const OHNNGraphArgs &graphArgs) +{ + int ret = 0; + for (size_t i = 0; i < graphArgs.operands.size(); i++) { + const OHNNOperandTest &operandTem = graphArgs.operands[i]; + NN_TensorDesc* tensorDesc = createTensorDesc(operandTem.shape.data(), + (uint32_t) operandTem.shape.size(), + operandTem.dataType, operandTem.format); + + ret = OH_NNModel_AddTensorToModel(model, tensorDesc); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_AddTensorToModel failed! 
ret=%d\n", ret); + return ret; + } + + ret = OH_NNModel_SetTensorType(model, i, operandTem.type); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_SetTensorType failed! ret=%d\n", ret); + return ret; + } + + if (std::find(graphArgs.paramIndices.begin(), graphArgs.paramIndices.end(), i) != + graphArgs.paramIndices.end()) { + ret = OH_NNModel_SetTensorData(model, i, operandTem.data, operandTem.length); + if (ret != OH_NN_SUCCESS) { + LOGE("[NncoreFuzzTest] OH_NNModel_SetTensorData failed! ret=%{public}d\n", ret); + return ret; + } + } + OH_NNTensorDesc_Destroy(&tensorDesc); + } + ret = SingleModelBuildEndStep(model, graphArgs); + return ret; +} - // 设置模型实例的输入张量、输出张量的索引 - returnCode = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Specify model inputs and outputs failed."); +OH_NNModel* buildModel0(uint32_t opsType) +{ + OH_NNModel *model = OH_NNModel_Construct(); - // 完成模型实例的构建 - returnCode = OH_NNModel_Finish(model); - CHECKNEQ(returnCode, OH_NN_SUCCESS, returnCode, "Build model failed."); + Model0 model0; + OHNNGraphArgs graphArgs = model0.graphArgs; + graphArgs.operationType = static_cast<OH_NN_OperationType>(opsType); + BuildSingleOpGraph(model, graphArgs); + return model; +} - // 返回模型实例 - *pmodel = model; - return OH_NN_SUCCESS; +OH_NNCompilation* BuildCompilation(OH_NNModel* model, size_t deviceId) +{ + OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model); + OH_NNCompilation_SetDevice(nnCompilation, deviceId); + OH_NNCompilation_Build(nnCompilation); + return nnCompilation; +} void NNCoreDeviceFuzzTest(const uint8_t* data, size_t size) @@ -163,8 +209,10 @@ void NNCoreDeviceFuzzTest(const uint8_t* data, size_t size) bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) { Data dataFuzz(data, size); - InnerModel model = dataFuzz.GetData(); - OH_NNCompilation* compilationC = OH_NNCompilation_Construct(reinterpret_cast(&model)); + uint32_t opsType = 
dataFuzz.GetData() + % (OH_NN_OPS_GATHER_ND - OH_NN_OPS_ADD + 1); + OH_NNModel* model = buildModel0(opsType); + OH_NNCompilation* nnCompilation = OH_NNCompilation_Construct(model); size_t bufferSize = BUFFER_SIZE; auto bufferAddr = dataFuzz.GetSpecificData(0, bufferSize); @@ -173,8 +221,6 @@ bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) OH_NNCompilation_ConstructWithOfflineModelBuffer(bufferAddr, bufferSize); - Compilation compilation = dataFuzz.GetData(); - OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); size_t modelSize = 0; char buffer[SIZE_ONE]; OH_NNCompilation_ExportCacheToBuffer(nnCompilation, buffer, SIZE_ONE, &modelSize); @@ -183,19 +229,10 @@ bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) OH_NNCompilation_Build(nnCompilation); - OH_NNCompilation_Destroy(&compilationC); + OH_NNCompilation_Destroy(&nnCompilation); - OH_NNModel* validModel; - if (BuildModel(&validModel) != OH_NN_SUCCESS) { - LOGE("NNCoreCompilationConstructTest failed, build model failed."); - return false; - } - OH_NNCompilation* validCompilation = OH_NNCompilation_Construct(validModel); - OH_NNModel_Destroy(&validModel); - if (validCompilation == nullptr) { - LOGE("NNCoreCompilationConstructTest failed, construct valid compilation failed."); - return false; - } + OH_NNCompilation* validCompilation = OH_NNCompilation_Construct(model); + OH_NNModel_Destroy(&model); OH_NNCompilation_AddExtensionConfig(validCompilation, "test", bufferAddr, bufferSize); size_t deviceid = dataFuzz.GetData(); @@ -203,10 +240,12 @@ bool NNCoreCompilationConstructTest(const uint8_t* data, size_t size) OH_NNCompilation_SetCache(validCompilation, path.c_str(), CACHE_VERSION); - OH_NN_PerformanceMode perf = dataFuzz.GetData(); + OH_NN_PerformanceMode perf = static_cast( + dataFuzz.GetData() % (OH_NN_PERFORMANCE_EXTREME - OH_NN_PERFORMANCE_NONE + 1)); OH_NNCompilation_SetPerformanceMode(validCompilation, perf); - OH_NN_Priority priority = 
dataFuzz.GetData(); + OH_NN_Priority priority = static_cast( + dataFuzz.GetData() % (OH_NN_PRIORITY_HIGH - OH_NN_PRIORITY_NONE + 1)); OH_NNCompilation_SetPriority(validCompilation, priority); bool enableFloat16 = dataFuzz.GetData(); @@ -232,7 +271,8 @@ bool NNCoreTensorDescFuzzTest(const uint8_t* data, size_t size) const char* name = nullptr; OH_NNTensorDesc_GetName(tensorDesc, &name); - OH_NN_DataType dataType = dataFuzz.GetData(); + OH_NN_DataType dataType = static_cast( + dataFuzz.GetData() % (OH_NN_FLOAT64 - OH_NN_UNKNOWN + 1)); OH_NNTensorDesc_SetDataType(tensorDesc, dataType); OH_NN_DataType dataTypeOut; OH_NNTensorDesc_GetDataType(tensorDesc, &dataTypeOut); @@ -243,7 +283,8 @@ bool NNCoreTensorDescFuzzTest(const uint8_t* data, size_t size) size_t shapeLength = 0; OH_NNTensorDesc_GetShape(tensorDesc, &shape, &shapeLength); - OH_NN_Format format = dataFuzz.GetData(); + OH_NN_Format format = static_cast( + dataFuzz.GetData() % (OH_NN_FORMAT_ND - OH_NN_FORMAT_NONE + 1)); OH_NNTensorDesc_SetFormat(tensorDesc, format); OH_NN_Format formatOut; OH_NNTensorDesc_GetFormat(tensorDesc, &formatOut); @@ -264,8 +305,7 @@ bool NNCoreTensorFuzzTest(const uint8_t* data, size_t size) size_t deviceId = dataFuzz.GetData(); NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create(); int32_t inputDims[4] = {1, 2, 2, 3}; - OH_NNModel* model = nullptr; - BuildModel(&model); + OH_NNModel* model = OH_NNModel_Construct(); OH_NNTensorDesc_SetShape(tensorDesc, inputDims, SHAPE_LENTH); OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32); OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE); @@ -297,8 +337,10 @@ bool NNCoreTensorFuzzTest(const uint8_t* data, size_t size) bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) { Data dataFuzz(data, size); - Compilation compilation = dataFuzz.GetData(); - OH_NNCompilation* nnCompilation = reinterpret_cast(&compilation); + uint32_t opsType = dataFuzz.GetData() % (OH_NN_OPS_GATHER_ND - OH_NN_OPS_ADD + 1); + OH_NNModel* model = 
buildModel0(opsType); + size_t deviceid = dataFuzz.GetData(); + OH_NNCompilation* nnCompilation = BuildCompilation(model, deviceid); OH_NNExecutor* nnExecutor = OH_NNExecutor_Construct(nnCompilation); uint32_t outputIndex = dataFuzz.GetData(); @@ -311,39 +353,39 @@ bool NNCoreExecutorFuzzTest(const uint8_t* data, size_t size) size_t outputCount = 0; OH_NNExecutor_GetOutputCount(nnExecutor, &outputCount); - - size_t index = dataFuzz.GetData(); - OH_NNExecutor_CreateInputTensorDesc(nnExecutor, index); - - OH_NNExecutor_CreateOutputTensorDesc(nnExecutor, index); + std::vector inputTensorDescs; + std::vector outputTensorDescs; + size_t index = 0; + for (size_t i = 0; i < inputCount; i++) { + index = (inputCount == 0) ? 0 : (dataFuzz.GetData() % inputCount); + NN_TensorDesc* nnTensorDesc = OH_NNExecutor_CreateInputTensorDesc(nnExecutor, index); + inputTensorDescs.emplace_back(nnTensorDesc); + } + for (size_t i = 0; i < outputCount; i++) { + index = (outputCount == 0) ? 0 : (dataFuzz.GetData() % outputCount); + NN_TensorDesc* nnTensorDesc = OH_NNExecutor_CreateOutputTensorDesc(nnExecutor, index); + outputTensorDescs.emplace_back(nnTensorDesc); + } size_t *minInputDims = nullptr; size_t *maxInputDIms = nullptr; size_t shapeLength = 0; OH_NNExecutor_GetInputDimRange(nnExecutor, index, &minInputDims, &maxInputDIms, &shapeLength); - NN_OnRunDone onRunDone = dataFuzz.GetData(); - OH_NNExecutor_SetOnRunDone(nnExecutor, onRunDone); - - NN_OnServiceDied onServiceDied = dataFuzz.GetData(); - OH_NNExecutor_SetOnServiceDied(nnExecutor, onServiceDied); - std::vector inputTensors; std::vector outputTensors; - inputCount = dataFuzz.GetData(); - outputCount = dataFuzz.GetData(); for (size_t i = 0; i < inputCount; ++i) { - NN_Tensor* inputTensor = dataFuzz.GetData(); - inputTensors.emplace_back(inputTensor); + NN_Tensor* tensor = OH_NNTensor_Create(deviceid, inputTensorDescs[i]); + inputTensors.emplace_back(tensor); } for (size_t i = 0; i < outputCount; ++i) { - NN_Tensor* outputTensor 
= dataFuzz.GetData(); - outputTensors.emplace_back(outputTensor); + NN_Tensor* tensor = OH_NNTensor_Create(deviceid, outputTensorDescs[i]); + outputTensors.emplace_back(tensor); } + void* userData = dataFuzz.GetData(); OH_NNExecutor_RunSync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), outputCount); int32_t timeout = dataFuzz.GetData(); - void* userData = dataFuzz.GetData(); OH_NNExecutor_RunAsync(nnExecutor, inputTensors.data(), inputCount, outputTensors.data(), outputCount, timeout, userData); diff --git a/test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h b/test/fuzztest/nncore_fuzzer/nncore_fuzzer.h similarity index 100% rename from test/fuzztest/neural_network_core_fuzzer/nncore_fuzzer.h rename to test/fuzztest/nncore_fuzzer/nncore_fuzzer.h diff --git a/test/fuzztest/neural_network_core_fuzzer/project.xml b/test/fuzztest/nncore_fuzzer/project.xml similarity index 100% rename from test/fuzztest/neural_network_core_fuzzer/project.xml rename to test/fuzztest/nncore_fuzzer/project.xml