diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
index 7868ab721ec67921a2dcf6cad09f18c883fb0133..c45a093eaf6816d312b5528eff2bd5651eb5540e 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
@@ -157,26 +157,28 @@ def compare_core(bench_out, npu_out, alg):
     if not isinstance(bench_out, type(npu_out)):
         return [(CompareConst.NAN, "bench and npu output type is different.")], False, CompareConst.NA, CompareConst.NA
     if isinstance(bench_out, (list, tuple)):
-        compare_result, test_success, bench_dtype, npu_dtype = [], True, [], []
+        compare_result, test_success, bench_dtype, npu_dtype, shape = [], True, [], [], []
         if len(bench_out) != len(npu_out):
             return [(CompareConst.NAN, "bench and npu output structure is different")], False, CompareConst.NA, CompareConst.NA
         for b_out_i, n_out_i in zip(bench_out, npu_out):
-            compare_result_i, test_success_i, bench_dtype_i, npu_dtype_i = compare_core(b_out_i, n_out_i, alg)
+            compare_result_i, test_success_i, bench_dtype_i, npu_dtype_i, shape_i = compare_core(b_out_i, n_out_i, alg)
             compare_result.append(compare_result_i)
             test_success = test_success and test_success_i
             bench_dtype.append(bench_dtype_i)
             npu_dtype.append(npu_dtype_i)
+            shape.append(shape_i)
     elif isinstance(bench_out, dict):
         b_keys, n_keys = set(bench_out.keys()), set(npu_out.keys())
         if b_keys != n_keys:
-            compare_result, test_success, bench_dtype, npu_dtype = [(CompareConst.NAN, "bench and npu output dict keys are different")], False, \
-                CompareConst.NA, CompareConst.NA
-        compare_result, test_success, bench_dtype, npu_dtype = compare_core(list(bench_out.values()), list(npu_out.values()), alg)
+            compare_result, test_success, bench_dtype, npu_dtype, shape = [(CompareConst.NAN, "bench and npu output dict keys are different")], False, \
+                CompareConst.NA, CompareConst.NA, CompareConst.NA
+        compare_result, test_success, bench_dtype, npu_dtype, shape = compare_core(list(bench_out.values()), list(npu_out.values()), alg)
     elif isinstance(bench_out, torch.Tensor):
         copy_bench_out = bench_out.detach().clone()
         copy_npu_out = npu_out.detach().clone()
         bench_dtype = str(copy_bench_out.dtype)
         npu_dtype = str(copy_npu_out.dtype)
+        shape = list(npu_out.shape)
         if copy_bench_out.dtype in [torch.float32, torch.float64] and copy_bench_out.dtype != copy_npu_out.dtype:
             copy_npu_out = copy_npu_out.type(copy_bench_out.dtype)
         compare_result, test_success, msg = compare_torch_tensor(copy_bench_out.numpy(), copy_npu_out.cpu().numpy(), alg)
@@ -184,15 +186,18 @@ def compare_core(bench_out, npu_out, alg):
         compare_result, test_success, msg = compare_builtin_type(bench_out, npu_out)
         bench_dtype = str(type(bench_out))
         npu_dtype = str(type(npu_out))
+        shape = str(type(npu_out))
     elif bench_out is None:
         compare_result, test_success, msg = CompareConst.NA, True, "output is None"
         bench_dtype = CompareConst.NAN
         npu_dtype = CompareConst.NAN
+        shape = CompareConst.NAN
     else:
         compare_result, test_success, msg = CompareConst.NA, True, "Unexpected output type \
             in compare_core: {}".format(type(bench_out))
         bench_dtype = CompareConst.NAN
         npu_dtype = CompareConst.NAN
+        shape = CompareConst.NAN
     if isinstance(compare_result, list):
         compare_result = flatten_compare_result(compare_result)
     else:
@@ -200,9 +205,11 @@ def compare_core(bench_out, npu_out, alg):
     if isinstance(bench_dtype, list):
         bench_dtype = flatten_compare_result(bench_dtype)
         npu_dtype = flatten_compare_result(npu_dtype)
+        shape = flatten_compare_result(shape)
     else:
         bench_dtype = [bench_dtype]
         npu_dtype = [npu_dtype]
-    return compare_result, test_success, bench_dtype, npu_dtype
+        shape = [shape]
+    return compare_result, test_success, bench_dtype, npu_dtype, shape
 
 
diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
index a7221cb31f3de857427235d9c7840b8fdade144c..a584405c007ec02ca56a92d8cc008eea35dd29db 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
@@ -58,7 +58,7 @@ class Comparator:
         write_csv(summary_test_rows, self.save_path)
 
         detail_test_rows = [[
-            "Npu Name", "Bench Dtype", "NPU Dtype",
+            "Npu Name", "Bench Dtype", "NPU Dtype", "Shape",
             "Cosine Similarity", "Cosine Similarity Message",
             "Max Rel Error", "Max Rel Err Message",
             "Max Abs Error", "Max Abs Err Message",
@@ -135,12 +135,14 @@ class Comparator:
         detailed_result_total = []
         bench_dtype_total = []
         npu_dtype_total = []
+        shape_total = []
         test_success_total = True
         for name in self.compare_alg.keys():
             alg = self.compare_alg[name][0]
-            detailed_result, test_success, bench_dtype, npu_dtype = compare_core(bench_out, npu_out, alg)
+            detailed_result, test_success, bench_dtype, npu_dtype, shape = compare_core(bench_out, npu_out, alg)
             bench_dtype_total = bench_dtype
             npu_dtype_total = npu_dtype
+            shape_total = shape
             if name != "Max Relative Error" and name != "Max Absolute Error":
                 test_success_total = test_success_total and test_success
             if detailed_result_total:
@@ -153,6 +155,7 @@ class Comparator:
             detailed_result = list(detailed_result_total[i])
             detailed_result.insert(0, bench_dtype_total[i])
             detailed_result.insert(1, npu_dtype_total[i])
+            detailed_result.insert(2, shape_total[i])
             detailed_result.append(str(test_success_total))
             detailed_result_total[i] = tuple(detailed_result)
         return test_success_total, detailed_result_total
diff --git a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py
index 20b80ae6274d156e81b8aad314a6c34e0321663f..89f34d0a1867c5a7d3a7f63425a0e62f850e91e6 100644
--- a/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py
+++ b/debug/accuracy_tools/api_accuracy_checker/run_ut/run_ut.py
@@ -224,7 +224,7 @@ def _run_ut_parser(parser):
                         required=False)
     parser.add_argument('-save_error_data', dest="save_error_data", action="store_true",
                         help=" Save compare failed api output.", required=False)
-    parser.add_argument("-c", "--jit_compile", dest="jit_compile", help=" whether to turn on jit compile",
+    parser.add_argument("-j", "--jit_compile", dest="jit_compile", help=" whether to turn on jit compile",
                         default=False, required=False)
     parser.add_argument("-d", "--device", dest="device_id", type=int, help=" set NPU device id to run ut",
                         default=0, required=False)