diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
index a79125d832b39e9074c8dff9d3eb5c24f2003c4d..2ab8735a3128d004ecfd44597b240ac125b16b1b 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/algorithm.py
@@ -30,7 +30,7 @@ def get_max_rel_err(n_value, b_value):
         print_warn_log("Max rel err only support numpy array!")
         raise ValueError("Max rel err only support numpy array!")
     if n_value.dtype != b_value.dtype:
-        raise ValueError("npu and bench value dtype is different.")
+        return CompareConst.NA, False
     if n_value.dtype in Const.FLOAT_TYPE:
         rel_err = np.abs((n_value - b_value) / (b_value + np.finfo(b_value.dtype).eps)).max()
         return rel_err, rel_err < 0.001
diff --git a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
index b877bc50d4d71ed8d9b5550bab245ae6f5c87bb6..5cb0777e43f0d51d2c3864b55ee887e927750f13 100644
--- a/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
+++ b/debug/accuracy_tools/api_accuracy_checker/compare/compare.py
@@ -23,7 +23,8 @@ class Comparator:
         self.compare_alg_names = []
         self.register_compare_algorithm("Cosine Similarity", cosine_sim, cosine_standard)
         self.test_results = []
-        self.test_result_cnt = {"forward_fail_num":0, "backward_fail_num":0, "forward_and_backward_fail_num":0, "success_num":0}
+        self.test_result_cnt = {"forward_fail_num": 0, "backward_fail_num": 0, "forward_and_backward_fail_num": 0,
+                                "success_num": 0}
 
     def print_pretest_result(self):
         res_dict = {
@@ -34,7 +35,7 @@ class Comparator:
         }
         tb = PrettyTable()
         tb.add_column("Category", list(res_dict.keys()))
-        tb.add_column("statistics",list(res_dict.values()))
+        tb.add_column("statistics", list(res_dict.values()))
         info_tb = str(tb)
         print_info_log(info_tb)
 
@@ -62,9 +63,15 @@ class Comparator:
         self.compare_alg_names.append(name)
 
     def compare_output(self, api_name, bench_out, npu_out, bench_grad=None, npu_grad=None):
-        is_fwd_success, fwd_compare_alg_results = self._compare_core_wrapper(bench_out, npu_out)
+        if "dropout" in api_name:
+            is_fwd_success, fwd_compare_alg_results = self._compare_dropout(bench_out, npu_out)
+        else:
+            is_fwd_success, fwd_compare_alg_results = self._compare_core_wrapper(bench_out, npu_out)
         if bench_grad and npu_grad:
-            is_bwd_success, bwd_compare_alg_results = self._compare_core_wrapper(bench_grad, npu_grad)
+            if "dropout" in api_name:
+                is_bwd_success, bwd_compare_alg_results = self._compare_dropout(bench_grad[0], npu_grad[0])
+            else:
+                is_bwd_success, bwd_compare_alg_results = self._compare_core_wrapper(bench_grad, npu_grad)
         else:
             is_bwd_success, bwd_compare_alg_results = CompareConst.NA, None
         self.record_results(api_name, is_fwd_success, is_bwd_success, fwd_compare_alg_results, bwd_compare_alg_results)
@@ -80,4 +87,15 @@ class Comparator:
     def _compare_core_wrapper(self, bench_out, npu_out):
         name = self.compare_alg_names[0]
         detailed_result, test_success = compare_core(bench_out, npu_out, self.compare_alg[name][0])
-        return test_success, detailed_result
\ No newline at end of file
+        return test_success, detailed_result
+
+    @staticmethod
+    def _compare_dropout(bench_out, npu_out):
+        tensor_num = bench_out.numel()
+        if tensor_num >= 100:
+            if abs((bench_out == 0).sum() - (npu_out == 0).sum()) / tensor_num < 0.1:
+                return True, 1
+            else:
+                return False, 0
+        else:
+            return True, 1
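
Review note on the `algorithm.py` hunk: a dtype mismatch between NPU and bench values no longer raises a ValueError that aborts the whole comparison run; the metric is instead reported as not-available with a failed flag, and the remaining APIs keep being checked. A minimal sketch of the new behavior, where `NA` is an assumed stand-in for whatever string `CompareConst.NA` actually holds:

```python
import numpy as np

NA = "N/A"  # assumed stand-in for CompareConst.NA

def get_max_rel_err_sketch(n_value, b_value):
    # Dtype mismatch is now reported instead of raised, so one bad API
    # no longer kills the rest of the comparison pass.
    if n_value.dtype != b_value.dtype:
        return NA, False
    rel_err = np.abs((n_value - b_value) / (b_value + np.finfo(b_value.dtype).eps)).max()
    return rel_err, rel_err < 0.001

print(get_max_rel_err_sketch(np.ones(4, np.float16), np.ones(4, np.float32)))  # ('N/A', False)
print(get_max_rel_err_sketch(np.ones(4, np.float32), np.ones(4, np.float32)))  # (0.0, True)
```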
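Review note on `_compare_dropout`: dropout outputs are random masks, so element-wise comparison against the bench output is meaningless; the new path instead checks that the fraction of zeroed elements agrees within 10%, and passes by default below 100 elements, where the statistic is too noisy. For the backward pass, only the first gradient tensor is compared, per the `bench_grad[0]` / `npu_grad[0]` routing in `compare_output`. A self-contained sketch of the same check:

```python
import torch

def compare_dropout_sketch(bench_out, npu_out):
    tensor_num = bench_out.numel()
    if tensor_num < 100:
        return True, 1  # too few elements for a stable zero-ratio statistic
    # Compare how many elements each implementation zeroed out,
    # relative to the total element count.
    zero_diff = abs((bench_out == 0).sum() - (npu_out == 0).sum()).item()
    return (True, 1) if zero_diff / tensor_num < 0.1 else (False, 0)

bench = torch.nn.functional.dropout(torch.ones(1000), p=0.5)
npu_like = torch.nn.functional.dropout(torch.ones(1000), p=0.5)  # stand-in for the NPU output
print(compare_dropout_sketch(bench, npu_like))  # (True, 1) with overwhelming probability
```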