diff --git a/debug/accuracy_tools/atat/test/core_ut/data_dump/test_json_writer.py b/debug/accuracy_tools/atat/test/core_ut/data_dump/test_json_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..925fd576e6f946b500a05c481a3480d7b49f49d5 --- /dev/null +++ b/debug/accuracy_tools/atat/test/core_ut/data_dump/test_json_writer.py @@ -0,0 +1,179 @@ +import unittest +from atat.core.data_dump.json_writer import DataWriter + +import os +import csv + +from pathlib import Path +import json + +class TestDataWriter(unittest.TestCase): + def test_write_data_to_csv(self): + cur_path = os.path.dirname(os.path.realpath(__file__)) + file_path = os.path.join(cur_path, "test.csv") + is_exists = os.path.exists(file_path) + if is_exists: + os.remove(file_path) + + data = {"A":"1", "B":"2", "C":"3"} + result = data.values() + header = data.keys() + DataWriter.write_data_to_csv(result, header, file_path) + with open(file_path, "r") as f: + reader = csv.DictReader(f) + column_first = [row for row in reader][0] + self.assertEqual(data, column_first) + + # + is_exists = os.path.exists(file_path) + self.assertTrue(is_exists) + + data = {"A":"4", "B":"5", "C":"6"} + result = data.values() + header = data.keys() + DataWriter.write_data_to_csv(result, header, file_path) + with open(file_path, "r") as f: + reader = csv.DictReader(f) + column_last = [row for row in reader][-1] + self.assertEqual(data, column_last) + + os.remove(file_path) + + def test_initialize_json_file(self): + cur_path = os.path.dirname(os.path.realpath(__file__)) + dump_tensor_data_dir = os.path.join(cur_path, "dump_tensor_data.json") + dump_file_path = os.path.join(cur_path, "dump_file.json") + stack_file_path = os.path.join(cur_path, "stack_file.json") + construct_file_path = os.path.join(cur_path, "construct_file.json") + if not os.path.exists(stack_file_path): + Path(stack_file_path).touch() + if not os.path.exists(construct_file_path): + Path(construct_file_path).touch() + + test = DataWriter() + test.stack_file_path = stack_file_path + test.dump_file_path = dump_file_path + test.dump_tensor_data_dir = dump_tensor_data_dir + test.construct_file_path = construct_file_path + + test.initialize_json_file() + + with open(dump_file_path) as f: + load_data = json.load(f) + result = {"dump_data_dir": dump_tensor_data_dir, "data": {}} + self.assertEqual(result, load_data) + is_exist_1 = os.path.exists(test.stack_file_path) + self.assertTrue(is_exist_1) + is_exist_2 = os.path.exists(test.construct_file_path) + self.assertTrue(is_exist_2) + + os.remove(construct_file_path) + os.remove(stack_file_path) + os.remove(dump_file_path) + + def test_update_dump_paths(self): + test = DataWriter() + self.assertTrue(test.dump_file_path == None) + + cur_path = os.path.dirname(os.path.realpath(__file__)) + test_path = os.path.join(cur_path, "test1.json") + + test.update_dump_paths(test_path, test_path, test_path, test_path, test_path) + self.assertFalse(test.dump_file_path == None) + self.assertFalse(test.stack_file_path == None) + self.assertFalse(test.construct_file_path == None) + self.assertFalse(test.dump_tensor_data_dir == None) + self.assertFalse(test.free_benchmark_file_path == None) + + def test_update_data(self): + data = {"A":"1", "B":"2", "C":{"D":"2"}} + test = DataWriter() + test.cache_data["data"]["test_1"] = True + test.cache_data["data"]["test_2"] = False + + test.update_data(data) + self.assertEqual(test.cache_data["data"]["A"], "1") + + new_data = {"C":{"F":3}} + test.update_data(new_data) + 
self.assertEqual(test.cache_data["data"]["C"]["F"], 3) + + + def test_flush_data_when_buffer_is_full_and_test_write_data_json(self): + data = {"A":"1", "B":"2", "data":{}} + test = DataWriter() + test.buffer_size = 1 + test.cache_data["data"] = {"A":"1", "B":"2", "C":"3"} + + self.assertTrue(len(test.cache_data["data"]) >= test.buffer_size) + cur_path = os.path.dirname(os.path.realpath(__file__)) + dump_tensor_data_dir = os.path.join(cur_path, "dump_tensor_data.json") + dump_file_path = os.path.join(cur_path, "dump_file.json") + stack_file_path = os.path.join(cur_path, "stack_file.json") + construct_file_path = os.path.join(cur_path, "construct_file.json") + + test.dump_file_path = dump_file_path + test.dump_tensor_data_dir = dump_tensor_data_dir + + with open(dump_file_path, "w") as f: + dump_data = json.dumps(data) + f.write(dump_data) + + test.flush_data_when_buffer_is_full() + + with open(dump_file_path, "r") as f: + new_data = json.load(f) + + data.update({"data": {"A":"1", "B":"2", "C":"3"}}) + self.assertEqual(new_data, data) + + self.assertTrue(test.cache_data["data"] == {}) + os.remove(dump_file_path) + + + def test_update_stack(self): + data = {"A":"1", "B":"2", "data":{}} + test = DataWriter() + test.update_stack(data) + self.assertEqual(test.cache_stack, data) + + def test_update_construct(self): + data = {"A":"1", "B":"2", "data":{}} + test = DataWriter() + test.update_construct(data) + self.assertEqual(test.cache_construct, data) + + def test_write_stack_info_json(self): + test = DataWriter() + data = {"A":"1", "B":"2", "data":{}} + test.cache_stack = data + + cur_path = os.path.dirname(os.path.realpath(__file__)) + file_path = os.path.join(cur_path, "dump.json") + + test.write_stack_info_json(file_path) + + with open(file_path, "r") as f: + load_result = json.load(f) + try: + self.assertEqual(load_result, data) + finally: + os.remove(file_path) + + + def test_write_construct_info_json(self): + test = DataWriter() + data = {"A":"1", "B":"2", "data":{}} + test.cache_construct = data + + cur_path = os.path.dirname(os.path.realpath(__file__)) + file_path = os.path.join(cur_path, "dump.json") + + test.write_construct_info_json(file_path) + + with open(file_path, "r") as f: + load_result = json.load(f) + try: + self.assertEqual(load_result, data) + finally: + os.remove(file_path) diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_api_registry.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_api_registry.py new file mode 100644 index 0000000000000000000000000000000000000000..583f4d6c192a9047f569d2592595af8896acccd7 --- /dev/null +++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_api_registry.py @@ -0,0 +1,130 @@ +import unittest +from atat.pytorch.hook_module.api_registry import ApiRegistry, torch_version_above_2, is_gpu + + +class TestApiRegistry(unittest.TestCase): + #100 + def test_store_ori_attr(self): + class A(): + a1 = 1 + class B(): + a = A() + b1 = 1 + b2 = 2 + + api_list = ["a.a1", "b1", "b2"] + expect_output = {"a.a1":1, "b1":1, "b2":2} + actual_output = dict() + ApiRegistry.store_ori_attr(B, api_list, actual_output) + self.assertEqual(actual_output, expect_output) + + #100 + def test_set_api_attr(self): + class A(): + a1 = 1 + class B(): + a = A().__class__ + b1 = 1 + + attr_dict = {"a.a2":2, "b2":2, "b3":3} + ApiRegistry.set_api_attr(B, attr_dict) + + for k, v in attr_dict.items(): + if '.' 
in k:
+                sub_module_name, sub_op = k.rsplit('.', 1)
+                sub_module = getattr(B, sub_module_name, None)
+                #print(True)
+                self.assertEqual(getattr(sub_module, sub_op), v)
+            else:
+                self.assertEqual(getattr(B, k), v)
+
+    def test_api_modularity(self):
+
+        import torch
+        import torch.distributed as dist
+        #import torch_npu  # torch_npu is not installed in the CI environment
+        from atat.pytorch.hook_module.api_registry import torch_without_guard_version, npu_distributed_api, is_gpu, torch_version_above_2
+
+
+
+        reg = ApiRegistry()
+        attr_dict = {"b2":2, "b3":3}
+        reg.tensor_hook_attr = attr_dict
+        reg.torch_hook_attr = attr_dict
+        reg.functional_hook_attr = attr_dict
+        reg.distributed_hook_attr = attr_dict
+        reg.npu_distributed_hook_attr = attr_dict
+        reg.aten_hook_attr = attr_dict
+        reg.vf_hook_attr = attr_dict
+        reg.torch_npu_hook_attr = attr_dict
+
+        reg.api_modularity()
+        self.assertEqual(torch.Tensor.b2, 2)
+
+        self.assertEqual(torch.b2, 2)
+        self.assertEqual(torch.nn.functional.b2, 2)
+        self.assertEqual(dist.b2, 2)
+        self.assertEqual(dist.distributed_c10d.b2, 2)
+        #if not is_gpu and not torch_without_guard_version:
+            #self.assertEqual(torch_npu.distributed.b2, 2)
+            #self.assertEqual(torch_npu.distributed.distributed_c10d.b2, 2)
+        if torch_version_above_2:
+            self.assertEqual(torch.ops.aten.b2, 2)
+            self.assertEqual(torch._VF.b2, 2)
+        #if not is_gpu:
+            #self.assertEqual(torch_npu.b2, 2)
+
+
+    def test_api_originality(self):
+        import torch
+        import torch.distributed as dist
+        #import torch_npu  # torch_npu is not installed in the CI environment
+        from atat.pytorch.hook_module.api_registry import torch_without_guard_version, npu_distributed_api, is_gpu, torch_version_above_2
+
+
+
+        reg = ApiRegistry()
+        attr_dict = {"b2":2, "b3":3}
+        reg.tensor_hook_attr = attr_dict
+        reg.torch_hook_attr = attr_dict
+        reg.functional_hook_attr = attr_dict
+        reg.distributed_hook_attr = attr_dict
+        reg.npu_distributed_hook_attr = attr_dict
+        reg.aten_hook_attr = attr_dict
+        reg.vf_hook_attr = attr_dict
+        reg.torch_npu_hook_attr = attr_dict
+
+        reg.api_originality()
+        self.assertEqual(torch.Tensor.b2, 2)
+
+        self.assertEqual(torch.b2, 2)
+        self.assertEqual(torch.nn.functional.b2, 2)
+        self.assertEqual(dist.b2, 2)
+        self.assertEqual(dist.distributed_c10d.b2, 2)
+        #if not is_gpu and not torch_without_guard_version:
+            #self.assertEqual(torch_npu.distributed.b2, 2)
+            #self.assertEqual(torch_npu.distributed.distributed_c10d.b2, 2)
+        if torch_version_above_2:
+            self.assertEqual(torch.ops.aten.b2, 2)
+            self.assertEqual(torch._VF.b2, 2)
+        #if not is_gpu:
+            #self.assertEqual(torch_npu.b2, 2)
+
+    def test_initialize_hook(self):
+        def hook_test():
+            pass
+
+        reg = ApiRegistry()
+        reg.initialize_hook(hook_test)
+        empty_list = []
+        self.assertFalse(empty_list==reg.tensor_hook_attr)
+        self.assertFalse(empty_list==reg.torch_hook_attr)
+        self.assertFalse(empty_list==reg.functional_hook_attr)
+        self.assertFalse(empty_list==reg.distributed_hook_attr)
+        self.assertFalse(empty_list==reg.npu_distributed_hook_attr)
+        if torch_version_above_2:
+            #print(True)
+            self.assertFalse(empty_list==reg.aten_hook_attr)
+        if not is_gpu:
+            #print(True)
+            self.assertFalse(empty_list==reg.torch_npu_hook_attr)
\ No newline at end of file
diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_hook_module.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_hook_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a0620ce2e5418e2b7103b1191c0f6c85f5ab259
--- /dev/null
+++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_hook_module.py
@@ -0,0 +1,37 @@
+import unittest
+from unittest.mock import patch, Mock
+
+from atat.pytorch.hook_module.hook_module import HOOKModule
+
+class TestHookModule(unittest.TestCase):
+    def test_call_1(self):
+        def forward_pre_hook():
+            return "result_input", "result_kwargs"
+        def forward_hook():
+            return 2
+        def backward_hook():
+            pass
+
+        def hook(prefix):
+            return forward_pre_hook, forward_hook, backward_hook
+        HOOKModule.prefix_op_name_ = "123"
+        test = HOOKModule(hook)
+        test._call_func = Mock(return_value=1)
+        result = test()
+        self.assertEqual(result, 1)
+
+    def test_call_2(self):
+        def forward_pre_hook(nope, input, kwargs):
+            return input, kwargs
+        def forward_hook(nope, input, kwargs, result):
+            return 2
+        def backward_hook():
+            pass
+
+        def hook(prefix):
+            return forward_pre_hook, forward_hook, backward_hook
+        HOOKModule.prefix_op_name_ = "123"
+        test = HOOKModule(hook)
+        test.forward = Mock(return_value=1)
+        result = test()
+        self.assertEqual(result, 2)
\ No newline at end of file
diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_aten.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_aten.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8969bf5e96a901b873a7701ac4501740d3a4edc
--- /dev/null
+++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_aten.py
@@ -0,0 +1,55 @@
+import unittest
+import torch
+from atat.pytorch.hook_module.wrap_aten import AtenOPTemplate, AtenOPPacketTemplate
+
+
+def hook(name):
+    def forward_pre_hook(nope, input, kwargs):
+        return input, kwargs
+    def forward_hook():
+        return 2
+    def backward_hook():
+        pass
+
+    return forward_pre_hook, forward_hook, backward_hook
+
+
+if torch.__version__.split("+")[0] > '2.0':
+    class TestWrapAten(unittest.TestCase):
+        def setUp(self):
+            self.aten_op = AtenOPPacketTemplate(torch.ops.aten.convolution, hook)
+
+        def test_atenop_attribute(self):
+            self.setUp()
+            self.assertEqual(self.aten_op.default.op, torch.ops.aten.convolution.default)
+            self.assertEqual(self.aten_op.out.op, torch.ops.aten.convolution.out)
+
+        def test_atenop_forward(self):
+            self.setUp()
+            image = torch.randn(4, 3, 24, 24)
+            kernel = torch.randn(10, 3, 3, 3)
+            functional_out = torch.nn.functional.conv2d(image, kernel, stride=[1, 1],
+                                                        padding=[1, 1], dilation=[1, 1], groups=1, bias=None)
+            aten_out = self.aten_op(image, kernel, None, [1, 1], [1, 1], [1, 1], False, [0, 0], 1)
+            self.assertTrue(torch.equal(aten_out, functional_out))
+
+        def test_atenop_overload_forward(self):
+            self.setUp()
+            image = torch.randn(4, 3, 24, 24)
+            kernel = torch.randn(10, 3, 3, 3)
+            functional_out = torch.nn.functional.conv2d(image, kernel, stride=[1, 1],
+                                                        padding=[1, 1], dilation=[1, 1], groups=1, bias=None)
+            aten_out = self.aten_op(image, kernel, None, [1, 1], [1, 1], [1, 1], False, [0, 0], 1)
+            self.assertTrue(torch.equal(aten_out, functional_out))
+
+        def test_atenop_nonattr(self):
+            self.setUp()
+            self.assertRaises(AttributeError, getattr, self.aten_op, "foo")
+
+        def test_atenop_overloads(self):
+            self.setUp()
+            self.assertEqual(self.aten_op.overloads(), self.aten_op.opPacket.overloads())
+
+
+
+    
\ No newline at end of file
diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_distributed.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..0df9088757c4d1cc89c31f72041f6565790f16ec
--- /dev/null
+++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_distributed.py
@@ -0,0 +1,35 @@
+import
unittest +import torch.distributed as dist +from atat.pytorch.hook_module.wrap_distributed import * + +class TestWrapDistributed(unittest.TestCase): + def hook(name): + def forward_pre_hook(nope, input, kwargs): + return input, kwargs + def forward_hook(): + return 2 + def backward_hook(): + pass + return forward_pre_hook, forward_hook, backward_hook + + def test_get_distributed_ops(self): + ops = get_distributed_ops() + self.assertIsInstance(ops, set) + + def test_DistributedOPTemplate(self): + self.setUp() + op_name = 'all_reduce' + if op_name in get_distributed_ops(): + op = DistributedOPTemplate(op_name, self.hook) + self.assertEqual(op.op_name_, op_name) + + def test_wrap_distributed_op(self): + op_name = 'all_reduce' + if op_name in get_distributed_ops(): + wrapped_op = wrap_distributed_op(op_name, self.hook) + self.assertTrue(callable(wrapped_op)) + + def test_wrap_distributed_ops_and_bind(self): + wrap_distributed_ops_and_bind(self.hook) + for op_name in get_distributed_ops(): + self.assertTrue(hasattr(HOOKDistributedOP, "wrap_" + str(op_name))) \ No newline at end of file diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_functional.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_functional.py new file mode 100644 index 0000000000000000000000000000000000000000..232117498b5900b1d7d178208b81430f86b52eb4 --- /dev/null +++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_functional.py @@ -0,0 +1,20 @@ +import unittest +import torch +from atat.pytorch.hook_module import wrap_functional as wf + +class TestWrapFunctional(unittest.TestCase): + + def test_remove_dropout(self): + input_tensor = torch.randn(20, 16) + wf.remove_dropout() + output_tensor = torch.nn.functional.dropout(input_tensor) + self.assertTrue(torch.equal(input_tensor, output_tensor)) + + def test_get_functional_ops(self): + expected_ops = {'relu', 'sigmoid', 'softmax'} + actual_ops = wf.get_functional_ops() + self.assertTrue(expected_ops.issubset(actual_ops)) + + def test_wrap_functional_ops_and_bind(self): + wf.wrap_functional_ops_and_bind(None) + self.assertTrue(hasattr(wf.HOOKFunctionalOP, 'wrap_relu')) \ No newline at end of file diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_tensor.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..fd3113eaefa82652f321d57bdecbee1dfe910c01 --- /dev/null +++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_tensor.py @@ -0,0 +1,35 @@ +import unittest +import torch +import yaml +from atat.pytorch.hook_module.wrap_tensor import get_tensor_ops, HOOKTensor, TensorOPTemplate, wrap_tensor_op, wrap_tensor_ops_and_bind + +class TestWrapTensor(unittest.TestCase): + + def hook(name): + def forward_pre_hook(nope, input, kwargs): + return input, kwargs + def forward_hook(): + return 2 + def backward_hook(): + pass + return forward_pre_hook, forward_hook, backward_hook + + def test_get_tensor_ops(self): + result = get_tensor_ops() + self.assertIsInstance(result, set) + + def test_HOOKTensor(self): + hook_tensor = HOOKTensor() + self.assertIsInstance(hook_tensor, HOOKTensor) + + def test_TensorOPTemplate(self): + tensor_op_template = TensorOPTemplate('add', self.hook) + self.assertTrue(tensor_op_template.op_name_, 'add') + + def test_wrap_tensor_op(self): + wrapped_op = wrap_tensor_op('add', self.hook) + self.assertTrue(callable(wrapped_op)) + + def test_wrap_tensor_ops_and_bind(self): + 
wrap_tensor_ops_and_bind(self.hook) + self.assertTrue(hasattr(HOOKTensor, 'wrap_add')) \ No newline at end of file diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_torch.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_torch.py new file mode 100644 index 0000000000000000000000000000000000000000..add7a99e0aece1c63381bfc06f81beb35cebb18d --- /dev/null +++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_torch.py @@ -0,0 +1,43 @@ +import unittest +import torch +import yaml +from atat.pytorch.hook_module.wrap_torch import * + +class TestWrapTorch(unittest.TestCase): + + def hook(name): + def forward_pre_hook(nope, input, kwargs): + return input, kwargs + def forward_hook(): + return 2 + def backward_hook(): + pass + return forward_pre_hook, forward_hook, backward_hook + + def setUp(self): + + self.op_name = 'add' + self.torch_op = wrap_torch_op(self.op_name, self.hook) + + def test_get_torch_ops(self): + self.setUp() + ops = get_torch_ops() + self.assertIsInstance(ops, set) + self.assertIn(self.op_name, ops) + + def test_TorchOPTemplate(self): + self.setUp() + template = TorchOPTemplate(self.op_name, self.hook) + self.assertEqual(template.op_name_, self.op_name) + self.assertEqual(template.prefix_op_name_, "Torch." + str(self.op_name) + ".") + + def test_forward(self): + self.setUp() + template = TorchOPTemplate(self.op_name, self.hook) + result = template.forward(torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6])) + torch.testing.assert_close(result, torch.tensor([5, 7, 9])) + + def test_wrap_torch_ops_and_bind(self): + self.setUp() + wrap_torch_ops_and_bind(self.hook) + self.assertTrue(hasattr(HOOKTorchOP, "wrap_" + self.op_name)) \ No newline at end of file diff --git a/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_vf.py b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_vf.py new file mode 100644 index 0000000000000000000000000000000000000000..8d57fad6eb623569acfbbffe28dc9decb1ca30b6 --- /dev/null +++ b/debug/accuracy_tools/atat/test/pytorch_ut/hook_module/test_wrap_vf.py @@ -0,0 +1,11 @@ +import unittest +import torch +from atat.pytorch.hook_module import wrap_vf + +class TestWrapVF(unittest.TestCase): + def setUp(self): + self.hook = lambda x: x + + def test_get_vf_ops(self): + ops = wrap_vf.get_vf_ops() + self.assertIsInstance(ops, list) \ No newline at end of file diff --git a/debug/accuracy_tools/atat/test/test_module_processer.py b/debug/accuracy_tools/atat/test/test_module_processer.py new file mode 100644 index 0000000000000000000000000000000000000000..31f96bfa08fab22b48f5f3339dc975bba3e20f83 --- /dev/null +++ b/debug/accuracy_tools/atat/test/test_module_processer.py @@ -0,0 +1,64 @@ +import unittest +from atat.pytorch.module_processer import ModuleProcesser +from atat.pytorch.common.utils import Const + +import torch + +class TestModuleProcesser(unittest.TestCase): + def test_filter_tensor_and_tuple(self): + def func(nope, x): + return x * 2 + + result_1 = ModuleProcesser.filter_tensor_and_tuple(func)(None, torch.tensor([1])) + self.assertEqual(result_1, torch.tensor([2])) + + result_2 = ModuleProcesser.filter_tensor_and_tuple(func)(None, "test") + self.assertEqual(result_2, "test") + + def test_clone_return_value_and_test_clone_if_tensor(self): + def func(x): + return x + + input = torch.tensor([1]) + input_tuple = (torch.tensor([1]), torch.tensor([2])) + input_list = [torch.tensor([1]), torch.tensor([2])] + input_dict = {"A":torch.tensor([1]), "B":torch.tensor([2])} + + result = 
ModuleProcesser.clone_return_value(func)(input) + result[0] = 2 + self.assertEqual(result, input) + + result_tuple = ModuleProcesser.clone_return_value(func)(input_tuple) + result_tuple[0][0] = 2 + self.assertEqual(result_tuple, input_tuple) + + result_list = ModuleProcesser.clone_return_value(func)(input_list) + result_list[0][0] = 2 + self.assertEqual(result_list, input_list) + + result_dict = ModuleProcesser.clone_return_value(func)(input_dict) + result_dict["A"][0] = 2 + self.assertEqual(result_dict, input_dict) + + + def test_node_hook(self): + empty_list = [] + test = ModuleProcesser(None) + pre_hook = test.node_hook("test", Const.START) + self.assertIsNone(pre_hook) + end_hook = test.node_hook("test", "stop") + self.assertIsNotNone(end_hook) + + class A(): + pass + pre_hook(A, None, None) + self.assertIn("test", test.module_count) + self.assertFalse(test.module_stack==empty_list) + + def test_module_count_func(self): + test = ModuleProcesser(None) + self.assertEqual(test.module_count, {}) + + module_name = "nope" + test.module_count_func(module_name) + self.assertEqual(test.module_count["nope"], 0) \ No newline at end of file