diff --git a/tests/fuzz/NpuFusedLamb_fuzz.py b/tests/fuzz/NpuFusedLamb_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..31292562a5fa6c7b95b5f90bb25d8a5b6c3e8c4b
--- /dev/null
+++ b/tests/fuzz/NpuFusedLamb_fuzz.py
@@ -0,0 +1,55 @@
+import atheris
+import random
+import sys
+
+import torch
+import torch_npu
+
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedLamb
+
+
+@atheris.instrument_func
+def Test_NpuFusedLamb(input_bytes):
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception as e:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    betas_0 = random.random() * check_list_input[2]
+    betas_1 = random.random() * check_list_input[3]
+    betas = (betas_0, betas_1)
+    eps = random.random() * check_list_input[4]
+    weight_decay = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    adam = input_list[random.randint(0, 1)]
+    use_global_grad_norm = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, check_list_input[0]) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedLamb(params, lr, betas, eps, weight_decay, adam, use_global_grad_norm)
+    except Exception as e:
+        print(e)
+    return True
+
+
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedLamb)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedLamb_samples/samples1.txt b/tests/fuzz/NpuFusedLamb_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..792c2c61d3c2c3f859dd99c3119d6c286ce4a213
--- /dev/null
+++ b/tests/fuzz/NpuFusedLamb_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedRMSpropTF_fuzz.py b/tests/fuzz/NpuFusedRMSpropTF_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e063eaa626d85ea9739aa1f06dc1462e97d35b4
--- /dev/null
+++ b/tests/fuzz/NpuFusedRMSpropTF_fuzz.py
@@ -0,0 +1,55 @@
+import atheris
+import random
+import sys
+
+import torch
+import torch_npu
+
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedRMSpropTF
+
+
+@atheris.instrument_func
+def Test_NpuFusedRMSpropTF(input_bytes):
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception as e:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    alpha = random.random() * check_list_input[2]
+    eps = random.random() * check_list_input[3]
+    weight_decay = random.random() * check_list_input[4]
+    momentum = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    centered = input_list[random.randint(0, 1)]
+    decoupled_decay = input_list[random.randint(0, 1)]
+    lr_in_momentum = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, check_list_input[0]) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedRMSpropTF(params, lr, alpha, eps, weight_decay, momentum, centered, decoupled_decay, lr_in_momentum)
+    except Exception as e:
+        print(e)
+    return True
+
+
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedRMSpropTF)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedRMSpropTF_samples/samples1.txt b/tests/fuzz/NpuFusedRMSpropTF_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7528086249e8301e1bcd33b5793ec13ef0754823
--- /dev/null
+++ b/tests/fuzz/NpuFusedRMSpropTF_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,0,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedRMSprop_fuzz.py b/tests/fuzz/NpuFusedRMSprop_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ac95e04cfd65d6e024feed78bae23adc51a9f2d
--- /dev/null
+++ b/tests/fuzz/NpuFusedRMSprop_fuzz.py
@@ -0,0 +1,53 @@
+import atheris
+import random
+import sys
+
+import torch
+import torch_npu
+
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedRMSprop
+
+
+@atheris.instrument_func
+def Test_NpuFusedRMSprop(input_bytes):
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception as e:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    alpha = random.random() * check_list_input[2]
+    eps = random.random() * check_list_input[3]
+    weight_decay = random.random() * check_list_input[4]
+    momentum = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    centered = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, check_list_input[0]) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedRMSprop(params, lr, alpha, eps, weight_decay, momentum, centered)
+    except Exception as e:
+        print(e)
+    return True
+
+
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedRMSprop)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedRMSprop_samples/samples1.txt b/tests/fuzz/NpuFusedRMSprop_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7528086249e8301e1bcd33b5793ec13ef0754823
--- /dev/null
+++ b/tests/fuzz/NpuFusedRMSprop_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,0,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedSGD_fuzz.py b/tests/fuzz/NpuFusedSGD_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..51446c123145dc67853a4c72fce5531f93b5597a
--- /dev/null
+++ b/tests/fuzz/NpuFusedSGD_fuzz.py
@@ -0,0 +1,51 @@
+import atheris
+import random
+import sys
+
+import torch
+import torch_npu
+
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedSGD
+
+
+@atheris.instrument_func
+def Test_NpuFusedSGD(input_bytes):
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 6:
+        return False
+    try:
+        for i in range(6):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception as e:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    momentum = random.random() * check_list_input[2]
+    dampening = random.random() * check_list_input[3]
+    weight_decay = random.random() * check_list_input[4]
+    number_of_params = int(check_list_input[5])
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, check_list_input[0]) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedSGD(params, lr, momentum, dampening, weight_decay, input_list[random.randint(0, 1)])
+    except Exception as e:
+        print(e)
+    return True
+
+
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedSGD)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedSGD_samples/samples1.txt b/tests/fuzz/NpuFusedSGD_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..350ee34489dc4684b5a6497aff3c20aefa8642cf
--- /dev/null
+++ b/tests/fuzz/NpuFusedSGD_samples/samples1.txt
@@ -0,0 +1 @@
+5,5,5,5,5,5
\ No newline at end of file
diff --git a/tests/fuzz/zero_grad_fuzz.py b/tests/fuzz/zero_grad_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b01081aebb05138e48772745a2e5fab17346cb2
--- /dev/null
+++ b/tests/fuzz/zero_grad_fuzz.py
@@ -0,0 +1,52 @@
+import atheris
+import random
+import sys
+
+import torch
+import torch_npu
+
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedSGD
+
+
+@atheris.instrument_func
+def Test_zero_grad(input_bytes):
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 6:
+        return False
+    try:
+        for i in range(6):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception as e:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    momentum = random.random() * check_list_input[2]
+    dampening = random.random() * check_list_input[3]
+    weight_decay = random.random() * check_list_input[4]
+    number_of_params = int(check_list_input[5])
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, check_list_input[0]) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        opt = NpuFusedSGD(params, lr, momentum, dampening, weight_decay, input_list[random.randint(0, 1)])
+        opt.zero_grad()
+    except Exception as e:
+        print(e)
+    return True
+
+
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_zero_grad)
+    atheris.Fuzz()
diff --git a/tests/fuzz/zero_grad_samples/samples1.txt b/tests/fuzz/zero_grad_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..350ee34489dc4684b5a6497aff3c20aefa8642cf
--- /dev/null
+++ b/tests/fuzz/zero_grad_samples/samples1.txt
@@ -0,0 +1 @@
+5,5,5,5,5,5
\ No newline at end of file
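
Note (not part of the patch): each harness can also be exercised by hand with a single corpus line, since `atheris.Setup`/`atheris.Fuzz` only feed raw bytes into the test function. A minimal sketch, assuming `atheris`, `torch_npu`, and the apex NPU optimizers are installed, that `tests/fuzz` is on `sys.path`, and that the command is run from the repository root:

```python
# Hypothetical manual driver for the NpuFusedSGD harness, using its shipped seed.
# It bypasses libFuzzer and calls the instrumented test function directly.
from NpuFusedSGD_fuzz import Test_NpuFusedSGD

with open("tests/fuzz/NpuFusedSGD_samples/samples1.txt", "rb") as f:
    seed = f.read()  # b"5,5,5,5,5,5"

# The harness returns False for seeds that are not six comma-separated numbers;
# otherwise it returns True (exceptions raised while building the NPU tensors or
# constructing the optimizer are printed and swallowed inside the harness).
print(Test_NpuFusedSGD(seed))
```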