diff --git a/tests/fuzz/Lamb_fuzz.py b/tests/fuzz/Lamb_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..3681fcfce934bee0679499ea61855e09adbfce17
--- /dev/null
+++ b/tests/fuzz/Lamb_fuzz.py
@@ -0,0 +1,52 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import Lamb
+
+@atheris.instrument_func
+def Test_Lamb(input_bytes):
+
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    betas_0 = random.random() * check_list_input[2]
+    betas_1 = random.random() * check_list_input[3]
+    betas = (betas_0, betas_1)
+    eps = random.random() * check_list_input[4]
+    weight_decay = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    adam = input_list[random.randint(0, 1)]
+    use_global_grad_norm = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:  # deliberately leave the last param without a grad
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100  # synthetic gradient
+        Lamb(params, lr, betas, eps, weight_decay, adam, use_global_grad_norm)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_Lamb ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_Lamb)
+    atheris.Fuzz()
diff --git a/tests/fuzz/Lamb_samples/samples1.txt b/tests/fuzz/Lamb_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..792c2c61d3c2c3f859dd99c3119d6c286ce4a213
--- /dev/null
+++ b/tests/fuzz/Lamb_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,0,5
\ No newline at end of file
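Note: the seed corpus line above is a set of scale caps, not raw hyperparameters; Test_Lamb multiplies most fields by random.random() before passing them to the optimizer. A minimal decoding sketch (the field labels are descriptive only, not identifiers used by the harness):

    # Hypothetical labels for the 7 comma-separated fields consumed by Test_Lamb.
    FIELDS = ['max_dim', 'lr_cap', 'beta0_cap', 'beta1_cap',
              'eps_cap', 'weight_decay_cap', 'n_params']

    def decode_seed(line):
        """Map one corpus line onto the caps the harness scales by."""
        values = [float(v) for v in line.strip().split(',')]
        assert len(values) == len(FIELDS)
        return dict(zip(FIELDS, values))

    print(decode_seed('5,1,1,1,1,0,5'))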
diff --git a/tests/fuzz/NpuFusedAdadelta_fuzz.py b/tests/fuzz/NpuFusedAdadelta_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e7738330415d3e48186da53b96dd06218ece218
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdadelta_fuzz.py
@@ -0,0 +1,48 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedAdadelta
+
+@atheris.instrument_func
+def Test_NpuFusedAdadelta(input_bytes):
+
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 6:
+        return False
+    try:
+        for i in range(6):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    rho = random.random() * check_list_input[2]
+    eps = random.random() * check_list_input[3]
+    weight_decay = random.random() * check_list_input[4]
+    number_of_params = int(check_list_input[5])
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedAdadelta(params, lr, rho, eps, weight_decay)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedAdadelta ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedAdadelta)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedAdadelta_samples/samples1.txt b/tests/fuzz/NpuFusedAdadelta_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fa1b7d09477aee5fa3b46a1922fddf944b94ad6a
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdadelta_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,0,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedAdamP_fuzz.py b/tests/fuzz/NpuFusedAdamP_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b217146432d9090c72eaf2a1b0a62fb2fb1e17c
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdamP_fuzz.py
@@ -0,0 +1,53 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedAdamP
+
+@atheris.instrument_func
+def Test_NpuFusedAdamP(input_bytes):
+
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 9:
+        return False
+    try:
+        for i in range(9):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    betas_0 = random.random() * check_list_input[2]
+    betas_1 = random.random() * check_list_input[3]
+    betas = (betas_0, betas_1)
+    eps = random.random() * check_list_input[4]
+    weight_decay = random.random() * check_list_input[5]
+    delta = check_list_input[6]
+    wd_ratio = check_list_input[7]
+    number_of_params = int(check_list_input[8])
+    nesterov = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedAdamP(params, lr, betas, eps, weight_decay, delta, wd_ratio, nesterov)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedAdamP ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedAdamP)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedAdamP_samples/samples1.txt b/tests/fuzz/NpuFusedAdamP_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..044acb11b9c093dae6a52731f1c1052a592d38cd
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdamP_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,0,1,1,5
\ No newline at end of file
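Note: these harnesses hand-parse comma-separated bytes and silently reject any input that fails the split/float checks, which discards most of the mutation space. atheris also ships a FuzzedDataProvider that derives structured values from arbitrary bytes; a minimal sketch of that alternative (method names follow the documented atheris API; this helper is not part of this change):

    import atheris

    def derive_hyperparams(input_bytes):
        """Derive optimizer hyperparameters from raw fuzz bytes, accepting any input."""
        fdp = atheris.FuzzedDataProvider(input_bytes)
        lr = fdp.ConsumeFloatInRange(0.0, 1.0)
        betas = (fdp.ConsumeFloatInRange(0.0, 1.0), fdp.ConsumeFloatInRange(0.0, 1.0))
        eps = fdp.ConsumeFloatInRange(0.0, 1.0)
        weight_decay = fdp.ConsumeFloatInRange(0.0, 1.0)
        number_of_params = fdp.ConsumeIntInRange(0, 5)
        use_half = fdp.ConsumeBool()
        return lr, betas, eps, weight_decay, number_of_params, use_half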
diff --git a/tests/fuzz/NpuFusedAdamW_fuzz.py b/tests/fuzz/NpuFusedAdamW_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f5428e8ce3179d83f7be5feb389979c1514b76e
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdamW_fuzz.py
@@ -0,0 +1,51 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedAdamW
+
+@atheris.instrument_func
+def Test_NpuFusedAdamW(input_bytes):
+
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    betas_0 = random.random() * check_list_input[2]
+    betas_1 = random.random() * check_list_input[3]
+    betas = (betas_0, betas_1)
+    eps = random.random() * check_list_input[4]
+    weight_decay = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    amsgrad = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedAdamW(params, lr, betas, eps, weight_decay, amsgrad)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedAdamW ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedAdamW)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedAdamW_samples/samples1.txt b/tests/fuzz/NpuFusedAdamW_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..792c2c61d3c2c3f859dd99c3119d6c286ce4a213
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdamW_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedAdam_fuzz.py b/tests/fuzz/NpuFusedAdam_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..a23067d6fbe52d994dd23d5b49f5051ac5a8ce48
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdam_fuzz.py
@@ -0,0 +1,51 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedAdam
+
+@atheris.instrument_func
+def Test_NpuFusedAdam(input_bytes):
+
+    input_list = [True, False]
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 7:
+        return False
+    try:
+        for i in range(7):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    betas_0 = random.random() * check_list_input[2]
+    betas_1 = random.random() * check_list_input[3]
+    betas = (betas_0, betas_1)
+    eps = random.random() * check_list_input[4]
+    weight_decay = random.random() * check_list_input[5]
+    number_of_params = int(check_list_input[6])
+    amsgrad = input_list[random.randint(0, 1)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedAdam(params, lr, betas, eps, weight_decay, amsgrad)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedAdam ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedAdam)
+    atheris.Fuzz()
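Note: the random parameter-building block is duplicated verbatim across all seven harnesses. If it is factored out later, a shared helper along these lines would do (hypothetical helper, not part of this diff):

    import random
    import torch
    import torch_npu

    def build_random_params(number_of_params, max_dim):
        """Build random float/half NPU tensors; all but the last get a synthetic grad."""
        params = []
        for _ in range(number_of_params):
            size = [random.randint(0, max_dim) for _ in range(random.randint(0, 5))]
            dtype = torch.float if random.random() < 0.5 else torch.half
            params.append(torch.randn(size).to(dtype).npu())
        for p in params[:-1]:
            p.requires_grad = True
            p.grad = p.clone().detach() / 100
        return params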
diff --git a/tests/fuzz/NpuFusedAdam_samples/samples1.txt b/tests/fuzz/NpuFusedAdam_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..792c2c61d3c2c3f859dd99c3119d6c286ce4a213
--- /dev/null
+++ b/tests/fuzz/NpuFusedAdam_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,0,5
\ No newline at end of file
diff --git a/tests/fuzz/NpuFusedBertAdam_fuzz.py b/tests/fuzz/NpuFusedBertAdam_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b294fe9245ded5e21fc9ddb9e75358084db357d
--- /dev/null
+++ b/tests/fuzz/NpuFusedBertAdam_fuzz.py
@@ -0,0 +1,54 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import random
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedBertAdam
+
+@atheris.instrument_func
+def Test_NpuFusedBertAdam(input_bytes):
+
+    input_list = [True, False]
+    Schedules = ['warmup_cosine', 'warmup_constant', 'warmup_linear', 'warmup_poly']
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 10:
+        return False
+    try:
+        for i in range(10):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    lr = random.random() * check_list_input[1]
+    warmup = random.random() * check_list_input[2]
+    t_total = random.random() * check_list_input[3]
+    b1 = random.random() * check_list_input[4]
+    b2 = random.random() * check_list_input[5]
+    eps = check_list_input[6]  # named eps so the except clause below cannot shadow it
+    weight_decay = check_list_input[7]
+    max_grad_norm = check_list_input[8]
+    number_of_params = int(check_list_input[9])
+    schedule = Schedules[random.randint(0, 3)]
+
+    try:
+        params = []
+        for i in range(number_of_params):
+            input_tensor_size = [random.randint(0, int(check_list_input[0])) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        NpuFusedBertAdam(params, lr, warmup, t_total, schedule, b1, b2, eps, weight_decay, max_grad_norm)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedBertAdam ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedBertAdam)
+    atheris.Fuzz()
diff --git a/tests/fuzz/NpuFusedBertAdam_samples/samples1.txt b/tests/fuzz/NpuFusedBertAdam_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca9c2ff75e7555cdbd0c9704186bf921ebede914
--- /dev/null
+++ b/tests/fuzz/NpuFusedBertAdam_samples/samples1.txt
@@ -0,0 +1 @@
+5,1,1,1,1,1,1,1,1,5
\ No newline at end of file
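Note: a crashing input reported by libFuzzer can be replayed by calling the target function directly, bypassing atheris; a usage sketch with the BertAdam seed above (assumes the working directory is tests/fuzz so the module is importable):

    # Replay one corpus entry through the fuzz target, bypassing libFuzzer.
    from NpuFusedBertAdam_fuzz import Test_NpuFusedBertAdam

    Test_NpuFusedBertAdam(b'5,1,1,1,1,1,1,1,1,5')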
diff --git a/tests/fuzz/clip_grad_norm_fused_fuzz.py b/tests/fuzz/clip_grad_norm_fused_fuzz.py
new file mode 100644
index 0000000000000000000000000000000000000000..049372e59ef321fcc002371734ee0ddbeb6fcfc0
--- /dev/null
+++ b/tests/fuzz/clip_grad_norm_fused_fuzz.py
@@ -0,0 +1,66 @@
+import sys
+import atheris
+import torch
+import torch_npu
+import torch.nn as nn
+import random
+from apex import amp
+with atheris.instrument_imports():
+    from apex.optimizers import NpuFusedSGD
+
+class Net(nn.Module):
+    def __init__(self):
+        super(Net, self).__init__()
+        self.linear1 = nn.Linear(120, 84)
+        self.linear2 = nn.Linear(84, 48)
+    def forward(self, x):
+        x = self.linear1(x)
+        x = self.linear2(x)
+        return x
+
+@atheris.instrument_func
+def Test_NpuFusedSGD(input_bytes):
+
+    check_list_input = input_bytes.decode('utf-8', 'ignore').strip().split(',')
+    if not check_list_input or len(check_list_input) != 2:
+        return False
+    try:
+        for i in range(2):
+            check_list_input[i] = float(check_list_input[i])
+    except Exception:
+        return False
+
+    max_norm = random.random() * check_list_input[0]
+    norm_type = random.random() * check_list_input[1]
+
+    try:
+        ip = torch.randn([1, 120]).npu().abs()
+        ip.requires_grad = True
+        input_list = [True, False]
+        params = []
+        for i in range(5):
+            input_tensor_size = [random.randint(0, 5) for _ in range(random.randint(0, 5))]
+            if input_list[random.randint(0, 1)]:
+                input_tensor = torch.randn(input_tensor_size).float().npu()
+            else:
+                input_tensor = torch.randn(input_tensor_size).half().npu()
+            params.append(input_tensor)
+        for i, p in enumerate(params):
+            if i < len(params) - 1:
+                p.requires_grad = True
+                p.grad = p.clone().detach() / 100
+        optimizer = NpuFusedSGD(params, lr=0.1)
+        model = Net().npu()
+        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', loss_scale=128, combine_grad=True)
+        loss = model(ip).sum()
+        loss.backward()
+        print(loss)
+        optimizer.step()
+        optimizer.clip_optimizer_grad_norm_fused(max_norm=max_norm, norm_type=norm_type)
+    except Exception as e:
+        print(e)
+        return True
+    print('Test_NpuFusedSGD ran without exception')
+if __name__ == "__main__":
+    atheris.Setup(sys.argv, Test_NpuFusedSGD)
+    atheris.Fuzz()
diff --git a/tests/fuzz/clip_grad_norm_samples/samples1.txt b/tests/fuzz/clip_grad_norm_samples/samples1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8cb45cdc3fc44d8d80c3e92448e1938a169cf579
--- /dev/null
+++ b/tests/fuzz/clip_grad_norm_samples/samples1.txt
@@ -0,0 +1 @@
+0.1,2
\ No newline at end of file
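Note: every harness follows the same entry-point convention, e.g. python3 tests/fuzz/Lamb_fuzz.py tests/fuzz/Lamb_samples/; atheris forwards extra argv entries to libFuzzer, so the standard -runs flag can bound a CI smoke run instead of fuzzing indefinitely. A hypothetical driver over the files added in this change (not part of the diff):

    import subprocess
    import sys

    HARNESSES = [
        ('Lamb_fuzz.py', 'Lamb_samples'),
        ('NpuFusedAdadelta_fuzz.py', 'NpuFusedAdadelta_samples'),
        ('NpuFusedAdamP_fuzz.py', 'NpuFusedAdamP_samples'),
        ('NpuFusedAdamW_fuzz.py', 'NpuFusedAdamW_samples'),
        ('NpuFusedAdam_fuzz.py', 'NpuFusedAdam_samples'),
        ('NpuFusedBertAdam_fuzz.py', 'NpuFusedBertAdam_samples'),
        ('clip_grad_norm_fused_fuzz.py', 'clip_grad_norm_samples'),
    ]

    for script, corpus in HARNESSES:
        # -runs=1000 bounds each harness to 1000 executions.
        result = subprocess.run(
            [sys.executable, f'tests/fuzz/{script}', f'tests/fuzz/{corpus}', '-runs=1000'],
            check=False)
        if result.returncode != 0:
            sys.exit(result.returncode)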