diff --git a/patch/pytorch1.5.0_npu.patch b/patch/pytorch1.5.0_npu.patch index ee3eb9f10681396c3f54a9b3fe109b4a2179dba2..4db1af623eb49e0ffac7256df7e9b5da0784bd54 100644 --- a/patch/pytorch1.5.0_npu.patch +++ b/patch/pytorch1.5.0_npu.patch @@ -1,6 +1,6 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/CMakeLists.txt pytorch-develop-150/aten/CMakeLists.txt --- pytorch-v1.5.0/aten/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/CMakeLists.txt 2022-06-21 12:00:30.775079637 +0800 ++++ pytorch-develop-150/aten/CMakeLists.txt 2022-06-23 23:00:37.341734508 +0800 @@ -22,8 +22,10 @@ set(ATen_CPU_INCLUDE) set(ATen_THIRD_PARTY_INCLUDE) @@ -51,7 +51,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= set(ATen_CPU_DEPENDENCY_LIBS ${ATen_CPU_DEPENDENCY_LIBS} PARENT_SCOPE) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/CMakeLists.txt pytorch-develop-150/aten/src/ATen/CMakeLists.txt --- pytorch-v1.5.0/aten/src/ATen/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/CMakeLists.txt 2022-06-21 12:00:30.775079637 +0800 ++++ pytorch-develop-150/aten/src/ATen/CMakeLists.txt 2022-06-23 23:00:37.341734508 +0800 @@ -67,6 +67,9 @@ FILE(GLOB native_quantized_h "native/quantized/*.h" "native/quantized/cpu/*.h") FILE(GLOB native_cpu_h "native/cpu/*.h") @@ -129,7 +129,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= set(ATen_QUANTIZED_SRCS ${ATen_QUANTIZED_SRCS} PARENT_SCOPE) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/core/dispatch/DispatchTable.h pytorch-develop-150/aten/src/ATen/core/dispatch/DispatchTable.h --- pytorch-v1.5.0/aten/src/ATen/core/dispatch/DispatchTable.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/core/dispatch/DispatchTable.h 2022-06-21 12:00:30.787079637 +0800 ++++ pytorch-develop-150/aten/src/ATen/core/dispatch/DispatchTable.h 2022-06-23 23:00:37.357734507 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -170,7 +170,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/function_wrapper.py pytorch-develop-150/aten/src/ATen/function_wrapper.py --- pytorch-v1.5.0/aten/src/ATen/function_wrapper.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/function_wrapper.py 2022-06-21 12:00:30.795079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/function_wrapper.py 2022-06-23 23:00:37.365734507 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
@@ -354,7 +354,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= for option in declaration['options']: diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/gen.py pytorch-develop-150/aten/src/ATen/gen.py --- pytorch-v1.5.0/aten/src/ATen/gen.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/gen.py 2022-06-21 12:00:30.795079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/gen.py 2022-06-23 23:00:37.365734507 +0800 @@ -1,3 +1,18 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -512,7 +512,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= generate_outputs() diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/BatchLinearAlgebra.cpp pytorch-develop-150/aten/src/ATen/native/BatchLinearAlgebra.cpp --- pytorch-v1.5.0/aten/src/ATen/native/BatchLinearAlgebra.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/BatchLinearAlgebra.cpp 2022-06-21 12:00:30.799079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/BatchLinearAlgebra.cpp 2022-06-23 23:00:37.369734506 +0800 @@ -680,7 +680,7 @@ std::tuple triangular_solve_out(Tensor& result, Tensor& clone_A, const Tensor& self, const Tensor& A, bool upper, bool transpose, bool unitriangular) { @@ -524,7 +524,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return std::tuple(result, clone_A); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/cpu/Activation.cpp pytorch-develop-150/aten/src/ATen/native/cpu/Activation.cpp --- pytorch-v1.5.0/aten/src/ATen/native/cpu/Activation.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/cpu/Activation.cpp 2022-06-21 12:00:30.807079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/cpu/Activation.cpp 2022-06-23 23:00:37.385734506 +0800 @@ -339,20 +339,20 @@ void hardsigmoid_backward_kernel(TensorIterator& iter) { @@ -552,7 +552,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= }); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/Memory.cpp pytorch-develop-150/aten/src/ATen/native/Memory.cpp --- pytorch-v1.5.0/aten/src/ATen/native/Memory.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/Memory.cpp 2022-06-21 12:00:30.803079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/Memory.cpp 2022-06-23 23:00:37.377734506 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -614,7 +614,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= detail::computeStorageSize(self.sizes(), self.strides()), diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/native_functions.yaml pytorch-develop-150/aten/src/ATen/native/native_functions.yaml --- pytorch-v1.5.0/aten/src/ATen/native/native_functions.yaml 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/native_functions.yaml 2022-06-21 12:00:30.823079635 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/native_functions.yaml 2022-06-23 23:00:37.409734505 +0800 @@ -1,6 +1,5 @@ # See README.md in this directory for more guidance @@ -2691,36 +2691,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Only exposed from C++ -- in Python, # we expose it as an attribute `T`, not a function. -@@ -2236,71 +2849,111 @@ - - func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - - - func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -+ npu_dispatch: -+ NPU: randn_npu - - - func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor -+ npu_dispatch: -+ NPU: randn_npu - - - func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - device_guard: False -+ npu_dispatch: -+ NPU: randn_npu - - - func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - device_guard: False -+ npu_dispatch: -+ NPU: randn_npu - - - func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) -+ npu_dispatch: -+ NPU: randn_out_npu - - - func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) -+ npu_dispatch: -+ NPU: randn_out_npu - - - func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor +@@ -2253,54 +2866,82 @@ supports_named_tensor: True - func: randperm(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor @@ -2804,7 +2775,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: repeat_interleave.Tensor(Tensor repeats) -> Tensor use_c10_dispatcher: full -@@ -2316,6 +2969,8 @@ +@@ -2316,6 +2957,8 @@ - func: repeat_interleave.self_int(Tensor self, int repeats, int? dim=None) -> Tensor use_c10_dispatcher: full variants: function, method @@ -2813,7 +2784,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: reshape(Tensor self, int[] shape) -> Tensor variants: function, method -@@ -2337,16 +2992,22 @@ +@@ -2337,16 +2980,22 @@ use_c10_dispatcher: full supports_named_tensor: True variants: function, method @@ -2836,7 +2807,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor -@@ -2360,6 +3021,8 @@ +@@ -2360,6 +3009,8 @@ CUDA: relu MkldnnCPU: mkldnn_relu QuantizedCPU: quantized_relu @@ -2845,7 +2816,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: relu_(Tensor(a!) self) -> Tensor(a!) -@@ -2370,6 +3033,8 @@ +@@ -2370,6 +3021,8 @@ CUDA: relu_ MkldnnCPU: mkldnn_relu_ QuantizedCPU: quantized_relu_ @@ -2854,7 +2825,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: prelu(Tensor self, Tensor weight) -> Tensor use_c10_dispatcher: full -@@ -2377,12 +3042,16 @@ +@@ -2377,12 +3030,16 @@ dispatch: CPU: prelu_cpu CUDA: prelu_cuda @@ -2871,7 +2842,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gelu(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -2390,6 +3059,8 @@ +@@ -2390,6 +3047,8 @@ dispatch: CPU: gelu_cpu CUDA: gelu_cuda @@ -2880,7 +2851,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gelu_backward(Tensor grad, Tensor self) -> Tensor use_c10_dispatcher: full -@@ -2397,29 +3068,41 @@ +@@ -2397,29 +3056,41 @@ dispatch: CPU: gelu_backward_cpu CUDA: gelu_backward_cuda @@ -2922,7 +2893,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) variants: function, method -@@ -2433,14 +3116,21 @@ +@@ -2433,14 +3104,21 @@ - func: selu(Tensor self) -> Tensor use_c10_dispatcher: full @@ -2945,7 +2916,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sigmoid(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -2451,6 +3141,8 @@ +@@ -2451,6 +3129,8 @@ CUDA: sigmoid QuantizedCPU: quantized_sigmoid MkldnnCPU: mkldnn_sigmoid @@ -2954,7 +2925,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sigmoid_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -2459,36 +3151,52 @@ +@@ -2459,36 +3139,52 @@ CPU: sigmoid_ CUDA: sigmoid_ MkldnnCPU: mkldnn_sigmoid_ @@ -3007,7 +2978,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Returns a copy of this `Variable` that is detached from its autograd graph. # This method is OK to call if the `Variable` is a view. -@@ -2533,6 +3241,8 @@ +@@ -2533,6 +3229,8 @@ - func: slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) variants: function, method @@ -3016,7 +2987,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: smm(Tensor self, Tensor mat2) -> Tensor use_c10_dispatcher: full -@@ -2542,10 +3252,14 @@ +@@ -2542,10 +3240,14 @@ - func: softmax.int(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor variants: function, method supports_named_tensor: True @@ -3031,7 +3002,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _softmax(Tensor self, int dim, bool half_to_float) -> Tensor use_c10_dispatcher: full -@@ -2553,12 +3267,16 @@ +@@ -2553,12 +3255,16 @@ CPU: softmax_cpu CUDA: softmax_cuda MkldnnCPU: mkldnn_softmax @@ -3048,7 +3019,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[] variants: function, method -@@ -2609,8 +3327,12 @@ +@@ -2609,8 +3315,12 @@ SparseCUDA: _sspaddmm_out_cuda - func: stack(Tensor[] tensors, int dim=0) -> Tensor @@ -3061,7 +3032,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # The signature is designed to be consistent with librosa except that it is # missing the `pad_mode` and `center` arguments, which are taken care of at -@@ -2633,20 +3355,30 @@ +@@ -2633,20 +3343,30 @@ - func: sum(Tensor self, *, ScalarType? dtype=None) -> Tensor variants: function, method supports_named_tensor: True @@ -3092,7 +3063,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sum_to_size(Tensor self, int[] size) -> Tensor variants: method -@@ -2656,13 +3388,19 @@ +@@ -2656,13 +3376,19 @@ use_c10_dispatcher: full supports_named_tensor: True variants: function, method @@ -3112,7 +3083,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: square(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -2677,51 +3415,81 @@ +@@ -2677,51 +3403,81 @@ use_c10_dispatcher: full variants: function, method supports_named_tensor: True @@ -3195,7 +3166,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: t(Tensor(a) self) -> Tensor(a) device_guard: False -@@ -2736,6 +3504,8 @@ +@@ -2736,6 +3492,8 @@ use_c10_dispatcher: full supports_named_tensor: True variants: function, method @@ -3204,7 +3175,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: tan_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -2743,12 +3513,16 @@ +@@ -2743,12 +3501,16 @@ dispatch: CPU: _tan__cpu CUDA: _tan__cuda @@ -3221,7 +3192,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: tanh(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -2758,6 +3532,8 @@ +@@ -2758,6 +3520,8 @@ CPU: tanh CUDA: tanh QuantizedCPU: quantized_tanh @@ -3230,7 +3201,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: tanh_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -2765,12 +3541,16 @@ +@@ -2765,12 +3529,16 @@ dispatch: CPU: _tanh__cpu CUDA: _tanh__cuda @@ -3247,7 +3218,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor variants: function -@@ -2783,6 +3563,8 @@ +@@ -2783,6 +3551,8 @@ dispatch: CPU: threshold CUDA: threshold_cuda @@ -3256,7 +3227,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!) 
variants: function -@@ -2790,12 +3572,16 @@ +@@ -2790,12 +3560,16 @@ dispatch: CPU: threshold_ CUDA: threshold__cuda @@ -3273,7 +3244,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor use_c10_dispatcher: full -@@ -2803,6 +3589,8 @@ +@@ -2803,6 +3577,8 @@ dispatch: CPU: threshold_backward CUDA: threshold_backward_cuda @@ -3282,7 +3253,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a) variants: function, method -@@ -2835,18 +3623,24 @@ +@@ -2835,18 +3611,24 @@ use_c10_dispatcher: full python_module: nn variants: function @@ -3307,7 +3278,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # default int[] value [0,1] should not add space after comma, since native_parse.py uses ', ' to split args -@@ -2872,6 +3666,8 @@ +@@ -2872,6 +3654,8 @@ CUDA: true_divide SparseCPU: true_divide_sparse SparseCUDA: true_divide_sparse @@ -3316,7 +3287,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) -@@ -2881,6 +3677,8 @@ +@@ -2881,6 +3665,8 @@ CUDA: true_divide_ SparseCPU: true_divide_sparse_ SparseCUDA: true_divide_sparse_ @@ -3325,7 +3296,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) -@@ -2889,31 +3687,43 @@ +@@ -2889,31 +3675,43 @@ CUDA: true_divide_out SparseCPU: true_divide_out_sparse_zerodim SparseCUDA: true_divide_out_sparse_zerodim @@ -3369,7 +3340,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: type_as(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -2956,6 +3766,8 @@ +@@ -2956,6 +3754,8 @@ dispatch: CPU: _unique2_cpu CUDA: _unique2_cuda @@ -3378,7 +3349,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _unsafe_view(Tensor self, int[] size) -> Tensor -@@ -2971,32 +3783,48 @@ +@@ -2971,32 +3771,48 @@ use_c10_dispatcher: full variants: function, method supports_named_tensor: True @@ -3427,7 +3398,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: view_as(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -3009,13 +3837,19 @@ +@@ -3009,13 +3825,19 @@ - func: where.self(Tensor condition, Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full variants: function, method @@ -3447,7 +3418,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor variants: function -@@ -3041,13 +3875,21 @@ +@@ -3041,13 +3863,21 @@ - func: zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor device_guard: False @@ -3469,7 +3440,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _standard_gamma_grad(Tensor self, Tensor output) -> Tensor use_c10_dispatcher: full -@@ -3100,25 +3942,37 @@ +@@ -3100,25 +3930,37 @@ - func: _sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor dispatch: @@ -3509,7 +3480,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor variants: function, method -@@ -3162,12 +4016,16 @@ +@@ -3162,12 +4004,16 @@ SparseCUDA: clone_sparse MkldnnCPU: mkldnn_clone QuantizedCPU: quantized_clone @@ -3526,7 +3497,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -3176,6 +4034,8 @@ +@@ -3176,6 +4022,8 @@ CUDA: pow_out SparseCPU: pow_out_sparse_scalar SparseCUDA: pow_out_sparse_scalar @@ -3535,7 +3506,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor use_c10_dispatcher: full -@@ -3186,6 +4046,8 @@ +@@ -3186,6 +4034,8 @@ CUDA: pow SparseCPU: pow_sparse_scalar SparseCUDA: pow_sparse_scalar @@ -3544,7 +3515,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: zero_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -3196,6 +4058,14 @@ +@@ -3196,6 +4046,14 @@ SparseCPU: zero_sparse_ SparseCUDA: zero_sparse_ MkldnnCPU: mkldnn_zero_ @@ -3559,7 +3530,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) dispatch: -@@ -3204,6 +4074,8 @@ +@@ -3204,6 +4062,8 @@ SparseCPU: sub_out_sparse SparseCUDA: sub_out_sparse supports_named_tensor: True @@ -3568,7 +3539,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor use_c10_dispatcher: full -@@ -3213,6 +4085,8 @@ +@@ -3213,6 +4073,8 @@ CUDA: sub SparseCPU: sub_sparse SparseCUDA: sub_sparse @@ -3577,7 +3548,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) -@@ -3222,6 +4096,8 @@ +@@ -3222,6 +4084,8 @@ CUDA: sub_ SparseCPU: sub_sparse_ SparseCUDA: sub_sparse_ @@ -3586,7 +3557,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True # For C++ only, until we have conversion from C++ numbers to Tensor -@@ -3229,21 +4105,29 @@ +@@ -3229,21 +4093,29 @@ use_c10_dispatcher: full variants: function, method supports_named_tensor: True @@ -3616,7 +3587,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Functionally the same as addmm, but we give it a different derivative formula # that doesn't propagate gradients to non-present entries on sparse. 
-@@ -3257,6 +4141,8 @@ +@@ -3257,6 +4129,8 @@ CUDA: legacy::cuda::_th_addmm_out SparseCPU: addmm_out_sparse_dense_cpu SparseCUDA: addmm_out_sparse_dense_cuda @@ -3625,7 +3596,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor -@@ -3267,6 +4153,8 @@ +@@ -3267,6 +4141,8 @@ CUDA: legacy::cuda::_th_addmm SparseCPU: addmm_sparse_dense_cpu SparseCUDA: addmm_sparse_dense_cuda @@ -3634,7 +3605,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) -@@ -3278,9 +4166,10 @@ +@@ -3278,9 +4154,10 @@ # broadcasting SparseCPU: s_addmm_sparse_dense_cpu_ SparseCUDA: s_addmm_sparse_dense_cuda_ @@ -3646,7 +3617,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # NOTE [ Sparse: autograd and API ] # # -@@ -3396,7 +4285,6 @@ +@@ -3396,7 +4273,6 @@ # shared. In other words, their outputs are non-differentiable views of the # sparse tensor. @@ -3654,7 +3625,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # FIXME: would be nicer if TensorOptions was optional based; not adding default arguments for options given # the default would never make sense. - func: sparse_coo_tensor.size(int[] size, *, ScalarType dtype, Layout layout, Device device, bool pin_memory=False) -> Tensor -@@ -3433,7 +4321,6 @@ +@@ -3433,7 +4309,6 @@ SparseCUDA: sparse_resize_and_clear_ requires_tensor: True @@ -3662,7 +3633,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sparse_mask(Tensor self, Tensor mask) -> Tensor use_c10_dispatcher: full variants: method -@@ -3442,7 +4329,6 @@ +@@ -3442,7 +4317,6 @@ SparseCUDA: sparse_mask_cuda requires_tensor: True @@ -3670,7 +3641,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: to_dense(Tensor self) -> Tensor use_c10_dispatcher: full variants: method -@@ -3474,7 +4360,6 @@ +@@ -3474,7 +4348,6 @@ requires_tensor: True device_guard: False @@ -3678,7 +3649,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: dense_dim(Tensor self) -> int use_c10_dispatcher: full variants: method -@@ -3494,7 +4379,6 @@ +@@ -3494,7 +4367,6 @@ requires_tensor: True device_guard: False @@ -3686,7 +3657,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _nnz(Tensor self) -> int use_c10_dispatcher: full variants: method -@@ -3504,7 +4388,6 @@ +@@ -3504,7 +4376,6 @@ requires_tensor: True device_guard: False @@ -3694,7 +3665,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: coalesce(Tensor self) -> Tensor use_c10_dispatcher: full variants: method -@@ -3513,7 +4396,6 @@ +@@ -3513,7 +4384,6 @@ SparseCUDA: coalesce_sparse_cuda requires_tensor: True @@ -3702,7 +3673,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: is_coalesced(Tensor self) -> bool use_c10_dispatcher: full variants: method -@@ -3524,7 +4406,6 @@ +@@ -3524,7 +4394,6 @@ device_guard: False supports_named_tensor: True @@ -3710,7 +3681,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _indices(Tensor(a) self) -> Tensor(a) variants: method dispatch: -@@ -3568,7 +4449,6 @@ +@@ 
-3568,7 +4437,6 @@ requires_tensor: True device_guard: False @@ -3718,7 +3689,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) dispatch: SparseCPU: hspmm_out_sparse_cpu -@@ -3630,11 +4510,15 @@ +@@ -3630,11 +4498,15 @@ variants: function dispatch: CPU: quantize_per_tensor_cpu @@ -3734,7 +3705,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: dequantize(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -3713,20 +4597,28 @@ +@@ -3713,20 +4585,28 @@ variants: method device_guard: False supports_named_tensor: True @@ -3763,7 +3734,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: meshgrid(Tensor[] tensors) -> Tensor[] -@@ -3765,6 +4657,8 @@ +@@ -3765,6 +4645,8 @@ dispatch: CPU: _local_scalar_dense_cpu CUDA: _local_scalar_dense_cuda @@ -3772,7 +3743,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= variants: function supports_named_tensor: True -@@ -3791,10 +4685,16 @@ +@@ -3791,10 +4673,16 @@ # RNN cells and layers - func: lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) @@ -3789,7 +3760,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) -@@ -3807,7 +4707,9 @@ +@@ -3807,7 +4695,9 @@ - func: rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) - func: lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor) @@ -3800,7 +3771,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor - func: rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor -@@ -3839,10 +4741,14 @@ +@@ -3839,10 +4729,14 @@ # PackedSequence utilities - func: _pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) @@ -3815,7 +3786,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # wrappers for legacy TH methods -@@ -3852,6 +4758,8 @@ +@@ -3852,6 +4746,8 @@ dispatch: CPU: set_ CUDA: set_ @@ -3824,7 +3795,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) variants: method -@@ -3860,6 +4768,8 @@ +@@ -3860,6 +4756,8 @@ CPU: legacy::cpu::_th_set_ CUDA: legacy::cuda::_th_set_ QuantizedCPU: set_storage @@ -3833,7 +3804,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) variants: method -@@ -3867,12 +4777,16 @@ +@@ -3867,12 +4765,16 @@ dispatch: CPU: set_tensor_ CUDA: set_tensor_ @@ -3850,7 +3821,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: set_quantizer_(Tensor(a!) 
self, ConstQuantizerPtr quantizer) -> Tensor(a!) variants: method -@@ -3892,6 +4806,8 @@ +@@ -3892,6 +4794,8 @@ dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda @@ -3859,7 +3830,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor -@@ -3904,6 +4820,8 @@ +@@ -3904,6 +4808,8 @@ dispatch: CPU: masked_fill__cpu CUDA: masked_fill__cuda @@ -3868,7 +3839,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor -@@ -3916,6 +4834,8 @@ +@@ -3916,6 +4822,8 @@ dispatch: CPU: masked_scatter__cpu CUDA: masked_scatter__cuda @@ -3877,7 +3848,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor use_c10_dispatcher: full -@@ -3929,25 +4849,35 @@ +@@ -3929,25 +4837,35 @@ CUDA: view MkldnnCPU: mkldnn_view QuantizedCPU: view @@ -3913,7 +3884,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) variants: method -@@ -3955,11 +4885,15 @@ +@@ -3955,11 +4873,15 @@ dispatch: CPU: legacy::cpu::_th_index_fill_ CUDA: legacy::cuda::_th_index_fill_ @@ -3929,7 +3900,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) variants: method -@@ -3967,11 +4901,15 @@ +@@ -3967,11 +4889,15 @@ CPU: index_fill_ CUDA: index_fill_ supports_named_tensor: True @@ -3945,7 +3916,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) variants: method -@@ -3994,6 +4932,8 @@ +@@ -3994,6 +4920,8 @@ dispatch: CPU: scatter_cpu_ CUDA: legacy::cuda::_th_scatter_ @@ -3954,7 +3925,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor use_c10_dispatcher: full -@@ -4004,6 +4944,8 @@ +@@ -4004,6 +4932,8 @@ dispatch: CPU: scatter_fill_cpu_ CUDA: legacy::cuda::_th_scatter_ @@ -3963,7 +3934,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor use_c10_dispatcher: full -@@ -4020,81 +4962,127 @@ +@@ -4020,81 +4950,127 @@ dispatch: CPU: scatter_add_cpu_ CUDA: legacy::cuda::_th_scatter_add_ @@ -4091,7 +4062,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: __iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) variants: method -@@ -4107,70 +5095,106 @@ +@@ -4107,70 +5083,106 @@ dispatch: CPU: bitwise_or_out CUDA: bitwise_or_out @@ -4198,7 +4169,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: __ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) 
variants: method -@@ -4184,6 +5208,8 @@ +@@ -4184,6 +5196,8 @@ dispatch: CPU: __lshift__ CUDA: __lshift__ @@ -4207,7 +4178,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: __lshift__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -4191,18 +5217,24 @@ +@@ -4191,18 +5205,24 @@ dispatch: CPU: __lshift__ CUDA: __lshift__ @@ -4232,7 +4203,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: __rshift__.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full -@@ -4210,6 +5242,8 @@ +@@ -4210,6 +5230,8 @@ dispatch: CPU: __rshift__ CUDA: __rshift__ @@ -4241,7 +4212,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: __rshift__.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -4217,18 +5251,24 @@ +@@ -4217,18 +5239,24 @@ dispatch: CPU: __rshift__ CUDA: __rshift__ @@ -4266,7 +4237,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lgamma_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -4240,18 +5280,24 @@ +@@ -4240,18 +5268,24 @@ - func: atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) supports_named_tensor: True variants: method @@ -4291,7 +4262,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: digamma_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -4266,6 +5312,8 @@ +@@ -4266,6 +5300,8 @@ dispatch: CPU: legacy::cpu::_th_renorm_ CUDA: legacy::cuda::_th_renorm_ @@ -4300,7 +4271,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) supports_named_tensor: True -@@ -4273,6 +5321,8 @@ +@@ -4273,6 +5309,8 @@ dispatch: CPU: pow_ CUDA: pow_ @@ -4309,7 +4280,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) supports_named_tensor: True -@@ -4280,53 +5330,71 @@ +@@ -4280,53 +5318,71 @@ dispatch: CPU: pow_ CUDA: pow_ @@ -4381,7 +4352,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor use_c10_dispatcher: full -@@ -4334,28 +5402,40 @@ +@@ -4334,28 +5390,40 @@ dispatch: CPU: legacy::cpu::_th_addbmm CUDA: legacy::cuda::_th_addbmm @@ -4422,7 +4393,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) 
-@@ -4380,6 +5460,8 @@ +@@ -4380,6 +5448,8 @@ dispatch: CPU: legacy::cpu::_th_diag_out CUDA: legacy::cuda::_th_diag_out @@ -4431,7 +4402,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: diag(Tensor self, int diagonal=0) -> Tensor use_c10_dispatcher: full -@@ -4387,40 +5469,58 @@ +@@ -4387,40 +5457,58 @@ dispatch: CPU: legacy::cpu::_th_diag CUDA: legacy::cuda::_th_diag @@ -4490,7 +4461,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: trace(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -4435,6 +5535,8 @@ +@@ -4435,6 +5523,8 @@ CPU: ne_out CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu @@ -4499,7 +4470,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ne.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4444,6 +5546,8 @@ +@@ -4444,6 +5534,8 @@ CPU: ne CUDA: ne QuantizedCPU: ne_quantized_cpu @@ -4508,7 +4479,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4451,6 +5555,8 @@ +@@ -4451,6 +5543,8 @@ CPU: ne_out CUDA: ne_out QuantizedCPU: ne_out_quantized_cpu @@ -4517,7 +4488,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ne.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4460,6 +5566,8 @@ +@@ -4460,6 +5554,8 @@ CPU: ne CUDA: ne QuantizedCPU: ne_quantized_cpu @@ -4526,7 +4497,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4467,6 +5575,8 @@ +@@ -4467,6 +5563,8 @@ CPU: eq_out CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu @@ -4535,7 +4506,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: eq.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4476,6 +5586,8 @@ +@@ -4476,6 +5574,8 @@ CPU: eq CUDA: eq QuantizedCPU: eq_quantized_cpu @@ -4544,7 +4515,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4483,6 +5595,8 @@ +@@ -4483,6 +5583,8 @@ CPU: eq_out CUDA: eq_out QuantizedCPU: eq_out_quantized_cpu @@ -4553,7 +4524,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: eq.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4492,6 +5606,8 @@ +@@ -4492,6 +5594,8 @@ CPU: eq CUDA: eq QuantizedCPU: eq_quantized_cpu @@ -4562,7 +4533,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4499,6 +5615,8 @@ +@@ -4499,6 +5603,8 @@ CPU: ge_out CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu @@ -4571,7 +4542,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ge.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4508,6 +5626,8 @@ +@@ -4508,6 +5614,8 @@ CPU: ge CUDA: ge QuantizedCPU: ge_quantized_cpu @@ -4580,7 +4551,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True -@@ -4515,6 +5635,8 @@ +@@ -4515,6 +5623,8 @@ CPU: ge_out CUDA: ge_out QuantizedCPU: ge_out_quantized_cpu @@ -4589,7 +4560,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: ge.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4524,6 +5646,8 @@ +@@ -4524,6 +5634,8 @@ CPU: ge CUDA: ge QuantizedCPU: ge_quantized_cpu @@ -4598,7 +4569,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4531,6 +5655,8 @@ +@@ -4531,6 +5643,8 @@ CPU: le_out CUDA: le_out QuantizedCPU: le_out_quantized_cpu @@ -4607,7 +4578,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: le.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4540,6 +5666,8 @@ +@@ -4540,6 +5654,8 @@ CPU: le CUDA: le QuantizedCPU: le_quantized_cpu @@ -4616,7 +4587,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4547,6 +5675,8 @@ +@@ -4547,6 +5663,8 @@ CPU: le_out CUDA: le_out QuantizedCPU: le_out_quantized_cpu @@ -4625,7 +4596,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: le.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4556,6 +5686,8 @@ +@@ -4556,6 +5674,8 @@ CPU: le CUDA: le QuantizedCPU: le_quantized_cpu @@ -4634,7 +4605,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4563,6 +5695,8 @@ +@@ -4563,6 +5683,8 @@ CPU: gt_out CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu @@ -4643,7 +4614,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gt.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4572,6 +5706,8 @@ +@@ -4572,6 +5694,8 @@ CPU: gt CUDA: gt QuantizedCPU: gt_quantized_cpu @@ -4652,7 +4623,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4579,6 +5715,8 @@ +@@ -4579,6 +5703,8 @@ CPU: gt_out CUDA: gt_out QuantizedCPU: gt_out_quantized_cpu @@ -4661,7 +4632,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gt.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4588,6 +5726,8 @@ +@@ -4588,6 +5714,8 @@ CPU: gt CUDA: gt QuantizedCPU: gt_quantized_cpu @@ -4670,7 +4641,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) 
supports_named_tensor: True -@@ -4595,6 +5735,8 @@ +@@ -4595,6 +5723,8 @@ CPU: lt_out CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu @@ -4679,7 +4650,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lt.Scalar(Tensor self, Scalar other) -> Tensor supports_named_tensor: True -@@ -4604,6 +5746,8 @@ +@@ -4604,6 +5734,8 @@ CPU: lt CUDA: lt QuantizedCPU: lt_quantized_cpu @@ -4688,7 +4659,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True -@@ -4611,6 +5755,8 @@ +@@ -4611,6 +5743,8 @@ CPU: lt_out CUDA: lt_out QuantizedCPU: lt_out_quantized_cpu @@ -4697,7 +4668,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lt.Tensor(Tensor self, Tensor other) -> Tensor supports_named_tensor: True -@@ -4620,11 +5766,16 @@ +@@ -4620,11 +5754,16 @@ CPU: lt CUDA: lt QuantizedCPU: lt_quantized_cpu @@ -4714,7 +4685,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: take(Tensor self, Tensor index) -> Tensor use_c10_dispatcher: full -@@ -4632,11 +5783,16 @@ +@@ -4632,11 +5771,16 @@ dispatch: CPU: legacy::cpu::_th_take CUDA: legacy::cuda::_th_take @@ -4731,7 +4702,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: index_select(Tensor self, int dim, Tensor index) -> Tensor use_c10_dispatcher: full -@@ -4646,17 +5802,25 @@ +@@ -4646,17 +5790,25 @@ CUDA: legacy::cuda::_th_index_select SparseCPU: index_select_sparse SparseCUDA: index_select_sparse @@ -4757,7 +4728,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: masked_select(Tensor self, Tensor mask) -> Tensor use_c10_dispatcher: full -@@ -4665,11 +5829,15 @@ +@@ -4665,11 +5817,15 @@ CPU: masked_select_cpu CUDA: masked_select_cuda supports_named_tensor: True @@ -4773,7 +4744,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: nonzero(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -4677,6 +5845,8 @@ +@@ -4677,6 +5833,8 @@ dispatch: CPU: legacy::cpu::_th_nonzero CUDA: legacy::cuda::_th_nonzero @@ -4782,7 +4753,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: nonzero_numpy(Tensor self) -> Tensor[] variants: method, function -@@ -4685,6 +5855,8 @@ +@@ -4685,6 +5843,8 @@ dispatch: CPU: gather_out_cpu CUDA: gather_out_cuda @@ -4791,7 +4762,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor use_c10_dispatcher: full -@@ -4692,34 +5864,50 @@ +@@ -4692,34 +5852,50 @@ dispatch: CPU: gather_cpu CUDA: gather_cuda @@ -4842,7 +4813,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) dispatch: -@@ -4742,6 +5930,8 @@ +@@ -4742,6 +5918,8 @@ dispatch: CPU: _triangular_solve_helper_cpu CUDA: _triangular_solve_helper_cuda @@ -4851,7 +4822,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) 
eigenvectors) -@@ -4753,6 +5943,8 @@ +@@ -4753,6 +5931,8 @@ dispatch: CPU: _symeig_helper_cpu CUDA: _symeig_helper_cuda @@ -4860,7 +4831,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) dispatch: -@@ -4775,6 +5967,8 @@ +@@ -4775,6 +5955,8 @@ dispatch: CPU: _svd_helper_cpu CUDA: _svd_helper_cuda @@ -4869,7 +4840,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) -@@ -4826,9 +6020,13 @@ +@@ -4826,9 +6008,13 @@ CUDA: legacy::cuda::_th_potri - func: qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) @@ -4883,7 +4854,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _qr_helper(Tensor self, bool some) -> (Tensor, Tensor) variants: function -@@ -4891,12 +6089,16 @@ +@@ -4891,12 +6077,16 @@ dispatch: CPU: multinomial_out CUDA: multinomial_out @@ -4900,7 +4871,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor) variants: function -@@ -4947,6 +6149,8 @@ +@@ -4947,6 +6137,8 @@ dispatch: CPU: erfinv CUDA: erfinv @@ -4909,7 +4880,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: erfinv_(Tensor(a!) self) -> Tensor(a!) supports_named_tensor: True -@@ -4954,26 +6158,36 @@ +@@ -4954,26 +6146,36 @@ dispatch: CPU: _erfinv__cpu CUDA: _erfinv__cuda @@ -4946,7 +4917,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: dist(Tensor self, Tensor other, Scalar p=2) -> Tensor use_c10_dispatcher: full -@@ -4981,21 +6195,29 @@ +@@ -4981,21 +6183,29 @@ - func: atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) supports_named_tensor: True @@ -4976,7 +4947,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor use_c10_dispatcher: full -@@ -5003,6 +6225,8 @@ +@@ -5003,6 +6213,8 @@ dispatch: CPU: lerp_cpu_scalar CUDA: lerp_cuda_scalar @@ -4985,7 +4956,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor use_c10_dispatcher: full -@@ -5010,6 +6234,8 @@ +@@ -5010,6 +6222,8 @@ dispatch: CPU: lerp_cpu_tensor CUDA: lerp_cuda_tensor @@ -4994,7 +4965,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) 
dispatch: -@@ -5027,6 +6253,8 @@ +@@ -5027,6 +6241,8 @@ dispatch: CPU: fmod_out CUDA: legacy::cuda::_th_fmod_out @@ -5003,7 +4974,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: fmod.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full -@@ -5034,11 +6262,15 @@ +@@ -5034,11 +6250,15 @@ dispatch: CPU: fmod CUDA: legacy::cuda::_th_fmod @@ -5019,7 +4990,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: fmod.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -5046,11 +6278,15 @@ +@@ -5046,11 +6266,15 @@ dispatch: CPU: fmod CUDA: legacy::cuda::_th_fmod @@ -5035,7 +5006,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: remainder.Scalar(Tensor self, Scalar other) -> Tensor use_c10_dispatcher: full -@@ -5058,11 +6294,15 @@ +@@ -5058,11 +6282,15 @@ dispatch: CPU: remainder CUDA: remainder @@ -5051,7 +5022,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: remainder.Tensor(Tensor self, Tensor other) -> Tensor use_c10_dispatcher: full -@@ -5070,12 +6310,18 @@ +@@ -5070,12 +6298,18 @@ dispatch: CPU: remainder CUDA: remainder @@ -5070,7 +5041,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: min(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -5084,13 +6330,19 @@ +@@ -5084,13 +6318,19 @@ CPU: min CUDA: legacy::cuda::_th_min QuantizedCPU: min_quant @@ -5090,7 +5061,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: max(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -5099,6 +6351,8 @@ +@@ -5099,6 +6339,8 @@ CPU: max CUDA: legacy::cuda::_th_max QuantizedCPU: max_quant @@ -5099,7 +5070,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: median(Tensor self) -> Tensor -@@ -5107,12 +6361,16 @@ +@@ -5107,12 +6349,16 @@ dispatch: CPU: median_cpu CUDA: median_cuda @@ -5116,7 +5087,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) variants: method, function -@@ -5120,23 +6378,45 @@ +@@ -5120,23 +6366,45 @@ CPU: legacy::cpu::_th_sort CUDA: legacy::cuda::_th_sort QuantizedCPU: sort_quant @@ -5162,7 +5133,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) variants: method, function -@@ -5144,11 +6424,15 @@ +@@ -5144,11 +6412,15 @@ CPU: topk CUDA: topk QuantizedCPU: quantized_topk_cpu @@ -5178,7 +5149,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: any(Tensor self) -> Tensor use_c10_dispatcher: full -@@ -5159,11 +6443,15 @@ +@@ -5159,11 +6431,15 @@ CUDA: any SparseCPU: any_sparse SparseCUDA: any_sparse @@ -5194,7 +5165,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor use_c10_dispatcher: full -@@ -5171,6 +6459,8 @@ +@@ -5171,6 +6447,8 @@ dispatch: CPU: legacy::cpu::_th_renorm CUDA: legacy::cuda::_th_renorm @@ -5203,7 +5174,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) variants: method -@@ -5178,6 
+6468,8 @@ +@@ -5178,6 +6456,8 @@ dispatch: CPU: unfold CUDA: unfold @@ -5212,7 +5183,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: equal(Tensor self, Tensor other) -> bool use_c10_dispatcher: full -@@ -5186,6 +6478,8 @@ +@@ -5186,6 +6466,8 @@ CPU: legacy::cpu::_th_equal CUDA: legacy::cuda::_th_equal QuantizedCPU: quantized_equal @@ -5221,7 +5192,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) -@@ -5193,6 +6487,8 @@ +@@ -5193,6 +6475,8 @@ dispatch: CPU: pow_out CUDA: pow_out @@ -5230,7 +5201,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor use_c10_dispatcher: full -@@ -5201,12 +6497,16 @@ +@@ -5201,12 +6485,16 @@ dispatch: CPU: pow CUDA: pow @@ -5247,7 +5218,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: pow.Scalar(Scalar self, Tensor exponent) -> Tensor use_c10_dispatcher: full -@@ -5214,6 +6514,8 @@ +@@ -5214,6 +6502,8 @@ dispatch: CPU: pow CUDA: pow @@ -5256,7 +5227,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) variants: method -@@ -5221,40 +6523,58 @@ +@@ -5221,40 +6511,58 @@ CPU: normal_cpu_ CUDA: normal_cuda_ supports_named_tensor: True @@ -5315,7 +5286,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: alias(Tensor(a) self) -> Tensor(a) variants: method, function -@@ -5265,43 +6585,59 @@ +@@ -5265,43 +6573,59 @@ dispatch: CPU: legacy::cpu::_th_addr CUDA: legacy::cuda::_th_addr @@ -5375,7 +5346,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _var(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full -@@ -5309,6 +6645,8 @@ +@@ -5309,6 +6633,8 @@ CPU: legacy::cpu::_th_var CUDA: legacy::cuda::_th_var supports_named_tensor: True @@ -5384,7 +5355,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _std(Tensor self, bool unbiased=True) -> Tensor use_c10_dispatcher: full -@@ -5321,6 +6659,8 @@ +@@ -5321,6 +6647,8 @@ variants: function dispatch: CUDA: _amp_non_finite_check_and_unscale_cuda_ @@ -5393,7 +5364,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _amp_update_scale(Tensor(a!) 
growth_tracker, Tensor current_scale, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor variants: function -@@ -5332,12 +6672,16 @@ +@@ -5332,12 +6660,16 @@ CPU: _cat_cpu CUDA: cat_cuda QuantizedCPU: quantized_cat @@ -5410,7 +5381,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor) dispatch: -@@ -5353,36 +6697,50 @@ +@@ -5353,36 +6685,50 @@ dispatch: CPU: legacy::cpu::_th_max CUDA: legacy::cuda::_th_max @@ -5461,7 +5432,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor use_c10_dispatcher: full -@@ -5390,23 +6748,33 @@ +@@ -5390,23 +6736,33 @@ dispatch: CPU: mse_loss_backward CUDA: mse_loss_backward @@ -5495,7 +5466,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -5434,22 +6802,30 @@ +@@ -5434,22 +6790,30 @@ - func: multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) python_module: nn @@ -5526,7 +5497,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn -@@ -5466,97 +6842,137 @@ +@@ -5466,97 +6830,137 @@ - func: nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) python_module: nn @@ -5664,7 +5635,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -5564,6 +6980,8 @@ +@@ -5564,6 +6968,8 @@ CPU: elu_out CUDA: elu_out QuantizedCPU: quantized_elu_out @@ -5673,7 +5644,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor use_c10_dispatcher: full -@@ -5572,16 +6990,22 @@ +@@ -5572,16 +6978,22 @@ CPU: elu CUDA: elu QuantizedCPU: quantized_elu @@ -5696,7 +5667,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) 
python_module: nn -@@ -5589,12 +7013,16 @@ +@@ -5589,12 +7001,16 @@ CPU: elu_ CUDA: elu_ QuantizedCPU: quantized_elu_ @@ -5713,7 +5684,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: glu(Tensor self, int dim=-1) -> Tensor use_c10_dispatcher: full -@@ -5602,12 +7030,16 @@ +@@ -5602,12 +7018,16 @@ dispatch: CPU: glu CUDA: legacy::cuda::_thnn_glu_forward @@ -5730,7 +5701,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor use_c10_dispatcher: full -@@ -5615,20 +7047,30 @@ +@@ -5615,20 +7035,30 @@ dispatch: CPU: glu_backward CUDA: legacy::cuda::_thnn_glu_backward @@ -5761,7 +5732,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -5636,6 +7078,8 @@ +@@ -5636,6 +7066,8 @@ CPU: hardtanh_out CUDA: hardtanh_out QuantizedCPU: quantized_hardtanh_out @@ -5770,7 +5741,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor use_c10_dispatcher: full -@@ -5644,16 +7088,22 @@ +@@ -5644,16 +7076,22 @@ CPU: hardtanh CUDA: hardtanh QuantizedCPU: quantized_hardtanh @@ -5793,7 +5764,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) python_module: nn -@@ -5661,6 +7111,8 @@ +@@ -5661,6 +7099,8 @@ CPU: hardtanh_ CUDA: hardtanh_ QuantizedCPU: quantized_hardtanh_ @@ -5802,7 +5773,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -5668,6 +7120,8 @@ +@@ -5668,6 +7108,8 @@ CPU: leaky_relu_out CUDA: leaky_relu_out QuantizedCPU: quantized_leaky_relu_out @@ -5811,7 +5782,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor use_c10_dispatcher: full -@@ -5676,10 +7130,14 @@ +@@ -5676,10 +7118,14 @@ CPU: leaky_relu CUDA: leaky_relu QuantizedCPU: quantized_leaky_relu @@ -5826,7 +5797,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) python_module: nn -@@ -5687,31 +7145,44 @@ +@@ -5687,31 +7133,44 @@ CPU: leaky_relu_ CUDA: leaky_relu_ QuantizedCPU: quantized_leaky_relu_ @@ -5871,7 +5842,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor use_c10_dispatcher: full -@@ -5719,62 +7190,88 @@ +@@ -5719,62 +7178,88 @@ dispatch: CPU: log_sigmoid_backward_cpu CUDA: legacy::cuda::_thnn_log_sigmoid_backward @@ -5960,7 +5931,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) 
python_module: nn -@@ -5782,9 +7279,13 @@ +@@ -5782,9 +7267,13 @@ CPU: adaptive_avg_pool2d_out_cpu CUDA: adaptive_avg_pool2d_out_cuda MkldnnCPU: mkldnn_adaptive_avg_pool2d_out @@ -5974,7 +5945,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor dispatch: -@@ -5796,6 +7297,8 @@ +@@ -5796,6 +7285,8 @@ CPU: adaptive_avg_pool2d_cpu CUDA: adaptive_avg_pool2d_cuda QuantizedCPU: quantized_adaptive_avg_pool2d @@ -5983,7 +5954,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: _adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor use_c10_dispatcher: full -@@ -5803,24 +7306,32 @@ +@@ -5803,24 +7294,32 @@ dispatch: CPU: adaptive_avg_pool2d_backward_cpu CUDA: adaptive_avg_pool2d_backward_cuda @@ -6016,7 +5987,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor use_c10_dispatcher: full -@@ -5828,6 +7339,8 @@ +@@ -5828,6 +7327,8 @@ dispatch: CPU: adaptive_avg_pool3d_backward_cpu CUDA: adaptive_avg_pool3d_backward_cuda @@ -6025,7 +5996,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@@ -5835,6 +7348,8 @@ +@@ -5835,6 +7336,8 @@ dispatch: CPU: adaptive_max_pool2d_out_cpu CUDA: adaptive_max_pool2d_out_cuda @@ -6034,7 +6005,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) -@@ -5842,12 +7357,16 @@ +@@ -5842,12 +7345,16 @@ dispatch: CPU: adaptive_max_pool2d_cpu CUDA: adaptive_max_pool2d_cuda @@ -6051,7 +6022,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor use_c10_dispatcher: full -@@ -5855,6 +7374,8 @@ +@@ -5855,6 +7362,8 @@ dispatch: CPU: adaptive_max_pool2d_backward_cpu CUDA: adaptive_max_pool2d_backward_cuda @@ -6060,7 +6031,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@@ -5889,6 +7410,8 @@ +@@ -5889,6 +7398,8 @@ CPU: avg_pool2d_out_cpu CUDA: avg_pool2d_out_cuda MkldnnCPU: mkldnn_avg_pool2d_out @@ -6069,7 +6040,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor python_module: nn -@@ -5897,24 +7420,32 @@ +@@ -5897,24 +7408,32 @@ CUDA: avg_pool2d_cuda MkldnnCPU: mkldnn_avg_pool2d QuantizedCPU: quantized_avg_pool2d @@ -6102,7 +6073,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor python_module: nn -@@ -5922,18 +7453,24 @@ +@@ -5922,18 +7441,24 @@ CPU: avg_pool3d_cpu CUDA: avg_pool3d_cuda QuantizedCPU: quantized_avg_pool3d @@ -6127,7 +6098,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@@ -5993,6 +7530,8 @@ +@@ -5993,6 +7518,8 @@ dispatch: CPU: max_pool2d_with_indices_out_cpu CUDA: max_pool2d_with_indices_out_cuda @@ -6136,7 +6107,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) -@@ -6000,6 +7539,8 @@ +@@ -6000,6 +7527,8 @@ dispatch: CPU: max_pool2d_with_indices_cpu CUDA: max_pool2d_with_indices_cuda @@ -6145,7 +6116,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@@ -6007,12 +7548,16 @@ +@@ -6007,12 +7536,16 @@ dispatch: CPU: max_pool2d_with_indices_backward_out_cpu CUDA: max_pool2d_with_indices_backward_out_cuda @@ -6162,7 +6133,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) -@@ -6020,6 +7565,8 @@ +@@ -6020,6 +7553,8 @@ dispatch: CPU: max_pool3d_with_indices_out_cpu CUDA: max_pool3d_with_indices_out_cuda @@ -6171,7 +6142,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Return: (Tensor output, Tensor indices) - func: max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) -@@ -6027,6 +7574,8 @@ +@@ -6027,6 +7562,8 @@ dispatch: CPU: max_pool3d_with_indices_cpu CUDA: max_pool3d_with_indices_cuda @@ -6180,7 +6151,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= supports_named_tensor: True - func: max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) -@@ -6034,72 +7583,97 @@ +@@ -6034,72 +7571,97 @@ dispatch: CPU: max_pool3d_with_indices_backward_out_cpu CUDA: max_pool3d_with_indices_backward_out_cuda @@ -6278,7 +6249,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) 
python_module: nn -@@ -6118,72 +7692,96 @@ +@@ -6118,72 +7680,96 @@ dispatch: CPU: reflection_pad2d_out_cpu CUDA: reflection_pad2d_out_cuda @@ -6375,7 +6346,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -6214,12 +7812,16 @@ +@@ -6214,12 +7800,16 @@ dispatch: CPU: upsample_linear1d_out_cpu CUDA: upsample_linear1d_out_cuda @@ -6392,7 +6363,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) python_module: nn -@@ -6232,12 +7834,16 @@ +@@ -6232,12 +7822,16 @@ dispatch: CPU: upsample_linear1d_backward_cpu CUDA: upsample_linear1d_backward_cuda @@ -6409,7 +6380,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn -@@ -6245,96 +7851,128 @@ +@@ -6245,96 +7839,128 @@ CPU: upsample_bilinear2d_cpu CUDA: upsample_bilinear2d_cuda QuantizedCPU: quantized_upsample_bilinear2d_cpu @@ -6538,7 +6509,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn -@@ -6342,24 +7980,32 @@ +@@ -6342,24 +7968,32 @@ CPU: upsample_nearest2d_cpu CUDA: upsample_nearest2d_cuda QuantizedCPU: quantized_upsample_nearest2d_cpu @@ -6571,7 +6542,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor python_module: nn -@@ -6367,38 +8013,52 @@ +@@ -6367,38 +8001,52 @@ CPU: upsample_nearest3d_cpu CUDA: upsample_nearest3d_cuda QuantizedCPU: quantized_upsample_nearest3d_cpu @@ -6624,7 +6595,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # What's a thnn_conv_ versus a slow_conv_? # -@@ -6423,24 +8083,32 @@ +@@ -6423,24 +8071,32 @@ dispatch: CPU: slow_conv_transpose2d_out_cpu CUDA: slow_conv_transpose2d_out_cuda @@ -6657,7 +6628,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: slow_conv_transpose3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!) python_module: nn -@@ -6468,21 +8136,29 @@ +@@ -6468,21 +8124,29 @@ - func: thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) python_module: nn @@ -6687,7 +6658,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? 
grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) python_module: nn -@@ -6495,48 +8171,70 @@ +@@ -6495,48 +8159,70 @@ dispatch: CPU: slow_conv2d_backward_cpu CUDA: legacy::cuda::_thnn_conv2d_backward @@ -6758,7 +6729,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: slow_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) python_module: nn -@@ -6553,12 +8251,16 @@ +@@ -6553,12 +8239,16 @@ dispatch: CPU: slow_conv_dilated2d_cpu CUDA: slow_conv_dilated2d_cuda @@ -6775,7 +6746,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - func: slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor python_module: nn -@@ -6577,57 +8279,559 @@ +@@ -6577,57 +8267,559 @@ dispatch: CPU: col2im_out_cpu CUDA: col2im_out_cuda @@ -7338,7 +7309,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S pytorch-develop-150/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S --- pytorch-v1.5.0/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S 2022-06-21 12:00:30.875079633 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/quantized/cpu/qnnpack/src/q8gemm/8x8-dq-aarch64-neon.S 2022-06-23 23:00:37.481734501 +0800 @@ -659,14 +659,14 @@ SUB x1, x1, 4 @@ -7364,7 +7335,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= CMP x1, 2 diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/TensorCompare.cpp pytorch-develop-150/aten/src/ATen/native/TensorCompare.cpp --- pytorch-v1.5.0/aten/src/ATen/native/TensorCompare.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/TensorCompare.cpp 2022-06-21 12:00:30.807079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/TensorCompare.cpp 2022-06-23 23:00:37.381734506 +0800 @@ -64,7 +64,7 @@ Tensor isinf(const Tensor &self) { @@ -7376,7 +7347,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "isinf", [&]() { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/TensorFactories.cpp pytorch-develop-150/aten/src/ATen/native/TensorFactories.cpp --- pytorch-v1.5.0/aten/src/ATen/native/TensorFactories.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/TensorFactories.cpp 2022-06-21 12:00:30.807079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/TensorFactories.cpp 2022-06-23 23:00:37.381734506 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, 
Facebook CORPORATION. @@ -7421,7 +7392,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/TensorProperties.cpp pytorch-develop-150/aten/src/ATen/native/TensorProperties.cpp --- pytorch-v1.5.0/aten/src/ATen/native/TensorProperties.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/TensorProperties.cpp 2022-06-21 12:00:30.807079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/TensorProperties.cpp 2022-06-23 23:00:37.381734506 +0800 @@ -87,6 +87,7 @@ if (self.is_contiguous(memory_format)) { return self; @@ -7432,7 +7403,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= "preserve memory format is unsupported by the contiguous operator"); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native/UpSampleBicubic2d.cpp pytorch-develop-150/aten/src/ATen/native/UpSampleBicubic2d.cpp --- pytorch-v1.5.0/aten/src/ATen/native/UpSampleBicubic2d.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native/UpSampleBicubic2d.cpp 2022-06-21 12:00:30.807079636 +0800 ++++ pytorch-develop-150/aten/src/ATen/native/UpSampleBicubic2d.cpp 2022-06-23 23:00:37.385734506 +0800 @@ -26,7 +26,7 @@ const scalar_t* in = &idata[output_y * input_width + output_x]; scalar_t* out = &odata[output_y * output_width + output_x]; @@ -7444,7 +7415,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= out += output_width * output_height; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/native_parse.py pytorch-develop-150/aten/src/ATen/native_parse.py --- pytorch-v1.5.0/aten/src/ATen/native_parse.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/native_parse.py 2022-06-21 12:00:30.895079632 +0800 ++++ pytorch-develop-150/aten/src/ATen/native_parse.py 2022-06-23 23:00:37.513734500 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -7482,7 +7453,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= msg = '''Exception raised in processing function: diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/preprocess_declarations.py pytorch-develop-150/aten/src/ATen/preprocess_declarations.py --- pytorch-v1.5.0/aten/src/ATen/preprocess_declarations.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/preprocess_declarations.py 2022-06-21 12:00:30.899079632 +0800 ++++ pytorch-develop-150/aten/src/ATen/preprocess_declarations.py 2022-06-23 23:00:37.513734500 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
@@ -7514,7 +7485,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/templates/TensorBody.h pytorch-develop-150/aten/src/ATen/templates/TensorBody.h --- pytorch-v1.5.0/aten/src/ATen/templates/TensorBody.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/templates/TensorBody.h 2022-06-21 12:00:30.899079632 +0800 ++++ pytorch-develop-150/aten/src/ATen/templates/TensorBody.h 2022-06-23 23:00:37.513734500 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -7547,7 +7518,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/ATen/templates/TensorMethods.h pytorch-develop-150/aten/src/ATen/templates/TensorMethods.h --- pytorch-v1.5.0/aten/src/ATen/templates/TensorMethods.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/ATen/templates/TensorMethods.h 2022-06-21 12:00:30.899079632 +0800 ++++ pytorch-develop-150/aten/src/ATen/templates/TensorMethods.h 2022-06-23 23:00:37.513734500 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -7581,7 +7552,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/TH/CMakeLists.txt pytorch-develop-150/aten/src/TH/CMakeLists.txt --- pytorch-v1.5.0/aten/src/TH/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/TH/CMakeLists.txt 2022-06-21 12:00:30.903079632 +0800 ++++ pytorch-develop-150/aten/src/TH/CMakeLists.txt 2022-06-23 23:00:37.521734500 +0800 @@ -48,6 +48,11 @@ ${CMAKE_CURRENT_SOURCE_DIR} PARENT_SCOPE) @@ -7596,7 +7567,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/TH/generic/THStorage.cpp pytorch-develop-150/aten/src/TH/generic/THStorage.cpp --- pytorch-v1.5.0/aten/src/TH/generic/THStorage.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/TH/generic/THStorage.cpp 2022-06-21 12:00:30.911079631 +0800 ++++ pytorch-develop-150/aten/src/TH/generic/THStorage.cpp 2022-06-23 23:00:37.529734499 +0800 @@ -1,9 +1,33 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -7720,7 +7691,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/aten/src/TH/generic/THStorage.h pytorch-develop-150/aten/src/TH/generic/THStorage.h --- pytorch-v1.5.0/aten/src/TH/generic/THStorage.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/aten/src/TH/generic/THStorage.h 2022-06-21 12:00:30.911079631 +0800 ++++ pytorch-develop-150/aten/src/TH/generic/THStorage.h 2022-06-23 23:00:37.529734499 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -7759,7 +7730,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/CMakeLists.txt pytorch-develop-150/c10/CMakeLists.txt --- pytorch-v1.5.0/c10/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/CMakeLists.txt 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/CMakeLists.txt 2022-06-23 23:00:37.573734497 +0800 @@ -63,6 +63,14 @@ message(STATUS "don't use NUMA") endif() @@ -7788,7 +7759,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # not checked in diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/Backend.h pytorch-develop-150/c10/core/Backend.h --- pytorch-v1.5.0/c10/core/Backend.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/Backend.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/Backend.h 2022-06-23 23:00:37.573734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -7885,7 +7856,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= case Backend::CUDA: diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/Device.cpp pytorch-develop-150/c10/core/Device.cpp --- pytorch-v1.5.0/c10/core/Device.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/Device.cpp 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/Device.cpp 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -7925,7 +7896,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= types.begin(), diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/Device.h pytorch-develop-150/c10/core/Device.h --- pytorch-v1.5.0/c10/core/Device.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/Device.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/Device.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -7960,7 +7931,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return type_ == DeviceType::CPU; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/DeviceType.cpp pytorch-develop-150/c10/core/DeviceType.cpp --- pytorch-v1.5.0/c10/core/DeviceType.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/DeviceType.cpp 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/DeviceType.cpp 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8000,7 +7971,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return false; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/DeviceType.h pytorch-develop-150/c10/core/DeviceType.h --- pytorch-v1.5.0/c10/core/DeviceType.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/DeviceType.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/DeviceType.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8043,7 +8014,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= constexpr DeviceType kXLA = DeviceType::XLA; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/DispatchKey.cpp pytorch-develop-150/c10/core/DispatchKey.cpp --- pytorch-v1.5.0/c10/core/DispatchKey.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/DispatchKey.cpp 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/DispatchKey.cpp 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8075,7 +8046,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= case DispatchKey::SparseCPUTensorId: diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/DispatchKey.h pytorch-develop-150/c10/core/DispatchKey.h --- pytorch-v1.5.0/c10/core/DispatchKey.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/DispatchKey.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/DispatchKey.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8107,7 +8078,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/Storage.h pytorch-develop-150/c10/core/Storage.h --- pytorch-v1.5.0/c10/core/Storage.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/Storage.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/Storage.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -8141,7 +8112,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= }; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/StorageImpl.cpp pytorch-develop-150/c10/core/StorageImpl.cpp --- pytorch-v1.5.0/c10/core/StorageImpl.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/StorageImpl.cpp 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/StorageImpl.cpp 2022-06-23 23:00:37.577734497 +0800 @@ -1 +1,18 @@ #include + @@ -8163,7 +8134,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= +} // namespace c10 diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/StorageImpl.h pytorch-develop-150/c10/core/StorageImpl.h --- pytorch-v1.5.0/c10/core/StorageImpl.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/StorageImpl.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/StorageImpl.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,11 +1,55 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8290,7 +8261,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/TensorImpl.h pytorch-develop-150/c10/core/TensorImpl.h --- pytorch-v1.5.0/c10/core/TensorImpl.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/TensorImpl.h 2022-06-21 12:00:30.943079630 +0800 ++++ pytorch-develop-150/c10/core/TensorImpl.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8358,7 +8329,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/core/TensorOptions.h pytorch-develop-150/c10/core/TensorOptions.h --- pytorch-v1.5.0/c10/core/TensorOptions.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/core/TensorOptions.h 2022-06-21 12:00:30.947079630 +0800 ++++ pytorch-develop-150/c10/core/TensorOptions.h 2022-06-23 23:00:37.577734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -8399,7 +8370,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } else if (tid == DispatchKey::HIPTensorId) { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/cuda/CMakeLists.txt pytorch-develop-150/c10/cuda/CMakeLists.txt --- pytorch-v1.5.0/c10/cuda/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/cuda/CMakeLists.txt 2022-06-21 12:00:30.947079630 +0800 ++++ pytorch-develop-150/c10/cuda/CMakeLists.txt 2022-06-23 23:00:37.581734497 +0800 @@ -24,6 +24,7 @@ CUDACachingAllocator.cpp impl/CUDAGuardImpl.cpp @@ -8418,7 +8389,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= torch_cuda_based_add_library(c10_cuda ${C10_CUDA_SRCS} ${C10_CUDA_HEADERS}) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/c10/macros/Export.h pytorch-develop-150/c10/macros/Export.h --- pytorch-v1.5.0/c10/macros/Export.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/c10/macros/Export.h 2022-06-21 12:00:30.947079630 +0800 ++++ pytorch-develop-150/c10/macros/Export.h 2022-06-23 23:00:37.585734497 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -8545,7 +8516,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/caffe2/CMakeLists.txt pytorch-develop-150/caffe2/CMakeLists.txt --- pytorch-v1.5.0/caffe2/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/caffe2/CMakeLists.txt 2022-06-21 12:00:30.963079629 +0800 ++++ pytorch-develop-150/caffe2/CMakeLists.txt 2022-06-23 23:00:37.605734496 +0800 @@ -32,6 +32,7 @@ # Add source, includes, and libs to lists list(APPEND Caffe2_CPU_SRCS ${ATen_CPU_SRCS}) @@ -8700,7 +8671,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Call again since Caffe2_HIP_INCLUDE is extended with ATen include dirs. diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/.clang-format pytorch-develop-150/.clang-format --- pytorch-v1.5.0/.clang-format 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/.clang-format 2022-06-21 12:00:30.763079638 +0800 ++++ pytorch-develop-150/.clang-format 2022-06-23 23:00:37.325734508 +0800 @@ -84,5 +84,4 @@ SpacesInSquareBrackets: false Standard: Cpp11 @@ -8711,7 +8682,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/cmake/BuildVariables.cmake pytorch-develop-150/cmake/BuildVariables.cmake --- pytorch-v1.5.0/cmake/BuildVariables.cmake 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/cmake/BuildVariables.cmake 2022-06-21 12:00:31.215079618 +0800 ++++ pytorch-develop-150/cmake/BuildVariables.cmake 2022-06-23 23:00:37.993734479 +0800 @@ -11,6 +11,7 @@ # CMakeLists.txt files under each folder respectively. 
set(Caffe2_CPU_SRCS) @@ -8738,7 +8709,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # symbols. However, if the lib is whole linked in caffe2 lib, we don't want diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/cmake/Codegen.cmake pytorch-develop-150/cmake/Codegen.cmake --- pytorch-v1.5.0/cmake/Codegen.cmake 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/cmake/Codegen.cmake 2022-06-21 12:00:31.215079618 +0800 ++++ pytorch-develop-150/cmake/Codegen.cmake 2022-06-23 23:00:37.993734479 +0800 @@ -191,13 +191,14 @@ file(READ ${CMAKE_BINARY_DIR}/aten/src/ATen/generated_cpp.txt generated_cpp) file(READ ${CMAKE_BINARY_DIR}/aten/src/ATen/generated_cpp.txt-cuda cuda_generated_cpp) @@ -8769,7 +8740,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= endif() diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/cmake/Dependencies.cmake pytorch-develop-150/cmake/Dependencies.cmake --- pytorch-v1.5.0/cmake/Dependencies.cmake 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/cmake/Dependencies.cmake 2022-06-21 12:00:31.215079618 +0800 ++++ pytorch-develop-150/cmake/Dependencies.cmake 2022-06-23 23:00:37.993734479 +0800 @@ -1509,6 +1509,13 @@ ENDIF(NOT C_HAS_THREAD) endif() @@ -8786,7 +8757,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/cmake/Summary.cmake pytorch-develop-150/cmake/Summary.cmake --- pytorch-v1.5.0/cmake/Summary.cmake 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/cmake/Summary.cmake 2022-06-21 12:00:31.219079618 +0800 ++++ pytorch-develop-150/cmake/Summary.cmake 2022-06-23 23:00:38.001734479 +0800 @@ -134,6 +134,7 @@ if(NOT "${SELECTED_OP_LIST}" STREQUAL "") message(STATUS " SELECTED_OP_LIST : ${SELECTED_OP_LIST}") @@ -8797,7 +8768,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= endfunction() diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/cmake/TorchConfig.cmake.in pytorch-develop-150/cmake/TorchConfig.cmake.in --- pytorch-v1.5.0/cmake/TorchConfig.cmake.in 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/cmake/TorchConfig.cmake.in 2022-06-21 12:00:31.219079618 +0800 ++++ pytorch-develop-150/cmake/TorchConfig.cmake.in 2022-06-23 23:00:38.001734479 +0800 @@ -112,6 +112,11 @@ list(APPEND TORCH_LIBRARIES ${TORCH_CUDA_LIBRARIES}) endif() @@ -8812,7 +8783,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=@GLIBCXX_USE_CXX11_ABI@") diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/CMakeLists.txt pytorch-develop-150/CMakeLists.txt --- pytorch-v1.5.0/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/CMakeLists.txt 2022-06-21 12:00:30.767079638 +0800 ++++ pytorch-develop-150/CMakeLists.txt 2022-06-23 23:00:37.329734508 +0800 @@ -205,6 +205,10 @@ option(USE_TBB "Use TBB" OFF) option(ONNX_ML "Enable traditional ONNX ML API." 
ON) @@ -8879,7 +8850,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-braces") diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/CONTRIBUTING.zh.md pytorch-develop-150/CONTRIBUTING.zh.md --- pytorch-v1.5.0/CONTRIBUTING.zh.md 1970-01-01 08:00:00.000000000 +0800 -+++ pytorch-develop-150/CONTRIBUTING.zh.md 2022-06-21 12:00:30.767079638 +0800 ++++ pytorch-develop-150/CONTRIBUTING.zh.md 2022-06-23 23:00:37.329734508 +0800 @@ -0,0 +1,228 @@ +# PyTorch贡献指南 +- [贡献者许可协议](#贡献者许可协议.md) @@ -9111,7 +9082,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= + diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/.dockerignore pytorch-develop-150/.dockerignore --- pytorch-v1.5.0/.dockerignore 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/.dockerignore 2022-06-21 12:00:30.763079638 +0800 ++++ pytorch-develop-150/.dockerignore 2022-06-23 23:00:37.325734508 +0800 @@ -1,257 +1 @@ -# READ THIS BEFORE YOU REFACTOR ME -# @@ -9387,7 +9358,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/requirements.txt pytorch-develop-150/requirements.txt --- pytorch-v1.5.0/requirements.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/requirements.txt 2022-06-21 12:00:30.931079630 +0800 ++++ pytorch-develop-150/requirements.txt 2022-06-23 23:00:37.605734496 +0800 @@ -4,4 +4,11 @@ requests setuptools @@ -9405,7 +9376,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/setup.py pytorch-develop-150/setup.py --- pytorch-v1.5.0/setup.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/setup.py 2022-06-21 12:00:30.931079630 +0800 ++++ pytorch-develop-150/setup.py 2022-06-23 23:00:37.605734496 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -9508,7 +9479,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= 'python/serialized_test/data/operator_test/*.zip', diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/distributed/test_c10d.py pytorch-develop-150/test/distributed/test_c10d.py --- pytorch-v1.5.0/test/distributed/test_c10d.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/distributed/test_c10d.py 2022-06-21 12:00:31.275079615 +0800 ++++ pytorch-develop-150/test/distributed/test_c10d.py 2022-06-23 23:00:38.085734475 +0800 @@ -3049,8 +3049,8 @@ model = self._create_mixed_precision_model() reducer = self._create_reducer_for_models([model]) @@ -9522,7 +9493,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= # Check that the grad of fc3 is not set. 
diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/run_test.py pytorch-develop-150/test/run_test.py --- pytorch-v1.5.0/test/run_test.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/run_test.py 2022-06-21 12:00:31.307079614 +0800 ++++ pytorch-develop-150/test/run_test.py 2022-06-23 23:00:38.133734473 +0800 @@ -11,6 +11,8 @@ import subprocess import sys @@ -9783,7 +9754,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= shell(['coverage', 'html']) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/test_autograd.py pytorch-develop-150/test/test_autograd.py --- pytorch-v1.5.0/test/test_autograd.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/test_autograd.py 2022-06-21 12:00:31.307079614 +0800 ++++ pytorch-develop-150/test/test_autograd.py 2022-06-23 23:00:38.137734473 +0800 @@ -24,7 +24,7 @@ from torch.autograd.function import once_differentiable from torch.autograd.profiler import (profile, format_time, EventList, @@ -9814,7 +9785,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/test_nn.py pytorch-develop-150/test/test_nn.py --- pytorch-v1.5.0/test/test_nn.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/test_nn.py 2022-06-21 12:00:31.311079614 +0800 ++++ pytorch-develop-150/test/test_nn.py 2022-06-23 23:00:38.141734472 +0800 @@ -3535,14 +3535,17 @@ # earlier versions or no versions, it should provide default value of 0. 
bn = nn.BatchNorm2d(3) @@ -9837,7 +9808,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= @unittest.skipIf(not PY3, 'Python 2.7 generates cyclic trash') diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/test_torch.py pytorch-develop-150/test/test_torch.py --- pytorch-v1.5.0/test/test_torch.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/test_torch.py 2022-06-21 12:00:31.399079610 +0800 ++++ pytorch-develop-150/test/test_torch.py 2022-06-23 23:00:38.273734467 +0800 @@ -4087,6 +4087,9 @@ def test_print(self): default_type = torch.Tensor().type() @@ -9886,7 +9857,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= @onlyCPU diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/test/test_utils.py pytorch-develop-150/test/test_utils.py --- pytorch-v1.5.0/test/test_utils.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/test/test_utils.py 2022-06-21 12:00:31.403079610 +0800 ++++ pytorch-develop-150/test/test_utils.py 2022-06-23 23:00:38.277734466 +0800 @@ -6,6 +6,7 @@ import random import tempfile @@ -9905,7 +9876,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= load_tests = load_tests diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/derivatives.yaml pytorch-develop-150/tools/autograd/derivatives.yaml --- pytorch-v1.5.0/tools/autograd/derivatives.yaml 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/derivatives.yaml 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/derivatives.yaml 2022-06-23 23:00:41.229734337 +0800 @@ -107,6 +107,10 @@ # # NB: The parameter names here MUST be consistent with the parameter names @@ -10040,7 +10011,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= + input, w_ih, w_hh, bias, h, c: npu_lstm_cell_backward(grads[0], grads[1], grads[2], input, w_ih, w_hh, h, c, result0, result1, result2, result3, result4, result5, result6, result7) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/dump_utils.py pytorch-develop-150/tools/autograd/dump_utils.py --- pytorch-v1.5.0/tools/autograd/dump_utils.py 1970-01-01 08:00:00.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/dump_utils.py 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/dump_utils.py 2022-06-23 23:00:41.229734337 +0800 @@ -0,0 +1,313 @@ +# Copyright (c) 2021 Huawei Technologies Co., Ltd +# All rights reserved. 
@@ -10357,7 +10328,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= + return prepare_to_check_overflow, overflow_dump_inputs diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/gen_autograd_functions.py pytorch-develop-150/tools/autograd/gen_autograd_functions.py --- pytorch-v1.5.0/tools/autograd/gen_autograd_functions.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/gen_autograd_functions.py 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/gen_autograd_functions.py 2022-06-23 23:00:41.229734337 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2021 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -10563,7 +10534,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= + diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/gen_python_functions.py pytorch-develop-150/tools/autograd/gen_python_functions.py --- pytorch-v1.5.0/tools/autograd/gen_python_functions.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/gen_python_functions.py 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/gen_python_functions.py 2022-06-23 23:00:41.229734337 +0800 @@ -1,3 +1,20 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -10605,7 +10576,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= 'value': argname, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/gen_variable_type.py pytorch-develop-150/tools/autograd/gen_variable_type.py --- pytorch-v1.5.0/tools/autograd/gen_variable_type.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/gen_variable_type.py 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/gen_variable_type.py 2022-06-23 23:00:41.229734337 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2021 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -10786,7 +10757,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/templates/Functions.cpp pytorch-develop-150/tools/autograd/templates/Functions.cpp --- pytorch-v1.5.0/tools/autograd/templates/Functions.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/templates/Functions.cpp 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/templates/Functions.cpp 2022-06-23 23:00:41.233734336 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2021 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -10867,7 +10838,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= auto sparse = sparse_.coalesce(); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/templates/python_torch_functions.cpp pytorch-develop-150/tools/autograd/templates/python_torch_functions.cpp --- pytorch-v1.5.0/tools/autograd/templates/python_torch_functions.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/templates/python_torch_functions.cpp 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/templates/python_torch_functions.cpp 2022-06-23 23:00:41.233734336 +0800 @@ -22,7 +22,7 @@ #include "torch/csrc/autograd/generated/variable_factories.h" #include "torch/csrc/utils/structseq.h" @@ -10951,7 +10922,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/templates/python_variable_methods.cpp pytorch-develop-150/tools/autograd/templates/python_variable_methods.cpp --- pytorch-v1.5.0/tools/autograd/templates/python_variable_methods.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/templates/python_variable_methods.cpp 2022-06-21 12:00:33.439079520 +0800 ++++ pytorch-develop-150/tools/autograd/templates/python_variable_methods.cpp 2022-06-23 23:00:41.233734336 +0800 @@ -15,7 +15,13 @@ #include "torch/csrc/cuda/Stream.h" #include "torch/csrc/cuda/Event.h" @@ -11046,7 +11017,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= {"has_names", (PyCFunction)THPVariable_has_names, METH_NOARGS, NULL}, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/templates/VariableType.cpp pytorch-develop-150/tools/autograd/templates/VariableType.cpp --- pytorch-v1.5.0/tools/autograd/templates/VariableType.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/templates/VariableType.cpp 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/templates/VariableType.cpp 2022-06-23 23:00:41.233734336 +0800 @@ -1,7 +1,29 @@ +// Copyright (c) 2021 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -11079,7 +11050,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/autograd/templates/VariableType.h pytorch-develop-150/tools/autograd/templates/VariableType.h --- pytorch-v1.5.0/tools/autograd/templates/VariableType.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/autograd/templates/VariableType.h 2022-06-21 12:00:33.435079520 +0800 ++++ pytorch-develop-150/tools/autograd/templates/VariableType.h 2022-06-23 23:00:41.233734336 +0800 @@ -1,3 +1,20 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -11111,7 +11082,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= const at::Tensor & unpack(const Tensor & t, const char * name, int pos); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/tools/build_variables.bzl pytorch-develop-150/tools/build_variables.bzl --- pytorch-v1.5.0/tools/build_variables.bzl 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/tools/build_variables.bzl 2022-06-21 12:00:33.431079520 +0800 ++++ pytorch-develop-150/tools/build_variables.bzl 2022-06-23 23:00:41.225734337 +0800 @@ -46,6 +46,7 @@ "torch/csrc/autograd/functions/utils.cpp", "torch/csrc/autograd/input_buffer.cpp", @@ -11197,7 +11168,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -def grad(outputs: _TensorOrTensors, inputs: _TensorOrTensors, grad_outputs: Optional[_TensorOrTensors]=..., retain_graph: Optional[bool]=..., create_graph: bool=..., only_inputs: bool=..., allow_unused: bool=...) -> Tuple[Tensor, ...]: ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/autograd/profiler.py pytorch-develop-150/torch/autograd/profiler.py --- pytorch-v1.5.0/torch/autograd/profiler.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/autograd/profiler.py 2022-06-21 12:00:33.447079520 +0800 ++++ pytorch-develop-150/torch/autograd/profiler.py 2022-06-23 23:00:41.245734336 +0800 @@ -1,8 +1,25 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -11674,7 +11645,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return ''.join(result) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/CMakeLists.txt pytorch-develop-150/torch/CMakeLists.txt --- pytorch-v1.5.0/torch/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/CMakeLists.txt 2022-06-21 12:00:33.439079520 +0800 ++++ pytorch-develop-150/torch/CMakeLists.txt 2022-06-23 23:00:41.237734336 +0800 @@ -97,6 +97,7 @@ ${TORCH_SRC_DIR}/csrc/tensor/python_tensor.cpp ${TORCH_SRC_DIR}/csrc/utils.cpp @@ -11706,7 +11677,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= endif() diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/engine.cpp pytorch-develop-150/torch/csrc/autograd/engine.cpp --- pytorch-v1.5.0/torch/csrc/autograd/engine.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/engine.cpp 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/engine.cpp 2022-06-23 23:00:41.301734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -11858,7 +11829,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= auto event = c10::Event{c10::DeviceType::CUDA}; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/function.h pytorch-develop-150/torch/csrc/autograd/function.h --- pytorch-v1.5.0/torch/csrc/autograd/function.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/function.h 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/function.h 2022-06-23 23:00:41.301734333 +0800 @@ -11,6 +11,7 @@ #include @@ -11880,7 +11851,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= // probably operate with names. diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/functions/tensor.cpp pytorch-develop-150/torch/csrc/autograd/functions/tensor.cpp --- pytorch-v1.5.0/torch/csrc/autograd/functions/tensor.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/functions/tensor.cpp 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/functions/tensor.cpp 2022-06-23 23:00:41.305734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -11912,7 +11883,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= /*non_blocking=*/false, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/init.cpp pytorch-develop-150/torch/csrc/autograd/init.cpp --- pytorch-v1.5.0/torch/csrc/autograd/init.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/init.cpp 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/init.cpp 2022-06-23 23:00:41.305734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -11955,7 +11926,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= m.def("_enable_profiler", enableProfiler); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/input_buffer.cpp pytorch-develop-150/torch/csrc/autograd/input_buffer.cpp --- pytorch-v1.5.0/torch/csrc/autograd/input_buffer.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/input_buffer.cpp 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/input_buffer.cpp 2022-06-23 23:00:41.305734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
@@ -12007,7 +11978,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= auto& old_var = buffer[pos]; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/profiler.cpp pytorch-develop-150/torch/csrc/autograd/profiler.cpp --- pytorch-v1.5.0/torch/csrc/autograd/profiler.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/profiler.cpp 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/profiler.cpp 2022-06-23 23:00:41.305734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -12247,7 +12218,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= CUDAStubs::~CUDAStubs() = default; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/profiler.h pytorch-develop-150/torch/csrc/autograd/profiler.h --- pytorch-v1.5.0/torch/csrc/autograd/profiler.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/profiler.h 2022-06-21 12:00:33.483079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/profiler.h 2022-06-23 23:00:41.305734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -12383,7 +12354,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/python_variable.cpp pytorch-develop-150/torch/csrc/autograd/python_variable.cpp --- pytorch-v1.5.0/torch/csrc/autograd/python_variable.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/python_variable.cpp 2022-06-21 12:00:33.487079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/python_variable.cpp 2022-06-23 23:00:41.309734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -12437,7 +12408,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= {"is_complex", (getter)THPVariable_is_complex, nullptr, nullptr, nullptr}, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/python_variable_indexing.cpp pytorch-develop-150/torch/csrc/autograd/python_variable_indexing.cpp --- pytorch-v1.5.0/torch/csrc/autograd/python_variable_indexing.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/python_variable_indexing.cpp 2022-06-21 12:00:33.487079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/python_variable_indexing.cpp 2022-06-23 23:00:41.309734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
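The python_variable.cpp hunk lands next to the is_complex getter registration, which suggests a matching is_npu property is added to the getter table. A hedged check, since the property name is inferred rather than shown:

    import torch

    x = torch.zeros(4)
    # getattr with a default keeps this runnable on unpatched builds;
    # is_npu itself is an assumption modeled on is_cuda/is_complex.
    print(getattr(x, "is_npu", False))   # False for a CPU tensor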
@@ -12468,7 +12439,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= at::Device self_device = self_.device(); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/record_function.cpp pytorch-develop-150/torch/csrc/autograd/record_function.cpp --- pytorch-v1.5.0/torch/csrc/autograd/record_function.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/record_function.cpp 2022-06-21 12:00:33.487079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/record_function.cpp 2022-06-23 23:00:41.313734333 +0800 @@ -154,6 +154,12 @@ } } @@ -12502,7 +12473,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/record_function.h pytorch-develop-150/torch/csrc/autograd/record_function.h --- pytorch-v1.5.0/torch/csrc/autograd/record_function.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/record_function.h 2022-06-21 12:00:33.487079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/record_function.h 2022-06-23 23:00:41.313734333 +0800 @@ -3,6 +3,7 @@ #include #include @@ -12574,7 +12545,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= if (torch::autograd::profiler::needsInputs()) { \ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/utils/wrap_outputs.h pytorch-develop-150/torch/csrc/autograd/utils/wrap_outputs.h --- pytorch-v1.5.0/torch/csrc/autograd/utils/wrap_outputs.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/utils/wrap_outputs.h 2022-06-21 12:00:33.487079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/utils/wrap_outputs.h 2022-06-23 23:00:41.313734333 +0800 @@ -168,6 +168,45 @@ return r.release(); } @@ -12623,7 +12594,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= if (!r) throw python_error(); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/autograd/VariableTypeManual.cpp pytorch-develop-150/torch/csrc/autograd/VariableTypeManual.cpp --- pytorch-v1.5.0/torch/csrc/autograd/VariableTypeManual.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/autograd/VariableTypeManual.cpp 2022-06-21 12:00:33.479079518 +0800 ++++ pytorch-develop-150/torch/csrc/autograd/VariableTypeManual.cpp 2022-06-23 23:00:41.301734333 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
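record_function.cpp/.h gain extra state for scoped profiling ranges. The Python-visible pattern they back is the record_function context manager, which labels a region so it shows up as one row in the profile; this sketch uses only stock PyTorch API:

    import torch

    with torch.autograd.profiler.profile() as prof:
        with torch.autograd.profiler.record_function("labelled_block"):
            torch.ones(64, 64).mm(torch.ones(64, 64))
    # "labelled_block" appears as its own entry in the table.
    print(prof.key_averages().table())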
@@ -12657,7 +12628,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= if (!t.defined()) { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/distributed/c10d/comm.cpp pytorch-develop-150/torch/csrc/distributed/c10d/comm.cpp --- pytorch-v1.5.0/torch/csrc/distributed/c10d/comm.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/distributed/c10d/comm.cpp 2022-06-21 12:00:33.495079518 +0800 ++++ pytorch-develop-150/torch/csrc/distributed/c10d/comm.cpp 2022-06-23 23:00:41.325734332 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -12739,7 +12710,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= // because c10d::ProcessGroup::broadcast takes a vector argument. diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/distributed/c10d/init.cpp pytorch-develop-150/torch/csrc/distributed/c10d/init.cpp --- pytorch-v1.5.0/torch/csrc/distributed/c10d/init.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/distributed/c10d/init.cpp 2022-06-21 12:00:33.495079518 +0800 ++++ pytorch-develop-150/torch/csrc/distributed/c10d/init.cpp 2022-06-23 23:00:41.325734332 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -12796,7 +12767,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= .def("is_success", &::c10d::ProcessGroup::Work::isSuccess) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/distributed/c10d/reducer.cpp pytorch-develop-150/torch/csrc/distributed/c10d/reducer.cpp --- pytorch-v1.5.0/torch/csrc/distributed/c10d/reducer.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/distributed/c10d/reducer.cpp 2022-06-21 12:00:33.495079518 +0800 ++++ pytorch-develop-150/torch/csrc/distributed/c10d/reducer.cpp 2022-06-23 23:00:41.325734332 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13075,7 +13046,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= if (bucket_size_limit_iterators.count(key) == 0) { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/DynamicTypes.cpp pytorch-develop-150/torch/csrc/DynamicTypes.cpp --- pytorch-v1.5.0/torch/csrc/DynamicTypes.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/DynamicTypes.cpp 2022-06-21 12:00:33.451079520 +0800 ++++ pytorch-develop-150/torch/csrc/DynamicTypes.cpp 2022-06-23 23:00:41.253734335 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
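The c10d hunks (comm.cpp, init.cpp, reducer.cpp, plus the CMake option block) wire the gradient reducer and Python bindings for an additional process-group backend. A typical single-process launch under the assumption that the patch registers an "hccl" backend for NPU collectives (only the build-system hooks are visible above, so the backend name is an assumption):

    import os
    import torch.distributed as dist

    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    # "hccl" is the assumed backend string exposed by the patched c10d.
    dist.init_process_group(backend="hccl", rank=0, world_size=1)
    dist.destroy_process_group()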
@@ -13124,7 +13095,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return it->second; diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/Generator.cpp pytorch-develop-150/torch/csrc/Generator.cpp --- pytorch-v1.5.0/torch/csrc/Generator.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/Generator.cpp 2022-06-21 12:00:33.451079520 +0800 ++++ pytorch-develop-150/torch/csrc/Generator.cpp 2022-06-23 23:00:41.253734335 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13192,7 +13163,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= #endif diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/generic/serialization.cpp pytorch-develop-150/torch/csrc/generic/serialization.cpp --- pytorch-v1.5.0/torch/csrc/generic/serialization.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/generic/serialization.cpp 2022-06-21 12:00:33.503079517 +0800 ++++ pytorch-develop-150/torch/csrc/generic/serialization.cpp 2022-06-23 23:00:41.337734332 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13307,7 +13278,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/generic/Storage.cpp pytorch-develop-150/torch/csrc/generic/Storage.cpp --- pytorch-v1.5.0/torch/csrc/generic/Storage.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/generic/Storage.cpp 2022-06-21 12:00:33.503079517 +0800 ++++ pytorch-develop-150/torch/csrc/generic/Storage.cpp 2022-06-23 23:00:41.337734332 +0800 @@ -1,7 +1,25 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13386,7 +13357,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= for (Py_ssize_t i = 0; i < length; i++) { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/generic/StorageMethods.cpp pytorch-develop-150/torch/csrc/generic/StorageMethods.cpp --- pytorch-v1.5.0/torch/csrc/generic/StorageMethods.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/generic/StorageMethods.cpp 2022-06-21 12:00:33.503079517 +0800 ++++ pytorch-develop-150/torch/csrc/generic/StorageMethods.cpp 2022-06-23 23:00:41.337734332 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. 
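The Generator.cpp and generic serialization/Storage hunks route seeding and storage (de)serialization through the extra device. A sketch of the round trip this enables, assuming NPU storages serialize under an "npu" location tag analogous to "cuda":

    import torch

    torch.manual_seed(1234)          # assumed to also seed device generators
    t = torch.randn(4).to("npu")     # requires the patched device type
    torch.save(t, "t.pt")
    # Remapping "npu" storage back to the host; the location-tag handling
    # is assumed from the serialization hunks above.
    host = torch.load("t.pt", map_location="cpu")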
@@ -13434,7 +13405,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= {"_write_file", (PyCFunction)THPStorage_(writeFile), METH_VARARGS, nullptr}, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/Module.cpp pytorch-develop-150/torch/csrc/Module.cpp --- pytorch-v1.5.0/torch/csrc/Module.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/Module.cpp 2022-06-21 12:00:33.451079520 +0800 ++++ pytorch-develop-150/torch/csrc/Module.cpp 2022-06-23 23:00:41.257734335 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13596,7 +13567,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= auto set_module_attr = [&](const char* name, PyObject* v, bool incref = true) { diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/tensor/python_tensor.cpp pytorch-develop-150/torch/csrc/tensor/python_tensor.cpp --- pytorch-v1.5.0/torch/csrc/tensor/python_tensor.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/tensor/python_tensor.cpp 2022-06-21 12:00:33.555079515 +0800 ++++ pytorch-develop-150/torch/csrc/tensor/python_tensor.cpp 2022-06-23 23:00:41.421734328 +0800 @@ -1,18 +1,35 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -13973,7 +13944,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= +} // namespace torch diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/init.cpp pytorch-develop-150/torch/csrc/utils/init.cpp --- pytorch-v1.5.0/torch/csrc/utils/init.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/init.cpp 2022-06-21 12:00:33.555079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/init.cpp 2022-06-23 23:00:41.425734328 +0800 @@ -1,7 +1,13 @@ #include #include @@ -14138,7 +14109,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } // namespace torch diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/init.h pytorch-develop-150/torch/csrc/utils/init.h --- pytorch-v1.5.0/torch/csrc/utils/init.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/init.h 2022-06-21 12:00:33.555079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/init.h 2022-06-23 23:00:41.425734328 +0800 @@ -8,4 +8,7 @@ void initThroughputBenchmarkBindings(PyObject* module); @@ -14149,7 +14120,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= } // namespace torch diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/python_arg_parser.h pytorch-develop-150/torch/csrc/utils/python_arg_parser.h --- pytorch-v1.5.0/torch/csrc/utils/python_arg_parser.h 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/python_arg_parser.h 2022-06-21 12:00:33.559079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/python_arg_parser.h 2022-06-23 23:00:41.429734328 +0800 
@@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -14184,7 +14155,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return at::Device(device_str); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/tensor_layouts.cpp pytorch-develop-150/torch/csrc/utils/tensor_layouts.cpp --- pytorch-v1.5.0/torch/csrc/utils/tensor_layouts.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/tensor_layouts.cpp 2022-06-21 12:00:33.559079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/tensor_layouts.cpp 2022-06-23 23:00:41.429734328 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -14215,7 +14186,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= registerLayoutObject((THPLayout*)strided_layout, at::Backend::QuantizedCPU); diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/tensor_new.cpp pytorch-develop-150/torch/csrc/utils/tensor_new.cpp --- pytorch-v1.5.0/torch/csrc/utils/tensor_new.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/tensor_new.cpp 2022-06-21 12:00:33.563079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/tensor_new.cpp 2022-06-23 23:00:41.433734328 +0800 @@ -1,3 +1,19 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -14351,7 +14322,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= " or ", c10::DispatchKey::XLATensorId, diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/csrc/utils/tensor_types.cpp pytorch-develop-150/torch/csrc/utils/tensor_types.cpp --- pytorch-v1.5.0/torch/csrc/utils/tensor_types.cpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/csrc/utils/tensor_types.cpp 2022-06-21 12:00:33.563079515 +0800 ++++ pytorch-develop-150/torch/csrc/utils/tensor_types.cpp 2022-06-23 23:00:41.433734328 +0800 @@ -1,58 +1,91 @@ +// Copyright (c) 2020 Huawei Technologies Co., Ltd +// Copyright (c) 2019, Facebook CORPORATION. @@ -14564,7 +14535,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -def get_rng_state(): ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/distributed/distributed_c10d.py pytorch-develop-150/torch/distributed/distributed_c10d.py --- pytorch-v1.5.0/torch/distributed/distributed_c10d.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/distributed/distributed_c10d.py 2022-06-21 12:00:33.567079514 +0800 ++++ pytorch-develop-150/torch/distributed/distributed_c10d.py 2022-06-23 23:00:41.437734327 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
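tensor_new.cpp and tensor_types.cpp teach the constructors and type-string machinery about the extra backend, so factory functions can target it directly instead of building on CPU and copying. A sketch, assuming device="npu" parses in the patched argument parser:

    import torch

    # Direct on-device construction; the "npu" device string is the
    # assumed spelling accepted by the patched python_arg_parser.h.
    a = torch.empty(2, 3, device="npu")
    b = torch.tensor([1.0, 2.0], device="npu:0")
    print(a.device, b.device)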
@@ -14655,7 +14626,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= """ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/__init__.py pytorch-develop-150/torch/__init__.py --- pytorch-v1.5.0/torch/__init__.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/__init__.py 2022-06-21 12:00:33.443079520 +0800 ++++ pytorch-develop-150/torch/__init__.py 2022-06-23 23:00:41.241734336 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -14698,7 +14669,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/jit/frontend.py pytorch-develop-150/torch/jit/frontend.py --- pytorch-v1.5.0/torch/jit/frontend.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/jit/frontend.py 2022-06-21 12:00:33.575079514 +0800 ++++ pytorch-develop-150/torch/jit/frontend.py 2022-06-23 23:00:41.461734326 +0800 @@ -616,6 +616,17 @@ return Subscript(base, [build_SliceExpr(ctx, base, expr.slice)]) elif sub_type is ast.ExtSlice: @@ -14719,7 +14690,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/lib/c10d/CMakeLists.txt pytorch-develop-150/torch/lib/c10d/CMakeLists.txt --- pytorch-v1.5.0/torch/lib/c10d/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/lib/c10d/CMakeLists.txt 2022-06-21 12:00:33.575079514 +0800 ++++ pytorch-develop-150/torch/lib/c10d/CMakeLists.txt 2022-06-23 23:00:41.461734326 +0800 @@ -28,6 +28,10 @@ option(USE_C10D_NCCL "USE C10D NCCL" ON) endif() @@ -14772,7 +14743,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= copy_header(ProcessGroupMPI.hpp) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/lib/c10d/ProcessGroup.hpp pytorch-develop-150/torch/lib/c10d/ProcessGroup.hpp --- pytorch-v1.5.0/torch/lib/c10d/ProcessGroup.hpp 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/lib/c10d/ProcessGroup.hpp 2022-06-21 12:00:33.575079514 +0800 ++++ pytorch-develop-150/torch/lib/c10d/ProcessGroup.hpp 2022-06-23 23:00:41.465734326 +0800 @@ -115,6 +115,17 @@ std::vector& data, const AllreduceOptions& opts = AllreduceOptions()) = 0; @@ -14793,7 +14764,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= virtual std::shared_ptr allreduce_coalesced( diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/lib/libshm/CMakeLists.txt pytorch-develop-150/torch/lib/libshm/CMakeLists.txt --- pytorch-v1.5.0/torch/lib/libshm/CMakeLists.txt 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/lib/libshm/CMakeLists.txt 2022-06-21 12:00:33.579079514 +0800 ++++ pytorch-develop-150/torch/lib/libshm/CMakeLists.txt 2022-06-23 23:00:41.473734326 +0800 @@ -37,8 +37,11 @@ SET_TARGET_PROPERTIES(shm PROPERTIES PREFIX "lib" @@ -14850,7 +14821,7 @@ diff -Nur 
'--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -_maybe_indices_t = _scalar_or_tuple_2_t[Tensor] diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/functional.py pytorch-develop-150/torch/nn/functional.py --- pytorch-v1.5.0/torch/nn/functional.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/functional.py 2022-06-21 12:00:33.583079514 +0800 ++++ pytorch-develop-150/torch/nn/functional.py 2022-06-23 23:00:41.481734325 +0800 @@ -1611,7 +1611,7 @@ else: output = input.matmul(weight.t()) @@ -14873,7 +14844,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -from . import parallel as parallel diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/modules/batchnorm.py pytorch-develop-150/torch/nn/modules/batchnorm.py --- pytorch-v1.5.0/torch/nn/modules/batchnorm.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/modules/batchnorm.py 2022-06-21 12:00:33.587079514 +0800 ++++ pytorch-develop-150/torch/nn/modules/batchnorm.py 2022-06-23 23:00:41.485734325 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -14919,7 +14890,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/modules/__init__.py pytorch-develop-150/torch/nn/modules/__init__.py --- pytorch-v1.5.0/torch/nn/modules/__init__.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/modules/__init__.py 2022-06-21 12:00:33.587079514 +0800 ++++ pytorch-develop-150/torch/nn/modules/__init__.py 2022-06-23 23:00:41.485734325 +0800 @@ -18,6 +18,7 @@ from .instancenorm import InstanceNorm1d, InstanceNorm2d, InstanceNorm3d from .normalization import LocalResponseNorm, CrossMapLRN2d, LayerNorm, GroupNorm @@ -14939,7 +14910,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= 'LSTMCell', 'GRUCell', 'PixelShuffle', 'Upsample', 'UpsamplingNearest2d', 'UpsamplingBilinear2d', diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/modules/module.py pytorch-develop-150/torch/nn/modules/module.py --- pytorch-v1.5.0/torch/nn/modules/module.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/modules/module.py 2022-06-21 12:00:33.591079513 +0800 ++++ pytorch-develop-150/torch/nn/modules/module.py 2022-06-23 23:00:41.489734325 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
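The nn/functional.py hunk touches the tail of F.linear, where the matmul result and the bias are combined. For reference, the semantics of that code path are y = x @ W^T + b, which the following stock-PyTorch check reproduces (the out-of-place bias add in the comment is an assumption about the patched line):

    import torch

    def linear_ref(input, weight, bias=None):
        output = input.matmul(weight.t())
        if bias is not None:
            output = output + bias   # out-of-place add (assumed patched form)
        return output

    x, w, b = torch.randn(4, 3), torch.randn(5, 3), torch.randn(5)
    assert torch.allclose(linear_ref(x, w, b),
                          torch.nn.functional.linear(x, w, b))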
@@ -15130,7 +15101,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/modules/normalization.py pytorch-develop-150/torch/nn/modules/normalization.py --- pytorch-v1.5.0/torch/nn/modules/normalization.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/modules/normalization.py 2022-06-21 12:00:33.591079513 +0800 ++++ pytorch-develop-150/torch/nn/modules/normalization.py 2022-06-23 23:00:41.493734325 +0800 @@ -128,13 +128,14 @@ """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] @@ -15163,7 +15134,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= return '{normalized_shape}, eps={eps}, ' \ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/modules/npu_modules.py pytorch-develop-150/torch/nn/modules/npu_modules.py --- pytorch-v1.5.0/torch/nn/modules/npu_modules.py 1970-01-01 08:00:00.000000000 +0800 -+++ pytorch-develop-150/torch/nn/modules/npu_modules.py 2022-06-21 12:00:33.591079513 +0800 ++++ pytorch-develop-150/torch/nn/modules/npu_modules.py 2022-06-23 23:00:41.493734325 +0800 @@ -0,0 +1,42 @@ +from .module import Module +from .. import npu_functional as F @@ -15210,7 +15181,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= \ No newline at end of file diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/npu_functional.py pytorch-develop-150/torch/nn/npu_functional.py --- pytorch-v1.5.0/torch/nn/npu_functional.py 1970-01-01 08:00:00.000000000 +0800 -+++ pytorch-develop-150/torch/nn/npu_functional.py 2022-06-21 12:00:33.591079513 +0800 ++++ pytorch-develop-150/torch/nn/npu_functional.py 2022-06-23 23:00:41.497734325 +0800 @@ -0,0 +1,30 @@ +r"""Functional interface""" + @@ -15280,7 +15251,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - module_kwargs: Optional[Any] = ...) -> Tensor: ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/nn/parallel/distributed.py pytorch-develop-150/torch/nn/parallel/distributed.py --- pytorch-v1.5.0/torch/nn/parallel/distributed.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/nn/parallel/distributed.py 2022-06-21 12:00:33.591079513 +0800 ++++ pytorch-develop-150/torch/nn/parallel/distributed.py 2022-06-23 23:00:41.497734325 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -15644,7 +15615,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= -def remove_weight_norm(module: T_module, name: str = ...) -> T_module: ... 
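npu_modules.py and npu_functional.py follow the standard torch.nn split: a thin Module subclass holds configuration and delegates the math to a functional helper. A structural sketch of that pattern; the helper below is a hypothetical CPU stand-in, not the patch's actual NPU op:

    import torch
    from torch import nn

    def npu_style_dropout(x, p=0.5, training=True):
        # Hypothetical stand-in for a torch.nn.npu_functional helper so the
        # sketch runs anywhere; the real op is NPU-specific.
        return nn.functional.dropout(x, p=p, training=training)

    class NpuStyleDropout(nn.Module):
        """Module wrapper delegating to a functional helper, mirroring the
        npu_modules.py / npu_functional.py split introduced above."""
        def __init__(self, p=0.5):
            super().__init__()
            self.p = p

        def forward(self, x):
            return npu_style_dropout(x, self.p, self.training)

    print(NpuStyleDropout(0.2)(torch.ones(4, 4)).shape)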
diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/onnx/symbolic_opset9.py pytorch-develop-150/torch/onnx/symbolic_opset9.py --- pytorch-v1.5.0/torch/onnx/symbolic_opset9.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/onnx/symbolic_opset9.py 2022-06-21 12:00:33.599079513 +0800 ++++ pytorch-develop-150/torch/onnx/symbolic_opset9.py 2022-06-23 23:00:41.509734324 +0800 @@ -1621,14 +1621,23 @@ slices = [sym_help._slice_helper(g, w, axes=[0], starts=[x * n], ends=[y * n]) for x, y in intervals] return g.op('Concat', *slices, axis_i=0) @@ -15722,7 +15693,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - def __init__(self, params: _params_t, lr: float=..., lr_decay: float=..., weight_decay: float=..., initial_accumulator_value: float=..., eps: float=...) -> None: ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/optim/adamax.py pytorch-develop-150/torch/optim/adamax.py --- pytorch-v1.5.0/torch/optim/adamax.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/optim/adamax.py 2022-06-21 12:00:33.599079513 +0800 ++++ pytorch-develop-150/torch/optim/adamax.py 2022-06-23 23:00:41.509734324 +0800 @@ -80,8 +80,8 @@ exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0) @@ -15899,7 +15870,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - def __init__(self, params: _params_t, lr: float=..., betas: Tuple[float, float]=..., eps: float=...) -> None: ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/random.py pytorch-develop-150/torch/random.py --- pytorch-v1.5.0/torch/random.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/random.py 2022-06-21 12:00:33.603079513 +0800 ++++ pytorch-develop-150/torch/random.py 2022-06-23 23:00:41.517734324 +0800 @@ -30,6 +30,10 @@ if not torch.cuda._is_in_bad_fork(): @@ -15924,7 +15895,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/serialization.py pytorch-develop-150/torch/serialization.py --- pytorch-v1.5.0/torch/serialization.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/serialization.py 2022-06-21 12:00:33.603079513 +0800 ++++ pytorch-develop-150/torch/serialization.py 2022-06-23 23:00:41.517734324 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
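The adamax.py hunk rewrites the infinity-norm state update: instead of an in-place torch.max with an out= argument, it stacks the decayed norm and the new gradient magnitude along a fresh dim and reduces over it. Both forms compute u_t = max(beta2 * u_{t-1}, |g_t| + eps); a quick equivalence check:

    import torch

    beta2, eps = 0.999, 1e-8
    exp_inf, grad = torch.rand(5), torch.randn(5)

    # Stacked form, as in the patched adamax.py
    stacked = torch.cat([(exp_inf * beta2).unsqueeze(0),
                         (grad.abs() + eps).unsqueeze(0)], 0)
    u_stacked = stacked.max(0).values

    # Direct form it replaces
    u_direct = torch.max(exp_inf * beta2, grad.abs() + eps)
    assert torch.allclose(u_stacked, u_direct)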
@@ -16008,7 +15979,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= def location_tag(storage): diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/storage.py pytorch-develop-150/torch/storage.py --- pytorch-v1.5.0/torch/storage.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/storage.py 2022-06-21 12:00:33.603079513 +0800 ++++ pytorch-develop-150/torch/storage.py 2022-06-23 23:00:41.517734324 +0800 @@ -7,6 +7,7 @@ class _StorageBase(object): @@ -16028,7 +15999,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= else: diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/tensor.py pytorch-develop-150/torch/tensor.py --- pytorch-v1.5.0/torch/tensor.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/tensor.py 2022-06-21 12:00:33.603079513 +0800 ++++ pytorch-develop-150/torch/tensor.py 2022-06-23 23:00:41.517734324 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -16090,7 +16061,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= def __reversed__(self): diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/_tensor_str.py pytorch-develop-150/torch/_tensor_str.py --- pytorch-v1.5.0/torch/_tensor_str.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/_tensor_str.py 2022-06-21 12:00:33.443079520 +0800 ++++ pytorch-develop-150/torch/_tensor_str.py 2022-06-23 23:00:41.241734336 +0800 @@ -1,7 +1,24 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -16177,7 +16148,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= has_default_dtype = self.dtype in (torch.get_default_dtype(), torch.int64, torch.bool) diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/testing/_internal/common_device_type.py pytorch-develop-150/torch/testing/_internal/common_device_type.py --- pytorch-v1.5.0/torch/testing/_internal/common_device_type.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/testing/_internal/common_device_type.py 2022-06-21 12:00:33.603079513 +0800 ++++ pytorch-develop-150/torch/testing/_internal/common_device_type.py 2022-06-23 23:00:41.517734324 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
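The _tensor_str.py changes plausibly amount to fetching device tensors back to the host before formatting, since printing reads elements one by one and per-element device round-trips would be prohibitively slow. The generic pattern, runnable on stock PyTorch:

    import torch

    def printable(t):
        # Copy off-host tensors to CPU once before formatting, the usual
        # pattern behind string conversion for accelerator tensors.
        return t.cpu() if t.device.type != "cpu" else t

    print(printable(torch.arange(6).reshape(2, 3)))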
@@ -16330,7 +16301,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/testing/_internal/common_utils.py pytorch-develop-150/torch/testing/_internal/common_utils.py --- pytorch-v1.5.0/torch/testing/_internal/common_utils.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/testing/_internal/common_utils.py 2022-06-21 12:00:33.607079513 +0800 ++++ pytorch-develop-150/torch/testing/_internal/common_utils.py 2022-06-23 23:00:41.521734324 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -16455,7 +16426,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= exact_dtype = self.exact_dtype diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/utils/data/dataloader.py pytorch-develop-150/torch/utils/data/dataloader.py --- pytorch-v1.5.0/torch/utils/data/dataloader.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/utils/data/dataloader.py 2022-06-21 12:00:33.611079512 +0800 ++++ pytorch-develop-150/torch/utils/data/dataloader.py 2022-06-23 23:00:41.529734323 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. @@ -16664,7 +16635,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - def __init__(self, sampler: Sampler[int], batch_size: int, drop_last: bool) -> None: ... diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/utils/data/_utils/pin_memory.py pytorch-develop-150/torch/utils/data/_utils/pin_memory.py --- pytorch-v1.5.0/torch/utils/data/_utils/pin_memory.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/utils/data/_utils/pin_memory.py 2022-06-21 12:00:33.611079512 +0800 ++++ pytorch-develop-150/torch/utils/data/_utils/pin_memory.py 2022-06-23 23:00:41.529734323 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. 
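The dataloader.py and pin_memory.py hunks extend pinned-host staging to the new device: batches are copied into page-locked memory on a background thread so the subsequent host-to-device transfer can run asynchronously. Standard usage, unchanged from the CUDA case:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    ds = TensorDataset(torch.randn(32, 4), torch.randint(0, 2, (32,)))
    # pin_memory=True stages batches in page-locked buffers; the patched
    # pin_memory.py is assumed to route them toward the NPU as well.
    loader = DataLoader(ds, batch_size=8, pin_memory=True)
    for xb, yb in loader:
        pass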
@@ -16725,7 +16696,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= - diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/utils/__init__.py pytorch-develop-150/torch/utils/__init__.py --- pytorch-v1.5.0/torch/utils/__init__.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/utils/__init__.py 2022-06-21 12:00:33.607079513 +0800 ++++ pytorch-develop-150/torch/utils/__init__.py 2022-06-23 23:00:41.525734324 +0800 @@ -1,6 +1,9 @@ from __future__ import absolute_import, division, print_function, unicode_literals @@ -16738,7 +16709,7 @@ diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude= def set_module(obj, mod): diff -Nur '--exclude=.git*' '--exclude=.jenkins' '--exclude=android' '--exclude=OWNERS' '--exclude=third_party' '--exclude=README*' -Nur pytorch-v1.5.0/torch/_utils.py pytorch-develop-150/torch/_utils.py --- pytorch-v1.5.0/torch/_utils.py 2021-04-10 18:39:32.000000000 +0800 -+++ pytorch-develop-150/torch/_utils.py 2022-06-21 12:00:33.443079520 +0800 ++++ pytorch-develop-150/torch/_utils.py 2022-06-23 23:00:41.245734336 +0800 @@ -1,3 +1,19 @@ +# Copyright (c) 2020 Huawei Technologies Co., Ltd +# Copyright (c) 2019, Facebook CORPORATION. diff --git a/pytorch1.5.0/src/aten/src/ATen/native/native_functions.yaml b/pytorch1.5.0/src/aten/src/ATen/native/native_functions.yaml index 5a5b27e8ab64a8f41452e5dc4d73a0d876fc1d75..e67b8bc06241503f4ed32abe69c11142d72a1110 100644 --- a/pytorch1.5.0/src/aten/src/ATen/native/native_functions.yaml +++ b/pytorch1.5.0/src/aten/src/ATen/native/native_functions.yaml @@ -2849,30 +2849,18 @@ - func: randint_like.low_dtype(Tensor self, int low, int high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor - func: randn(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - npu_dispatch: - NPU: randn_npu - func: randn.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor - npu_dispatch: - NPU: randn_npu - func: randn.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - npu_dispatch: - NPU: randn_npu - func: randn.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor device_guard: False - npu_dispatch: - NPU: randn_npu - func: randn.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) - npu_dispatch: - NPU: randn_out_npu - func: randn.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) - npu_dispatch: - NPU: randn_out_npu - func: randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor supports_named_tensor: True diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/AllKernelNpu.cpp b/pytorch1.5.0/src/aten/src/ATen/native/npu/AllKernelNpu.cpp index 10a99c06aad0b73e3b08f3b311fe9a92509a7d39..852df0e5bd1829bdca3ee5cdbd25fbf00bb06250 100644 --- a/pytorch1.5.0/src/aten/src/ATen/native/npu/AllKernelNpu.cpp +++ b/pytorch1.5.0/src/aten/src/ATen/native/npu/AllKernelNpu.cpp @@ -64,51 +64,69 @@ Tensor& all_out_npu( Tensor all_npu(const Tensor& self, int64_t dim, bool keepdim) { TORCH_CHECK(self.scalar_type() == ScalarType::Bool || self.scalar_type() == ScalarType::Byte, "all only supports torch.uint8 and torch.bool dtypes"); + Tensor selfCopy = self; + if(selfCopy.scalar_type() == ScalarType::Byte){ + selfCopy = selfCopy.npu_dtype_cast(ScalarType::Bool); + } if (self.numel() == 0) { - Tensor res = at::empty_with_format({}, self.options().dtype(kInt), CalcuOpUtil::get_tensor_npu_format(self)).fill_(1).to(ScalarType::Bool); + Tensor res = OpPreparation::ApplyTensorWithFormat( + {}, + self.options().dtype(kInt), + CalcuOpUtil::get_tensor_npu_format(self)).fill_(1).to(self.scalar_type()); return res; } // calculate the output size IntArrayRef dims(dim); - auto outputSize = reduce_ops_npu_output_size(self, dims, keepdim); + auto outputSize = reduce_ops_npu_output_size(selfCopy, dims, keepdim); // construct the output tensor of the NPU - Tensor result = at::empty_with_format( - outputSize, self.options(), CalcuOpUtil::get_tensor_npu_format(self)); + Tensor result = OpPreparation::ApplyTensorWithFormat( + outputSize, selfCopy.options(), CalcuOpUtil::get_tensor_npu_format(selfCopy)); // calculate the output result of the NPU - all_out_npu_nocheck(result, self, {dim}, keepdim); - + all_out_npu_nocheck(result, selfCopy, {dim}, keepdim); + if(self.scalar_type() == ScalarType::Byte){ + result = result.npu_dtype_cast(ScalarType::Byte); + } return result; } Tensor all_npu(const Tensor& self) { TORCH_CHECK(self.scalar_type() == ScalarType::Bool || self.scalar_type() == ScalarType::Byte, "all only supports torch.uint8 and torch.bool dtypes"); + Tensor selfCopy = self; + if(selfCopy.scalar_type() == ScalarType::Byte){ + selfCopy = selfCopy.npu_dtype_cast(ScalarType::Bool); + } + if (self.numel() == 0) { - Tensor res = at::empty_with_format( + Tensor res = OpPreparation::ApplyTensorWithFormat( {}, self.options().dtype(kInt), - CalcuOpUtil::get_tensor_npu_format(self)).fill_(1).to(ScalarType::Bool); + CalcuOpUtil::get_tensor_npu_format(self)).fill_(1).to(self.scalar_type()); return res; } // calculate the output size IntArrayRef dims; - auto outputSize = reduce_ops_npu_output_size(self, dims, false); + auto outputSize = reduce_ops_npu_output_size(selfCopy, dims, false); // construct the output tensor of the NPU - Tensor result = at::empty_with_format( - outputSize, self.options(), CalcuOpUtil::get_tensor_npu_format(self)); + Tensor result = OpPreparation::ApplyTensorWithFormat( + outputSize, selfCopy.options(), CalcuOpUtil::get_tensor_npu_format(selfCopy)); // calculate the output result of the NPU all_out_npu_nocheck( result, - self, - CalcuOpUtil::get_dimlist_for_tensor(self), + selfCopy, + CalcuOpUtil::get_dimlist_for_tensor(selfCopy), false); + if(self.scalar_type() == ScalarType::Byte){ + result = result.npu_dtype_cast(ScalarType::Byte); + } + return result; } diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/RandnKernelNpu.cpp b/pytorch1.5.0/src/aten/src/ATen/native/npu/RandnKernelNpu.cpp deleted file mode 100644 index 
7a9baf62da8f1b019eb6224a9763022c2381ca45..0000000000000000000000000000000000000000 --- a/pytorch1.5.0/src/aten/src/ATen/native/npu/RandnKernelNpu.cpp +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (c) 2020 Huawei Technologies Co., Ltd -// Copyright (c) 2019, Facebook CORPORATION. -// All rights reserved. -// -// Licensed under the BSD 3-Clause License (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://opensource.org/licenses/BSD-3-Clause -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "ATen/native/npu/utils/OpAdapter.h" -#include "ATen/native/npu/utils/CalcuOpUtil.h" -#include "ATen/npu/NPUGenerator.h" -#include "ATen/Utils.h" - -namespace at { -namespace native { -using namespace at::native::npu; - -Tensor& randn_out_npu_nocheck(Tensor& result, IntArrayRef size, int64_t seed, int64_t seed2) { - OpCommand cmd; - cmd.Name("RandomStandardNormal") - .Input(size) - .Output(result) - .Attr("dtype", result.scalar_type()) - .Attr("seed", seed) - .Attr("seed2", seed2) - .Run(); - return result; -} - -Tensor& randn_out_npu(Tensor& result, IntArrayRef size) { - const auto gen = at::npu::detail::getDefaultNPUGenerator(); - auto pair = gen->philox_engine_inputs(10); - const int64_t seed = pair.first; - const int64_t seed2 = pair.second; - OpPreparation::CheckOut( - {}, - result, - result, - size); - randn_out_npu_nocheck(result, size, seed, seed2); - return result; -} - -Tensor& randn_out_npu(Tensor& result, IntArrayRef size, Generator* gen_) { - auto gen = get_generator_or_default(gen_, at::npu::detail::getDefaultNPUGenerator()); - auto pair = gen->philox_engine_inputs(10); - const int64_t seed = pair.first; - const int64_t seed2 = pair.second; - OpPreparation::CheckOut( - {}, - result, - result, - size); - randn_out_npu_nocheck(result, size, seed, seed2); - return result; -} - -Tensor randn_npu(IntArrayRef size, const TensorOptions& options) { - const auto gen = at::npu::detail::getDefaultNPUGenerator(); - auto pair = gen->philox_engine_inputs(10); - const int64_t seed = pair.first; - const int64_t seed2 = pair.second; - Tensor result = OpPreparation::ApplyTensorWithFormat(size, options, ACL_FORMAT_ND); - randn_out_npu_nocheck(result, size, seed, seed2); - return result; -} - -Tensor randn_npu(IntArrayRef size, Generator* gen_, const TensorOptions& options) { - auto gen = get_generator_or_default(gen_, at::npu::detail::getDefaultNPUGenerator()); - auto pair = gen->philox_engine_inputs(10); - const int64_t seed = pair.first; - const int64_t seed2 = pair.second; - Tensor result = OpPreparation::ApplyTensorWithFormat(size, options, ACL_FORMAT_ND); - randn_out_npu_nocheck(result, size, seed, seed2); - return result; -} - -Tensor randn_npu(IntArrayRef size, optional names, const TensorOptions& options) { - return randn_npu(size, options); -} - -Tensor randn_npu(IntArrayRef size, Generator* gen_, optional names, const TensorOptions& options) { - return randn_npu(size, gen_, options); -} - -} // namespace native -} // namespace at \ No newline at end of file diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.cpp b/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.cpp index 
4e56245054c08c8de69dd5ece322e3554cf07293..5d2afb291c9e6c29ced234f267b4ac804824135e 100644
--- a/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.cpp
+++ b/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.cpp
@@ -73,10 +73,6 @@ void OpAttrMaker::Set(aclopAttr* attr, const string& name, Scalar value) {
   aclopSetAttrFloat(attr, name.c_str(), val);
 }
 
-void OpAttrMaker::Set(aclopAttr* attr, const string& name, ScalarType value) {
-  aclDataType val = CalcuOpUtil::convert_to_acl_data_type(value);
-  aclopSetAttrDataType(attr, name.c_str(), val);
-}
 
 void OpAttrMaker::Set(
     aclopAttr* attr,
diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.h b/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.h
index 609fa79b02b0d9eb923caabb45d7fb03bf801387..886178ff308aee7c634b310df1c813d6930571be 100644
--- a/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.h
+++ b/pytorch1.5.0/src/aten/src/ATen/native/npu/frame/OpParamMaker.h
@@ -40,7 +40,6 @@ class OpAttrMaker {
   static void Set(aclopAttr* attr, const string& name, at::ArrayRef value);
   static void Set(aclopAttr* attr, const string& name, at::ArrayRef value);
   static void Set(aclopAttr* attr, const string& name, Scalar value);
-  static void Set(aclopAttr* attr, const string& name, ScalarType value);
   static void Set(
       aclopAttr* attr,
       const string& name,
diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/construct/GraphConstructor.h b/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/construct/GraphConstructor.h
index ad4d9b77b84af58e0288a896d37e900b89ddbf23..97ae9d00e4273e53ea30b2cc559c74b6b7824cee 100644
--- a/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/construct/GraphConstructor.h
+++ b/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/construct/GraphConstructor.h
@@ -18,7 +18,7 @@
 #include
 #include
-#include
+#include
 #include
 
 namespace at {
@@ -104,16 +104,6 @@ public:
         NodeExtInfoType::ATTR_TYPE_FLOAT, std::make_pair(attr_name, val));
     node->UpdateNodeHash(val);
   }
-
-  static void SetAttr(
-      const string& attr_name,
-      const c10::ScalarType& value,
-      NodePtr node) {
-    ge::DataType val = ATenGeBridge::GetGeDType(value);
-    node->AddExtInfo(
-        NodeExtInfoType::ATTR_TYPE_DATATYPE, std::make_pair(attr_name, val));
-    node->UpdateNodeHash(val);
-  }
 };
 
 class GraphCommandImpl {
diff --git a/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/util/ATenGeBridge.cpp b/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/util/ATenGeBridge.cpp
index d48ae9bf029bf6feae3cf53e907579966e7ed925..ce249b50ee2a6c8234156eedadefdb37bec4b0e6 100644
--- a/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/util/ATenGeBridge.cpp
+++ b/pytorch1.5.0/src/aten/src/ATen/native/npu/graph/util/ATenGeBridge.cpp
@@ -212,9 +212,6 @@ void ATenGeBridge::AddNodeExtInfoIntoGeOp(
     case NodeExtInfoType::ATTR_TYPE_LIST_BOOL:
      SetGeOpAttr>>(info.second, ge_op);
       break;
-    case NodeExtInfoType::ATTR_TYPE_DATATYPE:
-      SetGeOpAttr>(info.second, ge_op);
-      break;
     case NodeExtInfoType::ATTR_TYPE_LIST_LONG:
       SetGeOpAttr>>(info.second, ge_op);
       break;
diff --git a/pytorch1.5.0/src/c10/npu/NPUGraph.h b/pytorch1.5.0/src/c10/npu/NPUGraph.h
index 94d388203403b1e644b8d6bd44a29a5ddc98495d..44054d9efca7b0d2031149e2e5e00d1726544e26 100644
--- a/pytorch1.5.0/src/c10/npu/NPUGraph.h
+++ b/pytorch1.5.0/src/c10/npu/NPUGraph.h
@@ -64,7 +64,6 @@ enum class NodeExtInfoType : uint8_t {
   ATTR_TYPE_LONG,
   ATTR_TYPE_FLOAT,
   ATTR_TYPE_STRING,
-  ATTR_TYPE_DATATYPE,
   ATTR_TYPE_LIST_BOOL,
   ATTR_TYPE_LIST_LONG,
   ATTR_TYPE_LIST_FLOAT,
diff --git a/pytorch1.5.0/src/third_party/acl/inc/acl/acl_op.h b/pytorch1.5.0/src/third_party/acl/inc/acl/acl_op.h
index 6c1d196eba7cc86b8dd3c922fba173be8facdd8b..10f87e650d4cbcaf0ea69c12d3ecbbb3587190a6 100644
--- a/pytorch1.5.0/src/third_party/acl/inc/acl/acl_op.h
+++ b/pytorch1.5.0/src/third_party/acl/inc/acl/acl_op.h
@@ -216,23 +216,6 @@ ACL_FUNC_VISIBILITY aclError aclopSetAttrListListInt(
     const int* numValues,
     const int64_t* const values[]);
 
-/**
- * @ingroup AscendCL
- * @brief set an attribute. the type of the attribute is DataType
- *
- * @param attr [IN] pointer to the instance of aclopAttr
- * @param attrName [IN] attribute name
- * @param numLists [IN] number of lists
- * @param numValues [IN] pointer to number of values of each list
- * @param values [IN] pointer to values
- * @retval ACL_ERROR_NONE The function is successfully executed.
- * @retval OtherValues Failure
- */
-ACL_FUNC_VISIBILITY aclError aclopSetAttrDataType(
-    aclopAttr* attr,
-    const char* attrName,
-    aclDataType values);
-
 /**
  * @ingroup AscendCL
  * @brief Load and execute the specified operator asynchronously
diff --git a/pytorch1.5.0/src/third_party/acl/libs/acl.cpp b/pytorch1.5.0/src/third_party/acl/libs/acl.cpp
index 149428a431b87d6505de0a77f0160a9c0446e652..1570a34134bec10c33f260aaf44f62dcc7da07cb 100644
--- a/pytorch1.5.0/src/third_party/acl/libs/acl.cpp
+++ b/pytorch1.5.0/src/third_party/acl/libs/acl.cpp
@@ -64,7 +64,6 @@ aclError aclopSetAttrString(aclopAttr *attr, const char *attrName, const char *a
 aclError aclopSetAttrListInt(aclopAttr *attr, const char *attrName, int numValues, const int64_t *values){return 0;}
 aclError aclopSetAttrListFloat(aclopAttr *attr, const char *attrName, int numValues, const float *values){return 0;}
 aclError aclopSetAttrListBool(aclopAttr *attr, const char *attrName, int numValues, const uint8_t *values){return 0;}
-aclError aclopSetAttrDataType(aclopAttr *attr, const char *attrName, aclDataType values){return 0;}
 // Tensor相关
 aclTensorDesc *aclCreateTensorDesc(aclDataType dataType, int numDims, const int64_t *dims, aclFormat format){return NULL;}
 void aclDestroyTensorDesc(const aclTensorDesc *desc){return;}
diff --git a/pytorch1.5.0/src/third_party/acl/libs/operator.cpp b/pytorch1.5.0/src/third_party/acl/libs/operator.cpp
index 8bbc172333ba2742f49043b35ab36a2360fcb541..ca38a46829156ef794bf2d51c88d1e8732c6b5f9 100644
--- a/pytorch1.5.0/src/third_party/acl/libs/operator.cpp
+++ b/pytorch1.5.0/src/third_party/acl/libs/operator.cpp
@@ -156,10 +156,6 @@ Operator& Operator::SetAttr(const char* name, const Tensor& attr_value) {
   return op;
 }
 
-Operator& Operator::SetAttr(const char* name, const ge::DataType& attr_value) {
-  return op;
-}
-
 Operator& Operator::AddControlInput(const Operator& src_oprt) {
   return op;
 }
diff --git a/pytorch1.5.0/test/test_npu/test_network_ops/test_all.py b/pytorch1.5.0/test/test_npu/test_network_ops/test_all.py
index 12cf8284caf24bf24f2ae090be190166117c009b..a59c0139bdcb7aedb067d01829758c0ec46d3820 100755
--- a/pytorch1.5.0/test/test_npu/test_network_ops/test_all.py
+++ b/pytorch1.5.0/test/test_npu/test_network_ops/test_all.py
@@ -19,12 +19,6 @@
 from util_test import create_common_tensor
 
 class TestAll(TestCase):
-    def create_bool_tensor(self, shape, minValue, maxValue):
-        input1 = np.random.uniform(minValue, maxValue, shape)
-        input1 = input1 > 0.5
-        cpu_input = torch.from_numpy(input1)
-        npu_input = torch.from_numpy(input1).to("npu")
-        return cpu_input, npu_input
 
     def cpu_op_exec(self, input):
         output = input.all()
@@ -37,16 +31,36 @@ class TestAll(TestCase):
         output = output.numpy()
         return output
 
-    def test_all_shape_format(self, device):
+    def test_all_shape_format_bool(self, device):
+        dtype_list = [np.float32]
+        format_list = [0, 2]
         shape_list = [[1024], [32, 1024], [32, 8, 1024], [128, 32, 8, 1024], [2, 0, 2]]
-        for item in shape_list:
-            cpu_input, npu_input = self.create_bool_tensor(item, 0, 1)
+        shape_format = [
+            [d, i, j] for d in dtype_list for i in format_list for j in shape_list
+        ]
+        for item in shape_format:
+            cpu_input, npu_input = create_common_tensor(item, 0, 1)
+            cpu_input = cpu_input > 0.5
+            npu_input = npu_input > 0.5
             cpu_output = self.cpu_op_exec(cpu_input)
             npu_output = self.npu_op_exec(npu_input)
             self.assertRtolEqual(
                 cpu_output.astype(
                     np.int32), npu_output.astype(
                     np.int32))
+
+    def test_shape_format_uint8(self, device):
+        dtype_list = [np.uint8]
+        format_list = [0, 2]
+        shape_list = [[1024], [32, 1024], [32, 8, 1024], [128, 32, 8, 1024], [2, 0, 2]]
+        shape_format = [
+            [d, i, j] for d in dtype_list for i in format_list for j in shape_list
+        ]
+        for item in shape_format:
+            cpu_input, npu_input = create_common_tensor(item, 0, 255)
+            cpu_output = self.cpu_op_exec(cpu_input)
+            npu_output = self.npu_op_exec(npu_input)
+            self.assertRtolEqual(cpu_output, npu_output)
 
     def cpu_op_exec1(self, input, dim):
         output = input.all(dim=dim)
@@ -72,9 +86,16 @@ class TestAll(TestCase):
         return output0, output1
 
     def test_alld_shape_format(self, device):
+        dtype_list = [np.float32]
+        format_list = [0, 2]
         shape_list = [[1024], [32, 1024], [32, 8, 1024], [128, 32, 8, 1024]]
-        for item in shape_list:
-            cpu_input, npu_input = self.create_bool_tensor(item, 0, 1)
+        shape_format = [
+            [d, i, j] for d in dtype_list for i in format_list for j in shape_list
+        ]
+        for item in shape_format:
+            cpu_input, npu_input = create_common_tensor(item, 0, 1)
+            cpu_input = cpu_input > 0.5
+            npu_input = npu_input > 0.5
             cpu_output = self.cpu_op_exec1(cpu_input, 0)
             npu_output = self.npu_op_exec1(npu_input, 0)
             npu_out0, npu_out1 = self.npu_op_out_exec1(npu_input, 0)
diff --git a/pytorch1.5.0/test/test_npu/test_network_ops/test_randn.py b/pytorch1.5.0/test/test_npu/test_network_ops/test_randn.py
deleted file mode 100644
index c33e8d50e59407065d342196b2ea0c94d2d35e61..0000000000000000000000000000000000000000
--- a/pytorch1.5.0/test/test_npu/test_network_ops/test_randn.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) 2020 Huawei Technologies Co., Ltd
-# Copyright (c) 2019, Facebook CORPORATION.
-# All rights reserved.
-#
-# Licensed under the BSD 3-Clause License (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://opensource.org/licenses/BSD-3-Clause
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import torch
-import numpy as np
-from common_utils import TestCase, run_tests
-from common_device_type import dtypes, instantiate_device_type_tests
-from util_test import create_common_tensor
-
-class TestRandn(TestCase):
-    @dtypes(torch.float64, torch.float, torch.float16)
-    def test_randn(self, device, dtype):
-        torch.manual_seed(123456)
-        res1 = torch.randn((12, 345), dtype=dtype, device='npu')
-        res2 = torch.tensor([], dtype=dtype).npu()
-        torch.manual_seed(123456)
-        torch.randn((12, 345), device='npu', out=res2)
-        self.assertRtolEqual(res1.cpu(), res2.cpu())
-
-
-instantiate_device_type_tests(TestRandn, globals(), except_for='cpu')
-if __name__ == "__main__":
-    run_tests()
\ No newline at end of file