diff --git a/0001-Add-arm-source-file-into-aws-checksums.patch b/0001-Add-arm-source-file-into-aws-checksums.patch
deleted file mode 100644
index 3ac6060f60f6c67d25642265813a6952ace8d99a..0000000000000000000000000000000000000000
--- a/0001-Add-arm-source-file-into-aws-checksums.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From c6f06c2f1b17508670601894378bb89b71fbe37e Mon Sep 17 00:00:00 2001
-From: bzhaoopenstack
-Date: Tue, 23 Jun 2020 12:04:12 +0800
-Subject: [PATCH] Add arm source file into aws-checksums
-
-According to https://github.com/tensorflow/tensorflow/issues/40463#issuecomment-647640030 , seem the aws libs need to add the arm related libs during build tensorflow package.
----
- third_party/aws/aws-checksums.bazel | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/third_party/aws/aws-checksums.bazel b/third_party/aws/aws-checksums.bazel
-index 759cb2e6fc..f620a96d2c 100644
---- a/third_party/aws/aws-checksums.bazel
-+++ b/third_party/aws/aws-checksums.bazel
-@@ -16,6 +16,7 @@ cc_library(
-         "//conditions:default": [],
-     }) + glob([
-         "source/intel/*.c",
-+        "source/arm/*.c",
-         "source/*.c",
-     ]),
-     hdrs = glob([
---
-2.23.0
-
diff --git a/CVE-2020-15265.patch b/CVE-2020-15265.patch
deleted file mode 100644
index b82175f7028ecc275215e4e58c38910b7286ee0f..0000000000000000000000000000000000000000
--- a/CVE-2020-15265.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-From eccb7ec454e6617738554a255d77f08e60ee0808 Mon Sep 17 00:00:00 2001
-From: Mihai Maruseac
-Date: Mon, 19 Oct 2020 17:56:36 -0700
-Subject: [PATCH] Prevent segfault in `quantize_and_dequantize`
-
----
- .../core/kernels/quantize_and_dequantize_op.cc | 4 ++++
- tensorflow/python/kernel_tests/array_ops_test.py | 14 ++++++++++++++
- 2 files changed, 18 insertions(+)
-
-diff --git a/tensorflow/core/kernels/quantize_and_dequantize_op.cc b/tensorflow/core/kernels/quantize_and_dequantize_op.cc
-index 8f71d09c..fda54208 100644
---- a/tensorflow/core/kernels/quantize_and_dequantize_op.cc
-+++ b/tensorflow/core/kernels/quantize_and_dequantize_op.cc
-@@ -71,6 +71,10 @@ class QuantizeAndDequantizeV2Op : public OpKernel {
-
-   void Compute(OpKernelContext* ctx) override {
-     const Tensor& input = ctx->input(0);
-+    OP_REQUIRES(
-+        ctx, (axis_ == -1 || axis_ < input.shape().dims()),
-+        errors::InvalidArgument("Shape must be at least rank", axis_ + 1,
-+                                " but is rank ", input.shape().dims()));
-     const int depth = (axis_ == -1) ? 1 : input.dim_size(axis_);
-     Tensor input_min_tensor;
-     Tensor input_max_tensor;
-diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
-index dbff3a1b..c498ff62 100644
---- a/tensorflow/python/kernel_tests/array_ops_test.py
-+++ b/tensorflow/python/kernel_tests/array_ops_test.py
-@@ -1541,6 +1541,20 @@ class QuantizeAndDequantizeTest(test_util.TensorFlowTestCase):
-                 axis=(axis - 4)))
-       self.assertAllClose(fake_quantized, expected)
-
-+  def testBadAxis(self):
-+    input_tensor = [2.5, 2.5]
-+    input_min = [0, 0]
-+    input_max = [1, 1]
-+    error_message_pattern = "Shape must be at least rank 11 but is rank 1"
-+    # TODO(b/171260356): Eager mode and graph mode throw different error types
-+    error = errors.InvalidArgumentError if context.executing_eagerly(
-+    ) else ValueError
-+    with self.assertRaisesRegex(error, error_message_pattern): self.evaluate(
-+        array_ops.quantize_and_dequantize_v2(
-+            input=input_tensor,
-+            input_min=input_min,
-+            input_max=input_max,
-+            axis=10))
-
- @test_util.run_all_in_graph_and_eager_modes
- class SortedSearchTest(test_util.TensorFlowTestCase):
---
-2.23.0
-
diff --git a/CVE-2020-15266.patch b/CVE-2020-15266.patch
deleted file mode 100644
index 25c42ffb3098dabdbf6f3ce4ffcdb253829b34da..0000000000000000000000000000000000000000
--- a/CVE-2020-15266.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From 3ade2efec2e90c6237de32a19680caaa3ebc2845 Mon Sep 17 00:00:00 2001
-From: Yong Tang
-Date: Sat, 8 Aug 2020 00:47:35 +0000
-Subject: [PATCH] Fix segmentation fault in tf.image.crop_and_resize when boxes
-
----
- tensorflow/core/kernels/crop_and_resize_op.cc | 13 +++++++++++++
- tensorflow/python/ops/image_ops_test.py | 12 ++++++++++++
- 2 files changed, 25 insertions(+)
-
-diff --git a/tensorflow/core/kernels/crop_and_resize_op.cc b/tensorflow/core/kernels/crop_and_resize_op.cc
-index 4ecd3bc0..e14f4e43 100644
---- a/tensorflow/core/kernels/crop_and_resize_op.cc
-+++ b/tensorflow/core/kernels/crop_and_resize_op.cc
-@@ -71,6 +71,18 @@ static inline Status ParseAndCheckBoxSizes(const Tensor& boxes,
-   if (boxes.dim_size(1) != 4) {
-     return errors::InvalidArgument("boxes must have 4 columns");
-   }
-+  for (int64 i = 0; i < *num_boxes; i++) {
-+    for (int64 j = 0; j < 4; j++) {
-+      if (!isfinite(boxes.tensor()(i, j))) {
-+        return errors::InvalidArgument(
-+            "boxes values must be finite, received boxes[", i, "]: ",
-+            boxes.tensor()(i, 0), ", ",
-+            boxes.tensor()(i, 1), ", ",
-+            boxes.tensor()(i, 2), ", ",
-+            boxes.tensor()(i, 3));
-+      }
-+    }
-+  }
-   // The shape of 'box_index' is [num_boxes].
-   if (box_index.dims() != 1) {
-     return errors::InvalidArgument("box_index must be 1-D",
-@@ -256,6 +268,7 @@ struct CropAndResize {
-         continue;
-       }
-       if (method_name == "bilinear") {
-+
-         const int top_y_index = floorf(in_y);
-         const int bottom_y_index = ceilf(in_y);
-         const float y_lerp = in_y - top_y_index;
-diff --git a/tensorflow/python/ops/image_ops_test.py b/tensorflow/python/ops/image_ops_test.py
-index 0206ccf9..0630b6fc 100644
---- a/tensorflow/python/ops/image_ops_test.py
-+++ b/tensorflow/python/ops/image_ops_test.py
-@@ -5275,6 +5275,18 @@ class DecodeImageTest(test_util.TensorFlowTestCase):
-     self.assertAllEqual(list(image0.shape), [40, 20, 3])
-     self.assertAllEqual(image0, image1)
-
-+  def testImageCropAndResize(self):
-+    # Test case for GitHub issue 42129
-+    message = "boxes values must be finite"
-+    with self.assertRaisesRegex(
-+        (errors.InvalidArgumentError, ValueError), message):
-+      v = image_ops_impl.crop_and_resize_v2(
-+          image=array_ops.zeros((2, 1, 1, 1)),
-+          boxes=[[1.0e+40, 0, 0, 0]],
-+          box_indices=[1],
-+          crop_size=[1, 1])
-+      self.evaluate(v)
-+
-
- if __name__ == "__main__":
-   googletest.main()
---
-2.23.0
-
diff --git a/CVE-2020-26266.patch b/CVE-2020-26266.patch
deleted file mode 100644
index 1cd5aaba1d8c672827cfd680755665f298710d17..0000000000000000000000000000000000000000
--- a/CVE-2020-26266.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2 Mon Sep 17 00:00:00 2001
-From: Mihai Maruseac
-Date: Tue, 24 Nov 2020 11:40:42 -0800
-Subject: [PATCH] Default initialize fixed point Eigen types.
-
-In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN.
-
-PiperOrigin-RevId: 344101137
-Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
----
- .../Eigen/CXX11/src/FixedPoint/FixedPointTypes.h | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h
-index ff359cedced96..fd35360da2820 100644
---- a/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h
-+++ b/third_party/eigen3/unsupported/Eigen/CXX11/src/FixedPoint/FixedPointTypes.h
-@@ -49,7 +49,7 @@ struct scalar_product_traits {
- // the compiler from silently type cast the mantissa into a bigger or a smaller
- // representation.
- struct QInt8 { -- QInt8() {} -+ QInt8() : value(0) {} - QInt8(const int8_t v) : value(v) {} - QInt8(const QInt32 v); - -@@ -59,7 +59,7 @@ struct QInt8 { - }; - - struct QUInt8 { -- QUInt8() {} -+ QUInt8() : value(0) {} - QUInt8(const uint8_t v) : value(v) {} - QUInt8(const QInt32 v); - -@@ -69,7 +69,7 @@ struct QUInt8 { - }; - - struct QInt16 { -- QInt16() {} -+ QInt16() : value(0) {} - QInt16(const int16_t v) : value(v) {} - QInt16(const QInt32 v); - operator int() const { return static_cast(value); } -@@ -78,7 +78,7 @@ struct QInt16 { - }; - - struct QUInt16 { -- QUInt16() {} -+ QUInt16() : value(0) {} - QUInt16(const uint16_t v) : value(v) {} - QUInt16(const QInt32 v); - operator int() const { return static_cast(value); } -@@ -87,7 +87,7 @@ struct QUInt16 { - }; - - struct QInt32 { -- QInt32() {} -+ QInt32() : value(0) {} - QInt32(const int8_t v) : value(v) {} - QInt32(const int32_t v) : value(v) {} - QInt32(const uint32_t v) : value(static_cast(v)) {} diff --git a/CVE-2020-26267-1.patch b/CVE-2020-26267-1.patch deleted file mode 100644 index 1f4a16a041364dd9fe906c738e50ca9b33ae1ff3..0000000000000000000000000000000000000000 --- a/CVE-2020-26267-1.patch +++ /dev/null @@ -1,252 +0,0 @@ -From 1a11d01c1fdd6683e9aa210dccde81de127dbf3e Mon Sep 17 00:00:00 2001 -From: Kaixi Hou -Date: Mon, 14 Sep 2020 15:52:22 -0700 -Subject: [PATCH 1/1] support reduce ops for 5d tensors in layout optimizer - ---- - .../generic_layout_optimizer_transposer.cc | 27 +++++++++- - tensorflow/core/kernels/data_format_ops.cc | 10 ++-- - tensorflow/core/kernels/data_format_ops.h | 53 ++++++++++++++----- - .../python/grappler/layout_optimizer_test.py | 39 ++++++++++++++ - tensorflow/python/ops/nn_test.py | 27 ++++++++++ - 5 files changed, 136 insertions(+), 20 deletions(-) - -diff --git a/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc b/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc -index ab7d8fcd..fbbeffc7 100644 ---- a/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc -+++ b/tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.cc -@@ -1283,11 +1283,31 @@ bool ReduceTransposer::IsReduceAxisSupported( - Status ReduceTransposer::TransposeNode(TransposeContext* context, - utils::MutableNodeView* node) { - DCHECK(IsReduceOp(*node->node())); -- if (!ShouldProcess(*context, *node) || !IsFaninPortRankN(*node, 0, 4) || -+ const auto* output_shape_attr = node->GetAttr(kAttrOutputShape); -+ const auto& shape = output_shape_attr->list().shape(0); -+ const int rank = shape.dim_size(); -+ std::string src_format = context->src_format; -+ std::string dst_format = context->dst_format; -+ // Update the format from 4D to 5D layout if necessary. -+ if (rank == 5) { -+ std::string src_format_3d = src_format == "NHWC" ? "NDHWC" : "NCDHW"; -+ std::string dst_format_3d = dst_format == "NHWC" ? "NDHWC" : "NCDHW"; -+ context->AssignDeviceAndDataFormats(context->target_device, src_format_3d, -+ dst_format_3d); -+ } -+ if (!ShouldProcess(*context, *node) || !IsFaninPortRankN(*node, 0, rank) || - !IsReduceAxisSupported(*context, *node) || - !IsAfterDstToSrcTransform(*context, *node)) { -+ // Change back to the original layout due to early exit. 
-+ if (rank == 5) { -+ context->AssignDeviceAndDataFormats(context->target_device, src_format, -+ dst_format); -+ } - return Status::OK(); - } -+ VLOG(3) << "GenericLayoutOptimizer: transforming node '" << node->GetName() -+ << "' with op '" << node->GetOp() << "' from data format '" -+ << context->src_format << "' to '" << context->dst_format << "'"; - TF_RETURN_IF_ERROR(UpdateFaninEdgesWithOp(context, {0}, node, kOpTranspose)); - TF_RETURN_IF_ERROR( - UpdateFaninEdgesWithOp(context, {1}, node, kOpDataFormatDimMap)); -@@ -1295,6 +1315,11 @@ Status ReduceTransposer::TransposeNode(TransposeContext* context, - TF_RETURN_IF_ERROR( - UpdateFanoutEdgesWithOp(context, {0}, node, kOpTranspose)); - } -+ // Change back the format from 5D to 4D layout. -+ if (rank == 5) { -+ context->AssignDeviceAndDataFormats(context->target_device, src_format, -+ dst_format); -+ } - return context->graph_view->GetMutationBuilder()->Apply(); - } - -diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc -index 181aa1b8..e9c71f17 100644 ---- a/tensorflow/core/kernels/data_format_ops.cc -+++ b/tensorflow/core/kernels/data_format_ops.cc -@@ -37,14 +37,14 @@ class DataFormatDimMapOp : public OpKernel { - OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); - string dst_format; - OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); -- OP_REQUIRES(context, src_format.size() == 4, -+ OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, - errors::InvalidArgument(strings::StrCat( -- "Source format must of length 4, received src_format = ", -- src_format))); -+ "Source format must of length 4 or 5, received " -+ "src_format = ", src_format))); - OP_REQUIRES( -- context, dst_format.size() == 4, -+ context, dst_format.size() == 4 || dst_format.size() == 5, - errors::InvalidArgument(strings::StrCat( -- "Destination format must of length 4, received dst_format = ", -+ "Destination format must of length 4 or 5, received dst_format = ", - dst_format))); - dst_idx_ = Tensor(DT_INT32, {static_cast(src_format.size())}); - for (int i = 0; i < src_format.size(); ++i) { -diff --git a/tensorflow/core/kernels/data_format_ops.h b/tensorflow/core/kernels/data_format_ops.h -index bc416fa7..89b54901 100644 ---- a/tensorflow/core/kernels/data_format_ops.h -+++ b/tensorflow/core/kernels/data_format_ops.h -@@ -28,24 +28,49 @@ template - struct DataFormatDimMap { - void operator()(const Device& d, typename TTypes::ConstFlat x, - typename TTypes::Flat y, const TTypes::Vec dst) { -- auto zero = x.constant(0); -- auto one = x.constant(1); -- auto two = x.constant(2); -+ if (dst.size() == 4) { -+ auto zero = x.constant(0); -+ auto one = x.constant(1); -+ auto two = x.constant(2); - -- auto f_zero = x.constant(dst(0)); -- auto f_one = x.constant(dst(1)); -- auto f_two = x.constant(dst(2)); -- auto f_three = x.constant(dst(3)); -+ auto f_zero = x.constant(dst(0)); -+ auto f_one = x.constant(dst(1)); -+ auto f_two = x.constant(dst(2)); -+ auto f_three = x.constant(dst(3)); - -- auto four = x.constant(4); -- auto x_mod = (x + four) % 4; -+ auto four = x.constant(4); -+ auto x_mod = (x + four) % 4; - -- auto is_zero = (x_mod == zero); -- auto is_one = (x_mod == one); -- auto is_two = (x_mod == two); -+ auto is_zero = (x_mod == zero); -+ auto is_one = (x_mod == one); -+ auto is_two = (x_mod == two); - -- y.device(d) = is_zero.select( -- f_zero, is_one.select(f_one, is_two.select(f_two, f_three))); -+ y.device(d) = is_zero.select( -+ f_zero, is_one.select(f_one, 
is_two.select(f_two, f_three))); -+ } else { -+ auto zero = x.constant(0); -+ auto one = x.constant(1); -+ auto two = x.constant(2); -+ auto three = x.constant(3); -+ -+ auto f_zero = x.constant(dst(0)); -+ auto f_one = x.constant(dst(1)); -+ auto f_two = x.constant(dst(2)); -+ auto f_three = x.constant(dst(3)); -+ auto f_four = x.constant(dst(4)); -+ -+ auto five = x.constant(5); -+ auto x_mod = (x + five) % 5; -+ -+ auto is_zero = (x_mod == zero); -+ auto is_one = (x_mod == one); -+ auto is_two = (x_mod == two); -+ auto is_three = (x_mod == three); -+ -+ y.device(d) = is_zero.select( -+ f_zero, is_one.select(f_one, is_two.select(f_two, -+ is_three.select(f_three, f_four)))); -+ } - } - }; - -diff --git a/tensorflow/python/grappler/layout_optimizer_test.py b/tensorflow/python/grappler/layout_optimizer_test.py -index 10f86980..f90da7ed 100644 ---- a/tensorflow/python/grappler/layout_optimizer_test.py -+++ b/tensorflow/python/grappler/layout_optimizer_test.py -@@ -215,6 +215,9 @@ class LayoutOptimizerTest(test.TestCase): - def _assert_map_nhwc_to_nchw(self, name, nodes): - self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes) - -+ def _assert_map_ndhwc_to_ncdhw(self, name, nodes): -+ self.assertIn(name + '-DataFormatDimMapNDHWCToNCDHW-LayoutOptimizer', nodes) -+ - def _assert_vec_nchw_to_nhwc(self, name, nodes): - self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes) - -@@ -286,6 +289,42 @@ class LayoutOptimizerTest(test.TestCase): - - self.assertAllClose(output_val_ref, output_val, atol=1e-3) - -+ @test_util.deprecated_graph_mode_only -+ def testReduceOpsFor5DTensors(self): -+ if test.is_gpu_available(cuda_only=True): -+ random_seed.set_random_seed(0) -+ x = random_ops.truncated_normal([1, 4, 2, 3, 3], seed=0) -+ w = random_ops.truncated_normal([2, 2, 2, 3, 3], seed=0) -+ gamma = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0) -+ beta = random_ops.truncated_normal([1, 1, 1, 1, 3], seed=0) -+ conv3d = gen_nn_ops.conv3d(x, w, [1, 1, 1, 1, 1], 'SAME') -+ y = math_ops.reduce_mean(conv3d, [0, 1, 2, 3], keepdims=True) -+ output = array_ops.identity(y) -+ -+ with session.Session(config=_get_config(False)) as sess: -+ output_val_ref = sess.run(output) -+ -+ with session.Session(config=_get_config()) as sess: -+ metadata = config_pb2.RunMetadata() -+ output_val = sess.run(output, run_metadata=metadata) -+ -+ nodes = [] -+ num_transposes = 0 -+ for node in metadata.cost_graph.node: -+ if _is_transpose(node.name): -+ num_transposes += 1 -+ nodes.append(node.name) -+ print(node.name) -+ -+ # The reduce op Mean needs to dim map the input reduce index to NCDHW. -+ # Then, the output needs to be tranposed back to NDHWC. 
-+ expected_num_transposes = 2 -+ self.assertEqual(expected_num_transposes, num_transposes) -+ self._assert_trans_ndhwc_to_ncdhw('Conv3D-0', nodes) -+ self._assert_map_ndhwc_to_ncdhw('Mean-1', nodes) -+ self._assert_trans_ncdhw_to_ndhwc('Mean-0-0', nodes) -+ self.assertAllClose(output_val_ref, output_val, atol=1e-3) -+ - @test_util.deprecated_graph_mode_only - def testSplitWithNonConstAxis(self): - if test.is_gpu_available(cuda_only=True): -diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py -index bfe11b63..55d11a35 100644 ---- a/tensorflow/python/ops/nn_test.py -+++ b/tensorflow/python/ops/nn_test.py -@@ -1207,6 +1207,33 @@ class DataFormatDimMapTest(test_lib.TestCase): - y_val = self.evaluate(y) - self.assertAllEqual(y_val, y_val_expected) - -+ def testNDHWCtoNCDHW(self): -+ x_val = [1, -4, -3, -2] -+ y_val_expected = [2, 2, 3, 4] -+ x = constant_op.constant(x_val) -+ y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="NCDHW") -+ with test_util.use_gpu(): -+ y_val = self.evaluate(y) -+ self.assertAllEqual(y_val, y_val_expected) -+ -+ def testNDHWCtoDHWNC(self): -+ x_val = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] -+ y_val_expected = [3, 0, 1, 2, 4, 3, 0, 1, 2, 4] -+ x = constant_op.constant(x_val) -+ y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="DHWNC") -+ with test_util.use_gpu(): -+ y_val = self.evaluate(y) -+ self.assertAllEqual(y_val, y_val_expected) -+ -+ def testDNHWCtoWHDCN(self): -+ x_val = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] -+ y_val_expected = [4, 2, 1, 0, 3, 4, 2, 1, 0, 3] -+ x = constant_op.constant(x_val) -+ y = nn_ops.data_format_dim_map(x, src_format="NDHWC", dst_format="WHDCN") -+ with test_util.use_gpu(): -+ y_val = self.evaluate(y) -+ self.assertAllEqual(y_val, y_val_expected) -+ - def testArbitraryASCII(self): - x_val = [-4, -3, -2, -1, 0, 1, 2, 3] - y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0] --- -2.27.0 - diff --git a/CVE-2020-26267-2.patch b/CVE-2020-26267-2.patch deleted file mode 100644 index 9c0355295bf006f296207b7fe262a96b757a741e..0000000000000000000000000000000000000000 --- a/CVE-2020-26267-2.patch +++ /dev/null @@ -1,266 +0,0 @@ -From ebc70b7a592420d3d2f359e4b1694c236b82c7ae Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 7 Dec 2020 11:15:21 -0800 -Subject: [PATCH] Validate that `DataFormat*` attributes form a permutation. - -The `src_format` and `dst_format` attributes for the `DataFormatDimMap` and `DataFormatVecPermute` raw ops are supposed to determine a permutation. However, this was not validated and could result in unitialized memory accesses as well as writes outside of bounds and potential crashes. - -While here, we also test that the format attributes have the needed length, add tests for all validation failure cases, remove unnecessary calls to `strings::StrCat`, and fix a few grammar errors. - -This will be cherry-picked on the supported release branches. - -PiperOrigin-RevId: 346135579 -Change-Id: I1c76392382c89ad8f072d5bc93d70669851eb404 ---- - tensorflow/core/kernels/data_format_ops.cc | 72 ++++++++++++++-- - tensorflow/python/ops/nn_test.py | 96 ++++++++++++++++++++++ - 2 files changed, 161 insertions(+), 7 deletions(-) - -diff --git a/tensorflow/core/kernels/data_format_ops.cc b/tensorflow/core/kernels/data_format_ops.cc -index e9c71f17..abe2fbc3 100644 ---- a/tensorflow/core/kernels/data_format_ops.cc -+++ b/tensorflow/core/kernels/data_format_ops.cc -@@ -18,16 +18,52 @@ limitations under the License. 
- #define EIGEN_USE_THREADS - - #include "tensorflow/core/kernels/data_format_ops.h" -+ -+#include -+ - #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" - #include "tensorflow/core/framework/op_kernel.h" - #include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/tensor.h" -+#include "tensorflow/core/platform/errors.h" - - namespace tensorflow { - - typedef Eigen::ThreadPoolDevice CPUDevice; - typedef Eigen::GpuDevice GPUDevice; - -+// Ensure that `src` and `dst` define a valid permutation. -+// Ops defined in this file assume that user specifies a permutation via two -+// string attributes. This check validates that these attributes properly define -+// it to prevent security vulnerabilities. -+static bool IsValidPermutation(const std::string& src, const std::string& dst) { -+ if (src.size() != dst.size()) { -+ return false; -+ } -+ -+ std::map characters; -+ -+ // Every character in `src` must be present only once -+ for (const auto c : src) { -+ if (characters[c]) { -+ return false; -+ } -+ characters[c] = true; -+ } -+ -+ // Every character in `dst` must show up in `src` exactly once -+ for (const auto c : dst) { -+ if (!characters[c]) { -+ return false; -+ } -+ characters[c] = false; -+ } -+ -+ // At this point, characters[] has been switched to true and false exactly -+ // once for all character in `src` (and `dst`) so we have a valid permutation -+ return true; -+} -+ - template - class DataFormatDimMapOp : public OpKernel { - public: -@@ -38,14 +74,18 @@ class DataFormatDimMapOp : public OpKernel { - string dst_format; - OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); - OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, -- errors::InvalidArgument(strings::StrCat( -- "Source format must of length 4 or 5, received " -- "src_format = ", src_format))); -+ errors::InvalidArgument( -+ "Source format must be of length 4 or 5, received " -+ "src_format = ", src_format)); -+ OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, -+ errors::InvalidArgument("Destination format must be of length " -+ "4 or 5, received dst_format = ", -+ dst_format)); - OP_REQUIRES( -- context, dst_format.size() == 4 || dst_format.size() == 5, -- errors::InvalidArgument(strings::StrCat( -- "Destination format must of length 4 or 5, received dst_format = ", -- dst_format))); -+ context, IsValidPermutation(src_format, dst_format), -+ errors::InvalidArgument( -+ "Destination and source format must determine a permutation, got ", -+ src_format, " and ", dst_format)); - dst_idx_ = Tensor(DT_INT32, {static_cast(src_format.size())}); - for (int i = 0; i < src_format.size(); ++i) { - for (int j = 0; j < dst_format.size(); ++j) { -@@ -77,8 +117,22 @@ class DataFormatVecPermuteOp : public OpKernel { - : OpKernel(context) { - string src_format; - OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); -+ OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, -+ errors::InvalidArgument( -+ "Source format must be of length 4 or 5, received " -+ "src_format = ", -+ src_format)); - string dst_format; - OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); -+ OP_REQUIRES(context, dst_format.size() == 4 || dst_format.size() == 5, -+ errors::InvalidArgument("Destination format must be of length " -+ "4 or 5, received dst_format = ", -+ dst_format)); -+ OP_REQUIRES( -+ context, IsValidPermutation(src_format, dst_format), -+ errors::InvalidArgument( -+ "Destination and source format 
must determine a permutation, got ", -+ src_format, " and ", dst_format)); - src_format_ = src_format; - dst_format_ = dst_format; - } -@@ -124,6 +178,10 @@ class DataFormatVecPermuteOp : public OpKernel { - }; - keep_only_spatial_dimensions(&src_format_str); - keep_only_spatial_dimensions(&dst_format_str); -+ OP_REQUIRES(context, -+ src_format_str.size() == 2 && dst_format_str.size() == 2, -+ errors::InvalidArgument( -+ "Format specifier must contain H and W for 2D case")); - } - ComputeDstIndex(src_format_str, dst_format_str, input.dims(), &dst_idx); - -diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py -index 55d11a35..d2094a7d 100644 ---- a/tensorflow/python/ops/nn_test.py -+++ b/tensorflow/python/ops/nn_test.py -@@ -27,6 +27,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin - from tensorflow.python.eager import def_function - from tensorflow.python.framework import constant_op - from tensorflow.python.framework import dtypes -+from tensorflow.python.framework import errors - from tensorflow.python.framework import ops - from tensorflow.python.framework import tensor_spec - from tensorflow.python.framework import test_util -@@ -1234,6 +1235,7 @@ class DataFormatDimMapTest(test_lib.TestCase): - y_val = self.evaluate(y) - self.assertAllEqual(y_val, y_val_expected) - -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") - def testArbitraryASCII(self): - x_val = [-4, -3, -2, -1, 0, 1, 2, 3] - y_val_expected = [3, 2, 1, 0, 3, 2, 1, 0] -@@ -1243,6 +1245,46 @@ class DataFormatDimMapTest(test_lib.TestCase): - y_val = self.evaluate(y) - self.assertAllEqual(y_val, y_val_expected) - -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testInvalidLength(self): -+ x = [-4, -3, -2, -1, 0, 1, 2, 3] -+ with self.assertRaisesRegex(errors.InvalidArgumentError, -+ "Source format must be of length 4 or 5"): -+ op = nn_ops.data_format_dim_map( -+ x, src_format="12345678", dst_format="87654321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testDuplicateSrc(self): -+ x = [-4, -3, -2, -1, 0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_dim_map(x, src_format="1233", dst_format="4321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testDuplicateDst(self): -+ x = [-4, -3, -2, -1, 0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="3321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testExtraSpecifiers(self): -+ x = [-4, -3, -2, -1, 0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_dim_map(x, src_format="1234", dst_format="5321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ - - class DataFormatVectorPermuteTest(test_lib.TestCase): - -@@ -1344,6 +1386,60 @@ class DataFormatVectorPermuteTest(test_lib.TestCase): - y_val = self.evaluate(y) - self.assertAllEqual(y_val, [[7, 4], [4, 5], [5, 1], [9, 3]]) - -+ 
@test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testInvalidLength(self): -+ x = [0, 1, 2, 3] -+ with self.assertRaisesRegex(errors.InvalidArgumentError, -+ "Source format must be of length 4 or 5"): -+ op = nn_ops.data_format_vec_permute( -+ x, src_format="12345678", dst_format="87654321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testDuplicateSrc(self): -+ x = [0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_vec_permute( -+ x, src_format="1233", dst_format="4321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testDuplicateDst(self): -+ x = [0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_vec_permute( -+ x, src_format="1234", dst_format="3321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def testExtraSpecifiers(self): -+ x = [0, 1, 2, 3] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Destination and source format must determine a permutation"): -+ op = nn_ops.data_format_vec_permute( -+ x, src_format="1234", dst_format="5321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ -+ @test_util.disable_xla("XLA catches the error and rethrows as different one") -+ def test2DNoWH(self): -+ x = [[0, 1], [2, 3]] -+ with self.assertRaisesRegex( -+ errors.InvalidArgumentError, -+ "Format specifier must contain H and W for 2D case"): -+ op = nn_ops.data_format_vec_permute( -+ x, src_format="1234", dst_format="4321") -+ with test_util.use_gpu(): -+ self.evaluate(op) -+ - - @test_util.run_all_in_graph_and_eager_modes - class AvgPoolTest(test_lib.TestCase): --- -2.27.0 - diff --git a/CVE-2020-26268.patch b/CVE-2020-26268.patch deleted file mode 100644 index d6f57069b4749ed4653093e19c8d7232a246cc34..0000000000000000000000000000000000000000 --- a/CVE-2020-26268.patch +++ /dev/null @@ -1,32 +0,0 @@ -From c1e1fc899ad5f8c725dcbb6470069890b5060bc7 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Fri, 4 Dec 2020 17:06:23 -0800 -Subject: [PATCH] Mark `MemmappedTensorAllocator` as returning opaque handle. - -This allocator is used for `ImmutableConstantOp` and it returns a handle to the contents of a memory mapped file which is supposed to represent a tensor. - -For tensors of complex types (resources, variables and strings), allocators which are not marked as returning opaque handles will call placement new to initialize each element. This means writing to the buffer. However, in our case, the buffer is immutable and already contains the tensor data. Hence, writing to it is both destructive and causes a crash. 
-
-PiperOrigin-RevId: 345786451
-Change-Id: I46369c50fa60b3431709ffe068a728d3061f49c4
----
- tensorflow/core/kernels/immutable_constant_op.cc | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc
-index 0dd08c694eb6c..1cfbdb8277891 100644
---- a/tensorflow/core/kernels/immutable_constant_op.cc
-+++ b/tensorflow/core/kernels/immutable_constant_op.cc
-@@ -62,6 +62,12 @@ class MemmappedTensorAllocator : public Allocator {
-
-   void set_delete_on_deallocate() { delete_on_deallocate_ = true; }
-
-+  // Make sure tensors or complex types (strings, variants, resources) don't get
-+  // their constructor called via a placement new since that would require
-+  // writing to immutable data.
-+  // See also: tensorflow/core/framework/typed_allocator.h
-+  bool AllocatesOpaqueHandle() const override { return true; }
-+
-  private:
-   std::unique_ptr memory_region_;
-   // If there is an error during allocation we keep it in this status.
diff --git a/CVE-2020-26270.patch b/CVE-2020-26270.patch
deleted file mode 100644
index 7427e7d3fce115f072dab890d5544ea1ed6ad72c..0000000000000000000000000000000000000000
--- a/CVE-2020-26270.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From 14755416e364f17fb1870882fa778c7fec7f16e3 Mon Sep 17 00:00:00 2001
-From: Mihai Maruseac
-Date: Mon, 7 Dec 2020 20:31:31 -0800
-Subject: [PATCH] Prevent CHECK-fail in LSTM/GRU with zero-length input.
-
-PiperOrigin-RevId: 346239181
-Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
----
- tensorflow/stream_executor/cuda/cuda_dnn.cc | 8 ++++++--
- 1 file changed, 6 insertions(+), 2 deletions(-)
-
-diff --git a/tensorflow/stream_executor/cuda/cuda_dnn.cc b/tensorflow/stream_executor/cuda/cuda_dnn.cc
-index a97850bd..5ae19f27 100644
---- a/tensorflow/stream_executor/cuda/cuda_dnn.cc
-+++ b/tensorflow/stream_executor/cuda/cuda_dnn.cc
-@@ -1474,7 +1474,9 @@ class CudnnRnnSequenceTensorDescriptor
-   static port::StatusOr Create(
-       GpuExecutor* parent, int max_seq_length, int batch_size, int data_size,
-       cudnnDataType_t data_type) {
--    CHECK_GT(max_seq_length, 0);
-+    if (max_seq_length <= 0) {
-+      return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0");
-+    }
-     int dims[] = {batch_size, data_size, 1};
-     int strides[] = {dims[1] * dims[2], dims[2], 1};
-     TensorDescriptor tensor_desc = CreateTensorDescriptor();
-@@ -1495,7 +1497,9 @@ class CudnnRnnSequenceTensorDescriptor
-       const absl::Span& seq_lengths, bool time_major,
-       cudnnDataType_t data_type) {
- #if CUDNN_VERSION >= 7201
--    CHECK_GT(max_seq_length, 0);
-+    if (max_seq_length <= 0) {
-+      return port::Status(port::error::INVALID_ARGUMENT, "max_seq_length <= 0");
-+    }
-     int dims[] = {batch_size, data_size, 1};
-     int strides[] = {dims[1] * dims[2], dims[2], 1};
-     TensorDescriptor tensor_desc = CreateTensorDescriptor();
---
-2.27.0
-
diff --git a/CVE-2020-26271.patch b/CVE-2020-26271.patch
deleted file mode 100644
index ec7474acb2d9a459bd5c4795beffd473a66812fa..0000000000000000000000000000000000000000
--- a/CVE-2020-26271.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 0cc38aaa4064fd9e79101994ce9872c6d91f816b Mon Sep 17 00:00:00 2001
-From: Mihai Maruseac
-Date: Tue, 8 Dec 2020 09:31:57 -0800
-Subject: [PATCH] Prevent unitialized memory access in
- `GraphConstructor::MakeEdge`
-
-The `MakeEdge` implementation assumes that there exists an output at `output_index` of `src` node and an input at `input_index` of `dst` node. However, if this is not the case this results in accessing data out of bounds. Because we are accessing an array that is a private member of a class and only in read only mode, this usually results only in unitialized memory access. However, it is reasonable to think that malicious users could manipulate these indexes to actually read data outside the class, thus resulting in information leakage and further exploits.
-
-PiperOrigin-RevId: 346343288
-Change-Id: I2127da27c2023d27f26efd39afa6c853385cab6f
----
- tensorflow/core/common_runtime/graph_constructor.cc | 12 ++++++++++++
- 1 file changed, 12 insertions(+)
-
-diff --git a/tensorflow/core/common_runtime/graph_constructor.cc b/tensorflow/core/common_runtime/graph_constructor.cc
-index 92b07682d76cd..639739e9cac8c 100644
---- a/tensorflow/core/common_runtime/graph_constructor.cc
-+++ b/tensorflow/core/common_runtime/graph_constructor.cc
-@@ -44,6 +44,7 @@ limitations under the License.
- #include "tensorflow/core/lib/gtl/inlined_vector.h"
- #include "tensorflow/core/lib/strings/scanner.h"
- #include "tensorflow/core/lib/strings/str_util.h"
-+#include "tensorflow/core/platform/errors.h"
- #include "tensorflow/core/platform/logging.h"
- #include "tensorflow/core/platform/macros.h"
- #include "tensorflow/core/public/version.h"
-@@ -1425,6 +1426,17 @@ void GraphConstructor::Undo() {
-
- Status GraphConstructor::MakeEdge(Node* src, int output_index, Node* dst,
-                                   int input_index) {
-+  if (output_index >= src->num_outputs()) {
-+    return errors::InvalidArgument(
-+        "Output ", output_index, " of node ", src->name(),
-+        " does not exist. Node only has ", src->num_outputs(), " outputs.");
-+  }
-+  if (input_index >= dst->num_inputs()) {
-+    return errors::InvalidArgument(
-+        "Input ", input_index, " of node ", dst->name(),
-+        " does not exist. Node only has ", dst->num_inputs(), " inputs.");
-+  }
-+
-   DataType src_out = src->output_type(output_index);
-   DataType dst_in = dst->input_type(input_index);
-   if (!TypesCompatible(dst_in, src_out)) {
diff --git a/CVE-2021-29512_CVE-2021-29514.patch b/CVE-2021-29512_CVE-2021-29514.patch
deleted file mode 100644
index c497636be32952a5b5df44c4ad096e3591b5f11b..0000000000000000000000000000000000000000
--- a/CVE-2021-29512_CVE-2021-29514.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From eebb96c2830d48597d055d247c0e9aebaea94cd5 Mon Sep 17 00:00:00 2001
-From: Amit Patankar
-Date: Tue, 13 Apr 2021 14:18:51 -0700
-Subject: [PATCH] Fix an invalid address vulnerability in
- `tf.raw_ops.RaggedBincount`.
-
-PiperOrigin-RevId: 368293153
-Change-Id: I4b4e493d3fd05e7dc55a55de3a041a80a4f275c3
----
- tensorflow/core/kernels/bincount_op.cc | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
-diff --git a/tensorflow/core/kernels/bincount_op.cc b/tensorflow/core/kernels/bincount_op.cc
-index 35911ee5d5540..258266ab29d33 100644
---- a/tensorflow/core/kernels/bincount_op.cc
-+++ b/tensorflow/core/kernels/bincount_op.cc
-@@ -420,6 +420,15 @@ class RaggedBincountOp : public OpKernel {
-     int num_values = values.size();
-     int batch_idx = 0;
-
-+    OP_REQUIRES(ctx, splits(0) == 0,
-+                errors::InvalidArgument("Splits must start with 0, not with ",
-+                                        splits(0)));
-+
-+    OP_REQUIRES(ctx, splits(num_rows) == num_values,
-+                errors::InvalidArgument(
-+                    "Splits must end with the number of values, got ",
-+                    splits(num_rows), " instead of ", num_values));
-+
-     Tensor* out_t;
-     OP_REQUIRES_OK(
-         ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t));
diff --git a/CVE-2021-29513.patch b/CVE-2021-29513.patch
deleted file mode 100644
index d32d3bb8f4c9df3914a6e9644e26428f26bd77a2..0000000000000000000000000000000000000000
--- a/CVE-2021-29513.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 030af767d357d1b4088c4a25c72cb3906abac489 Mon Sep 17 00:00:00 2001
-From: Amit Patankar
-Date: Tue, 13 Apr 2021 14:25:01 -0700
-Subject: [PATCH] Fix `tf.raw_ops.ResourceCountUpTo` null pointer dereference.
-
-PiperOrigin-RevId: 368294347
-Change-Id: I2c16fbfc9b4966c402c3d8e311f0d665a9c852d8
----
- tensorflow/python/lib/core/ndarray_tensor.cc | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
-diff --git a/tensorflow/python/lib/core/ndarray_tensor.cc b/tensorflow/python/lib/core/ndarray_tensor.cc
-index 03fbea397485e..6cf51ceebbdaa 100644
---- a/tensorflow/python/lib/core/ndarray_tensor.cc
-+++ b/tensorflow/python/lib/core/ndarray_tensor.cc
-@@ -16,6 +16,7 @@ limitations under the License.
- #include "tensorflow/python/lib/core/ndarray_tensor.h"
-
- #include
-+#include
-
- #include "tensorflow/c/eager/tfe_context_internal.h"
- #include "tensorflow/c/tf_tensor_internal.h"
-@@ -74,6 +75,13 @@ Status PyArrayDescr_to_TF_DataType(PyArray_Descr* descr,
-   PyObject* key;
-   PyObject* value;
-   Py_ssize_t pos = 0;
-+
-+  // Return an error if the fields attribute is null.
-+  // Occurs with an improper conversion attempt to resource.
-+  if (descr->fields == nullptr) {
-+    return errors::Internal("Unexpected numpy data type");
-+  }
-+
-   if (PyDict_Next(descr->fields, &pos, &key, &value)) {
-     // In Python 3, the keys of numpy custom struct types are unicode, unlike
-     // Python 2, where the keys are bytes.
diff --git a/CVE-2021-29515.patch b/CVE-2021-29515.patch
deleted file mode 100644
index 713eaa884ec0ee0f6b7e1c6be0882825e5e51343..0000000000000000000000000000000000000000
--- a/CVE-2021-29515.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From a7116dd3913c4a4afd2a3a938573aa7c785fdfc6 Mon Sep 17 00:00:00 2001
-From: Mihai Maruseac
-Date: Sat, 17 Apr 2021 20:55:53 -0700
-Subject: [PATCH] Validate `MatrixDiagV{2,3}` arguments to prevent breakage.
- -PiperOrigin-RevId: 369056033 -Change-Id: Ic2018c297d3dd6f252dc1dd3667f1ed5cb1eaa42 ---- - .../core/kernels/matrix_diag_op.cc | 19 ++++++++++++++++--- - 1 file changed, 16 insertions(+), 3 deletions(-) - -diff --git a/tensorflow/core/kernels/matrix_diag_op.cc b/tensorflow/core/kernels/matrix_diag_op.cc -index 69cc8170793ae..d4eb589836a85 100644 ---- a/tensorflow/core/kernels/matrix_diag_op.cc -+++ b/tensorflow/core/kernels/matrix_diag_op.cc -@@ -192,9 +192,22 @@ class MatrixDiagOp : public OpKernel { - upper_diag_index = diag_index.flat()(1); - } - } -- num_rows = context->input(2).flat()(0); -- num_cols = context->input(3).flat()(0); -- padding_value = context->input(4).flat()(0); -+ -+ auto& num_rows_tensor = context->input(2); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_rows_tensor.shape()), -+ errors::InvalidArgument("num_rows must be a scalar")); -+ num_rows = num_rows_tensor.flat()(0); -+ -+ auto& num_cols_tensor = context->input(3); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(num_cols_tensor.shape()), -+ errors::InvalidArgument("num_cols must be a scalar")); -+ num_cols = num_cols_tensor.flat()(0); -+ -+ auto& padding_value_tensor = context->input(4); -+ OP_REQUIRES(context, -+ TensorShapeUtils::IsScalar(padding_value_tensor.shape()), -+ errors::InvalidArgument("padding_value must be a scalar")); -+ padding_value = padding_value_tensor.flat()(0); - } - - // Size validations. diff --git a/CVE-2021-29516-1.patch b/CVE-2021-29516-1.patch deleted file mode 100644 index f4095ffa298b28902e6a73bcdf3f0424fac1a773..0000000000000000000000000000000000000000 --- a/CVE-2021-29516-1.patch +++ /dev/null @@ -1,199 +0,0 @@ -From ce47a396ff795bdb6cf48eb53dbcba46cb51fa7d Mon Sep 17 00:00:00 2001 -From: Katherine Tian -Date: Tue, 30 Jun 2020 04:12:11 +0000 -Subject: [PATCH 1/1] TensorKey class and TensorMap tests - ---- - tensorflow/core/BUILD | 1 + - tensorflow/core/framework/BUILD | 70 ++++++++++++++++++++++++++ - tensorflow/core/framework/tensor_key.h | 64 +++++++++++++++++++++++ - tensorflow/core/kernels/BUILD | 1 + - 4 files changed, 136 insertions(+) - create mode 100644 tensorflow/core/framework/tensor_key.h - -diff --git a/tensorflow/core/BUILD b/tensorflow/core/BUILD -index d0be6ee9..6e745b4e 100644 ---- a/tensorflow/core/BUILD -+++ b/tensorflow/core/BUILD -@@ -495,6 +495,7 @@ tf_cuda_library( - "//tensorflow/core/framework:shared_ptr_variant.h", - "//tensorflow/core/framework:stats_aggregator.h", - "//tensorflow/core/framework:tensor.h", -+ "//tensorflow/core/framework:tensor_key.h", - "//tensorflow/core/framework:tensor_shape.h", - "//tensorflow/core/framework:tensor_slice.h", - "//tensorflow/core/framework:tensor_types.h", -diff --git a/tensorflow/core/framework/BUILD b/tensorflow/core/framework/BUILD -index 9b6ddb2a..093f0545 100644 ---- a/tensorflow/core/framework/BUILD -+++ b/tensorflow/core/framework/BUILD -@@ -209,6 +209,7 @@ filegroup( - "shared_ptr_variant.h", - "stats_aggregator.h", - "tensor.h", -+ "tensor_key.h", - "tensor_reference.h", - "tensor_shape.h", - "tensor_slice.h", -@@ -760,6 +761,75 @@ tf_cuda_library( - alwayslink = 1, - ) - -+tf_cuda_library( -+ name = "tensor_key", -+ srcs = [ -+ "log_memory.cc", -+ "tensor.cc", -+ "typed_allocator.cc", -+ "types.cc", -+ "variant.cc", -+ "variant_op_registry.cc", -+ "variant_tensor_data.cc", -+ ], -+ hdrs = [ -+ "log_memory.h", -+ "register_types.h", -+ "tensor.h", -+ "tensor_key.h", -+ "typed_allocator.h", -+ "types.h", -+ "variant.h", -+ "variant_encode_decode.h", -+ "variant_op_registry.h", -+ 
"variant_tensor_data.h", -+ ], -+ visibility = [ -+ "//tensorflow/core:__pkg__", -+ "//tensorflow/core/util:__pkg__", -+ ], -+ deps = [ -+ ":allocation_description_proto_cc", -+ ":allocator", -+ ":bfloat16", -+ ":log_memory_proto_cc", -+ ":numeric_types", -+ ":resource_handle", -+ ":resource_handle_proto_cc", -+ ":tensor_description_proto_cc", -+ ":tensor_proto_cc", -+ ":tensor_shape", -+ ":tensor_types", -+ ":type_index", -+ ":type_traits", -+ ":types_proto_cc", -+ "//tensorflow/core/lib/core:coding", -+ "//tensorflow/core/lib/core:errors", -+ "//tensorflow/core/lib/core:refcount", -+ "//tensorflow/core/lib/core:status", -+ "//tensorflow/core/lib/core:stringpiece", -+ "//tensorflow/core/lib/gtl:array_slice", -+ "//tensorflow/core/lib/gtl:flatmap", -+ "//tensorflow/core/lib/gtl:inlined_vector", -+ "//tensorflow/core/lib/hash", -+ "//tensorflow/core/lib/strings:str_util", -+ "//tensorflow/core/lib/strings:strcat", -+ "//tensorflow/core/platform:abi", -+ "//tensorflow/core/platform:logging", -+ "//tensorflow/core/platform:macros", -+ "//tensorflow/core/platform:platform_port", -+ "//tensorflow/core/platform:protobuf", -+ "//tensorflow/core/platform:strcat", -+ "//tensorflow/core/platform:tensor_coding", -+ "//tensorflow/core/platform:types", -+ "//tensorflow/core/public:version", -+ "//third_party/eigen3", -+ "@com_google_absl//absl/memory", -+ "@com_google_absl//absl/strings", -+ ], -+ alwayslink = 1, -+) -+ - cc_library( - name = "shape_inference", - srcs = ["shape_inference.cc"], -diff --git a/tensorflow/core/framework/tensor_key.h b/tensorflow/core/framework/tensor_key.h -new file mode 100644 -index 00000000..8eff58b2 ---- /dev/null -+++ b/tensorflow/core/framework/tensor_key.h -@@ -0,0 +1,64 @@ -+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. -+ -+Licensed under the Apache License, Version 2.0 (the "License"); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an "AS IS" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. -+==============================================================================*/ -+ -+#include "tensorflow/core/framework/tensor.h" -+ -+namespace tensorflow { -+ -+class TensorKey : public Tensor { -+ public: -+ using Tensor::Tensor; -+ -+ TensorKey(const Tensor& t) : Tensor(t) {} -+ -+ // Equality operator. Needed for absl hashing. -+ friend bool operator==(const TensorKey& t1, const TensorKey& t2) { -+ if (t1.dtype() != t2.dtype() || t1.shape() != t2.shape()) { -+ return false; -+ } -+ if (DataTypeCanUseMemcpy(t1.dtype())) { -+ return t1.tensor_data() == t2.tensor_data(); -+ } -+ if (t1.dtype() == DT_STRING) { -+ const auto s1 = t1.unaligned_flat(); -+ const auto s2 = t2.unaligned_flat(); -+ for (int64 i = 0, n = t1.NumElements(); i < n; ++i) { -+ if (TF_PREDICT_FALSE(s1(i) != s2(i))) { -+ return false; -+ } -+ } -+ return true; -+ } -+ return false; -+ } -+ -+ friend bool operator!=(const TensorKey& t1, const TensorKey& t2) { -+ return !(t1==t2); -+ } -+ -+ // AbslHashValue() function, needed for absl hashing. 
-+ template -+ friend H AbslHashValue(H h, const TensorKey& k) { -+ uint8* d = (uint8*)(k.data()); -+ size_t s = k.AllocatedBytes(); -+ std::vector vec; -+ for (int i=0; i < s; i++) { -+ vec.push_back(d[i]); -+ } -+ return H::combine(std::move(h), s); -+ } -+}; -+ -+} //namespace tensorflow -\ No newline at end of file -diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD -index f5a480b3..4ef86efb 100644 ---- a/tensorflow/core/kernels/BUILD -+++ b/tensorflow/core/kernels/BUILD -@@ -3219,6 +3219,7 @@ tf_cc_tests( - ], - deps = [ - ":eigen_helpers", -+ "//tensorflow/core/framework:tensor_testutil", - "//tensorflow/core:test", - "//tensorflow/core:test_main", - "@com_google_absl//absl/strings", --- -2.27.0 - diff --git a/CVE-2021-29516-2.patch b/CVE-2021-29516-2.patch deleted file mode 100644 index 9fa3b2218f473dd04db8ac44897e77d6c48a9a66..0000000000000000000000000000000000000000 --- a/CVE-2021-29516-2.patch +++ /dev/null @@ -1,70 +0,0 @@ -From b6a0cba2b381e83a1d0a19b675ca6f7459d2d2bc Mon Sep 17 00:00:00 2001 -From: Edward Loper -Date: Tue, 25 Aug 2020 08:12:53 -0700 -Subject: [PATCH 1/1] Fix segmentation fault in tf.map_fn when fn_output_spec - is a RaggedTensorSpec and the input tensor has shape [0, ...]. - -PiperOrigin-RevId: 328332518 -Change-Id: I6aff03152bbc96507fb6c5f89b05722f3cc30164 ---- - .../kernels/ragged_tensor_from_variant_op.cc | 16 +++++++++++++++- - .../python/ops/ragged/ragged_map_fn_op_test.py | 15 +++++++++++++++ - 2 files changed, 30 insertions(+), 1 deletion(-) - -diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -index ad0712e6fd0..aa736ad7f60 100644 ---- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -+++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -@@ -175,8 +175,22 @@ Status NestedStackRaggedTensors( - } - } - -+ // If the variant tensor input is empty, then we have no way to determine -+ // the correct shape for the dense_values. (It must have rank>=1, and its -+ // outer dimension must be 0, but we don't know its shape beyond that.) -+ // For now, we just use a shape of `[0]` in this case. -+ // TODO(edloper): Update this op with an attribute containing information -+ // about dense_values shape. If it's `None`, then we'll probably still have -+ // to use shape=[0] here, but if we have more info, then we can use it. -+ // E.g., in map_fn, we may have shape info from the RaggedTensorSpec. -+ TensorShape component_values_shape; -+ if (ragged_components.empty()) { -+ component_values_shape = TensorShape({0}); -+ } else { -+ component_values_shape = ragged_components[0].values.shape(); -+ } -+ - // Populate values. 
-- TensorShape component_values_shape = ragged_components[0].values.shape(); - int values_size = component_values_shape.dim_size(0); - for (int i = 1; i < ragged_components.size(); i++) { - if (ragged_components[i].values.dims() != component_values_shape.dims()) { -diff --git a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py -index 8a40e396a68..bead4923a0a 100644 ---- a/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py -+++ b/tensorflow/python/ops/ragged/ragged_map_fn_op_test.py -@@ -150,6 +150,21 @@ class RaggedMapOpTest(test_util.TensorFlowTestCase, - result_dtype=ragged_tensor.RaggedTensorType( - dtype=dtypes.int64, ragged_rank=4), - ), -+ # [d1] -> [d1, (d2), (d3)] -+ dict( -+ fn=ragged_math_ops.range, -+ elems=np.array([1, 2, 3], np.int64), -+ expected_output=[[[0]], [[0, 1]], [[0, 1, 2]]], -+ result_dtype=ragged_tensor.RaggedTensorType( -+ dtype=dtypes.int64, ragged_rank=2)), -+ # [0] -> [0, (d2), (d3)] (github issue #36232) -+ dict( -+ fn=ragged_math_ops.range, -+ elems=np.zeros([0], np.int64), -+ expected_output=[], -+ expected_ragged_rank=2, -+ result_dtype=ragged_tensor.RaggedTensorType( -+ dtype=dtypes.int64, ragged_rank=2)), - ]) - - def testRaggedMap( --- -2.27.0 - diff --git a/CVE-2021-29516-3.patch b/CVE-2021-29516-3.patch deleted file mode 100644 index 088f31a86004ef41430c96c9324155cc2047dc50..0000000000000000000000000000000000000000 --- a/CVE-2021-29516-3.patch +++ /dev/null @@ -1,904 +0,0 @@ -From be6b1fdb0699d4000b70ad32cc23d1503e5c7511 Mon Sep 17 00:00:00 2001 -From: Edward Loper -Date: Wed, 14 Oct 2020 09:41:17 -0700 -Subject: [PATCH 1/1] Added gradients for RaggedTensorToVariant and - RaggedTensorFromVariant. (This allows gradients to pass through map_fn when - it is applied to ragged tensors.) 
- -PiperOrigin-RevId: 337108621 -Change-Id: I73d5f3296181877f0cc4c7a6273b693bcf8310ab ---- - tensorflow/core/kernels/BUILD | 15 ++ - .../kernels/ragged_tensor_from_variant_op.cc | 164 +++++++--------- - .../kernels/ragged_tensor_to_variant_op.cc | 180 +++++++++++------- - .../core/kernels/ragged_tensor_variant.cc | 86 +++++++++ - .../core/kernels/ragged_tensor_variant.h | 110 +++++++++++ - tensorflow/core/ops/ragged_conversion_ops.cc | 20 +- - tensorflow/python/ops/ragged/BUILD | 1 + - 9 files changed, 478 insertions(+), 172 deletions(-) - create mode 100644 tensorflow/core/framework/tensor_key.h - create mode 100644 tensorflow/core/kernels/ragged_tensor_variant.cc - create mode 100644 tensorflow/core/kernels/ragged_tensor_variant.h - -diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD -index f5a480b3..12adb2b2 100644 ---- a/tensorflow/core/kernels/BUILD -+++ b/tensorflow/core/kernels/BUILD -@@ -1529,10 +1529,22 @@ tf_cc_test( - ], - ) - -+cc_library( -+ name = "ragged_tensor_variant", -+ srcs = ["ragged_tensor_variant.cc"], -+ hdrs = ["ragged_tensor_variant.h"], -+ deps = [ -+ ":cwise_op", -+ "//tensorflow/core:framework", -+ ], -+) -+ - tf_kernel_library( - name = "ragged_tensor_to_variant_op", - srcs = ["ragged_tensor_to_variant_op.cc"], - deps = [ -+ ":concat_lib", -+ ":ragged_tensor_variant", - "//tensorflow/core:framework", - "//tensorflow/core:lib", - ], -@@ -1542,6 +1554,7 @@ tf_kernel_library( - name = "ragged_tensor_from_variant_op", - srcs = ["ragged_tensor_from_variant_op.cc"], - deps = [ -+ ":ragged_tensor_variant", - "//tensorflow/core:framework", - "//tensorflow/core:lib", - ], -@@ -1554,6 +1567,7 @@ tf_cc_test( - deps = [ - ":ops_testutil", - ":ragged_tensor_to_variant_op", -+ ":ragged_tensor_variant", - "//tensorflow/core:framework", - "//tensorflow/core:lib", - "//tensorflow/core:test", -@@ -1570,6 +1584,7 @@ tf_cc_test( - deps = [ - ":ops_testutil", - ":ragged_tensor_from_variant_op", -+ ":ragged_tensor_variant", - "//tensorflow/core:framework", - "//tensorflow/core:lib", - "//tensorflow/core:test", -diff --git a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -index d7b6a89a..fa8853af 100644 ---- a/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -+++ b/tensorflow/core/kernels/ragged_tensor_from_variant_op.cc -@@ -20,110 +20,76 @@ limitations under the License. - #include "tensorflow/core/framework/tensor.h" - #include "tensorflow/core/framework/variant.h" - #include "tensorflow/core/framework/variant_encode_decode.h" -+#include "tensorflow/core/kernels/ragged_tensor_variant.h" - #include "tensorflow/core/lib/core/errors.h" - #include "tensorflow/core/lib/core/status.h" - - namespace tensorflow { - namespace { - --struct RaggedTensor { -- Tensor values; -- std::vector nested_splits; --}; -- --Status RaggedComponentsFromVariant(const Tensor& encoded_variant, -- int ragged_rank, DataType value_dtype, -- DataType split_dtype, -- std::vector* decoded_ragged) { -+Status RaggedComponentsFromVariant( -+ const Tensor& encoded_variant, int ragged_rank, DataType value_dtype, -+ DataType split_dtype, std::vector* decoded_ragged) { - const auto& flat_variants = encoded_variant.flat(); -- decoded_ragged->resize(flat_variants.size()); -- // Step 1: Extract the 1-D DT_VARIANT Tensor from each Variant element in the -- // input. 
-+ decoded_ragged->reserve(flat_variants.size()); -+ - for (int i = 0; i < flat_variants.size(); i++) { - const auto& flat_variant = flat_variants(i); -- const Tensor* encoded_list = flat_variant.get(); -- if (encoded_list == nullptr) { -+ const RaggedTensorVariant* decoded = -+ flat_variant.get(); -+ if (decoded == nullptr) { - return errors::InvalidArgument( - "Input Variant element at index ", i, -- " doesn't hold a Tensor: ", flat_variant.DebugString()); -+ " doesn't hold a RaggedTensorVariant: ", flat_variant.DebugString()); - } -- if (encoded_list->dims() != 1) { -+ decoded_ragged->push_back(*decoded); -+ decoded = &decoded_ragged->back(); -+ // Check ragged rank & types -+ if (decoded->ragged_rank() != ragged_rank) { - return errors::InvalidArgument( -- "Encoded input Variant must have rank 1, but found rank: ", -- encoded_list->dims(), -- ". encoded input Variant: ", encoded_list->DebugString()); -+ "Encoded input RaggedTensorVariant has ragged_rank=", -+ decoded->ragged_rank(), ". Expected ragged_rank=", ragged_rank, "."); - } -- if (encoded_list->NumElements() != (ragged_rank + 1) && -- encoded_list->NumElements() != 1) { -+ if (decoded->values().dtype() != value_dtype) { - return errors::InvalidArgument( -- "Encoded input Variant must hold either input_ragged_rank + 1 " -- "Tensors or an empty Tensor (zero splits Tensors, 1 values Tensor), " -- "input_ragged_rank: ", -- ragged_rank, -- ", encoded input Variant: ", encoded_list->DebugString()); -+ "Expected values Tensor dtype: ", DataTypeString(value_dtype), -+ ", found: ", DataTypeString(decoded->values().dtype())); - } -- const auto& input_vec = encoded_list->vec(); -- -- // Step 2: Get the splits and value Tensors from the 1-D DT_VARIANT Tensor -- // to create the component RaggedTensors. 
-- (*decoded_ragged)[i].nested_splits.reserve(ragged_rank); -- for (int j = 0; j < ragged_rank; j++) { -- const Tensor* split_tensor = input_vec(j).get(); -- if (split_tensor == nullptr) { -- return errors::InvalidArgument( -- "Encoded scalar element at index ", i, -- " doesn't have a splits Tensor at split_index ", j, ": ", -- input_vec(j).DebugString()); -- } -- Tensor splits_tensor = *split_tensor; -- if (splits_tensor.dtype() != split_dtype) { -+ if (decoded->values().dims() < 1) { -+ return errors::InvalidArgument( -+ "Ragged values must have rank >= 1; encoded scalar element at index ", -+ i, " has values Tensor: ", decoded->values().DebugString()); -+ } -+ for (const auto& splits : decoded->nested_splits()) { -+ if (splits.dtype() != split_dtype) { - return errors::InvalidArgument( -- "Expected splits Tensor dtype: ", split_dtype, -- ", found: ", splits_tensor.dtype()); -+ "Expected row_splits Tensor dtype: ", DataTypeString(split_dtype), -+ ", found: ", DataTypeString(splits.dtype())); - } -- if (splits_tensor.dims() != 1) { -+ if (splits.dims() != 1) { - return errors::InvalidArgument( - "Ragged splits must have rank 1; encoded scalar element at index ", -- i, " has splits Tensor at split_index ", j, ": ", -- splits_tensor.DebugString()); -+ i, " has splits Tensor ", splits.DebugString()); - } -- (*decoded_ragged)[i].nested_splits.push_back(splits_tensor); -- } -- const Tensor* values_tensor = input_vec(ragged_rank).get(); -- if (values_tensor == nullptr) { -- return errors::InvalidArgument("Encoded scalar element at index ", i, -- " doesn't have a values Tensor: ", -- input_vec(ragged_rank).DebugString()); -- } -- if (values_tensor->dtype() != value_dtype) { -- return errors::InvalidArgument( -- "Expected values Tensor dtype: ", DataTypeString(value_dtype), -- ", found: ", DataTypeString(values_tensor->dtype())); -- } -- if (values_tensor->dims() < 1) { -- return errors::InvalidArgument( -- "Ragged values must have rank >= 1; encoded scalar element at index ", -- i, " has values Tensor: ", values_tensor->DebugString()); - } -- (*decoded_ragged)[i].values = *values_tensor; - } - return Status::OK(); - } - - template - Status NestedStackRaggedTensors( -- const std::vector& ragged_components, -+ const std::vector& ragged_components, - const std::vector& nested_dim_sizes, const int input_ragged_rank, -- const int output_ragged_rank, RaggedTensor* output_ragged) { -- output_ragged->nested_splits.reserve(output_ragged_rank); -+ const int output_ragged_rank, RaggedTensorVariant* output_ragged) { -+ output_ragged->mutable_nested_splits()->reserve(output_ragged_rank); - const int dims = nested_dim_sizes.size(); - - // Populate first `dims - 1` splits. - for (int i = 0; i < dims - 1; i++) { - int dims_splits_size = nested_dim_sizes[i] + 1; -- output_ragged->nested_splits.push_back(Tensor( -- DataTypeToEnum::value, TensorShape({dims_splits_size}))); -- auto splits_vec = output_ragged->nested_splits[i].vec(); -+ output_ragged->append_splits(Tensor(DataTypeToEnum::value, -+ TensorShape({dims_splits_size}))); -+ auto splits_vec = output_ragged->mutable_splits(i)->vec(); - int split_diff = nested_dim_sizes[i + 1]; - for (int j = 0; j < dims_splits_size; j++) { - splits_vec(j) = j * split_diff; -@@ -132,15 +98,15 @@ Status NestedStackRaggedTensors( - - // Populate `dims`-th split. 
- int splits_size = ragged_components.size() + 1; -- output_ragged->nested_splits.push_back( -+ output_ragged->append_splits( - Tensor(DataTypeToEnum::value, TensorShape({splits_size}))); - auto dims_splits_vec = -- output_ragged->nested_splits[dims - 1].vec(); -+ output_ragged->mutable_splits(dims - 1)->vec(); - dims_splits_vec(0) = 0; - for (int i = 0; i < ragged_components.size(); i++) { -- int split_val = ragged_components[i].values.shape().dim_size(0); -- if (input_ragged_rank != 0 && !ragged_components[i].nested_splits.empty()) { -- split_val = ragged_components[i].nested_splits[0].NumElements() - 1; -+ int split_val = ragged_components[i].values().shape().dim_size(0); -+ if (input_ragged_rank != 0 && ragged_components[i].ragged_rank() > 0) { -+ split_val = ragged_components[i].splits(0).NumElements() - 1; - } - dims_splits_vec(i + 1) = dims_splits_vec(i) + split_val; - } -@@ -150,24 +116,24 @@ Status NestedStackRaggedTensors( - int split_index = dims + i; - int split_size = 1; - for (int j = 0; j < ragged_components.size(); j++) { -- if (!ragged_components[j].nested_splits.empty()) { -- split_size += ragged_components[j].nested_splits[i].NumElements() - 1; -+ if (!ragged_components[j].nested_splits().empty()) { -+ split_size += ragged_components[j].splits(i).NumElements() - 1; - } - } -- output_ragged->nested_splits.push_back( -+ output_ragged->append_splits( - Tensor(DataTypeToEnum::value, TensorShape({split_size}))); - auto splits_vec = -- output_ragged->nested_splits[split_index].vec(); -+ output_ragged->mutable_splits(split_index)->vec(); - splits_vec(0) = 0; - SPLIT_TYPE last_split_value = 0; - int index = 1; - for (int j = 0; j < ragged_components.size(); j++) { -- if (ragged_components[j].nested_splits.empty()) { -+ if (ragged_components[j].nested_splits().empty()) { - // Corner case: empty row. e.g [ [[x], [x]], [] ] - continue; - } - auto component_splits_vec = -- ragged_components[j].nested_splits[i].vec(); -+ ragged_components[j].splits(i).vec(); - for (int k = 1; k < component_splits_vec.size(); k++, index++) { - splits_vec(index) = component_splits_vec(k) + last_split_value; - } -@@ -187,35 +153,35 @@ Status NestedStackRaggedTensors( - if (ragged_components.empty()) { - component_values_shape = TensorShape({0}); - } else { -- component_values_shape = ragged_components[0].values.shape(); -+ component_values_shape = ragged_components[0].values().shape(); - } - - // Populate values. 
- int values_size = component_values_shape.dim_size(0); - for (int i = 1; i < ragged_components.size(); i++) { -- if (ragged_components[i].values.dims() != component_values_shape.dims()) { -+ if (ragged_components[i].values().dims() != component_values_shape.dims()) { - return errors::InvalidArgument( - "Rank of values must match for all " - "components; values shape at index 0: ", - component_values_shape.DebugString(), ", values shape at index ", i, -- ": ", ragged_components[i].values.shape().DebugString()); -+ ": ", ragged_components[i].values().shape().DebugString()); - } -- values_size += ragged_components[i].values.shape().dim_size(0); -+ values_size += ragged_components[i].values().shape().dim_size(0); - } - component_values_shape.set_dim(0, values_size); -- output_ragged->values = -- Tensor(DataTypeToEnum::value, component_values_shape); -+ output_ragged->set_values( -+ Tensor(DataTypeToEnum::value, component_values_shape)); - auto output_values_flat = -- output_ragged->values.flat_outer_dims(); -+ output_ragged->mutable_values()->flat_outer_dims(); - int values_index = 0; - for (int i = 0; i < ragged_components.size(); i++) { - auto component_values_flat = -- ragged_components[i].values.flat_outer_dims(); -- int num_inner_elements = ragged_components[i].values.NumElements(); -- if (ragged_components[i].values.dim_size(0) > 0) { -- num_inner_elements /= ragged_components[i].values.dim_size(0); -+ ragged_components[i].values().flat_outer_dims(); -+ int num_inner_elements = ragged_components[i].values().NumElements(); -+ if (ragged_components[i].values().dim_size(0) > 0) { -+ num_inner_elements /= ragged_components[i].values().dim_size(0); - } -- for (int j = 0; j < ragged_components[i].values.dim_size(0); -+ for (int j = 0; j < ragged_components[i].values().dim_size(0); - j++, values_index++) { - for (int k = 0; k < num_inner_elements; k++) { - output_values_flat(values_index, k) = component_values_flat(j, k); -@@ -265,7 +231,7 @@ class RaggedTensorFromVariantOp : public OpKernel { - // Decode all variants. 
- const auto value_dtype = DataTypeToEnum::v(); - const auto split_dtype = DataTypeToEnum::v(); -- std::vector decoded_components; -+ std::vector decoded_components; - OP_REQUIRES_OK(context, RaggedComponentsFromVariant( - encoded_variant, input_ragged_rank_, - value_dtype, split_dtype, &decoded_components)); -@@ -281,7 +247,7 @@ class RaggedTensorFromVariantOp : public OpKernel { - for (int i = 0; i < encoded_variant.dims(); i++) { - encoded_dim_sizes[i] = encoded_variant.dim_size(i); - } -- RaggedTensor output_ragged; -+ RaggedTensorVariant output_ragged; - OP_REQUIRES_OK( - context, NestedStackRaggedTensors( - decoded_components, encoded_dim_sizes, input_ragged_rank_, -@@ -296,15 +262,15 @@ class RaggedTensorFromVariantOp : public OpKernel { - int output_ragged_rank_; - - void ReturnRaggedTensor(OpKernelContext* context, -- RaggedTensor ragged_tensor) { -- int ragged_rank = ragged_tensor.nested_splits.size(); -+ const RaggedTensorVariant& ragged_tensor) { -+ int ragged_rank = ragged_tensor.ragged_rank(); - OpOutputList splits_out; - OP_REQUIRES_OK(context, - context->output_list("output_nested_splits", &splits_out)); - for (int i = 0; i < ragged_rank; i++) { -- splits_out.set(i, ragged_tensor.nested_splits[i]); -+ splits_out.set(i, ragged_tensor.splits(i)); - } -- context->set_output(ragged_rank, ragged_tensor.values); -+ context->set_output(ragged_rank, ragged_tensor.values()); - } - }; - -diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -index 3190534b..a60e5c62 100644 ---- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -+++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -@@ -18,50 +18,38 @@ limitations under the License. - #include "tensorflow/core/framework/op_kernel.h" - #include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/tensor.h" -+#include "tensorflow/core/framework/tensor_shape.h" - #include "tensorflow/core/framework/variant.h" - #include "tensorflow/core/framework/variant_encode_decode.h" -+#include "tensorflow/core/framework/variant_op_registry.h" -+#include "tensorflow/core/kernels/concat_lib.h" -+#include "tensorflow/core/kernels/ragged_tensor_variant.h" - #include "tensorflow/core/lib/core/errors.h" - #include "tensorflow/core/lib/core/status.h" -+#include "tensorflow/core/util/tensor_ops_util.h" - - namespace tensorflow { - namespace { - --struct RaggedTensor { -- Tensor values; -- std::vector nested_splits; --}; -- --Status RaggedToVariant(const RaggedTensor& ragged, Tensor* encoded_list) { -- // Encode as a rank-1 Variant Tensor. -- int ragged_rank = ragged.nested_splits.size(); -- *encoded_list = Tensor(DT_VARIANT, TensorShape({ragged_rank + 1})); -- auto encoded_vec = encoded_list->vec(); -- for (int i = 0; i < ragged_rank; i++) { -- encoded_vec(i) = ragged.nested_splits[i]; -- } -- encoded_vec(ragged_rank) = ragged.values; -- return Status::OK(); --} -- - template --Status UnbatchRaggedZerothDim(const RaggedTensor& batched_ragged, -- std::vector* ragged_components) { -+Status UnbatchRaggedZerothDim( -+ const RaggedTensorVariant& batched_ragged, -+ std::vector* ragged_components) { - // Set up the component Ragged Tensors. 
-- int ragged_rank = batched_ragged.nested_splits.size(); -- auto batched_splits_top_vec = -- batched_ragged.nested_splits[0].vec(); -+ int ragged_rank = batched_ragged.ragged_rank(); -+ auto batched_splits_top_vec = batched_ragged.splits(0).vec(); - int num_components = batched_splits_top_vec.size() - 1; - int num_splits = ragged_rank - 1; - ragged_components->resize(num_components); -- for (RaggedTensor ragged_component : *ragged_components) { -- ragged_component.nested_splits.reserve(num_splits); -+ for (RaggedTensorVariant& ragged_component : *ragged_components) { -+ ragged_component.mutable_nested_splits()->reserve(num_splits); - } -- const auto& batched_flat = batched_ragged.values.flat(); -- int num_inner_elems = batched_ragged.values.NumElements(); -- if (batched_ragged.values.dim_size(0) > 1) { -- num_inner_elems /= batched_ragged.values.dim_size(0); -+ const auto& batched_flat = batched_ragged.values().flat(); -+ int num_inner_elems = batched_ragged.values().NumElements(); -+ if (batched_ragged.values().dim_size(0) > 1) { -+ num_inner_elems /= batched_ragged.values().dim_size(0); - } -- TensorShape values_shape = batched_ragged.values.shape(); -+ TensorShape values_shape = batched_ragged.values().shape(); - - // Corner case: ragged_rank == 1, e.g. [[1, 2, 3], [4, 5]] - if (num_splits == 0) { -@@ -70,10 +58,10 @@ Status UnbatchRaggedZerothDim(const RaggedTensor& batched_ragged, - int limit = batched_splits_top_vec(i + 1); - int num_values = limit - start; - values_shape.set_dim(0, num_values); -- (*ragged_components)[i].values = -- Tensor(DataTypeToEnum::value, values_shape); -+ (*ragged_components)[i].set_values( -+ Tensor(DataTypeToEnum::value, values_shape)); - auto ragged_component_values_flat = -- (*ragged_components)[i].values.flat(); -+ (*ragged_components)[i].mutable_values()->flat(); - for (int j = 0; j < num_values * num_inner_elems; j++) { - ragged_component_values_flat(j) = - batched_flat(j + start * num_inner_elems); -@@ -86,8 +74,7 @@ Status UnbatchRaggedZerothDim(const RaggedTensor& batched_ragged, - std::vector::ConstVec> batched_splits_vec; - batched_splits_vec.reserve(ragged_rank); - for (int i = 0; i < ragged_rank; i++) { -- batched_splits_vec.push_back( -- batched_ragged.nested_splits[i].vec()); -+ batched_splits_vec.push_back(batched_ragged.splits(i).vec()); - } - std::vector index(num_splits, 1); - std::vector ragged_component_values_size(num_components, 0); -@@ -104,10 +91,10 @@ Status UnbatchRaggedZerothDim(const RaggedTensor& batched_ragged, - int last_index = ragged_component_splits_vec[j - 1].size() - 1; - split_size = ragged_component_splits_vec[j - 1](last_index) + 1; - } -- (*ragged_components)[i].nested_splits.push_back( -+ (*ragged_components)[i].append_splits( - Tensor(DataTypeToEnum::value, TensorShape({split_size}))); - ragged_component_splits_vec.push_back( -- (*ragged_components)[i].nested_splits[j].vec()); -+ (*ragged_components)[i].mutable_splits(j)->vec()); - SPLIT_TYPE last_split_value = batched_splits_vec[j + 1](index[j] - 1); - ragged_component_splits_vec[j](0) = 0; - for (int k = 1; k < split_size; k++, index[j]++) { -@@ -125,10 +112,10 @@ Status UnbatchRaggedZerothDim(const RaggedTensor& batched_ragged, - for (int i = 0; i < num_components; i++) { - int num_values = ragged_component_values_size[i]; - values_shape.set_dim(0, num_values); -- (*ragged_components)[i].values = -- Tensor(DataTypeToEnum::value, values_shape); -+ (*ragged_components)[i].set_values( -+ Tensor(DataTypeToEnum::value, values_shape)); - auto 
ragged_component_values_flat = -- (*ragged_components)[i].values.flat(); -+ (*ragged_components)[i].mutable_values()->flat(); - for (int j = 0; j < num_values * num_inner_elems; j++, value_index++) { - ragged_component_values_flat(j) = batched_flat(value_index); - } -@@ -152,24 +139,21 @@ class RaggedTensorToVariantOp : public OpKernel { - OP_REQUIRES_OK(context, context->input_list("rt_nested_splits", - &ragged_nested_splits_in)); - const int ragged_nested_splits_len = ragged_nested_splits_in.size(); -- RaggedTensor batched_ragged_input; -+ RaggedTensorVariant batched_ragged_input; - // Read ragged_values input. -- batched_ragged_input.values = context->input(ragged_nested_splits_len); -- batched_ragged_input.nested_splits.reserve(ragged_nested_splits_len); -+ batched_ragged_input.set_values(context->input(ragged_nested_splits_len)); -+ batched_ragged_input.mutable_nested_splits()->reserve( -+ ragged_nested_splits_len); - for (int i = 0; i < ragged_nested_splits_len; i++) { -- batched_ragged_input.nested_splits.push_back(ragged_nested_splits_in[i]); -+ batched_ragged_input.append_splits(ragged_nested_splits_in[i]); - } - - if (!batched_input_) { -- // Encode the input as is. -- Tensor encoded_list; -- OP_REQUIRES_OK(context, -- RaggedToVariant(batched_ragged_input, &encoded_list)); - // Encode as a Scalar Variant Tensor. - Tensor* encoded_scalar; - OP_REQUIRES_OK(context, context->allocate_output(0, TensorShape({}), - &encoded_scalar)); -- encoded_scalar->scalar()() = std::move(encoded_list); -+ encoded_scalar->scalar()() = std::move(batched_ragged_input); - return; - } - -@@ -180,24 +164,19 @@ class RaggedTensorToVariantOp : public OpKernel { - "received rt_nested_splits of length 0.")); - - // Unbatch the Ragged Tensor and encode the components. -- std::vector ragged_components; -+ std::vector unbatched_ragged_input; - OP_REQUIRES_OK(context, UnbatchRaggedZerothDim( -- batched_ragged_input, &ragged_components)); -- std::vector encoded_components(ragged_components.size()); -- for (int i = 0; i < ragged_components.size(); i++) { -- OP_REQUIRES_OK(context, RaggedToVariant(ragged_components[i], -- &encoded_components[i])); -- } -+ batched_ragged_input, &unbatched_ragged_input)); - - // Bundle the encoded scalar Variant Tensors into a rank-1 Variant Tensor. -- Tensor* encoded_ragged; -- int output_size = ragged_components.size(); -+ Tensor* encoded_vector; -+ int output_size = unbatched_ragged_input.size(); - OP_REQUIRES_OK(context, - context->allocate_output(0, TensorShape({output_size}), -- &encoded_ragged)); -- auto encoded_ragged_vec = encoded_ragged->vec(); -+ &encoded_vector)); -+ auto encoded_vector_t = encoded_vector->vec(); - for (int i = 0; i < output_size; i++) { -- encoded_ragged_vec(i) = encoded_components[i]; -+ encoded_vector_t(i) = unbatched_ragged_input[i]; - } - } - -@@ -205,12 +184,81 @@ class RaggedTensorToVariantOp : public OpKernel { - bool batched_input_; - }; - --#define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \ -- REGISTER_KERNEL_BUILDER(Name("RaggedTensorToVariant") \ -- .Device(DEVICE_CPU) \ -- .TypeConstraint("Tvalues") \ -- .TypeConstraint("Tsplits"), \ -- RaggedTensorToVariantOp); -+template -+class RaggedTensorToVariantGradientOp : public OpKernel { -+ public: -+ using OpKernel::OpKernel; -+ -+ void Compute(OpKernelContext* context) override { -+ // Read inputs. 
-+ Tensor encoded_variant = context->input(0); -+ Tensor row_splits = context->input(1); -+ auto flat_row_splits = row_splits.flat(); -+ TensorShape dense_values_shape; -+ OP_REQUIRES_OK(context, -+ TensorShapeUtils::MakeShape(context->input(2).vec(), -+ &dense_values_shape)); -+ -+ const auto& flat_variants = encoded_variant.flat(); -+ -+ // Get a Tensor containing the flat_values for each variant. -+ std::vector values; -+ for (int i = 0; i < flat_variants.size(); ++i) { -+ if (const auto* encoded = flat_variants(i).get()) { -+ values.push_back(encoded->values()); -+ } else { -+ // Missing value: this happens if only some of the variant values -+ // generated by ragged_tensor_to_variant impacted the value that we're -+ // calculating the gradient for. In this case, we will see a -+ // default-constructed variant; so treat it as a zero tensor with the -+ // appropriate shape. -+ const auto value_dtype = DataTypeToEnum::v(); -+ int piece_size = flat_row_splits(i + 1) - flat_row_splits(i); -+ TensorShape zeros_shape = dense_values_shape; -+ zeros_shape.set_dim(0, piece_size); -+ Tensor zero(value_dtype, zeros_shape); -+ zero.flat() = -+ zero.flat().constant(VALUE_TYPE()); -+ values.push_back(zero); -+ } -+ } -+ -+ if (values.size() == 1) { -+ // Just one flat_value tensor: return as-is. -+ context->set_output(0, values[0]); -+ } else { -+ // Multiple flat_values tensors: concatenate them together. -+ using Piece = typename TTypes::Matrix; -+ using ConstPiece = typename TTypes::ConstMatrix; -+ std::vector> pieces; -+ pieces.reserve(values.size()); -+ for (const Tensor& t : values) { -+ pieces.emplace_back( -+ new ConstPiece(t.shaped({1, t.NumElements()}))); -+ } -+ Tensor* out = nullptr; -+ OP_REQUIRES_OK(context, -+ context->allocate_output(0, dense_values_shape, &out)); -+ Piece out_flat = -+ out->shaped({1, dense_values_shape.num_elements()}); -+ ConcatCPU(context->device(), pieces, &out_flat); -+ } -+ } -+}; -+ -+#define REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, split_type) \ -+ REGISTER_KERNEL_BUILDER(Name("RaggedTensorToVariant") \ -+ .Device(DEVICE_CPU) \ -+ .TypeConstraint("Tvalues") \ -+ .TypeConstraint("Tsplits"), \ -+ RaggedTensorToVariantOp); \ -+ REGISTER_KERNEL_BUILDER( \ -+ Name("RaggedTensorToVariantGradient") \ -+ .Device(DEVICE_CPU) \ -+ .TypeConstraint("Tvalues") \ -+ .TypeConstraint("Tsplits"), \ -+ RaggedTensorToVariantGradientOp); -+ - #define REGISTER_KERNELS(value_type) \ - REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int32) \ - REGISTER_KERNELS_WITH_SPLIT_TYPE(value_type, int64) -diff --git a/tensorflow/core/kernels/ragged_tensor_variant.cc b/tensorflow/core/kernels/ragged_tensor_variant.cc -new file mode 100644 -index 00000000..94663138 ---- /dev/null -+++ b/tensorflow/core/kernels/ragged_tensor_variant.cc -@@ -0,0 +1,86 @@ -+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. -+ -+Licensed under the Apache License, Version 2.0 (the "License"); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an "AS IS" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. 
-+==============================================================================*/ -+ -+#define EIGEN_USE_THREADS -+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM -+#define EIGEN_USE_GPU -+#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM -+ -+#include "tensorflow/core/kernels/ragged_tensor_variant.h" -+ -+namespace tensorflow { -+ -+string RaggedTensorVariant::TypeName() const { return "RaggedTensorVariant"; } -+ -+string RaggedTensorVariant::DebugString() const { -+ return absl::StrCat( -+ "RaggedTensorVariant(dtype=", DataTypeString(values_.dtype()), -+ ", ragged_rank=", nested_splits_.size(), ", splits_dtype=", -+ DataTypeString(nested_splits_.empty() ? DT_INVALID -+ : nested_splits_.back().dtype())); -+} -+ -+void RaggedTensorVariant::Encode(VariantTensorData* data) const { -+ data->set_type_name(TypeName()); -+ for (const auto& splits : nested_splits_) { -+ *data->add_tensors() = splits; -+ } -+ *data->add_tensors() = values_; -+} -+ -+bool RaggedTensorVariant::Decode(const VariantTensorData& data) { -+ if (data.tensors_size() < 1) { -+ return false; -+ } -+ nested_splits_.assign(data.tensors().begin(), -+ std::prev(data.tensors().end())); -+ values_ = data.tensors().back(); -+ return true; -+} -+ -+namespace { -+ -+Status RaggedTensorVariantDeviceCopy( -+ const RaggedTensorVariant& from, RaggedTensorVariant* to, -+ const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) { -+ TF_RETURN_IF_ERROR(copy(from.values(), to->mutable_values())); -+ // TODO(b/170415165) Should we use `copy` to move splits from device<->host? -+ *to->mutable_nested_splits() = from.nested_splits(); -+ return Status::OK(); -+} -+ -+} // namespace -+ -+REGISTER_UNARY_VARIANT_UNARY_OP_FUNCTION( -+ ZEROS_LIKE_VARIANT_UNARY_OP, DEVICE_CPU, RaggedTensorVariant, -+ RaggedTensorVariantZerosLike); -+ -+REGISTER_UNARY_VARIANT_BINARY_OP_FUNCTION( -+ ADD_VARIANT_BINARY_OP, DEVICE_CPU, RaggedTensorVariant, -+ RaggedTensorVariantBinaryAdd); -+ -+REGISTER_UNARY_VARIANT_DECODE_FUNCTION(RaggedTensorVariant, -+ "RaggedTensorVariant"); -+ -+#define REGISTER_RAGGED_TENSOR_VARIANT_COPY(DIRECTION) \ -+ INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION( \ -+ RaggedTensorVariant, DIRECTION, RaggedTensorVariantDeviceCopy) -+ -+REGISTER_RAGGED_TENSOR_VARIANT_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE); -+REGISTER_RAGGED_TENSOR_VARIANT_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST); -+REGISTER_RAGGED_TENSOR_VARIANT_COPY( -+ VariantDeviceCopyDirection::DEVICE_TO_DEVICE); -+ -+} // namespace tensorflow -diff --git a/tensorflow/core/kernels/ragged_tensor_variant.h b/tensorflow/core/kernels/ragged_tensor_variant.h -new file mode 100644 -index 00000000..730758a3 ---- /dev/null -+++ b/tensorflow/core/kernels/ragged_tensor_variant.h -@@ -0,0 +1,110 @@ -+#include "tensorflow/core/framework/tensor_key.h" -+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. -+ -+Licensed under the Apache License, Version 2.0 (the "License"); -+you may not use this file except in compliance with the License. -+You may obtain a copy of the License at -+ -+ http://www.apache.org/licenses/LICENSE-2.0 -+ -+Unless required by applicable law or agreed to in writing, software -+distributed under the License is distributed on an "AS IS" BASIS, -+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+See the License for the specific language governing permissions and -+limitations under the License. 
-+==============================================================================*/ -+ -+#ifndef TENSORFLOW_CORE_KERNELS_RAGGED_TENSOR_VARIANT_H_ -+#define TENSORFLOW_CORE_KERNELS_RAGGED_TENSOR_VARIANT_H_ -+ -+#define EIGEN_USE_THREADS -+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM -+#define EIGEN_USE_GPU -+#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM -+ -+#include -+ -+#include "tensorflow/core/framework/tensor.h" -+#include "tensorflow/core/framework/types.h" -+#include "tensorflow/core/framework/variant_op_registry.h" -+#include "tensorflow/core/framework/variant_tensor_data.h" -+#include "tensorflow/core/kernels/cwise_ops_common.h" -+#include "tensorflow/core/util/tensor_ops_util.h" -+ -+namespace tensorflow { -+ -+// Class used to store a RaggedTensor as a Variant scalar. -+class RaggedTensorVariant { -+ public: -+ RaggedTensorVariant() {} -+ RaggedTensorVariant(Tensor values, const std::vector& nested_splits) -+ : values_(std::move(values)), nested_splits_(nested_splits) {} -+ -+ // Variant support methods. -+ string TypeName() const; -+ string DebugString() const; -+ void Encode(VariantTensorData* data) const; -+ bool Decode(const VariantTensorData& data); -+ -+ // The flat_values of the RaggedTensor. -+ const Tensor& values() const { return values_; } -+ Tensor* mutable_values() { return &values_; } -+ void set_values(const Tensor& new_values) { values_ = new_values; } -+ -+ // The nested row_splits of the RaggedTensor. -+ int ragged_rank() const { return nested_splits_.size(); } -+ const std::vector& nested_splits() const { return nested_splits_; } -+ std::vector* mutable_nested_splits() { return &nested_splits_; } -+ const Tensor& splits(int i) const { return nested_splits_[i]; } -+ Tensor* mutable_splits(int i) { return &nested_splits_[i]; } -+ void set_nested_splits(const std::vector& nested_splits) { -+ nested_splits_ = nested_splits; -+ } -+ void append_splits(const Tensor& splits) { nested_splits_.push_back(splits); } -+ -+ private: -+ Tensor values_; -+ std::vector nested_splits_; -+}; -+ -+template -+Status RaggedTensorVariantZerosLike(OpKernelContext* c, -+ const RaggedTensorVariant& x, -+ RaggedTensorVariant* y) { -+ y->set_nested_splits(x.nested_splits()); -+ TF_RETURN_IF_ERROR( -+ ZerosLikeTensor(c, x.values(), y->mutable_values())); -+ return Status::OK(); -+} -+ -+template -+Status RaggedTensorVariantBinaryAdd(OpKernelContext* c, -+ const RaggedTensorVariant& x, -+ const RaggedTensorVariant& y, -+ RaggedTensorVariant* out) { -+ if (x.values().dtype() != y.values().dtype()) { -+ return errors::InvalidArgument( -+ "Can't add RaggedTensorVariants of different dtypes. One is ", -+ DataTypeString(x.values().dtype()), " and the other is ", -+ DataTypeString(y.values().dtype())); -+ } -+ if (x.ragged_rank() != y.ragged_rank()) { -+ return errors::InvalidArgument( -+ "Can't add RaggedTensorVariants of different ragged rank. 
", "One is ", -+ x.ragged_rank(), " and the other is ", y.ragged_rank()); -+ } -+ for (int i = 0; i < x.ragged_rank(); ++i) { -+ if (TensorKey(x.splits(i)) != TensorKey(y.splits(i))) { -+ return errors::InvalidArgument( -+ "Can't add RaggedTensorVariants with different row_splits."); -+ } -+ } -+ out->set_nested_splits(x.nested_splits()); -+ TF_RETURN_IF_ERROR(BinaryAddTensors(c, x.values(), y.values(), -+ out->mutable_values())); -+ return Status::OK(); -+} -+ -+} // namespace tensorflow -+ -+#endif // TENSORFLOW_CORE_KERNELS_RAGGED_TENSOR_VARIANT_H_ -diff --git a/tensorflow/core/ops/ragged_conversion_ops.cc b/tensorflow/core/ops/ragged_conversion_ops.cc -index 6bee189c..8512bcf3 100644 ---- a/tensorflow/core/ops/ragged_conversion_ops.cc -+++ b/tensorflow/core/ops/ragged_conversion_ops.cc -@@ -92,7 +92,8 @@ tensorflow::Status ValidateRowPartitionTypesAndShapes( - Status RaggedTensorToSparseShapeFn(InferenceContext* c); - Status RaggedTensorToVariantShapeFn(InferenceContext* c); - Status RaggedTensorFromVariantShapeFn(InferenceContext* c); --tensorflow::Status RaggedTensorToTensorShapeFn(InferenceContext* c); -+Status RaggedTensorToVariantGradientShapeFn(InferenceContext* c); -+Status RaggedTensorToTensorShapeFn(InferenceContext* c); - - //============================================================================== - // Registered Ops -@@ -129,6 +130,15 @@ REGISTER_OP("RaggedTensorFromVariant") - .Attr("Tsplits: {int32, int64} = DT_INT64") - .SetShapeFn(RaggedTensorFromVariantShapeFn); - -+REGISTER_OP("RaggedTensorToVariantGradient") -+ .Input("encoded_ragged_grad: variant") -+ .Input("row_splits: Tsplits") -+ .Input("dense_values_shape: int32") -+ .Output("dense_values_grad: Tvalues") -+ .Attr("Tvalues: type") -+ .Attr("Tsplits: {int32, int64} = DT_INT64") -+ .SetShapeFn(RaggedTensorToVariantGradientShapeFn); -+ - REGISTER_OP("RaggedTensorToTensor") - .Attr("T: type") - .Attr("Tindex: {int64, int32}") -@@ -201,6 +211,14 @@ Status RaggedTensorToVariantShapeFn(InferenceContext* c) { - return Status::OK(); - } - -+Status RaggedTensorToVariantGradientShapeFn(InferenceContext* c) { -+ ShapeHandle shape; -+ TF_RETURN_IF_ERROR( -+ c->MakeShapeFromShapeTensorTreatScalarAsUnknownShape(2, &shape)); -+ c->set_output(0, shape); -+ return Status::OK(); -+} -+ - Status RaggedTensorFromVariantShapeFn(InferenceContext* c) { - int64 input_ragged_rank; - TF_RETURN_IF_ERROR( -diff --git a/tensorflow/python/ops/ragged/BUILD b/tensorflow/python/ops/ragged/BUILD -index 95e5602a..34372160 100644 ---- a/tensorflow/python/ops/ragged/BUILD -+++ b/tensorflow/python/ops/ragged/BUILD -@@ -507,6 +507,7 @@ py_test( - "//tensorflow/python:framework_ops", - "//tensorflow/python:framework_test_lib", - "//tensorflow/python:platform_test", -+ "//tensorflow/python:tensor_array_grad", - "//tensorflow/python:tensor_shape", - "//tensorflow/python:tensor_spec", - "//tensorflow/python/data/ops:dataset_ops", --- -2.27.0 - diff --git a/CVE-2021-29516-4.patch b/CVE-2021-29516-4.patch deleted file mode 100644 index 7018ef9c7c580506f2f4761a11e4eede058c8268..0000000000000000000000000000000000000000 --- a/CVE-2021-29516-4.patch +++ /dev/null @@ -1,30 +0,0 @@ -From b055b9c474cd376259dde8779908f9eeaf097d93 Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Tue, 13 Apr 2021 14:49:50 -0700 -Subject: [PATCH] Fix `tf.raw_ops.RaggedTensorToVariant` invalid resize. 
- -PiperOrigin-RevId: 368299574 -Change-Id: I751c186325aa0bab397928845e790e60c2d90918 ---- - tensorflow/core/kernels/ragged_tensor_to_variant_op.cc | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -index a60e5c62..fb1f25fc 100644 ---- a/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -+++ b/tensorflow/core/kernels/ragged_tensor_to_variant_op.cc -@@ -165,6 +165,11 @@ class RaggedTensorToVariantOp : public OpKernel { - - // Unbatch the Ragged Tensor and encode the components. - std::vector unbatched_ragged_input; -+ auto batched_splits_top_vec = -+ batched_ragged_input.splits(0).vec(); -+ int num_components = batched_splits_top_vec.size() - 1; -+ OP_REQUIRES(context, num_components >= 0, -+ errors::Internal("Invalid split argument.")); - OP_REQUIRES_OK(context, UnbatchRaggedZerothDim( - batched_ragged_input, &unbatched_ragged_input)); - --- -2.27.0 - diff --git a/CVE-2021-29517-1.patch b/CVE-2021-29517-1.patch deleted file mode 100644 index a54e77b50aba0ed48c721ab0c3c9c512eeeba914..0000000000000000000000000000000000000000 --- a/CVE-2021-29517-1.patch +++ /dev/null @@ -1,611 +0,0 @@ -From cc5ea8469641b6680971eb76020407f81ab3f573 Mon Sep 17 00:00:00 2001 -From: Anna R -Date: Wed, 9 Dec 2020 16:13:53 -0800 -Subject: [PATCH] Remove changes made to support TFRT-based OpKernel classes in - ---- - tensorflow/core/framework/BUILD | 3 - - tensorflow/core/framework/numeric_op.h | 21 ++- - tensorflow/core/framework/numeric_op_base.h | 49 ----- - tensorflow/core/kernels/BUILD | 47 +---- - tensorflow/core/kernels/conv_ops_3d.cc | 153 ++++++++++++++-- - tensorflow/core/kernels/conv_ops_3d.h | 187 -------------------- - 6 files changed, 161 insertions(+), 299 deletions(-) - -diff --git a/tensorflow/core/framework/BUILD b/tensorflow/core/framework/BUILD -index d47c74a6..9b6ddb2a 100644 ---- a/tensorflow/core/framework/BUILD -+++ b/tensorflow/core/framework/BUILD -@@ -51,7 +51,6 @@ exports_files( - "model.h", - "node_def_builder.h", - "numeric_op.h", -- "numeric_op_base.h", - "op_kernel.h", - "op_requires.h", - "op_segment.h", -@@ -183,7 +182,6 @@ filegroup( - "node_def_util.h", - "node_properties.h", - "numeric_op.h", -- "numeric_op_base.h", - "numeric_types.h", - "op.h", - "op_def_builder.h", -@@ -280,7 +278,6 @@ filegroup( - "kernel_shape_util.h", - "log_memory.cc", - "log_memory.h", -- "numeric_op_base.h", - "numeric_types.h", - "op_requires.h", - "ops_util.cc", -diff --git a/tensorflow/core/framework/numeric_op.h b/tensorflow/core/framework/numeric_op.h -index 9f8ceed2..ad452bcd 100644 ---- a/tensorflow/core/framework/numeric_op.h -+++ b/tensorflow/core/framework/numeric_op.h -@@ -15,19 +15,34 @@ limitations under the License. - #ifndef TENSORFLOW_CORE_FRAMEWORK_NUMERIC_OP_H_ - #define TENSORFLOW_CORE_FRAMEWORK_NUMERIC_OP_H_ - --#include "tensorflow/core/framework/numeric_op_base.h" - #include "tensorflow/core/framework/op_kernel.h" - #include "tensorflow/core/framework/tensor.h" -+#include "tensorflow/core/framework/types.h" -+#include "tensorflow/core/framework/types.pb.h" - #include "tensorflow/core/lib/core/errors.h" - #include "tensorflow/core/lib/core/status.h" - - namespace tensorflow { - -+// One input and one output, both the same type. 
- template --using UnaryOp = UnaryOpBase; -+class UnaryOp : public OpKernel { -+ public: -+ explicit UnaryOp(OpKernelConstruction* context) : OpKernel(context) { -+ const DataType dt = DataTypeToEnum::v(); -+ OP_REQUIRES_OK(context, context->MatchSignature({dt}, {dt})); -+ } -+}; - -+// Two inputs and one output, all the same type. - template --using BinaryOp = BinaryOpBase; -+class BinaryOp : public OpKernel { -+ public: -+ explicit BinaryOp(OpKernelConstruction* context) : OpKernel(context) { -+ const DataType dt = DataTypeToEnum::v(); -+ OP_REQUIRES_OK(context, context->MatchSignature({dt, dt}, {dt})); -+ } -+}; - - // For operations where the input and output are the same shape. - // -diff --git a/tensorflow/core/framework/numeric_op_base.h b/tensorflow/core/framework/numeric_op_base.h -index be7d3bf8..e69de29b 100644 ---- a/tensorflow/core/framework/numeric_op_base.h -+++ b/tensorflow/core/framework/numeric_op_base.h -@@ -1,49 +0,0 @@ --/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. -- --Licensed under the Apache License, Version 2.0 (the "License"); --you may not use this file except in compliance with the License. --You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- --Unless required by applicable law or agreed to in writing, software --distributed under the License is distributed on an "AS IS" BASIS, --WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --See the License for the specific language governing permissions and --limitations under the License. --==============================================================================*/ -- --#ifndef TENSORFLOW_CORE_FRAMEWORK_NUMERIC_OP_BASE_H_ --#define TENSORFLOW_CORE_FRAMEWORK_NUMERIC_OP_BASE_H_ -- --#include "tensorflow/core/framework/op_requires.h" --#include "tensorflow/core/framework/types.h" --#include "tensorflow/core/framework/types.pb.h" --#include "tensorflow/core/lib/core/status.h" -- --namespace tensorflow { -- --// One input and one output, both the same type. --template --class UnaryOpBase : public OpKernelT { -- public: -- explicit UnaryOpBase(OpKernelConstructionT* construction) : -- OpKernelT(construction) { -- const DataType dt = DataTypeToEnum::v(); -- OP_REQUIRES_OK(construction, construction->MatchSignature({dt}, {dt})); -- } --}; -- --// Two inputs and one output, all the same type. --template --class BinaryOpBase : public OpKernelT { -- public: -- explicit BinaryOpBase(OpKernelConstructionT* construction) : -- OpKernelT(construction) { -- const DataType dt = DataTypeToEnum::v(); -- OP_REQUIRES_OK(construction, construction->MatchSignature({dt, dt}, {dt})); -- } --}; --} // namespace tensorflow -- --#endif // TENSORFLOW_CORE_FRAMEWORK_NUMERIC_OP_BASE_H_ -diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD -index 14f7d99b..5f8fa80b 100644 ---- a/tensorflow/core/kernels/BUILD -+++ b/tensorflow/core/kernels/BUILD -@@ -4048,48 +4048,6 @@ cc_library( - }), - ) - --# TODO(annarev): conv_ops_3d_headers currently depends on android target build --# from selected sources. We should switch to use granular dependencies instead. --# Then, we can just depend on "conv3d". 
--cc_library( -- name = "conv_3d_mobile", -- hdrs = [ -- "conv_3d.h", -- "eigen_backward_cuboid_convolutions.h", -- "eigen_convolution_helpers.h", -- "eigen_cuboid_convolution.h", -- "eigen_volume_patch.h", -- ], -- deps = [ -- ":eigen_spatial_convolutions-inl", -- ] + select({ -- "//tensorflow:android": [ -- "//tensorflow/core:portable_tensorflow_lib_lite", # TODO(annarev): exclude runtime srcs -- ], -- "//conditions:default": [ -- "//tensorflow/core:framework", -- ], -- }), --) -- --cc_library( -- name = "conv_ops_3d_headers", -- hdrs = [ -- "conv_ops_3d.h", -- ], -- deps = select({ -- "//tensorflow:android": [ -- ":conv_3d_mobile", -- "//tensorflow/core:portable_tensorflow_lib_lite", # TODO(annarev): exclude runtime srcs -- ], -- "//conditions:default": [ -- ":conv_3d", -- "//third_party/eigen3", -- "//tensorflow/core:framework", -- ], -- }), --) -- - tf_kernel_library( - name = "argmax_op", - prefix = "argmax_op", -@@ -4673,6 +4631,7 @@ tf_kernel_library( - "deep_conv2d.h", - "gemm_functors.h", - "winograd_transform.h", -+ "conv_ops_fused_impl.h", - ] + select({ - ":xsmm_convolutions": ["xsmm_conv2d.h"], - "//conditions:default": [], -@@ -4687,8 +4646,6 @@ tf_kernel_library( - prefix = "conv_ops", - deps = [ - ":conv_grad_shape_utils", -- ":conv_ops_3d_headers", -- ":bounds_check", - ":conv_2d", - ":conv_3d", - ":eigen_contraction_kernel", -@@ -6710,7 +6667,6 @@ filegroup( - "conv_2d.h", - "conv_3d.h", - "conv_ops.h", -- "conv_ops_3d.h", - "conv_ops_gpu.h", - "data_format_ops.h", - "depthtospace_op.h", -@@ -7160,7 +7116,6 @@ filegroup( - "stateful_random_ops_cpu_gpu.h", - # Allows conv_3d ops for android but excluded from *_3d* rule above. - "conv_3d.h", -- "conv_ops_3d.h", - "conv_ops_3d.cc", - "conv_ops_gpu.h", - ], -diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc -index 289a083a..52356443 100644 ---- a/tensorflow/core/kernels/conv_ops_3d.cc -+++ b/tensorflow/core/kernels/conv_ops_3d.cc -@@ -16,8 +16,7 @@ limitations under the License. 
- #define USE_EIGEN_TENSOR - #define EIGEN_USE_THREADS - --#include "tensorflow/core/kernels/conv_ops_3d.h" -- -+#include "tensorflow/core/framework/kernel_shape_util.h" - #include "tensorflow/core/framework/numeric_op.h" - #include "tensorflow/core/framework/op_kernel.h" - #include "tensorflow/core/framework/register_types.h" -@@ -51,11 +50,146 @@ namespace tensorflow { - typedef Eigen::ThreadPoolDevice CPUDevice; - typedef Eigen::GpuDevice GPUDevice; - -+template -+ struct LaunchConvOp; -+template -+struct LaunchConvOp { -+ static void launch(OpKernelContext* context, bool cudnn_use_autotune, -+ const Tensor& input, const Tensor& filter, -+ const std::array& dilations, -+ const std::array& strides, const Padding padding, -+ TensorFormat data_format, Tensor* output) { -+ OP_REQUIRES(context, data_format == FORMAT_NHWC, -+ errors::InvalidArgument("CPU implementation of Conv3D " -+ "currently only supports the NHWC " -+ "tensor format.")); -+ OP_REQUIRES(context, -+ dilations[0] == 1 && dilations[1] == 1 && dilations[2] == 1, -+ errors::InvalidArgument("CPU implementation of Conv3D " -+ "currently only supports dilated rates " -+ "of 1.")); -+ functor::CuboidConvolution()( -+ context->eigen_device(), output->tensor(), -+ input.tensor(), filter.tensor(), strides[2], strides[1], -+ strides[0], BrainPadding2EigenPadding(padding)); -+ } -+}; -+ -+template -+class Conv3DOp : public BinaryOp { -+ public: -+ explicit Conv3DOp(OpKernelConstruction* context) : BinaryOp(context) { -+ string data_format; -+ OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); -+ OP_REQUIRES(context, FormatFromString(data_format, &data_format_), -+ errors::InvalidArgument("Invalid data format")); -+ OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); -+ OP_REQUIRES(context, stride_.size() == 5, -+ errors::InvalidArgument("Sliding window strides field must " -+ "specify 5 dimensions")); -+ OP_REQUIRES( -+ context, -+ (GetTensorDim(stride_, data_format_, 'N') == 1 && -+ GetTensorDim(stride_, data_format_, 'C') == 1), -+ errors::InvalidArgument("Current implementation does not yet support " -+ "strides in the batch and depth dimensions.")); -+ OP_REQUIRES( -+ context, -+ (GetTensorDim(stride_, data_format_, '0') > 0 && -+ GetTensorDim(stride_, data_format_, '1') > 0 && -+ GetTensorDim(stride_, data_format_, '2') > 0), -+ errors::InvalidArgument("Spatial strides should be larger than 0.")); -+ OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); -+ OP_REQUIRES(context, dilation_.size() == 5, -+ errors::InvalidArgument("Dilation rates field must " -+ "specify 5 dimensions")); -+ OP_REQUIRES(context, -+ (GetTensorDim(dilation_, data_format_, 'N') == 1 && -+ GetTensorDim(dilation_, data_format_, 'C') == 1), -+ errors::InvalidArgument( -+ "Current implementation does not yet support " -+ "dilation rates in the batch and depth dimensions.")); -+ OP_REQUIRES( -+ context, -+ (GetTensorDim(dilation_, data_format_, '0') > 0 && -+ GetTensorDim(dilation_, data_format_, '1') > 0 && -+ GetTensorDim(dilation_, data_format_, '2') > 0), -+ errors::InvalidArgument("Dilated rates should be larger than 0.")); -+ OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); -+ cudnn_use_autotune_ = CudnnUseAutotune(); -+ } -+ -+ void Compute(OpKernelContext* context) override { -+ // Input tensor is of the following dimensions: -+ // [ batch, in_z, in_y, in_x, in_channels ] -+ const Tensor& input = context->input(0); -+ -+ // Input filter is of the following dimensions: -+ // [ filter_z, 
filter_y, filter_x, in_channels, out_channels] -+ const Tensor& filter = context->input(1); -+ -+ // NOTE: The ordering of the spatial dimensions is arbitrary, but has to be -+ // kept consistent between input/filter/output. -+ OP_REQUIRES(context, input.dims() == 5, -+ errors::InvalidArgument("input must be 5-dimensional")); -+ OP_REQUIRES(context, filter.dims() == 5, -+ errors::InvalidArgument("filter must be 5-dimensional")); -+ -+ const int64 in_depth = GetTensorDim(input, data_format_, 'C'); -+ const int64 in_batch = GetTensorDim(input, data_format_, 'N'); -+ -+ const int64 filter_depth = filter.dim_size(3); -+ const int64 out_depth = filter.dim_size(4); -+ -+ OP_REQUIRES(context, in_depth % filter_depth == 0, -+ errors::InvalidArgument( -+ "Input depth must be evenly divisible by filter depth: ", -+ in_depth, " vs ", filter_depth)); -+ -+ // Dimension order for these arrays is: z, y, x. -+ std::array input_size = { -+ {GetTensorDim(input, data_format_, '0'), -+ GetTensorDim(input, data_format_, '1'), -+ GetTensorDim(input, data_format_, '2')}}; -+ std::array filter_size = { -+ {filter.dim_size(0), filter.dim_size(1), filter.dim_size(2)}}; -+ std::array dilations = { -+ {GetTensorDim(dilation_, data_format_, '0'), -+ GetTensorDim(dilation_, data_format_, '1'), -+ GetTensorDim(dilation_, data_format_, '2')}}; -+ std::array strides = {{GetTensorDim(stride_, data_format_, '0'), -+ GetTensorDim(stride_, data_format_, '1'), -+ GetTensorDim(stride_, data_format_, '2')}}; -+ std::array out, padding; -+ -+ OP_REQUIRES_OK( -+ context, Get3dOutputSizeV2(input_size, filter_size, dilations, strides, -+ padding_, &out, &padding)); -+ TensorShape out_shape = ShapeFromFormat( -+ data_format_, in_batch, {{out[0], out[1], out[2]}}, out_depth); -+ Tensor* output; -+ OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); -+ -+ // Return early if nothing to do. -+ if (out_shape.num_elements() == 0) return; -+ -+ LaunchConvOp::launch(context, cudnn_use_autotune_, input, filter, -+ dilations, strides, padding_, data_format_, -+ output); -+ } -+ -+ private: -+ std::vector dilation_; -+ std::vector stride_; -+ Padding padding_; -+ TensorFormat data_format_; -+ bool cudnn_use_autotune_; -+}; -+ - #define REGISTER_CPU_KERNEL(T) \ - REGISTER_KERNEL_BUILDER( \ - Name("Conv3D").Device(DEVICE_CPU).TypeConstraint("T"), \ -- Conv3DOp); -+ Conv3DOp); - TF_CALL_half(REGISTER_CPU_KERNEL); - TF_CALL_float(REGISTER_CPU_KERNEL); - TF_CALL_double(REGISTER_CPU_KERNEL); -@@ -73,7 +207,7 @@ typedef AutoTuneSingleton --struct LaunchConvOp { -+struct LaunchConvOp { - static void launch(OpKernelContext* ctx, bool cudnn_use_autotune, - const Tensor& input_param, const Tensor& filter, - const std::array& dilations, -@@ -559,16 +693,13 @@ DECLARE_GPU_SPEC(double); - // Registration of the GPU implementations. - REGISTER_KERNEL_BUILDER( - Name("Conv3D").Device(DEVICE_GPU).TypeConstraint("T"), -- Conv3DOp); -+ Conv3DOp); - REGISTER_KERNEL_BUILDER( - Name("Conv3D").Device(DEVICE_GPU).TypeConstraint("T"), -- Conv3DOp); -+ Conv3DOp); - REGISTER_KERNEL_BUILDER( - Name("Conv3D").Device(DEVICE_GPU).TypeConstraint("T"), -- Conv3DOp); -+ Conv3DOp); - #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM - - } // namespace tensorflow -diff --git a/tensorflow/core/kernels/conv_ops_3d.h b/tensorflow/core/kernels/conv_ops_3d.h -index 9dcdea5b..e69de29b 100644 ---- a/tensorflow/core/kernels/conv_ops_3d.h -+++ b/tensorflow/core/kernels/conv_ops_3d.h -@@ -1,187 +0,0 @@ --/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
-- --Licensed under the Apache License, Version 2.0 (the "License"); --you may not use this file except in compliance with the License. --You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 -- --Unless required by applicable law or agreed to in writing, software --distributed under the License is distributed on an "AS IS" BASIS, --WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --See the License for the specific language governing permissions and --limitations under the License. --==============================================================================*/ --#ifndef TENSORFLOW_CORE_KERNELS_CONV_OPS_3D_H_ --#define TENSORFLOW_CORE_KERNELS_CONV_OPS_3D_H_ -- --#include -- --#define USE_EIGEN_TENSOR --#define EIGEN_USE_THREADS -- --#include "tensorflow/core/framework/numeric_op_base.h" --#include "tensorflow/core/framework/kernel_shape_util.h" --#include "tensorflow/core/framework/op_requires.h" --#include "tensorflow/core/framework/ops_util.h" --#include "tensorflow/core/framework/tensor.h" --#include "tensorflow/core/framework/tensor_shape.h" --#include "tensorflow/core/kernels/conv_3d.h" --#include "tensorflow/core/platform/errors.h" --#include "tensorflow/core/util/padding.h" --#include "tensorflow/core/util/tensor_format.h" --#if GOOGLE_CUDA --#include "tensorflow/core/util/use_cudnn.h" --#endif -- --namespace tensorflow { --typedef Eigen::ThreadPoolDevice CPUDevice; -- --template --struct LaunchConvOp; -- --template --struct LaunchConvOp { -- static void launch(OpKernelContextT* context, bool cudnn_use_autotune, -- const Tensor& input, const Tensor& filter, -- const std::array& dilations, -- const std::array& strides, const Padding padding, -- TensorFormat data_format, Tensor* output) { -- OP_REQUIRES(context, data_format == FORMAT_NHWC, -- errors::InvalidArgument("CPU implementation of Conv3D " -- "currently only supports the NHWC " -- "tensor format.")); -- OP_REQUIRES(context, -- dilations[0] == 1 && dilations[1] == 1 && dilations[2] == 1, -- errors::InvalidArgument("CPU implementation of Conv3D " -- "currently only supports dilated rates " -- "of 1.")); -- functor::CuboidConvolution()( -- context->template eigen_device(), output->tensor(), -- input.tensor(), filter.tensor(), strides[2], strides[1], -- strides[0], BrainPadding2EigenPadding(padding)); -- } --}; -- --template --class Conv3DOp : public BinaryOpBase { -- public: -- explicit Conv3DOp(OpKernelConstructionT* context) : -- BinaryOpBase(context) { -- string data_format; -- OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); -- OP_REQUIRES(context, FormatFromString(data_format, &data_format_), -- errors::InvalidArgument("Invalid data format")); -- OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); -- OP_REQUIRES(context, stride_.size() == 5, -- errors::InvalidArgument("Sliding window strides field must " -- "specify 5 dimensions")); -- OP_REQUIRES( -- context, -- (GetTensorDim(stride_, data_format_, 'N') == 1 && -- GetTensorDim(stride_, data_format_, 'C') == 1), -- errors::InvalidArgument("Current implementation does not yet support " -- "strides in the batch and depth dimensions.")); -- OP_REQUIRES( -- context, -- (GetTensorDim(stride_, data_format_, '0') > 0 && -- GetTensorDim(stride_, data_format_, '1') > 0 && -- GetTensorDim(stride_, data_format_, '2') > 0), -- errors::InvalidArgument("Spatial strides should be larger than 0.")); -- OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); -- OP_REQUIRES(context, 
dilation_.size() == 5, -- errors::InvalidArgument("Dilation rates field must " -- "specify 5 dimensions")); -- OP_REQUIRES(context, -- (GetTensorDim(dilation_, data_format_, 'N') == 1 && -- GetTensorDim(dilation_, data_format_, 'C') == 1), -- errors::InvalidArgument( -- "Current implementation does not yet support " -- "dilation rates in the batch and depth dimensions.")); -- OP_REQUIRES( -- context, -- (GetTensorDim(dilation_, data_format_, '0') > 0 && -- GetTensorDim(dilation_, data_format_, '1') > 0 && -- GetTensorDim(dilation_, data_format_, '2') > 0), -- errors::InvalidArgument("Dilated rates should be larger than 0.")); -- OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); --#if GOOGLE_CUDA -- cudnn_use_autotune_ = CudnnUseAutotune(); --#else -- cudnn_use_autotune_ = false; --#endif -- } -- -- void Compute(OpKernelContextT* context) override { -- // Input tensor is of the following dimensions: -- // [ batch, in_z, in_y, in_x, in_channels ] -- const Tensor& input = context->input(0); -- -- // Input filter is of the following dimensions: -- // [ filter_z, filter_y, filter_x, in_channels, out_channels] -- const Tensor& filter = context->input(1); -- -- // NOTE: The ordering of the spatial dimensions is arbitrary, but has to be -- // kept consistent between input/filter/output. -- OP_REQUIRES(context, input.dims() == 5, -- errors::InvalidArgument("input must be 5-dimensional")); -- OP_REQUIRES(context, filter.dims() == 5, -- errors::InvalidArgument("filter must be 5-dimensional")); -- -- const int64 in_depth = GetTensorDim(input, data_format_, 'C'); -- const int64 in_batch = GetTensorDim(input, data_format_, 'N'); -- -- const int64 filter_depth = filter.dim_size(3); -- const int64 out_depth = filter.dim_size(4); -- -- OP_REQUIRES(context, in_depth % filter_depth == 0, -- errors::InvalidArgument( -- "Input depth must be evenly divisible by filter depth: ", -- in_depth, " vs ", filter_depth)); -- -- // Dimension order for these arrays is: z, y, x. -- std::array input_size = { -- {GetTensorDim(input, data_format_, '0'), -- GetTensorDim(input, data_format_, '1'), -- GetTensorDim(input, data_format_, '2')}}; -- std::array filter_size = { -- {filter.dim_size(0), filter.dim_size(1), filter.dim_size(2)}}; -- std::array dilations = { -- {GetTensorDim(dilation_, data_format_, '0'), -- GetTensorDim(dilation_, data_format_, '1'), -- GetTensorDim(dilation_, data_format_, '2')}}; -- std::array strides = {{GetTensorDim(stride_, data_format_, '0'), -- GetTensorDim(stride_, data_format_, '1'), -- GetTensorDim(stride_, data_format_, '2')}}; -- std::array out, padding; -- -- OP_REQUIRES_OK( -- context, Get3dOutputSizeV2(input_size, filter_size, dilations, strides, -- padding_, &out, &padding)); -- TensorShape out_shape = ShapeFromFormat( -- data_format_, in_batch, {{out[0], out[1], out[2]}}, out_depth); -- Tensor* output; -- OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); -- -- // Return early if nothing to do. 
-- if (out_shape.num_elements() == 0) return; -- -- LaunchConvOp::launch( -- context, cudnn_use_autotune_, input, filter, -- dilations, strides, padding_, data_format_, -- output); -- } -- -- private: -- std::vector dilation_; -- std::vector stride_; -- Padding padding_; -- TensorFormat data_format_; -- bool cudnn_use_autotune_; --}; -- --} // namespace tensorflow -- -- --#endif // TENSORFLOW_CORE_KERNELS_CONV_OPS_3D_H_ --- -2.23.0 - diff --git a/CVE-2021-29517-2.patch b/CVE-2021-29517-2.patch deleted file mode 100644 index 542a3c8b23bc1172ac45afffd6036833cd9fa0a4..0000000000000000000000000000000000000000 --- a/CVE-2021-29517-2.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 799f835a3dfa00a4d852defa29b15841eea9d64f Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 19 Apr 2021 09:56:46 -0700 -Subject: [PATCH] Fix 2 issues with `Conv3D`. - ---- - tensorflow/core/kernels/conv_ops_3d.cc | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_ops_3d.cc b/tensorflow/core/kernels/conv_ops_3d.cc -index 52356443..75a0a043 100644 ---- a/tensorflow/core/kernels/conv_ops_3d.cc -+++ b/tensorflow/core/kernels/conv_ops_3d.cc -@@ -68,6 +68,11 @@ struct LaunchConvOp { - errors::InvalidArgument("CPU implementation of Conv3D " - "currently only supports dilated rates " - "of 1.")); -+ OP_REQUIRES(context, filter.dim_size(3) == input.dim_size(input.dims() - 1), -+ errors::InvalidArgument( -+ "Number of channels in filter (", filter.dim_size(3), -+ ") must match last dimension of input (", -+ input.dim_size(input.dims() - 1), ")")); - functor::CuboidConvolution()( - context->eigen_device(), output->tensor(), - input.tensor(), filter.tensor(), strides[2], strides[1], -@@ -141,6 +146,8 @@ class Conv3DOp : public BinaryOp { - const int64 filter_depth = filter.dim_size(3); - const int64 out_depth = filter.dim_size(4); - -+ OP_REQUIRES(context, filter_depth != 0, -+ errors::InvalidArgument("filter_depth must be non-zero")); - OP_REQUIRES(context, in_depth % filter_depth == 0, - errors::InvalidArgument( - "Input depth must be evenly divisible by filter depth: ", --- -2.23.0 - diff --git a/CVE-2021-29518.patch b/CVE-2021-29518.patch deleted file mode 100644 index e026c6a47a0c6fc397d29244b3b02688a1b8469b..0000000000000000000000000000000000000000 --- a/CVE-2021-29518.patch +++ /dev/null @@ -1,42 +0,0 @@ -From ff70c47a396ef1e3cb73c90513da4f5cb71bebba Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Tue, 13 Apr 2021 14:24:00 -0700 -Subject: [PATCH] Fix `tf.raw_ops.GetSessionTensor` and - `tf.raw_ops.DeleteSessionTensor` null pointer dereferences. 
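The fix deleted below replaces direct `ctx->session_state()->...` calls with an explicit null check that reports a failed precondition instead of dereferencing a null pointer. A small self-contained sketch of that guard pattern, using simplified stand-in types rather than the TensorFlow API:

// Sketch of "fetch the pointer once, check for null, only then dereference".
// SessionState here is a plain map wrapper, not tensorflow::SessionState.
#include <cstdio>
#include <string>
#include <unordered_map>

struct SessionState {
  std::unordered_map<std::string, int> tensors;
  bool GetTensor(const std::string& name, int* out) const {
    auto it = tensors.find(name);
    if (it == tensors.end()) return false;
    *out = it->second;
    return true;
  }
};

bool GetSessionTensor(const SessionState* session_state,
                      const std::string& name, int* out) {
  if (session_state == nullptr) {  // the added FailedPrecondition-style check
    std::fprintf(stderr, "GetSessionTensor called on null session state\n");
    return false;
  }
  return session_state->GetTensor(name, out);
}

int main() {
  SessionState state;
  state.tensors["h0"] = 42;
  int value = 0;
  GetSessionTensor(&state, "h0", &value);   // ok, value == 42
  GetSessionTensor(nullptr, "h0", &value);  // rejected instead of crashing
  return 0;
}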
- ---- - tensorflow/core/kernels/session_ops.cc | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - -diff --git a/tensorflow/core/kernels/session_ops.cc b/tensorflow/core/kernels/session_ops.cc -index e7e73549..dab59e70 100644 ---- a/tensorflow/core/kernels/session_ops.cc -+++ b/tensorflow/core/kernels/session_ops.cc -@@ -119,6 +119,11 @@ class GetSessionTensorOp : public OpKernel { - const string& name = handle.scalar()(); - Tensor val; - OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); -+ auto session_state = ctx->session_state(); -+ OP_REQUIRES(ctx, session_state != nullptr, -+ errors::FailedPrecondition( -+ "GetSessionTensor called on null session state")); -+ OP_REQUIRES_OK(ctx, session_state->GetTensor(name, &val)); - ctx->set_output(0, val); - } - -@@ -160,7 +165,11 @@ class DeleteSessionTensorOp : public OpKernel { - void Compute(OpKernelContext* ctx) override { - const Tensor& handle = ctx->input(0); - const string& name = handle.scalar()(); -- OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); -+ auto session_state = ctx->session_state(); -+ OP_REQUIRES(ctx, session_state != nullptr, -+ errors::FailedPrecondition( -+ "DeleteSessionTensor called on null session state")); -+ OP_REQUIRES_OK(ctx, session_state->DeleteTensor(name)); - } - - TF_DISALLOW_COPY_AND_ASSIGN(DeleteSessionTensorOp); --- -2.23.0 - diff --git a/CVE-2021-29519.patch b/CVE-2021-29519.patch deleted file mode 100644 index 2d07edad9a7fafca34ab9727b60d465ccd93de5f..0000000000000000000000000000000000000000 --- a/CVE-2021-29519.patch +++ /dev/null @@ -1,143 +0,0 @@ -From b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025 Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Thu, 15 Apr 2021 13:03:19 -0700 -Subject: [PATCH] Fix `tf.raw_ops.SparseCross` failing CHECK. - -PiperOrigin-RevId: 368701671 -Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d ---- - tensorflow/core/kernels/sparse_cross_op.cc | 55 +++++++++++++++++++--- - 1 file changed, 48 insertions(+), 7 deletions(-) - -diff --git a/tensorflow/core/kernels/sparse_cross_op.cc b/tensorflow/core/kernels/sparse_cross_op.cc -index 583235b4a309b..43b3bedc74503 100644 ---- a/tensorflow/core/kernels/sparse_cross_op.cc -+++ b/tensorflow/core/kernels/sparse_cross_op.cc -@@ -27,6 +27,7 @@ limitations under the License. - #include "tensorflow/core/framework/tensor.h" - #include "tensorflow/core/framework/tensor_shape.h" - #include "tensorflow/core/framework/types.h" -+#include "tensorflow/core/framework/types.pb.h" - #include "tensorflow/core/lib/core/stringpiece.h" - #include "tensorflow/core/lib/strings/str_util.h" - #include "tensorflow/core/platform/fingerprint.h" -@@ -460,10 +461,19 @@ int64 CalculateBatchSize(const OpInputList& shapes_list_in, - Status ValidateInput(const OpInputList& indices_list_in, - const OpInputList& values_list_in, - const OpInputList& shapes_list_in, -- const OpInputList& dense_list_in) { -+ const OpInputList& dense_list_in, -+ const DataType& internal_type) { - const auto size = indices_list_in.size(); -+ // Only perform internal_type check for SparseCrossOp. -+ // Check if the internal_type is not invalid before doing so. -+ bool check_type = internal_type != DT_INVALID; - // Validates indices_list_in OpInputList. 
- for (int i = 0; i < size; i++) { -+ if (check_type && indices_list_in[i].dtype() != DT_INT64) { -+ return errors::InvalidArgument("Input indices should be of type ", -+ DT_INT64, " but received ", -+ indices_list_in[i].dtype()); -+ } - if (!TensorShapeUtils::IsMatrix(indices_list_in[i].shape())) { - return errors::InvalidArgument( - "Input indices should be a matrix but received shape ", -@@ -482,6 +492,14 @@ Status ValidateInput(const OpInputList& indices_list_in, - values_list_in.size()); - } - for (int i = 0; i < size; i++) { -+ // Make sure to avoid the expected type to be string, but input values to be -+ // int64. -+ if (check_type && internal_type == DT_STRING && -+ values_list_in[i].dtype() == DT_INT64) { -+ return errors::InvalidArgument("Input values should be of internal type ", -+ internal_type, " but received ", -+ values_list_in[i].dtype()); -+ } - if (!TensorShapeUtils::IsVector(values_list_in[i].shape())) { - return errors::InvalidArgument( - "Input values should be a vector but received shape ", -@@ -502,6 +520,11 @@ Status ValidateInput(const OpInputList& indices_list_in, - shapes_list_in.size()); - } - for (int i = 0; i < size; i++) { -+ if (check_type && shapes_list_in[i].dtype() != DT_INT64) { -+ return errors::InvalidArgument("Input shape should be of type ", DT_INT64, -+ " but received ", -+ shapes_list_in[i].dtype()); -+ } - if (!TensorShapeUtils::IsVector(shapes_list_in[i].shape())) { - return errors::InvalidArgument( - "Input shapes should be a vector but received shape ", -@@ -517,6 +540,14 @@ Status ValidateInput(const OpInputList& indices_list_in, - - // Validates dense_list_in OpInputList - for (int i = 0; i < dense_list_in.size(); ++i) { -+ // Make sure to avoid the expected type to be string, but input values to be -+ // int64. 
-+ if (check_type && internal_type == DT_STRING && -+ dense_list_in[i].dtype() == DT_INT64) { -+ return errors::InvalidArgument("Dense inputs should be of internal type ", -+ internal_type, " but received ", -+ dense_list_in[i].dtype()); -+ } - if (!TensorShapeUtils::IsMatrix(dense_list_in[i].shape())) { - return errors::InvalidArgument( - "Dense inputs should be a matrix but received shape ", -@@ -698,6 +729,7 @@ class SparseCrossOp : public OpKernel { - int64 signed_hash_key_; - OP_REQUIRES_OK(context, context->GetAttr("hash_key", &signed_hash_key_)); - hash_key_ = static_cast(signed_hash_key_); -+ OP_REQUIRES_OK(context, context->GetAttr("internal_type", &internal_type_)); - } - - void Compute(OpKernelContext* context) override { -@@ -711,8 +743,10 @@ class SparseCrossOp : public OpKernel { - OP_REQUIRES_OK(context, - context->input_list("dense_inputs", &dense_list_in)); - -- OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, -- shapes_list_in, dense_list_in)); -+ DataType internal_type = internal_type_; -+ OP_REQUIRES_OK( -+ context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, -+ dense_list_in, internal_type)); - - std::vector>> columns = - GenerateColumnsFromInput(indices_list_in, values_list_in, -@@ -756,6 +790,7 @@ class SparseCrossOp : public OpKernel { - private: - int64 num_buckets_; - uint64 hash_key_; -+ DataType internal_type_; - }; - - class SparseCrossV2Op : public OpKernel { -@@ -773,8 +808,11 @@ class SparseCrossV2Op : public OpKernel { - OP_REQUIRES_OK(context, - context->input_list("dense_inputs", &dense_list_in)); - -- OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, -- shapes_list_in, dense_list_in)); -+ // Set internal_type to invalid_type so that the check will be ignored. -+ DataType internal_type = DT_INVALID; -+ OP_REQUIRES_OK( -+ context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, -+ dense_list_in, internal_type)); - - const Tensor* sep_t; - OP_REQUIRES_OK(context, context->input("sep", &sep_t)); -@@ -832,8 +870,11 @@ class SparseCrossHashedOp : public OpKernel { - OP_REQUIRES_OK(context, - context->input_list("dense_inputs", &dense_list_in)); - -- OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, -- shapes_list_in, dense_list_in)); -+ // Set internal_type to invalid_type so that the check will be ignored. -+ DataType internal_type = DT_INVALID; -+ OP_REQUIRES_OK( -+ context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, -+ dense_list_in, internal_type)); - - const Tensor* num_buckets_t; - OP_REQUIRES_OK(context, context->input("num_buckets", &num_buckets_t)); diff --git a/CVE-2021-29520.patch b/CVE-2021-29520.patch deleted file mode 100644 index 110468a8cd24c60d120948e1392984de7c742273..0000000000000000000000000000000000000000 --- a/CVE-2021-29520.patch +++ /dev/null @@ -1,102 +0,0 @@ -From 8f37b52e1320d8d72a9529b2468277791a261197 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 19 Apr 2021 13:46:32 -0700 -Subject: [PATCH] Validate some shape requirements for `Conv3DBackpropFilter*` - and `Conv3DBackpropInput*` ops. - -Older versions of Eigen might otherwise crash / produce OOB read on specially crafted inputs. 
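The checks added below compare `input.dim_size(4)` against `filter_shape.dim_size(3)` and `out_backprop.dim_size(4)` against `filter_shape.dim_size(4)`. For reference, this standalone sketch (not TensorFlow code) spells out which dimensions those are, assuming the usual NDHWC input layout and a [depth, height, width, in_ch, out_ch] filter layout; every concrete size is an assumed example value.

```cpp
// Standalone sketch of the channel-consistency checks added by the patch.
// Layouts assumed: NDHWC input/out_backprop, [fd, fh, fw, in_ch, out_ch] filter.
#include <array>
#include <cstdint>
#include <iostream>

int main() {
  using Shape5 = std::array<int64_t, 5>;
  const Shape5 input        = {2, 8, 8, 8, 3};   // [N, D, H, W, C]
  const Shape5 filter_sizes = {3, 3, 3, 3, 16};  // [fd, fh, fw, in_ch, out_ch]
  const Shape5 out_backprop = {2, 8, 8, 8, 16};  // [N, D', H', W', out_ch]

  // Mirrors: input.dim_size(4) == filter_shape.dim_size(3)
  if (input[4] != filter_sizes[3]) {
    std::cerr << "input and filter_sizes must have the same number of channels\n";
    return 1;
  }
  // Mirrors: out_backprop.dim_size(4) == filter_shape.dim_size(4)
  if (out_backprop[4] != filter_sizes[4]) {
    std::cerr << "out_backprop and filter_sizes must have the same number of channels\n";
    return 1;
  }
  std::cout << "shapes are consistent for the 3-D backprop kernels\n";
  return 0;
}
```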
- -PiperOrigin-RevId: 369293977 -Change-Id: I58f51445a93936d7cf8e616f75de17677df36718 ---- - tensorflow/core/kernels/conv_grad_ops_3d.cc | 56 +++++++++++++++++++++ - 1 file changed, 56 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc -index f736a12fb1ca3..8c72d01578d6d 100644 ---- a/tensorflow/core/kernels/conv_grad_ops_3d.cc -+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc -@@ -239,6 +239,20 @@ class Conv3DBackpropInputOp : public OpKernel { - input_shape = context->input(0).shape(); - } - -+ OP_REQUIRES( -+ context, input_shape.dim_size(4) == filter_shape.dim_size(3), -+ errors::InvalidArgument("input and filter_sizes must have the same " -+ "number of channels. Got ", -+ input_shape.dim_size(4), " for input and ", -+ filter_shape.dim_size(3), " for filter_sizes")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), -+ errors::InvalidArgument("out_backprop and filter_sizes must have the " -+ "same number of channels. Got ", -+ out_backprop_shape.dim_size(4), -+ " for out_backprop and ", -+ filter_shape.dim_size(4), " for filter_sizes")); -+ - ConvBackpropDimensions dims; - OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( - "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, -@@ -346,6 +360,20 @@ class Conv3DCustomBackpropInputOp : public OpKernel { - input_shape = context->input(0).shape(); - } - -+ OP_REQUIRES( -+ context, input_shape.dim_size(4) == filter_shape.dim_size(3), -+ errors::InvalidArgument("input and filter_sizes must have the same " -+ "number of channels. Got ", -+ input_shape.dim_size(4), " for input and ", -+ filter_shape.dim_size(3), " for filter_sizes")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), -+ errors::InvalidArgument("out_backprop and filter_sizes must have the " -+ "same number of channels. Got ", -+ out_backprop_shape.dim_size(4), -+ " for out_backprop and ", -+ filter_shape.dim_size(4), " for filter_sizes")); -+ - ConvBackpropDimensions dims; - OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( - "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, -@@ -696,6 +724,20 @@ class Conv3DBackpropFilterOp : public OpKernel { - filter_shape = context->input(1).shape(); - } - -+ OP_REQUIRES( -+ context, input_shape.dim_size(4) == filter_shape.dim_size(3), -+ errors::InvalidArgument("input and filter_sizes must have the same " -+ "number of channels. Got ", -+ input_shape.dim_size(4), " for input and ", -+ filter_shape.dim_size(3), " for filter_sizes")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), -+ errors::InvalidArgument("out_backprop and filter_sizes must have the " -+ "same number of channels. Got ", -+ out_backprop_shape.dim_size(4), -+ " for out_backprop and ", -+ filter_shape.dim_size(4), " for filter_sizes")); -+ - ConvBackpropDimensions dims; - OP_REQUIRES_OK(context, - ConvBackpropComputeDimensions( -@@ -808,6 +850,20 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { - filter_shape = context->input(1).shape(); - } - -+ OP_REQUIRES( -+ context, input_shape.dim_size(4) == filter_shape.dim_size(3), -+ errors::InvalidArgument("input and filter_sizes must have the same " -+ "number of channels. 
Got ", -+ input_shape.dim_size(4), " for input and ", -+ filter_shape.dim_size(3), " for filter_sizes")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), -+ errors::InvalidArgument("out_backprop and filter_sizes must have the " -+ "same number of channels. Got ", -+ out_backprop_shape.dim_size(4), -+ " for out_backprop and ", -+ filter_shape.dim_size(4), " for filter_sizes")); -+ - ConvBackpropDimensions dims; - OP_REQUIRES_OK(context, - ConvBackpropComputeDimensions( diff --git a/CVE-2021-29521.patch b/CVE-2021-29521.patch deleted file mode 100644 index 16a6ea723856ef6a29cd589a50da2a050ab7be2a..0000000000000000000000000000000000000000 --- a/CVE-2021-29521.patch +++ /dev/null @@ -1,35 +0,0 @@ -From c57c0b9f3a4f8684f3489dd9a9ec627ad8b599f5 Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Mon, 19 Apr 2021 11:33:50 -0700 -Subject: [PATCH] Fix the segfault in `tf.raw_ops.SparseCountSparseOutput`. - ---- - tensorflow/core/kernels/count_ops.cc | 10 +++++++++- - 1 file changed, 9 insertions(+), 1 deletion(-) - -diff --git a/tensorflow/core/kernels/count_ops.cc b/tensorflow/core/kernels/count_ops.cc -index b7bb3ed9..67aafebe 100644 ---- a/tensorflow/core/kernels/count_ops.cc -+++ b/tensorflow/core/kernels/count_ops.cc -@@ -200,9 +200,17 @@ class SparseCount : public OpKernel { - "The shape argument requires at least one element.")); - - bool is_1d = shape.NumElements() == 1; -- int num_batches = is_1d ? 1 : shape.flat()(0); -+ auto shape_vector = shape.flat(); -+ int num_batches = is_1d ? 1 : shape_vector(0); - int num_values = values.NumElements(); - -+ for (int b = 0; b < shape_vector.size(); b++) { -+ OP_REQUIRES(context, shape_vector(b) >= 0, -+ errors::InvalidArgument( -+ "Elements in dense_shape must be >= 0. Instead got:", -+ shape.DebugString())); -+ } -+ - OP_REQUIRES(context, num_values == indices.shape().dim_size(0), - errors::InvalidArgument( - "Number of values must match first dimension of indices.", --- -2.23.0 - diff --git a/CVE-2021-29522.patch b/CVE-2021-29522.patch deleted file mode 100644 index cb215cd455a2c4a0018133cd7732afc6d0925f3b..0000000000000000000000000000000000000000 --- a/CVE-2021-29522.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 311403edbc9816df80274bd1ea8b3c0c0f22c3fa Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 19 Apr 2021 16:00:40 -0700 -Subject: [PATCH] Eliminate a division by 0 in 3D convolutions. - -Also prevent a CHECK failed introduced in the most recent change. 
- -PiperOrigin-RevId: 369322073 -Change-Id: I4f609c028f89565fb2b49c3fdd20b63496582bae ---- - tensorflow/core/kernels/conv_grad_ops_3d.cc | 42 +++++++++++++++++++++ - 1 file changed, 42 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_grad_ops_3d.cc b/tensorflow/core/kernels/conv_grad_ops_3d.cc -index 8c72d01578d6d..c0b57a7ae5606 100644 ---- a/tensorflow/core/kernels/conv_grad_ops_3d.cc -+++ b/tensorflow/core/kernels/conv_grad_ops_3d.cc -@@ -239,6 +239,14 @@ class Conv3DBackpropInputOp : public OpKernel { - input_shape = context->input(0).shape(); - } - -+ OP_REQUIRES(context, input_shape.dims() == 5, -+ errors::InvalidArgument("input tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, filter_shape.dims() == 5, -+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dims() == 5, -+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); - OP_REQUIRES( - context, input_shape.dim_size(4) == filter_shape.dim_size(3), - errors::InvalidArgument("input and filter_sizes must have the same " -@@ -360,6 +368,14 @@ class Conv3DCustomBackpropInputOp : public OpKernel { - input_shape = context->input(0).shape(); - } - -+ OP_REQUIRES(context, input_shape.dims() == 5, -+ errors::InvalidArgument("input tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, filter_shape.dims() == 5, -+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dims() == 5, -+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); - OP_REQUIRES( - context, input_shape.dim_size(4) == filter_shape.dim_size(3), - errors::InvalidArgument("input and filter_sizes must have the same " -@@ -444,6 +460,11 @@ class Conv3DCustomBackpropInputOp : public OpKernel { - // contraction compared to sharding and matmuls. - const bool use_parallel_contraction = dims.batch_size == 1; - -+ OP_REQUIRES( -+ context, work_unit_size > 0, -+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors " -+ "must all have at least 1 element")); -+ - const size_t shard_size = - use_parallel_contraction - ? 
1 -@@ -724,6 +745,14 @@ class Conv3DBackpropFilterOp : public OpKernel { - filter_shape = context->input(1).shape(); - } - -+ OP_REQUIRES(context, input_shape.dims() == 5, -+ errors::InvalidArgument("input tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, filter_shape.dims() == 5, -+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dims() == 5, -+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); - OP_REQUIRES( - context, input_shape.dim_size(4) == filter_shape.dim_size(3), - errors::InvalidArgument("input and filter_sizes must have the same " -@@ -850,6 +879,14 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { - filter_shape = context->input(1).shape(); - } - -+ OP_REQUIRES(context, input_shape.dims() == 5, -+ errors::InvalidArgument("input tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, filter_shape.dims() == 5, -+ errors::InvalidArgument("filter_sizes tensor must have 5 dimensions")); -+ OP_REQUIRES( -+ context, out_backprop_shape.dims() == 5, -+ errors::InvalidArgument("out_backprop tensor must have 5 dimensions")); - OP_REQUIRES( - context, input_shape.dim_size(4) == filter_shape.dim_size(3), - errors::InvalidArgument("input and filter_sizes must have the same " -@@ -936,6 +973,11 @@ class Conv3DCustomBackpropFilterOp : public OpKernel { - - const int64 work_unit_size = size_A + size_B + size_C; - -+ OP_REQUIRES( -+ context, work_unit_size > 0, -+ errors::InvalidArgument("input, filter_sizes and out_backprop tensors " -+ "must all have at least 1 element")); -+ - const size_t shard_size = - (target_working_set_size + work_unit_size - 1) / work_unit_size; - diff --git a/CVE-2021-29524.patch b/CVE-2021-29524.patch deleted file mode 100644 index b12fac657c54409b2c04b6af6be35635d1265cc4..0000000000000000000000000000000000000000 --- a/CVE-2021-29524.patch +++ /dev/null @@ -1,26 +0,0 @@ -From fca9874a9b42a2134f907d2fb46ab774a831404a Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 19 Apr 2021 17:33:11 -0700 -Subject: [PATCH] Prevent another division by zero. - -PiperOrigin-RevId: 369338598 -Change-Id: I55471d363e401fdcf8d259670ad4eef672b731e2 ---- - tensorflow/core/kernels/conv_grad_shape_utils.cc | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_grad_shape_utils.cc b/tensorflow/core/kernels/conv_grad_shape_utils.cc -index 7543ce669923a..805f5809a472e 100644 ---- a/tensorflow/core/kernels/conv_grad_shape_utils.cc -+++ b/tensorflow/core/kernels/conv_grad_shape_utils.cc -@@ -127,6 +127,10 @@ Status ConvBackpropComputeDimensionsV2( - // dimensions of the filter Tensor. 
- VLOG(2) << "input vs filter_in depth " << dims->in_depth << " " - << filter_shape.dim_size(num_dims - 2); -+ if (filter_shape.dim_size(num_dims - 2) <= 0) { -+ return errors ::InvalidArgument( -+ label, ": filter depth must be strictly greated than zero"); -+ } - if (dims->in_depth % filter_shape.dim_size(num_dims - 2)) { - return errors::InvalidArgument( - label, ": input depth must be evenly divisible by filter depth"); diff --git a/CVE-2021-29526-1.patch b/CVE-2021-29526-1.patch deleted file mode 100644 index 87eb9fd5772d6fea3dd181a9798fee98255d83b0..0000000000000000000000000000000000000000 --- a/CVE-2021-29526-1.patch +++ /dev/null @@ -1,322 +0,0 @@ -From 7b8db6083b34520688dbc71f341f7aeaf156bf17 Mon Sep 17 00:00:00 2001 -From: Eugene Zhulenev -Date: Fri, 19 Mar 2021 16:16:41 -0700 -Subject: [PATCH] Implement grouped convolution on CPU - -To get better compute resources utilization group-compute loop has to be parallelized, but it involves a lot of changes in Conv2D primitives. Will address that later if it will be critical for some of the users. - -Fix for: https://github.com/tensorflow/tensorflow/issues/29005 - -PiperOrigin-RevId: 363991782 -Change-Id: I97f375b1133833c4de5181199316be7cbf4ebee0 ---- - tensorflow/core/kernels/BUILD | 1 + - tensorflow/core/kernels/conv_2d.h | 54 +++++++ - tensorflow/core/kernels/conv_ops.cc | 133 ++++++++++++++++-- - .../python/kernel_tests/conv_ops_test.py | 20 +-- - 4 files changed, 189 insertions(+), 19 deletions(-) - -diff --git a/tensorflow/core/kernels/BUILD b/tensorflow/core/kernels/BUILD -index 8e49f1e0a5caf..bc455626f4322 100644 ---- a/tensorflow/core/kernels/BUILD -+++ b/tensorflow/core/kernels/BUILD -@@ -3818,6 +3818,7 @@ tf_kernel_library( - ":ops_util", - "@com_google_absl//absl/base:dynamic_annotations", - "@com_google_absl//absl/strings", -+ "@com_google_absl//absl/synchronization", - "//third_party/eigen3", - "//tensorflow/core:core_cpu", - "//tensorflow/core:framework", -diff --git a/tensorflow/core/kernels/conv_2d.h b/tensorflow/core/kernels/conv_2d.h -index b9a8c977e11ee..87df4a848dd56 100644 ---- a/tensorflow/core/kernels/conv_2d.h -+++ b/tensorflow/core/kernels/conv_2d.h -@@ -43,6 +43,9 @@ void SpatialConvolutionFunc(const Device& d, Output output, Input input, - padding_bottom); - } - -+// TODO(ezhulenev): Non-templated `operator()` are required by explicit template -+// instantiations for the GPU device. However they are almost certainly not used -+// in any of the kernel implementation. Check if they can be removed. 
- template - struct SpatialConvolution { -@@ -55,6 +58,16 @@ struct SpatialConvolution { - SpatialConvolutionFunc(d, output, input, filter, row_stride, col_stride, - row_dilation, col_dilation, padding, output_kernel); - } -+ -+ template -+ void operator()(const Device& d, Output output, Input input, Filter filter, -+ int row_stride, int col_stride, int row_dilation, -+ int col_dilation, const Eigen::PaddingType& padding, -+ const OutputKernel& output_kernel = OutputKernel()) { -+ SpatialConvolutionFunc(d, output, input, filter, row_stride, col_stride, -+ row_dilation, col_dilation, padding, output_kernel); -+ } -+ - void operator()(const Device& d, typename TTypes::Tensor output, - typename TTypes::ConstTensor input, - typename TTypes::ConstTensor filter, int row_stride, -@@ -67,6 +80,18 @@ struct SpatialConvolution { - col_dilation, Eigen::PaddingType::PADDING_VALID, output_kernel, - padding_top, padding_bottom, padding_left, padding_right); - } -+ -+ template -+ void operator()(const Device& d, Output output, Input input, Filter filter, -+ int row_stride, int col_stride, int row_dilation, -+ int col_dilation, int padding_top, int padding_bottom, -+ int padding_left, int padding_right, -+ const OutputKernel& output_kernel = OutputKernel()) { -+ SpatialConvolutionFunc( -+ d, output, input, filter, row_stride, col_stride, row_dilation, -+ col_dilation, Eigen::PaddingType::PADDING_VALID, output_kernel, -+ padding_top, padding_bottom, padding_left, padding_right); -+ } - }; - - template -@@ -84,6 +109,20 @@ struct SpatialConvolution { - row_dilation, output_kernel) - .template cast(); - } -+ -+ template -+ void operator()(const Device& d, Output output, Input input, Filter filter, -+ int row_stride, int col_stride, int row_dilation, -+ int col_dilation, const Eigen::PaddingType& padding, -+ const OutputKernel& output_kernel = OutputKernel()) { -+ output.device(d) = -+ Eigen::SpatialConvolution(input.template cast(), -+ filter.template cast(), col_stride, -+ row_stride, padding, col_dilation, -+ row_dilation, output_kernel) -+ .template cast(); -+ } -+ - void operator()(const Device& d, - typename TTypes::Tensor output, - typename TTypes::ConstTensor input, -@@ -100,6 +139,21 @@ struct SpatialConvolution { - padding_bottom) - .template cast(); - } -+ -+ template -+ void operator()(const Device& d, Output output, Input input, Filter filter, -+ int row_stride, int col_stride, int row_dilation, -+ int col_dilation, int padding_top, int padding_bottom, -+ int padding_left, int padding_right, -+ const OutputKernel& output_kernel = OutputKernel()) { -+ output.device(d) = -+ Eigen::SpatialConvolution( -+ input.template cast(), filter.template cast(), -+ col_stride, row_stride, Eigen::PaddingType::PADDING_VALID, -+ col_dilation, row_dilation, output_kernel, padding_left, -+ padding_right, padding_top, padding_bottom) -+ .template cast(); -+ } - }; - - template -diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc -index 025a8e37a94e9..8fdfe04bd1c67 100644 ---- a/tensorflow/core/kernels/conv_ops.cc -+++ b/tensorflow/core/kernels/conv_ops.cc -@@ -30,6 +30,7 @@ limitations under the License. - #include - #include - -+#include "absl/synchronization/blocking_counter.h" - #include "tensorflow/core/framework/allocator.h" - #include "tensorflow/core/framework/bounds_check.h" - #include "tensorflow/core/framework/kernel_shape_util.h" -@@ -138,6 +139,98 @@ struct LaunchGeneric { - } - } - }; -+ -+// Compute grouped 2D convolutions on CPU. 
Unlike grouped convolution -+// implementation in cuDNN this is faaaaaar from optimal and needs more work -+// to deliver competitive performance. Currently it exists to close the feature -+// parity gap between convolution operations on different devices. -+template -+struct LaunchGrouped { -+ void operator()(OpKernelContext* ctx, const Tensor& input, -+ const Tensor& filter, int row_stride, int col_stride, -+ int row_dilation, int col_dilation, const Padding& padding, -+ const std::vector& explicit_paddings, Tensor* output, -+ TensorFormat data_format) { -+ DCHECK(data_format == FORMAT_NHWC) -+ << "Grouped conv implementation only " -+ "supports NHWC tensor format for now."; -+ -+ const int64 in_depth = input.dim_size(3); -+ const int64 patch_depth = filter.dim_size(2); -+ const int64 num_groups = in_depth / patch_depth; -+ -+ // Shuffle input/filter tensors to have group as a leading dimension. -+ std::array shuffle({3, 0, 1, 2, 4}); -+ -+ // Compute pre shuffle dimemnsions. -+ auto pre_shuffle = [&](const Tensor& tensor) -> std::array { -+ return {tensor.dim_size(0), tensor.dim_size(1), tensor.dim_size(2), -+ num_groups, tensor.dim_size(3) / num_groups}; -+ }; -+ -+ // Compute post shuffle dimemnsions. -+ auto post_shuffle = [&](const Tensor& tensor) -> std::array { -+ return {num_groups, tensor.dim_size(0), tensor.dim_size(1), -+ tensor.dim_size(2), tensor.dim_size(3) / num_groups}; -+ }; -+ -+ auto& device = ctx->eigen_device(); -+ -+ absl::BlockingCounter shuffles_completed(2); -+ auto on_shuffled = [&]() { shuffles_completed.DecrementCount(); }; -+ -+ // Shuffle input into temporary tensor. -+ Tensor input_shuffled(input.dtype(), TensorShape(post_shuffle(input))); -+ input_shuffled.tensor().device(device, on_shuffled) = -+ input.shaped(pre_shuffle(input)).shuffle(shuffle); -+ -+ // Shuffle filter into temporary tensor. -+ Tensor filter_shuffled(filter.dtype(), TensorShape(post_shuffle(filter))); -+ filter_shuffled.tensor().device(device, on_shuffled) = -+ filter.shaped(pre_shuffle(filter)).shuffle(shuffle); -+ -+ // Wait for the completion of input/filter shuffles. -+ shuffles_completed.Wait(); -+ -+ // Write group convolution results into temporary output tensor. -+ Tensor output_shuffled(output->dtype(), TensorShape(post_shuffle(*output))); -+ -+ for (int64 i = 0; i < num_groups; ++i) { -+ // TODO(ezhulenev): Run this loop using `parallelFor` (regular parallelFor -+ // will lead to deadlock, SpatialConvolution has to use async Eigen -+ // assignment). This requires small changes to Eigen to support async -+ // exeuction for tensor chipping operation. -+ -+ // TODO(ezhulenev): Grouped convolution should also support 1x1 filter -+ // optimization. -+ -+ auto input_slice = input_shuffled.tensor().template chip<0>(i); -+ auto filter_slice = filter_shuffled.tensor().template chip<0>(i); -+ auto output_slice = output_shuffled.tensor().template chip<0>(i); -+ -+ if (padding == EXPLICIT) { -+ functor::SpatialConvolution()( -+ ctx->eigen_device(), output_slice, input_slice, -+ filter_slice, row_stride, col_stride, row_dilation, col_dilation, -+ static_cast(explicit_paddings[2]), -+ static_cast(explicit_paddings[3]), -+ static_cast(explicit_paddings[4]), -+ static_cast(explicit_paddings[5])); -+ } else { -+ functor::SpatialConvolution()( -+ ctx->eigen_device(), output_slice, input_slice, -+ filter_slice, row_stride, col_stride, row_dilation, col_dilation, -+ BrainPadding2EigenPadding(padding)); -+ } -+ } -+ -+ // Shuffle temporary output back into pre-shuffled shape. 
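The `LaunchGrouped` path above shuffles the group dimension to the front of both the input and the filter and then convolves each group slice independently. Below is a minimal standalone sketch of that shape bookkeeping, reusing the dimensions exercised by the test later in this patch ([10, 32, 32, 16] input, [3, 3, 4, 8] filter) together with the divisibility checks from `LaunchConv2DOp`; it is an illustration only, not TensorFlow code.

```cpp
// Standalone illustration of the per-group shapes used by LaunchGrouped.
#include <array>
#include <cstdint>
#include <iostream>

int main() {
  const int64_t N = 10, H = 32, W = 32, in_depth = 16;
  const int64_t fh = 3, fw = 3, patch_depth = 4, out_depth = 8;

  // Divisibility checks mirroring LaunchConv2DOp in the patch.
  if (patch_depth <= 0 || in_depth % patch_depth != 0) {
    std::cerr << "input depth must be evenly divisible by filter depth\n";
    return 1;
  }
  const int64_t num_groups = in_depth / patch_depth;
  if (out_depth % num_groups != 0 || out_depth < num_groups) {
    std::cerr << "output depth must be evenly divisible by number of groups\n";
    return 1;
  }

  // After the shuffle, the group index leads and each slice is convolved alone:
  //   input  [N, H, W, in_depth]              -> [num_groups, N, H, W, patch_depth]
  //   filter [fh, fw, patch_depth, out_depth] -> [num_groups, fh, fw, patch_depth, out_depth / num_groups]
  const std::array<int64_t, 5> input_grouped  = {num_groups, N, H, W, in_depth / num_groups};
  const std::array<int64_t, 5> filter_grouped = {num_groups, fh, fw, patch_depth, out_depth / num_groups};

  std::cout << "num_groups = " << num_groups << "\n";                     // 4
  std::cout << "per-group input depth  = " << input_grouped[4] << "\n";   // 4 (= patch_depth)
  std::cout << "per-group output depth = " << filter_grouped[4] << "\n";  // 2 (= out_depth / num_groups)
  return 0;
}
```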
-+ std::array rev_shuffle({1, 2, 3, 0, 4}); -+ output->shaped(pre_shuffle(*output)).device(device) = -+ output_shuffled.tensor().shuffle(rev_shuffle); -+ } -+}; -+ - } // namespace - - template -@@ -155,14 +248,6 @@ struct LaunchConv2DOp { - ToString(data_format))); - return; - } -- const int64 in_depth = GetTensorDim(input, data_format, 'C'); -- OP_REQUIRES(ctx, in_depth == filter.dim_size(2), -- errors::Unimplemented( -- "The Conv2D op currently does not support grouped " -- "convolutions on the CPU. A grouped convolution was " -- "attempted to be run because the input depth of ", -- in_depth, " does not match the filter input depth of ", -- filter.dim_size(2))); - - for (int64 explicit_padding : explicit_paddings) { - if (!FastBoundsCheck(explicit_padding, std::numeric_limits::max())) { -@@ -170,9 +255,35 @@ struct LaunchConv2DOp { - return; - } - } -- LaunchGeneric()(ctx, input, filter, row_stride, col_stride, -- row_dilation, col_dilation, padding, -- explicit_paddings, output, data_format); -+ -+ const int64 in_depth = input.dim_size(3); -+ const int64 out_depth = output->dim_size(3); -+ const int64 patch_depth = filter.dim_size(2); -+ -+ if (in_depth % patch_depth != 0) { -+ ctx->SetStatus(errors::InvalidArgument( -+ "input depth must be evenly divisible by filter depth: ", in_depth, -+ " vs ", patch_depth)); -+ return; -+ } -+ -+ const int64 num_groups = in_depth / patch_depth; -+ if (out_depth % num_groups != 0 || out_depth < num_groups) { -+ ctx->SetStatus(errors::InvalidArgument( -+ "output depth must be evenly divisible by number of groups: ", -+ out_depth, " vs ", num_groups)); -+ return; -+ } -+ -+ if (in_depth != patch_depth) { -+ LaunchGrouped()(ctx, input, filter, row_stride, col_stride, -+ row_dilation, col_dilation, padding, explicit_paddings, -+ output, data_format); -+ } else { -+ LaunchGeneric()(ctx, input, filter, row_stride, col_stride, -+ row_dilation, col_dilation, padding, -+ explicit_paddings, output, data_format); -+ } - } - }; - -diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py -index 44a67ccc55f0a..92af04359caa9 100644 ---- a/tensorflow/python/kernel_tests/conv_ops_test.py -+++ b/tensorflow/python/kernel_tests/conv_ops_test.py -@@ -834,17 +834,21 @@ def MakeConv2d(inputs, filters): - results[0], results[1], atol=tol_to_use, rtol=tol_to_use) - - @test_util.run_in_graph_and_eager_modes -- @test_util.run_cuda_only - def testConv2DGroupConvFwd(self): -- for data_format in ["NHWC", "NCHW"]: -+ if test.is_gpu_available(cuda_only=True): -+ data_formats = ["NHWC", "NCHW"] -+ else: -+ data_formats = ["NHWC"] -+ for data_format in data_formats: - for dilation in [1, 2]: - for stride in [1, 2]: -- self._VerifyGroupConvFwd([10, 32, 32, 16], [3, 3, 4, 8], -- dilations=[dilation, dilation], -- strides=[stride, stride], -- padding="SAME", -- data_format=data_format, -- dtype=dtypes.float32) -+ for filter_dims in [[3, 3, 4, 8], [1, 1, 2, 16]]: -+ self._VerifyGroupConvFwd([10, 32, 32, 16], filter_dims, -+ dilations=[dilation, dilation], -+ strides=[stride, stride], -+ padding="SAME", -+ data_format=data_format, -+ dtype=dtypes.float32) - - @test_util.deprecated_graph_mode_only - @test_util.run_cuda_only diff --git a/CVE-2021-29526-2.patch b/CVE-2021-29526-2.patch deleted file mode 100644 index 5a1bb66149131a84feec7dfe818ab8546784eab9..0000000000000000000000000000000000000000 --- a/CVE-2021-29526-2.patch +++ /dev/null @@ -1,50 +0,0 @@ -From b12aa1d44352de21d1a6faaf04172d8c2508b42b Mon Sep 17 00:00:00 2001 -From: 
Mihai Maruseac -Date: Mon, 19 Apr 2021 18:32:56 -0700 -Subject: [PATCH] Fix one more FPE. - ---- - tensorflow/core/kernels/conv_ops.cc | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_ops.cc b/tensorflow/core/kernels/conv_ops.cc -index ef13eb3f..2d357710 100644 ---- a/tensorflow/core/kernels/conv_ops.cc -+++ b/tensorflow/core/kernels/conv_ops.cc -@@ -260,6 +260,11 @@ struct LaunchConv2DOp { - const int64 out_depth = output->dim_size(3); - const int64 patch_depth = filter.dim_size(2); - -+ if (patch_depth <= 0) { -+ ctx->SetStatus(errors::InvalidArgument( -+ "filter depth must be stricly positive, got ", patch_depth)); -+ return; -+ } - if (in_depth % patch_depth != 0) { - ctx->SetStatus(errors::InvalidArgument( - "input depth must be evenly divisible by filter depth: ", in_depth, -@@ -268,6 +273,11 @@ struct LaunchConv2DOp { - } - - const int64 num_groups = in_depth / patch_depth; -+ if (num_groups <= 0) { -+ ctx->SetStatus(errors::InvalidArgument( -+ "number of groups must be stricly positive, got ", num_groups)); -+ return; -+ } - if (out_depth % num_groups != 0 || out_depth < num_groups) { - ctx->SetStatus(errors::InvalidArgument( - "output depth must be evenly divisible by number of groups: ", -@@ -536,6 +546,9 @@ Status ComputeConv2DDimension(const Conv2DParameters& params, - errors::InvalidArgument("Patch depth too large")); - const int in_depth = static_cast(in_depth_raw); - const int patch_depth = static_cast(patch_depth_raw); -+ TF_REQUIRES(patch_depth > 0, -+ errors::InvalidArgument( -+ "filter depth must be stricly positive, got", patch_depth)); - TF_REQUIRES(in_depth % patch_depth == 0, - errors::InvalidArgument( - "input depth must be evenly divisible by filter depth: ", --- -2.23.0 - diff --git a/CVE-2021-29527.patch b/CVE-2021-29527.patch deleted file mode 100644 index 065cec320c0400553ecc4a1cda63426338a617c7..0000000000000000000000000000000000000000 --- a/CVE-2021-29527.patch +++ /dev/null @@ -1,49 +0,0 @@ -From cfa91be9863a91d5105a3b4941096044ab32036b Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Mon, 19 Apr 2021 18:58:47 -0700 -Subject: [PATCH] Fix one FPE and remove two CHECK-fails. - -PiperOrigin-RevId: 369349640 -Change-Id: I1fedbfc2b5bab635c5cb51f103d7c9176f79831a ---- - tensorflow/core/kernels/quantized_conv_ops.cc | 13 +++++++++++-- - 1 file changed, 11 insertions(+), 2 deletions(-) - -diff --git a/tensorflow/core/kernels/quantized_conv_ops.cc b/tensorflow/core/kernels/quantized_conv_ops.cc -index a4d36cca3e408..a339de8cfc8fa 100644 ---- a/tensorflow/core/kernels/quantized_conv_ops.cc -+++ b/tensorflow/core/kernels/quantized_conv_ops.cc -@@ -18,6 +18,8 @@ limitations under the License. - #include - #include - -+#include "tensorflow/core/platform/errors.h" -+ - #define EIGEN_USE_THREADS - - #define GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK -@@ -227,8 +229,12 @@ class Im2ColConvFunctor { - return; - } - -- CHECK_GT(output_width, 0); -- CHECK_GT(output_height, 0); -+ OP_REQUIRES( -+ context, output_width > 0, -+ errors::InvalidArgument("output_width must be strictly positive")); -+ OP_REQUIRES( -+ context, output_height > 0, -+ errors::InvalidArgument("output_height must be strictly positive")); - int filter_left_offset; - int filter_top_offset; - if (padding == VALID) { -@@ -255,6 +261,9 @@ class Im2ColConvFunctor { - // by the width, then the height. This is the standard memory order in the - // image world if it helps to visualize it. 
- const int filter_value_count = filter_width * filter_height * input_depth; -+ OP_REQUIRES(context, filter_value_count > 0, -+ errors::InvalidArgument( -+ "filter patch must contain at least one element")); - const int64 patches_per_chunk = - kMaxChunkSize / (filter_value_count * sizeof(T1)); - const int64 chunk_value_count = diff --git a/CVE-2021-29528.patch b/CVE-2021-29528.patch deleted file mode 100644 index 982883c150cdc4fb4c63e2b115cef06076f0b9ef..0000000000000000000000000000000000000000 --- a/CVE-2021-29528.patch +++ /dev/null @@ -1,27 +0,0 @@ -From a1b11d2fdd1e51bfe18bb1ede804f60abfa92da6 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Tue, 20 Apr 2021 10:52:46 -0700 -Subject: [PATCH] Fix one division by zero - -PiperOrigin-RevId: 369474832 -Change-Id: I1082858ed78d9b2e4738ce30b231955973d49e1e ---- - tensorflow/core/kernels/quantized_mul_op.cc | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc -index 4e191f162662b..fb56f68bf14db 100644 ---- a/tensorflow/core/kernels/quantized_mul_op.cc -+++ b/tensorflow/core/kernels/quantized_mul_op.cc -@@ -347,6 +347,11 @@ class QuantizedMulOp : public OpKernel { - tensor_num_elements = x.NumElements(); - tensor_offset = offset_x; - } -+ if (vector_num_elements == 0) { -+ context->SetStatus( -+ errors::InvalidArgument("vector must have at least 1 element")); -+ return; -+ } - VectorTensorMultiply( - vector_data, vector_offset, vector_num_elements, tensor_data, - tensor_offset, tensor_num_elements, z_data); diff --git a/CVE-2021-29529.patch b/CVE-2021-29529.patch deleted file mode 100644 index 83fc30fdb525203f1b30cc046c080e4aabbb2633..0000000000000000000000000000000000000000 --- a/CVE-2021-29529.patch +++ /dev/null @@ -1,26 +0,0 @@ -From f851613f8f0fb0c838d160ced13c134f778e3ce7 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 16:20:48 -0700 -Subject: [PATCH] Fix heap buffer overflow caused by rounding. - -This was hard to fix. Due to the way we compute the pixels that influence an output pixel in resized images, for certain input configuration we might have issued a read to a pixel that is outside of boundary of the original image. This is because of floating errors that affected truncation results. 
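The commit message above attributes the out-of-bounds read to floating-point error in the truncation that selects the lower source pixel. Below is a minimal standalone sketch of that failure mode and of the `std::min` clamp the patch adds; the value of `in` is an assumed coordinate that overshot the last valid index by a tiny amount, not a value produced by the real resize arithmetic.

```cpp
// Standalone sketch (not TensorFlow code) of the interpolation-index clamp.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
  const int64_t in_size = 2;    // valid source indices: 0 .. in_size - 1
  const float in = 2.0000005f;  // assumed coordinate, slightly past the last pixel

  const float in_f = std::floor(in);
  int64_t lower = std::max(static_cast<int64_t>(in_f), static_cast<int64_t>(0));
  int64_t upper = std::min(static_cast<int64_t>(std::ceil(in)), in_size - 1);

  std::cout << "before clamp: lower=" << lower << " upper=" << upper << "\n";  // lower=2 upper=1, lower is out of bounds
  lower = std::min(lower, upper);  // the clamp the patch adds
  std::cout << "after clamp:  lower=" << lower << " upper=" << upper << "\n";  // lower=1 upper=1, in bounds
  return 0;
}
```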
- -PiperOrigin-RevId: 369757871 -Change-Id: If89425fff930983829a2168203c11858883eebc9 ---- - tensorflow/core/kernels/quantized_resize_bilinear_op.cc | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -index 07453c7e73284..2fd807f6df961 100644 ---- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -+++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -@@ -64,6 +64,8 @@ inline void ComputeInterpolationWeights( - std::max(static_cast(in_f), static_cast(0)); - interpolation->upper[i] = - std::min(static_cast(std::ceil(in)), in_size - 1); -+ interpolation->lower[i] = -+ std::min(interpolation->lower[i], interpolation->upper[i]); - interpolation->lerp[i] = in - in_f; - interpolation->ilerp[i] = - static_cast((in - in_f) * (1 << resolution)); diff --git a/CVE-2021-29530.patch b/CVE-2021-29530.patch deleted file mode 100644 index 5a9652e711174166c3fc44ac669053379d4bd6cc..0000000000000000000000000000000000000000 --- a/CVE-2021-29530.patch +++ /dev/null @@ -1,117 +0,0 @@ -From e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Tue, 20 Apr 2021 14:45:33 -0700 -Subject: [PATCH] Remove `OP_REQUIRES` call from helper function. - -Since `OP_REQUIRES` macro expands to a `return;` (among other), calling it in a helper function only ends the helper function's execution earlier, but the kernel will still run from start to end. Thus, all the expected validations are actually broken/useless as the code ploughs through the next crash anyway. - -PiperOrigin-RevId: 369524386 -Change-Id: I54f6cf9328445675ccc392e661b04336b229c9da ---- - .../core/kernels/sparse/sparse_cholesky_op.cc | 67 ++++++++++--------- - 1 file changed, 34 insertions(+), 33 deletions(-) - -diff --git a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc -index 9a939276f0b6c..47ab252317de5 100644 ---- a/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc -+++ b/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc -@@ -17,6 +17,8 @@ limitations under the License. - #include - #include - -+#include "tensorflow/core/framework/op_requires.h" -+ - #define EIGEN_USE_THREADS - - #include "third_party/eigen3/Eigen/Core" -@@ -82,8 +84,8 @@ class CSRSparseCholeskyCPUOp : public OpKernel { - - int64 num_rows; - int batch_size; -- ValidateInputs(ctx, *input_matrix, input_permutation_indices, &batch_size, -- &num_rows); -+ OP_REQUIRES_OK(ctx, ValidateInputs(*input_matrix, input_permutation_indices, -+ &batch_size, &num_rows)); - - // Allocate batch pointers. 
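The reasoning in the commit message above (OP_REQUIRES expands to a plain `return;`, so calling it inside a helper only leaves the helper) is easy to miss. Below is a minimal self-contained sketch of that control-flow pitfall; the macro and context type are simplified stand-ins written for illustration, not the real TensorFlow definitions.

```cpp
// Minimal sketch of why a validation macro that expands to `return;` cannot
// stop the caller when it is used inside a helper function.
#include <iostream>
#include <string>

struct FakeContext {
  bool ok = true;
  std::string error;
  void Fail(const std::string& msg) { ok = false; error = msg; }
};

// Simplified analogue of OP_REQUIRES: on failure, record the error and
// `return;`, which only exits the function the macro was expanded in.
#define REQUIRES(ctx, cond, msg) \
  do {                           \
    if (!(cond)) {               \
      (ctx)->Fail(msg);          \
      return;                    \
    }                            \
  } while (0)

void ValidateInHelper(FakeContext* ctx, int rank) {
  REQUIRES(ctx, rank == 2 || rank == 3, "sparse matrix must have rank 2 or 3");
  // The `return;` above only leaves this helper, not the caller.
}

void ComputeLikeKernel(FakeContext* ctx, int rank) {
  ValidateInHelper(ctx, rank);
  // Without an explicit status check here, the kernel keeps running even
  // though validation failed.
  std::cout << "still running, ok=" << ctx->ok << ", error=" << ctx->error << "\n";
}

int main() {
  FakeContext ctx;
  ComputeLikeKernel(&ctx, /*rank=*/5);  // invalid rank on purpose
  return 0;
}
```

The patch applies the matching remedy: ValidateInputs now returns a Status instead of calling OP_REQUIRES itself, and Compute wraps the call in OP_REQUIRES_OK, so a failed validation actually stops the kernel.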
- Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); -@@ -226,49 +228,48 @@ class CSRSparseCholeskyCPUOp : public OpKernel { - } - - private: -- void ValidateInputs(OpKernelContext* ctx, -- const CSRSparseMatrix& sparse_matrix, -- const Tensor& permutation_indices, int* batch_size, -- int64* num_rows) { -- OP_REQUIRES(ctx, sparse_matrix.dtype() == DataTypeToEnum::value, -- errors::InvalidArgument( -- "Asked for a CSRSparseMatrix of type ", -- DataTypeString(DataTypeToEnum::value), -- " but saw dtype: ", DataTypeString(sparse_matrix.dtype()))); -+ Status ValidateInputs(const CSRSparseMatrix& sparse_matrix, -+ const Tensor& permutation_indices, int* batch_size, -+ int64* num_rows) { -+ if (sparse_matrix.dtype() != DataTypeToEnum::value) -+ return errors::InvalidArgument( -+ "Asked for a CSRSparseMatrix of type ", -+ DataTypeString(DataTypeToEnum::value), -+ " but saw dtype: ", DataTypeString(sparse_matrix.dtype())); - - const Tensor& dense_shape = sparse_matrix.dense_shape(); - const int rank = dense_shape.dim_size(0); -- OP_REQUIRES(ctx, rank == 2 || rank == 3, -- errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", -- "but dense_shape has size ", rank)); -+ if (rank < 2 || rank > 3) -+ return errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", -+ "but dense_shape has size ", rank); - const int row_dim = (rank == 2) ? 0 : 1; - auto dense_shape_vec = dense_shape.vec(); - *num_rows = dense_shape_vec(row_dim); - const int64 num_cols = dense_shape_vec(row_dim + 1); -- OP_REQUIRES(ctx, *num_rows == num_cols, -- errors::InvalidArgument("sparse matrix must be square; got: ", -- *num_rows, " != ", num_cols)); -+ if (*num_rows != num_cols) -+ return errors::InvalidArgument( -+ "sparse matrix must be square; got: ", *num_rows, " != ", num_cols); - const TensorShape& perm_shape = permutation_indices.shape(); -- OP_REQUIRES( -- ctx, perm_shape.dims() + 1 == rank, -- errors::InvalidArgument( -- "sparse matrix must have the same rank as permutation; got: ", rank, -- " != ", perm_shape.dims(), " + 1.")); -- OP_REQUIRES( -- ctx, perm_shape.dim_size(rank - 2) == *num_rows, -- errors::InvalidArgument( -- "permutation must have the same number of elements in each batch " -- "as the number of rows in sparse matrix; got: ", -- perm_shape.dim_size(rank - 2), " != ", *num_rows)); -+ if (perm_shape.dims() + 1 != rank) -+ return errors::InvalidArgument( -+ "sparse matrix must have the same rank as permutation; got: ", rank, -+ " != ", perm_shape.dims(), " + 1."); -+ if (perm_shape.dim_size(rank - 2) != *num_rows) -+ return errors::InvalidArgument( -+ "permutation must have the same number of elements in each batch " -+ "as the number of rows in sparse matrix; got: ", -+ perm_shape.dim_size(rank - 2), " != ", *num_rows); - - *batch_size = sparse_matrix.batch_size(); - if (*batch_size > 1) { -- OP_REQUIRES( -- ctx, perm_shape.dim_size(0) == *batch_size, -- errors::InvalidArgument("permutation must have the same batch size " -- "as sparse matrix; got: ", -- perm_shape.dim_size(0), " != ", *batch_size)); -+ if (perm_shape.dim_size(0) != *batch_size) -+ return errors::InvalidArgument( -+ "permutation must have the same batch size " -+ "as sparse matrix; got: ", -+ perm_shape.dim_size(0), " != ", *batch_size); - } -+ -+ return Status::OK(); - } - }; - diff --git a/CVE-2021-29532.patch b/CVE-2021-29532.patch deleted file mode 100644 index 5c4d376485f6fd0769cb45dbcf75e5ff77012d32..0000000000000000000000000000000000000000 --- a/CVE-2021-29532.patch +++ /dev/null @@ -1,69 
+0,0 @@ -From 44b7f486c0143f68b56c34e2d01e146ee445134a Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 16:19:54 -0700 -Subject: [PATCH] Fix out of bounds read in `ragged_cross_op.cc`. - -PiperOrigin-RevId: 369757702 -Change-Id: Ie6e5d2c21513a8d56bf41fcf35960caf76e890f9 ---- - tensorflow/core/kernels/ragged_cross_op.cc | 30 ++++++++++++++++++++++ - 1 file changed, 30 insertions(+) - -diff --git a/tensorflow/core/kernels/ragged_cross_op.cc b/tensorflow/core/kernels/ragged_cross_op.cc -index ea65c0ee2b5b2..5dfe93f416659 100644 ---- a/tensorflow/core/kernels/ragged_cross_op.cc -+++ b/tensorflow/core/kernels/ragged_cross_op.cc -@@ -21,6 +21,7 @@ limitations under the License. - #include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/tensor.h" - #include "tensorflow/core/framework/tensor_shape.h" -+#include "tensorflow/core/platform/errors.h" - #include "tensorflow/core/platform/fingerprint.h" - #include "tensorflow/core/util/util.h" - #include "tensorflow/core/util/work_sharder.h" -@@ -466,16 +467,45 @@ class RaggedCrossOp : public OpKernel { - int next_dense = 0; - for (char c : input_order_) { - if (c == 'R') { -+ if (next_ragged >= ragged_values_list.size()) -+ return errors::InvalidArgument( -+ "input_order \"", input_order_, -+ "\" specifies reading a ragged tensor value at index ", -+ next_ragged, " from a list of ", ragged_values_list.size(), -+ " values."); -+ if (next_ragged >= ragged_splits_list.size()) -+ return errors::InvalidArgument( -+ "input_order \"", input_order_, -+ "\" specifies reading a ragged tensor split at index ", -+ next_ragged, " from a list of ", ragged_splits_list.size(), -+ " splits."); - TF_RETURN_IF_ERROR(BuildRaggedFeatureReader( - ragged_values_list[next_ragged], ragged_splits_list[next_ragged], - features)); - next_ragged++; - } else if (c == 'S') { -+ if (next_sparse >= sparse_values_list.size()) -+ return errors::InvalidArgument( -+ "input_order \"", input_order_, -+ "\" specifies reading a sparse tensor value at index ", -+ next_sparse, " from a list of ", sparse_values_list.size(), -+ " values."); -+ if (next_sparse >= sparse_indices_list.size()) -+ return errors::InvalidArgument( -+ "input_order \"", input_order_, -+ "\" specifies reading a sparse tensor index at index ", -+ next_sparse, " from a list of ", sparse_indices_list.size(), -+ " indices."); - TF_RETURN_IF_ERROR(BuildSparseFeatureReader( - sparse_indices_list[next_sparse], sparse_values_list[next_sparse], - batch_size, features)); - next_sparse++; - } else if (c == 'D') { -+ if (next_dense >= dense_list.size()) -+ return errors::InvalidArgument( -+ "input_order \"", input_order_, -+ "\" specifies reading a dense tensor at index ", next_dense, -+ " from a list of ", dense_list.size(), " tensors."); - TF_RETURN_IF_ERROR( - BuildDenseFeatureReader(dense_list[next_dense++], features)); - } else { diff --git a/CVE-2021-29533.patch b/CVE-2021-29533.patch deleted file mode 100644 index 11aef222c3c571f5c18d9f5b7f4257544425a623..0000000000000000000000000000000000000000 --- a/CVE-2021-29533.patch +++ /dev/null @@ -1,76 +0,0 @@ -From b432a38fe0e1b4b904a6c222cbce794c39703e87 Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Wed, 21 Apr 2021 15:57:36 -0700 -Subject: [PATCH] Fix overflow CHECK issue with `tf.raw_ops.DrawBoundingBoxes`. 
- ---- - .../core/kernels/draw_bounding_box_op.cc | 49 ++++++++++++++----- - 1 file changed, 37 insertions(+), 12 deletions(-) - -diff --git a/tensorflow/core/kernels/draw_bounding_box_op.cc b/tensorflow/core/kernels/draw_bounding_box_op.cc -index 30de99b7..39519523 100644 ---- a/tensorflow/core/kernels/draw_bounding_box_op.cc -+++ b/tensorflow/core/kernels/draw_bounding_box_op.cc -@@ -147,22 +147,47 @@ class DrawBoundingBoxesOp : public OpKernel { - - // At this point, {min,max}_box_{row,col}_clamp are inside the - // image. -- CHECK_GE(min_box_row_clamp, 0); -- CHECK_GE(max_box_row_clamp, 0); -- CHECK_LT(min_box_row_clamp, height); -- CHECK_LT(max_box_row_clamp, height); -- CHECK_GE(min_box_col_clamp, 0); -- CHECK_GE(max_box_col_clamp, 0); -- CHECK_LT(min_box_col_clamp, width); -- CHECK_LT(max_box_col_clamp, width); -+ -+ OP_REQUIRES( -+ context, min_box_row_clamp >= 0, -+ errors::InvalidArgument("Min box row clamp is less than 0.")); -+ OP_REQUIRES( -+ context, max_box_row_clamp >= 0, -+ errors::InvalidArgument("Max box row clamp is less than 0.")); -+ OP_REQUIRES(context, min_box_row_clamp <= height, -+ errors::InvalidArgument( -+ "Min box row clamp is greater than height.")); -+ OP_REQUIRES(context, max_box_row_clamp <= height, -+ errors::InvalidArgument( -+ "Max box row clamp is greater than height.")); -+ -+ OP_REQUIRES( -+ context, min_box_col_clamp >= 0, -+ errors::InvalidArgument("Min box col clamp is less than 0.")); -+ OP_REQUIRES( -+ context, max_box_col_clamp >= 0, -+ errors::InvalidArgument("Max box col clamp is less than 0.")); -+ OP_REQUIRES(context, min_box_col_clamp <= width, -+ errors::InvalidArgument( -+ "Min box col clamp is greater than width.")); -+ OP_REQUIRES(context, max_box_col_clamp <= width, -+ errors::InvalidArgument( -+ "Max box col clamp is greater than width.")); - - // At this point, the min_box_row and min_box_col are either - // in the image or above/left of it, and max_box_row and - // max_box_col are either in the image or below/right or it. -- CHECK_LT(min_box_row, height); -- CHECK_GE(max_box_row, 0); -- CHECK_LT(min_box_col, width); -- CHECK_GE(max_box_col, 0); -+ -+ OP_REQUIRES( -+ context, min_box_row <= height, -+ errors::InvalidArgument("Min box row is greater than height.")); -+ OP_REQUIRES(context, max_box_row >= 0, -+ errors::InvalidArgument("Max box row is less than 0.")); -+ OP_REQUIRES( -+ context, min_box_col <= width, -+ errors::InvalidArgument("Min box col is greater than width.")); -+ OP_REQUIRES(context, max_box_col >= 0, -+ errors::InvalidArgument("Max box col is less than 0.")); - - // Draw top line. 
- if (min_box_row >= 0) { --- -2.23.0 - diff --git a/CVE-2021-29534.patch b/CVE-2021-29534.patch deleted file mode 100644 index 2f710ce02f5f164b76c71361a2094bf35c2a75a7..0000000000000000000000000000000000000000 --- a/CVE-2021-29534.patch +++ /dev/null @@ -1,64 +0,0 @@ -diff -Nur a/tensorflow/core/kernels/sparse_tensors_map_ops.cc b/tensorflow/core/kernels/sparse_tensors_map_ops.cc ---- a/tensorflow/core/kernels/sparse_tensors_map_ops.cc 2020-09-22 09:57:17.000000000 +0800 -+++ b/tensorflow/core/kernels/sparse_tensors_map_ops.cc 2021-06-28 22:53:37.005305788 +0800 -@@ -21,16 +21,12 @@ - #include - #include - --#include "tensorflow/core/framework/op_kernel.h" --#include "tensorflow/core/framework/register_types.h" -- --#include "tensorflow/core/framework/op_kernel.h" --#include "tensorflow/core/framework/register_types.h" - #include "tensorflow/core/framework/resource_mgr.h" - #include "tensorflow/core/framework/tensor.h" - #include "tensorflow/core/framework/tensor_util.h" - #include "tensorflow/core/framework/types.h" - #include "tensorflow/core/lib/gtl/inlined_vector.h" -+#include "tensorflow/core/util/overflow.h" - #include "tensorflow/core/util/sparse/sparse_tensor.h" - - namespace tensorflow { -@@ -254,7 +250,22 @@ - errors::InvalidArgument( - "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); - -- TensorShape tensor_input_shape(input_shape->vec()); -+ auto input_shape_vec = input_shape->vec(); -+ int new_num_elements = 1; -+ bool overflow_ocurred = false; -+ for (int i = 0; i < input_shape_vec.size(); i++) { -+ new_num_elements = -+ MultiplyWithoutOverflow(new_num_elements, input_shape_vec(i)); -+ if (new_num_elements < 0) { -+ overflow_ocurred = true; -+ } -+ } -+ -+ OP_REQUIRES( -+ context, !overflow_ocurred, -+ errors::Internal("Encountered overflow from large input shape.")); -+ -+ TensorShape tensor_input_shape(input_shape_vec); - gtl::InlinedVector std_order(rank); - std::iota(std_order.begin(), std_order.end(), 0); - SparseTensor input_st; -@@ -262,8 +273,7 @@ - tensor_input_shape, std_order, - &input_st)); - -- auto input_shape_t = input_shape->vec(); -- const int64 N = input_shape_t(0); -+ const int64 N = input_shape_vec(0); - - Tensor sparse_handles(DT_INT64, TensorShape({N})); - auto sparse_handles_t = sparse_handles.vec(); -@@ -274,7 +284,7 @@ - // minibatch entries. 
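The CVE-2021-29534 hunk above computes the element count of the dense shape with MultiplyWithoutOverflow and treats a negative result as overflow. Below is a standalone sketch of that idea; `CheckedMul` is a simplified stand-in written for illustration, not the TensorFlow helper, and the sample shapes are assumed values.

```cpp
// Standalone sketch of an overflow-checked element count for a shape vector.
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Product of two non-negative values, or -1 if the product would overflow.
int64_t CheckedMul(int64_t x, int64_t y) {
  if (x < 0 || y < 0) return -1;
  if (x == 0 || y == 0) return 0;
  if (x > std::numeric_limits<int64_t>::max() / y) return -1;
  return x * y;
}

int main() {
  const std::vector<int64_t> ok_shape  = {2, 3, 4};
  const std::vector<int64_t> bad_shape = {int64_t{1} << 40, int64_t{1} << 40};

  for (const auto& shape : {ok_shape, bad_shape}) {
    int64_t num_elements = 1;
    bool overflow_occurred = false;
    for (int64_t dim : shape) {
      num_elements = CheckedMul(num_elements, dim);
      if (num_elements < 0) overflow_occurred = true;
    }
    if (overflow_occurred) {
      std::cout << "rejected: element count overflows int64\n";
    } else {
      std::cout << "num_elements = " << num_elements << "\n";
    }
  }
  return 0;
}
```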
- TensorShape output_shape; - OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( -- input_shape_t.data() + 1, -+ input_shape_vec.data() + 1, - input_shape->NumElements() - 1, &output_shape)); - - // Get groups by minibatch dimension diff --git a/CVE-2021-29535.patch b/CVE-2021-29535.patch deleted file mode 100644 index a708b4d2d97926829e5dc234d4b3ec39b0f3f53b..0000000000000000000000000000000000000000 --- a/CVE-2021-29535.patch +++ /dev/null @@ -1,42 +0,0 @@ -From efea03b38fb8d3b81762237dc85e579cc5fc6e87 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 16:15:46 -0700 -Subject: [PATCH] Validate inputs to `QuantizedMul` - -PiperOrigin-RevId: 369756982 -Change-Id: I00d960cc3b9316fd7a86bd37a44e341c96e17624 ---- - tensorflow/core/kernels/quantized_mul_op.cc | 20 ++++++++++++++++---- - 1 file changed, 16 insertions(+), 4 deletions(-) - -diff --git a/tensorflow/core/kernels/quantized_mul_op.cc b/tensorflow/core/kernels/quantized_mul_op.cc -index fb56f68bf14db..22cff8939449a 100644 ---- a/tensorflow/core/kernels/quantized_mul_op.cc -+++ b/tensorflow/core/kernels/quantized_mul_op.cc -@@ -284,10 +284,22 @@ class QuantizedMulOp : public OpKernel { - void Compute(OpKernelContext* context) override { - const Tensor& x = context->input(0); - const Tensor& y = context->input(1); -- const float min_x = context->input(2).flat()(0); -- const float max_x = context->input(3).flat()(0); -- const float min_y = context->input(4).flat()(0); -- const float max_y = context->input(5).flat()(0); -+ auto& min_x_tensor = context->input(2); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), -+ errors::InvalidArgument("min_x must be a scalar")); -+ const float min_x = min_x_tensor.flat()(0); -+ auto& max_x_tensor = context->input(3); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), -+ errors::InvalidArgument("max_x must be a scalar")); -+ const float max_x = max_x_tensor.flat()(0); -+ auto& min_y_tensor = context->input(4); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), -+ errors::InvalidArgument("min_y must be a scalar")); -+ const float min_y = min_y_tensor.flat()(0); -+ auto& max_y_tensor = context->input(5); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), -+ errors::InvalidArgument("max_y must be a scalar")); -+ const float max_y = max_y_tensor.flat()(0); - - BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); - if (!bcast.IsValid()) { diff --git a/CVE-2021-29536.patch b/CVE-2021-29536.patch deleted file mode 100644 index 7a8ebb2697877010fb170123391f1df4d20fade0..0000000000000000000000000000000000000000 --- a/CVE-2021-29536.patch +++ /dev/null @@ -1,57 +0,0 @@ -From a324ac84e573fba362a5e53d4e74d5de6729933e Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 18:11:15 -0700 -Subject: [PATCH] Validate arguments to `QuantizedReshape`. - -Ensure that validations from `Reshape` also terminate `QuantizedReshape` on failure. - -PiperOrigin-RevId: 369775421 -Change-Id: If8c5342267aceea65b7cb83a4b183304886f1ce8 ---- - .../core/kernels/quantized_reshape_op.cc | 25 +++++++++++++++++-- - 1 file changed, 23 insertions(+), 2 deletions(-) - -diff --git a/tensorflow/core/kernels/quantized_reshape_op.cc b/tensorflow/core/kernels/quantized_reshape_op.cc -index bd76c94edeea7..682f4aaa1f79e 100644 ---- a/tensorflow/core/kernels/quantized_reshape_op.cc -+++ b/tensorflow/core/kernels/quantized_reshape_op.cc -@@ -17,6 +17,7 @@ limitations under the License. 
- - #include "tensorflow/core/framework/op_kernel.h" - #include "tensorflow/core/framework/register_types.h" -+#include "tensorflow/core/framework/tensor_shape.h" - #include "tensorflow/core/framework/tensor_types.h" - #include "tensorflow/core/framework/types.h" - #include "tensorflow/core/kernels/reshape_op.h" -@@ -30,9 +31,29 @@ class QuantizedReshapeOp : public ReshapeOp { - void Compute(OpKernelContext* ctx) override { - // This call processes inputs 1 and 2 to write output 0. - ReshapeOp::Compute(ctx); -+ if (!ctx->status().ok()) { -+ return; -+ } -+ -+ const auto& input_min_float_tensor = ctx->input(2); -+ const auto& input_min_float_shape = input_min_float_tensor.shape(); -+ OP_REQUIRES(ctx, -+ TensorShapeUtils::IsScalar(input_min_float_shape) || -+ (TensorShapeUtils::IsVector(input_min_float_shape) && -+ (input_min_float_shape.dim_size(0) == 1)), -+ errors::InvalidArgument( -+ "input_min must be a scalar or a vector of 1 element")); -+ const float input_min_float = input_min_float_tensor.flat()(0); -+ const auto& input_max_float_tensor = ctx->input(3); -+ const auto& input_max_float_shape = input_max_float_tensor.shape(); -+ OP_REQUIRES(ctx, -+ TensorShapeUtils::IsScalar(input_max_float_shape) || -+ (TensorShapeUtils::IsVector(input_max_float_shape) && -+ (input_max_float_shape.dim_size(0) == 1)), -+ errors::InvalidArgument( -+ "input_max must be a scalar or a vector of 1 element")); -+ const float input_max_float = input_max_float_tensor.flat()(0); - -- const float input_min_float = ctx->input(2).flat()(0); -- const float input_max_float = ctx->input(3).flat()(0); - Tensor* output_min = nullptr; - OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); - output_min->flat()(0) = input_min_float; diff --git a/CVE-2021-29537.patch b/CVE-2021-29537.patch deleted file mode 100644 index fd8e12e01be6869cbc2f65e5bd48511ac285d3c2..0000000000000000000000000000000000000000 --- a/CVE-2021-29537.patch +++ /dev/null @@ -1,33 +0,0 @@ -From f6c40f0c6cbf00d46c7717a26419f2062f2f8694 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 17:00:39 -0700 -Subject: [PATCH] Validate min and max arguments to `QuantizedResizeBilinear`. 
- ---- - .../core/kernels/quantized_resize_bilinear_op.cc | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -index 8270fc11..a94f56a5 100644 ---- a/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -+++ b/tensorflow/core/kernels/quantized_resize_bilinear_op.cc -@@ -703,8 +703,14 @@ class QuantizedResizeBilinearOp : public OpKernel { - - void Compute(OpKernelContext* context) override { - const Tensor& input = context->input(0); -- const float in_min = context->input(2).flat()(0); -- const float in_max = context->input(3).flat()(0); -+ const auto& in_min_tensor = context->input(2); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), -+ errors::InvalidArgument("min must be a scalar")); -+ const float in_min = in_min_tensor.flat()(0); -+ const auto& in_max_tensor = context->input(3); -+ OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), -+ errors::InvalidArgument("max must be a scalar")); -+ const float in_max = in_max_tensor.flat()(0); - - ImageResizerState st(align_corners_, false); - st.ValidateAndCreateOutput(context, input); --- -2.23.0 - diff --git a/CVE-2021-29538.patch b/CVE-2021-29538.patch deleted file mode 100644 index ad661e111a801043ce4ee95099f5864ca4ea5399..0000000000000000000000000000000000000000 --- a/CVE-2021-29538.patch +++ /dev/null @@ -1,42 +0,0 @@ -From c570e2ecfc822941335ad48f6e10df4e21f11c96 Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Wed, 21 Apr 2021 17:50:10 -0700 -Subject: [PATCH] Fix issues in Conv2DBackpropFilter. - -PiperOrigin-RevId: 369772454 -Change-Id: I49b465f2ae2ce91def61b56cea8000197d5177d8 ---- - tensorflow/core/kernels/conv_grad_filter_ops.cc | 13 +++++++++++++ - 1 file changed, 13 insertions(+) - -diff --git a/tensorflow/core/kernels/conv_grad_filter_ops.cc b/tensorflow/core/kernels/conv_grad_filter_ops.cc -index fb48e3e285a27..2645d850ab7cf 100644 ---- a/tensorflow/core/kernels/conv_grad_filter_ops.cc -+++ b/tensorflow/core/kernels/conv_grad_filter_ops.cc -@@ -495,6 +495,14 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { - const int filter_total_size = dims.spatial_dims[0].filter_size * - dims.spatial_dims[1].filter_size * - dims.in_depth; -+ OP_REQUIRES( -+ context, -+ filter_total_size * dims.out_depth == filter_backprop->NumElements(), -+ errors::InvalidArgument( -+ "filter_size does not have enough elements, requested ", -+ filter_total_size * dims.out_depth, ", got ", -+ filter_backprop->NumElements())); -+ - // The output image size is the spatial size of the output. 
- const int output_image_size = - dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; -@@ -518,6 +526,11 @@ class Conv2DCustomBackpropFilterOp : public OpKernel { - - const size_t work_unit_size = size_A + size_B + size_C; - -+ OP_REQUIRES( -+ context, work_unit_size != 0, -+ errors::InvalidArgument( -+ "Work size for convolution would be 0, which is not acceptable")); -+ - const size_t shard_size = - (target_working_set_size + work_unit_size - 1) / work_unit_size; - diff --git a/CVE-2021-29539.patch b/CVE-2021-29539.patch deleted file mode 100644 index 232a727b1c7f48978fe6bb4498729466cd49ef1e..0000000000000000000000000000000000000000 --- a/CVE-2021-29539.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 4f663d4b8f0bec1b48da6fa091a7d29609980fa4 Mon Sep 17 00:00:00 2001 -From: Amit Patankar -Date: Mon, 8 Feb 2021 12:29:30 -0800 -Subject: [PATCH] Allowlist certain data types to avoid a seg fault. - -PiperOrigin-RevId: 356326671 -Change-Id: I23b65b52e93798cb5a6744632d31b0f88c6b6b31 ---- - tensorflow/core/kernels/immutable_constant_op.cc | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/tensorflow/core/kernels/immutable_constant_op.cc b/tensorflow/core/kernels/immutable_constant_op.cc -index 1cfbdb8277891..19aa865c1fbe4 100644 ---- a/tensorflow/core/kernels/immutable_constant_op.cc -+++ b/tensorflow/core/kernels/immutable_constant_op.cc -@@ -17,6 +17,8 @@ limitations under the License. - - #include - -+#include "tensorflow/core/framework/types.pb.h" -+ - namespace tensorflow { - - namespace { -@@ -86,6 +88,9 @@ ImmutableConstantOp::ImmutableConstantOp(OpKernelConstruction* context) - OP_REQUIRES_OK(context, - context->GetAttr(kMemoryRegionNameAttr, ®ion_name_)); - OP_REQUIRES_OK(context, context->GetAttr(kDTypeAttr, &dtype_)); -+ OP_REQUIRES(context, dtype_ != DT_RESOURCE && dtype_ != DT_VARIANT, -+ errors::InvalidArgument( -+ "Resource and variant dtypes are invalid for this op.")); - OP_REQUIRES_OK(context, context->GetAttr(kShapeAttr, &shape_)); - } - diff --git a/CVE-2021-29541_CVE-2021-29542.patch b/CVE-2021-29541_CVE-2021-29542.patch deleted file mode 100644 index b19d31db78daf451960cab5bf51fd6b7773760c0..0000000000000000000000000000000000000000 --- a/CVE-2021-29541_CVE-2021-29542.patch +++ /dev/null @@ -1,133 +0,0 @@ -From ba424dd8f16f7110eea526a8086f1a155f14f22b Mon Sep 17 00:00:00 2001 -From: Mihai Maruseac -Date: Thu, 22 Apr 2021 13:29:54 -0700 -Subject: [PATCH] Enhance validation of ngram op and handle case of 0 tokens. - -PiperOrigin-RevId: 369940178 -Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b ---- - tensorflow/core/kernels/string_ngrams_op.cc | 52 +++++++++++++++---- - .../core/kernels/string_ngrams_op_test.cc | 34 ++++++++++++ - 2 files changed, 75 insertions(+), 11 deletions(-) - -diff --git a/tensorflow/core/kernels/string_ngrams_op.cc b/tensorflow/core/kernels/string_ngrams_op.cc -index 8aed2b3831a2f..7008a1d766af2 100644 ---- a/tensorflow/core/kernels/string_ngrams_op.cc -+++ b/tensorflow/core/kernels/string_ngrams_op.cc -@@ -61,16 +61,28 @@ class StringNGramsOp : public tensorflow::OpKernel { - OP_REQUIRES_OK(context, context->input("data_splits", &splits)); - const auto& splits_vec = splits->flat(); - -- // Validate that the splits are valid indices into data -+ // Validate that the splits are valid indices into data, only if there are -+ // splits specified. 
- const int input_data_size = data->flat<tstring>().size();
- const int splits_vec_size = splits_vec.size();
-- for (int i = 0; i < splits_vec_size; ++i) {
-- bool valid_splits = splits_vec(i) >= 0;
-- valid_splits = valid_splits && (splits_vec(i) <= input_data_size);
-- OP_REQUIRES(
-- context, valid_splits,
-- errors::InvalidArgument("Invalid split value ", splits_vec(i),
-- ", must be in [0,", input_data_size, "]"));
-+ if (splits_vec_size > 0) {
-+ int prev_split = splits_vec(0);
-+ OP_REQUIRES(context, prev_split == 0,
-+ errors::InvalidArgument("First split value must be 0, got ",
-+ prev_split));
-+ for (int i = 1; i < splits_vec_size; ++i) {
-+ bool valid_splits = splits_vec(i) >= prev_split;
-+ valid_splits = valid_splits && (splits_vec(i) <= input_data_size);
-+ OP_REQUIRES(context, valid_splits,
-+ errors::InvalidArgument(
-+ "Invalid split value ", splits_vec(i), ", must be in [",
-+ prev_split, ", ", input_data_size, "]"));
-+ prev_split = splits_vec(i);
-+ }
-+ OP_REQUIRES(context, prev_split == input_data_size,
-+ errors::InvalidArgument(
-+ "Last split value must be data size. Expected ",
-+ input_data_size, ", got ", prev_split));
- }
-
- int num_batch_items = splits_vec.size() - 1;
-@@ -174,13 +186,31 @@ class StringNGramsOp : public tensorflow::OpKernel {
- ngram->append(left_pad_);
- ngram->append(separator_);
- }
-+ // Only output first num_tokens - 1 pairs of data and separator
- for (int n = 0; n < num_tokens - 1; ++n) {
- ngram->append(data[data_start_index + n]);
- ngram->append(separator_);
- }
-- ngram->append(data[data_start_index + num_tokens - 1]);
-- for (int n = 0; n < right_padding; ++n) {
-- ngram->append(separator_);
-+ // Handle case when there are no tokens or no right padding as these can
-+ // result in consecutive separators.
-+ if (num_tokens > 0) {
-+ // If we have tokens, then output last and then pair each separator with
-+ // the right padding that follows, to ensure ngram ends either with the
-+ // token or with the right pad.
-+ ngram->append(data[data_start_index + num_tokens - 1]);
-+ for (int n = 0; n < right_padding; ++n) {
-+ ngram->append(separator_);
-+ ngram->append(right_pad_);
-+ }
-+ } else {
-+ // If we don't have tokens, then the last item inserted into the ngram
-+ // has been the separator from the left padding loop above. Hence,
-+ // output right pad and separator and make sure to finish with a
-+ // padding, not a separator.
-+ for (int n = 0; n < right_padding - 1; ++n) {
-+ ngram->append(right_pad_);
-+ ngram->append(separator_);
-+ }
- ngram->append(right_pad_);
- }
-
-diff --git a/tensorflow/core/kernels/string_ngrams_op_test.cc b/tensorflow/core/kernels/string_ngrams_op_test.cc
-index b89de9ad16dab..0d52283bd8fb9 100644
---- a/tensorflow/core/kernels/string_ngrams_op_test.cc
-+++ b/tensorflow/core/kernels/string_ngrams_op_test.cc
-@@ -542,6 +542,40 @@ TEST_F(NgramKernelTest, TestEmptyInput) {
- assert_int64_equal(expected_splits, *GetOutput(1));
- }
-
-+TEST_F(NgramKernelTest, TestNoTokens) {
-+ MakeOp("|", {3}, "L", "R", -1, false);
-+ // Batch items are:
-+ // 0:
-+ // 1: "a"
-+ AddInputFromArray<tstring>(TensorShape({1}), {"a"});
-+ AddInputFromArray<int64>(TensorShape({3}), {0, 0, 1});
-+ TF_ASSERT_OK(RunOpKernel());
-+
-+ std::vector<tstring> expected_values(
-+ {"L|L|R", "L|R|R", // no input in first split
-+ "L|L|a", "L|a|R", "a|R|R"}); // second split
-+ std::vector<int64> expected_splits({0, 2, 5});
-+
-+ assert_string_equal(expected_values, *GetOutput(0));
-+ assert_int64_equal(expected_splits, *GetOutput(1));
-+}
-+
-+TEST_F(NgramKernelTest, TestNoTokensNoPad) {
-+ MakeOp("|", {3}, "", "", 0, false);
-+ // Batch items are:
-+ // 0:
-+ // 1: "a"
-+ AddInputFromArray<tstring>(TensorShape({1}), {"a"});
-+ AddInputFromArray<int64>(TensorShape({3}), {0, 0, 1});
-+ TF_ASSERT_OK(RunOpKernel());
-+
-+ std::vector<tstring> expected_values({});
-+ std::vector<int64> expected_splits({0, 0, 0});
-+
-+ assert_string_equal(expected_values, *GetOutput(0));
-+ assert_int64_equal(expected_splits, *GetOutput(1));
-+}
-+
- TEST_F(NgramKernelTest, ShapeFn) {
- ShapeInferenceTestOp op("StringNGrams");
- INFER_OK(op, "?;?", "[?];[?]");
diff --git a/CVE-2021-29543.patch b/CVE-2021-29543.patch
deleted file mode 100644
index 2f4b6dc916be4a36fef93133d2cd1105e494d9f2..0000000000000000000000000000000000000000
--- a/CVE-2021-29543.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From ea3b43e98c32c97b35d52b4c66f9107452ca8fb2 Mon Sep 17 00:00:00 2001
-From: Amit Patankar
-Date: Thu, 22 Apr 2021 15:11:05 -0700
-Subject: [PATCH] Fix `tf.raw_ops.CTCGreedyDecoder` CHECK failure.
-
-PiperOrigin-RevId: 369960465
-Change-Id: If0b8b3264d5a47a24ac0970ed7b81ce6b4921fae
----
- tensorflow/core/kernels/ctc_decoder_ops.cc | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/tensorflow/core/kernels/ctc_decoder_ops.cc b/tensorflow/core/kernels/ctc_decoder_ops.cc
-index d62aef2d03b98..22681f97437f0 100644
---- a/tensorflow/core/kernels/ctc_decoder_ops.cc
-+++ b/tensorflow/core/kernels/ctc_decoder_ops.cc
-@@ -232,6 +232,8 @@ class CTCGreedyDecoderOp : public OpKernel {
- int prev_indices = -1;
- for (int t = 0; t < seq_len_t(b); ++t) {
- int max_class_indices;
-+ OP_REQUIRES(ctx, input_list_t[t].dimension(1) > 0,
-+ errors::InvalidArgument("Invalid input dimensions."));
- log_prob_t(b, 0) +=
- -RowMax<T>(input_list_t[t], b, &max_class_indices);
- if (max_class_indices != blank_index &&
diff --git a/CVE-2021-29544-1.patch b/CVE-2021-29544-1.patch
deleted file mode 100644
index f2ca044153460b3b2b47631f01f464d6205ece68..0000000000000000000000000000000000000000
--- a/CVE-2021-29544-1.patch
+++ /dev/null
@@ -1,919 +0,0 @@
-From 52df91c5634e6c666843849a1c6ff29b3d2676be Mon Sep 17 00:00:00 2001
-From: Pankaj Kanwar
-Date: Mon, 12 Oct 2020 10:30:20 -0700
-Subject: [PATCH] Create a V2 Op to stop the gradient when the input is out of
- range.
-
-PiperOrigin-RevId: 336692325
-Change-Id: I36fd3fcfc58a30d5218beca512fbfc7c24b8b5cb
----
- tensorflow/cc/gradients/array_grad.cc | 29 ++--
- tensorflow/compiler/tests/unary_ops_test.py | 6 +-
- .../api_def_QuantizeAndDequantizeV4.pbtxt | 8 ++
- .../api_def_QuantizeAndDequantizeV4Grad.pbtxt | 8 ++
- .../api_def_QuantizeAndDequantizeV4.pbtxt | 3 +
- .../api_def_QuantizeAndDequantizeV4Grad.pbtxt | 3 +
- .../api_def_QuantizeAndDequantizeV4.pbtxt | 4 +
- .../api_def_QuantizeAndDequantizeV4Grad.pbtxt | 4 +
- .../kernels/quantize_and_dequantize_op.cc | 126 ++++++++++++++++++
- .../core/kernels/quantize_and_dequantize_op.h | 71 ++++++++++
- .../quantize_and_dequantize_op_gpu.cu.cc | 40 ++++++
- .../quantize_and_dequantize_op_test.cc | 48 +++++++
- tensorflow/core/ops/array_ops.cc | 64 +++++++++
- .../python/kernel_tests/array_ops_test.py | 21 ++-
- tensorflow/python/ops/array_ops.py | 113 +++++++++++++++-
- .../tools/api/golden/v1/tensorflow.pbtxt | 4 +
- .../golden/v1/tensorflow.quantization.pbtxt | 4 +
- .../api/golden/v1/tensorflow.raw_ops.pbtxt | 8 ++
- .../tools/api/golden/v2/tensorflow.pbtxt | 4 +
- .../golden/v2/tensorflow.quantization.pbtxt | 4 +
- .../api/golden/v2/tensorflow.raw_ops.pbtxt | 8 ++
- 21 files changed, 564 insertions(+), 16 deletions(-)
- create mode 100644 tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV4.pbtxt
- create mode 100644 tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
- create mode 100644 tensorflow/core/api_def/java_api/api_def_QuantizeAndDequantizeV4.pbtxt
- create mode 100644 tensorflow/core/api_def/java_api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
- create mode 100644 tensorflow/core/api_def/python_api/api_def_QuantizeAndDequantizeV4.pbtxt
- create mode 100644 tensorflow/core/api_def/python_api/api_def_QuantizeAndDequantizeV4Grad.pbtxt
-
-diff --git a/tensorflow/cc/gradients/array_grad.cc b/tensorflow/cc/gradients/array_grad.cc
-index e9173227..480243a2 100644
---- a/tensorflow/cc/gradients/array_grad.cc
-+++ b/tensorflow/cc/gradients/array_grad.cc
-@@ -15,13 +15,12 @@ limitations under the License.
-
- #include
-
-+#include "tensorflow/cc/framework/grad_op_registry.h"
-+#include "tensorflow/cc/framework/gradients.h"
- #include "tensorflow/cc/ops/array_ops_internal.h"
- #include "tensorflow/cc/ops/standard_ops.h"
- #include "tensorflow/core/lib/strings/strcat.h"
-
--#include "tensorflow/cc/framework/grad_op_registry.h"
--#include "tensorflow/cc/framework/gradients.h"
--
- namespace tensorflow {
- namespace ops {
- namespace {
-@@ -90,15 +89,25 @@ Status QuantizeAndDequantizeGrad(const Scope& scope, const Operation& op,
- }
- REGISTER_GRADIENT_OP("QuantizeAndDequantize", QuantizeAndDequantizeGrad);
-
--Status QuantizeAndDequantizeV2Grad(const Scope& scope, const Operation& op,
-- const std::vector<Output>& grad_inputs,
-- std::vector<Output>* grad_outputs) {
-- grad_outputs->push_back(Identity(scope, grad_inputs[0]));
-- grad_outputs->push_back(NoGradient());
-- grad_outputs->push_back(NoGradient());
-+Status QuantizeAndDequantizeV4GradHelper(const Scope& scope,
-+ const Operation& op,
-+ const std::vector<Output>& grad_inputs,
-+ std::vector<Output>* grad_outputs) {
-+ Input input = Shape(scope, op.input(0));
-+ Input input_min = op.input(1);
-+ Input input_max = op.input(2);
-+ int64 axis;
-+ TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "axis", &axis));
-+ auto qdq_v4_grad = QuantizeAndDequantizeV4Grad(
-+ scope, grad_inputs[0], input, input_min, input_max,
-+ QuantizeAndDequantizeV4Grad::Axis(axis));
-+ grad_outputs->push_back(qdq_v4_grad.input_backprop);
-+ grad_outputs->push_back(qdq_v4_grad.input_min_backprop);
-+ grad_outputs->push_back(qdq_v4_grad.input_max_backprop);
- return scope.status();
- }
--REGISTER_GRADIENT_OP("QuantizeAndDequantizeV2", QuantizeAndDequantizeV2Grad);
-+REGISTER_GRADIENT_OP("QuantizeAndDequantizeV4",
-+ QuantizeAndDequantizeV4GradHelper);
-
- Status QuantizeAndDequantizeV3Grad(const Scope& scope, const Operation& op,
- const std::vector<Output>& grad_inputs,
-diff --git a/tensorflow/compiler/tests/unary_ops_test.py b/tensorflow/compiler/tests/unary_ops_test.py
-index 162693a9..dacd7232 100644
---- a/tensorflow/compiler/tests/unary_ops_test.py
-+++ b/tensorflow/compiler/tests/unary_ops_test.py
-@@ -535,7 +535,7 @@ class UnaryOpsTest(xla_test.XLATestCase):
- for dtype in self.float_types:
-
- def quantize_and_dequantize_v2(x):
-- return array_ops.quantize_and_dequantize_v2(
-+ return array_ops.quantize_and_dequantize(
- x, -127, 127, signed_input=True, num_bits=8)
-
- self._assertOpOutputMatchesExpected(
-@@ -544,7 +544,7 @@ class UnaryOpsTest(xla_test.XLATestCase):
- expected=np.array([-1., -0.5, 0., 0.296875], dtype=dtype))
-
- def quantize_and_dequantize_v2_round_half_up(x):
-- return array_ops.quantize_and_dequantize_v2(
-+ return array_ops.quantize_and_dequantize(
- x,
- -1,
- 1.0,
-@@ -568,7 +568,7 @@ class UnaryOpsTest(xla_test.XLATestCase):
- dtype=dtype))
-
- def quantize_and_dequantize_v2_round_half_to_even(x):
-- return array_ops.quantize_and_dequantize_v2(
-+ return array_ops.quantize_and_dequantize(
- x,
- -1.0,
- 1.0,
-diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV4.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV4.pbtxt
-new file mode 100644
-index 00000000..a84ccb78
---- /dev/null
-+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV4.pbtxt
-@@ -0,0 +1,8 @@
-+op {
-+ graph_op_name: "QuantizeAndDequantizeV4"
-+ summary: "Returns the gradient of `QuantizeAndDequantizeV4`."
-+ description: <