diff --git a/CVE-2021-37136.patch b/CVE-2021-37136.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7f7e7958c6fa7917bae4f67732ae87b32b727915
--- /dev/null
+++ b/CVE-2021-37136.patch
@@ -0,0 +1,95 @@
+From 41d3d61a61608f2223bb364955ab2045dd5e4020 Mon Sep 17 00:00:00 2001
+From: Norman Maurer
+Date: Thu, 9 Sep 2021 14:53:58 +0200
+Subject: [PATCH] Merge pull request from GHSA-grg4-wf29-r9vv
+
+Motivation:
+
+We should do the Bzip2 decoding in a streaming fashion and so ensure we propagate each buffer through the pipeline as soon as possible. This allows users to release these buffers as fast as possible.
+
+Modification:
+
+- Change the Bzip2Decoder to do the decompression of data in a streaming fashion.
+- Add a safety check to ensure the block length never exceeds the maximum (as defined in the spec).
+
+Result:
+
+No more risk of an OOME when decompressing large data via bzip2.
+
+Thanks to Ori Hollander of JFrog Security for reporting the issue.
+
+(we got acquired during the process and Vdoo is now part of the JFrog company)
+---
+ .../codec/compression/Bzip2BlockDecompressor.java | 5 +++++
+ .../handler/codec/compression/Bzip2Constants.java | 2 ++
+ .../handler/codec/compression/Bzip2Decoder.java | 15 ++++++++-------
+ 3 files changed, 15 insertions(+), 7 deletions(-)
+
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2BlockDecompressor.java b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2BlockDecompressor.java
+index 9b8ff3f04c9..801900c4873 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2BlockDecompressor.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2BlockDecompressor.java
+@@ -228,6 +228,11 @@ boolean decodeHuffmanData(final Bzip2HuffmanStageDecoder huffmanDecoder) {
+                 bwtBlock[bwtBlockLength++] = nextByte;
+             }
+         }
++        if (bwtBlockLength > MAX_BLOCK_LENGTH) {
++            throw new DecompressionException("block length exceeds max block length: "
++                    + bwtBlockLength + " > " + MAX_BLOCK_LENGTH);
++        }
++
+         this.bwtBlockLength = bwtBlockLength;
+         initialiseInverseBWT();
+         return true;
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Constants.java b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Constants.java
+index ba8fee54d39..087f45faa0b 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Constants.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Constants.java
+@@ -49,6 +49,8 @@
+     static final int MIN_BLOCK_SIZE = 1;
+     static final int MAX_BLOCK_SIZE = 9;
+
++    static final int MAX_BLOCK_LENGTH = MAX_BLOCK_SIZE * BASE_BLOCK_SIZE;
++
+     /**
+      * Maximum possible Huffman alphabet size.
+      */
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Decoder.java b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Decoder.java
+index 5434b41d199..61c14f62ab0 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Decoder.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/Bzip2Decoder.java
+@@ -291,26 +291,27 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
+             }
+
+             final int blockLength = blockDecompressor.blockLength();
+-            final ByteBuf uncompressed = ctx.alloc().buffer(blockLength);
+-            boolean success = false;
++            ByteBuf uncompressed = ctx.alloc().buffer(blockLength);
+             try {
+                 int uncByte;
+                 while ((uncByte = blockDecompressor.read()) >= 0) {
+                     uncompressed.writeByte(uncByte);
+                 }
+-
++                // We did read all the data, let's reset the state and do the CRC check.
++                currentState = State.INIT_BLOCK;
+                 int currentBlockCRC = blockDecompressor.checkCRC();
+                 streamCRC = (streamCRC << 1 | streamCRC >>> 31) ^ currentBlockCRC;
+
+                 out.add(uncompressed);
+-                success = true;
++                uncompressed = null;
+             } finally {
+-                if (!success) {
++                if (uncompressed != null) {
+                     uncompressed.release();
+                 }
+             }
+-            currentState = State.INIT_BLOCK;
+-            break;
++            // Return here so the ByteBuf that was put in the List will be forwarded to the user and so can be
++            // released as soon as possible.
++            return;
+         case EOF:
+             in.skipBytes(in.readableBytes());
+             return;
diff --git a/CVE-2021-37137.patch b/CVE-2021-37137.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1c7617440a18302f6d6004b56be83f818b99f065
--- /dev/null
+++ b/CVE-2021-37137.patch
@@ -0,0 +1,193 @@
+From 6da4956b31023ae967451e1d94ff51a746a9194f Mon Sep 17 00:00:00 2001
+From: Norman Maurer
+Date: Thu, 9 Sep 2021 14:55:08 +0200
+Subject: [PATCH] Merge pull request from GHSA-9vjp-v76f-g363
+
+Motivation:
+
+The Snappy frame decoder doesn't restrict the size of the compressed data (or of the uncompressed data), which may lead to excessive memory usage. Besides this, it may also buffer reserved skippable chunks until the whole chunk is received, which may lead to excessive memory usage as well.
+
+Modifications:
+
+- Add various validations for the max allowed size of a chunk
+- Skip bytes on the fly when a skippable chunk is handled
+
+Result:
+
+No more risk of OOME. Thanks to Ori Hollander of JFrog Security for reporting the issue.
+---
+ .../handler/codec/compression/Snappy.java | 30 +++++++++---
+ .../codec/compression/SnappyFrameDecoder.java | 46 ++++++++++++++++---
+ 2 files changed, 62 insertions(+), 14 deletions(-)
+
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/Snappy.java b/codec/src/main/java/io/netty/handler/codec/compression/Snappy.java
+index 0a2d1c09b32..c851e4d8d6c 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/Snappy.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/Snappy.java
+@@ -38,12 +38,11 @@
+     private static final int COPY_2_BYTE_OFFSET = 2;
+     private static final int COPY_4_BYTE_OFFSET = 3;
+
+-    private State state = State.READY;
++    private State state = State.READING_PREAMBLE;
+     private byte tag;
+     private int written;
+
+     private enum State {
+-        READY,
+         READING_PREAMBLE,
+         READING_TAG,
+         READING_LITERAL,
+@@ -51,7 +50,7 @@
+     }
+
+     public void reset() {
+-        state = State.READY;
++        state = State.READING_PREAMBLE;
+         tag = 0;
+         written = 0;
+     }
+@@ -270,8 +269,6 @@ private static void encodeCopy(ByteBuf out, int offset, int length) {
+     public void decode(ByteBuf in, ByteBuf out) {
+         while (in.isReadable()) {
+             switch (state) {
+-            case READY:
+-                state = State.READING_PREAMBLE;
+             case READING_PREAMBLE:
+                 int uncompressedLength = readPreamble(in);
+                 if (uncompressedLength == PREAMBLE_NOT_FULL) {
+@@ -281,7 +277,6 @@ public void decode(ByteBuf in, ByteBuf out) {
+                 }
+                 if (uncompressedLength == 0) {
+                     // Should never happen, but it does mean we have nothing further to do
+-                    state = State.READY;
+                     return;
+                 }
+                 out.ensureWritable(uncompressedLength);
+@@ -378,6 +373,27 @@ private static int readPreamble(ByteBuf in) {
+         return 0;
+     }
+
++    /**
++     * Get the length varint (a series of bytes, where the lower 7 bits
++     * are data and the upper bit is a flag to indicate more bytes to be
++     * read).
++     *
++     * @param in The input buffer to get the preamble from
++     * @return The calculated length based on the input buffer, or 0 if
++     *         no preamble is able to be calculated
++     */
++    int getPreamble(ByteBuf in) {
++        if (state == State.READING_PREAMBLE) {
++            int readerIndex = in.readerIndex();
++            try {
++                return readPreamble(in);
++            } finally {
++                in.readerIndex(readerIndex);
++            }
++        }
++        return 0;
++    }
++
+     /**
+      * Reads a literal from the input buffer directly to the output buffer.
+      * A "literal" is an uncompressed segment of data stored directly in the
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/SnappyFrameDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/SnappyFrameDecoder.java
+index 74a12895946..51997596eb6 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/SnappyFrameDecoder.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/SnappyFrameDecoder.java
+@@ -45,13 +45,19 @@
+     }
+
+     private static final int SNAPPY_IDENTIFIER_LEN = 6;
++    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L95
+     private static final int MAX_UNCOMPRESSED_DATA_SIZE = 65536 + 4;
++    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L82
++    private static final int MAX_DECOMPRESSED_DATA_SIZE = 65536;
++    // See https://github.com/google/snappy/blob/1.1.9/framing_format.txt#L82
++    private static final int MAX_COMPRESSED_CHUNK_SIZE = 16777216 - 1;
+
+     private final Snappy snappy = new Snappy();
+     private final boolean validateChecksums;
+
+     private boolean started;
+     private boolean corrupted;
++    private int numBytesToSkip;
+
+     /**
+      * Creates a new snappy-framed decoder with validation of checksums
+@@ -82,6 +88,16 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
+             return;
+         }
+
++        if (numBytesToSkip != 0) {
++            // The last chunkType we detected was RESERVED_SKIPPABLE and we still have some bytes to skip.
++            int skipBytes = Math.min(numBytesToSkip, in.readableBytes());
++            in.skipBytes(skipBytes);
++            numBytesToSkip -= skipBytes;
++
++            // Let's return and try again.
++            return;
++        }
++
+         try {
+             int idx = in.readerIndex();
+             final int inSize = in.readableBytes();
+@@ -123,12 +139,15 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
+                         throw new DecompressionException("Received RESERVED_SKIPPABLE tag before STREAM_IDENTIFIER");
+                     }
+
+-                    if (inSize < 4 + chunkLength) {
+-                        // TODO: Don't keep skippable bytes
+-                        return;
+-                    }
++                    in.skipBytes(4);
+
+-                    in.skipBytes(4 + chunkLength);
++                    int skipBytes = Math.min(chunkLength, in.readableBytes());
++                    in.skipBytes(skipBytes);
++                    if (skipBytes != chunkLength) {
++                        // We could not skip all bytes, let's store the remaining so we can do so once we receive more
++                        // data.
++                        numBytesToSkip = chunkLength - skipBytes;
++                    }
+                     break;
+                 case RESERVED_UNSKIPPABLE:
+                     // The spec mandates that reserved unskippable chunks must immediately
+@@ -141,7 +160,8 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
+                         throw new DecompressionException("Received UNCOMPRESSED_DATA tag before STREAM_IDENTIFIER");
+                     }
+                     if (chunkLength > MAX_UNCOMPRESSED_DATA_SIZE) {
+-                        throw new DecompressionException("Received UNCOMPRESSED_DATA larger than 65540 bytes");
++                        throw new DecompressionException("Received UNCOMPRESSED_DATA larger than " +
++                                MAX_UNCOMPRESSED_DATA_SIZE + " bytes");
+                     }
+
+                     if (inSize < 4 + chunkLength) {
+@@ -162,13 +182,25 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) t
+                         throw new DecompressionException("Received COMPRESSED_DATA tag before STREAM_IDENTIFIER");
+                     }
+
++                    if (chunkLength > MAX_COMPRESSED_CHUNK_SIZE) {
++                        throw new DecompressionException("Received COMPRESSED_DATA that contains" +
++                                " chunk that exceeds " + MAX_COMPRESSED_CHUNK_SIZE + " bytes");
++                    }
++
+                     if (inSize < 4 + chunkLength) {
+                         return;
+                     }
+
+                     in.skipBytes(4);
+                     int checksum = in.readIntLE();
+-                    ByteBuf uncompressed = ctx.alloc().buffer();
++
++                    int uncompressedSize = snappy.getPreamble(in);
++                    if (uncompressedSize > MAX_DECOMPRESSED_DATA_SIZE) {
++                        throw new DecompressionException("Received COMPRESSED_DATA that contains" +
++                                " uncompressed data that exceeds " + MAX_DECOMPRESSED_DATA_SIZE + " bytes");
++                    }
++
++                    ByteBuf uncompressed = ctx.alloc().buffer(uncompressedSize, MAX_DECOMPRESSED_DATA_SIZE);
+                     try {
+                         if (validateChecksums) {
+                             int oldWriterIndex = in.writerIndex();
diff --git a/netty.spec b/netty.spec
index 43023ad2647cbdfb1610182727bdf79064694ae7..3be3d058d8e7763f4d2ed50fbddb4dbd765081d7 100644
--- a/netty.spec
+++ b/netty.spec
@@ -2,7 +2,7 @@
 Name: netty
 Version: 4.1.13
-Release: 14
+Release: 15
 Summary: Asynchronous event-driven network application Java framework
 License: ASL 2.0
 URL: https://netty.io/
@@ -25,6 +25,8 @@
 Patch0013: CVE-2021-21295-pre4.patch
 Patch0014: CVE-2021-21295.patch
 Patch0015: CVE-2021-21409.patch
 Patch0016: fix-build-error.patch
+Patch0017: CVE-2021-37136.patch
+Patch0018: CVE-2021-37137.patch
 BuildRequires: maven-local mvn(ant-contrib:ant-contrib)
 BuildRequires: mvn(com.jcraft:jzlib) mvn(commons-logging:commons-logging)
@@ -146,6 +148,9 @@ export CFLAGS="$RPM_OPT_FLAGS" LDFLAGS="$RPM_LD_FLAGS"
 %changelog
+* Wed Oct 27 2021 wangkai - 4.1.13-15
+- fix CVE-2021-37136 CVE-2021-37137
+
 * Mon Aug 16 2021 wangyue - 4.1.13-14
 - fix build error
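
Note on the Bzip2Decoder hunk above: the stream-CRC update, streamCRC = (streamCRC << 1 | streamCRC >>> 31) ^ currentBlockCRC, rotates the running CRC left by one bit and folds in the CRC of the block just decoded. A minimal standalone equivalent in plain Java (combineBlockCrc is a hypothetical name, not part of the patch):

    // One step of bzip2's stream-CRC accumulation: rotate the running CRC
    // left by one bit, then XOR in the CRC of the block just decoded.
    static int combineBlockCrc(int streamCrc, int blockCrc) {
        // (streamCrc << 1 | streamCrc >>> 31) is exactly Integer.rotateLeft(streamCrc, 1).
        return Integer.rotateLeft(streamCrc, 1) ^ blockCrc;
    }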
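
Note on the getPreamble(ByteBuf) helper added in Snappy.java: the Snappy length preamble is a little-endian base-128 varint, where each byte carries 7 data bits and a set high bit means another byte follows. A standalone sketch of that decoding rule, using a plain byte array instead of Netty's ByteBuf (readVarint is a hypothetical stand-in for the package-private readPreamble):

    // Decodes a Snappy length varint starting at offset, or returns -1 if the
    // buffer ends before the varint does (i.e. more data must be awaited).
    static long readVarint(byte[] in, int offset) {
        long length = 0;
        for (int shift = 0; shift <= 28; shift += 7) {   // at most 5 bytes for a 32-bit length
            if (offset >= in.length) {
                return -1;                               // varint incomplete
            }
            int b = in[offset++] & 0xFF;
            length |= (long) (b & 0x7F) << shift;
            if ((b & 0x80) == 0) {
                return length;                           // high bit clear: last byte
            }
        }
        throw new IllegalStateException("length varint uses more than 5 bytes");
    }

For example, 65536 (the MAX_DECOMPRESSED_DATA_SIZE bound checked above) encodes as the bytes 0x80 0x80 0x04. Because the varint sits at the start of a COMPRESSED_DATA chunk and getPreamble restores the reader index after peeking, the patch can validate uncompressedSize before allocating the output buffer.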