diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..5eb6edd9de6b03be18eff222997a268aded796fc
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.tar.bz2 filter=lfs diff=lfs merge=lfs -text
diff --git a/.lfsconfig b/.lfsconfig
new file mode 100644
index 0000000000000000000000000000000000000000..7ab6b1115ab9c270a1684a030596559a99221465
--- /dev/null
+++ b/.lfsconfig
@@ -0,0 +1,2 @@
+[lfs]
+  url = https://artlfs.openeuler.openatom.cn/src-openeuler/trafficserver
diff --git a/CVE-2024-38311-pre-Do-not-allow-extra-CRs-in-chunks-11936-11942.patch b/CVE-2024-38311-pre-Do-not-allow-extra-CRs-in-chunks-11936-11942.patch
deleted file mode 100644
index 42215f5e45c4c9814c6a40dec4e5fbbca88a773c..0000000000000000000000000000000000000000
--- a/CVE-2024-38311-pre-Do-not-allow-extra-CRs-in-chunks-11936-11942.patch
+++ /dev/null
@@ -1,168 +0,0 @@
-From d3fd9ac0380099de6bb1fb973234aa278000aecc Mon Sep 17 00:00:00 2001
-From: Masakazu Kitajo
-Date: Wed, 15 Jan 2025 11:10:36 -0700
-Subject: [PATCH] Do not allow extra CRs in chunks (#11936) (#11942)
-
-* Do not allow extra CRs in chunks (#11936)
-
-* Do not allow extra CRs in chunks
-
-* Renumber test uuid
-
-* Add test cases and fix an oversight
-
-* Use prefix increment
-
-(cherry picked from commit f5f2256c00abbfd02c22fbae3937da1c7bd8a34f)
-
-* Fix test case
----
- proxy/http/HttpTunnel.cc                     | 12 +++++
- .../bad_chunked_encoding.test.py             |  6 +--
- .../malformed_chunked_header.replay.yaml     | 49 +++++++++++++++++--
- 3 files changed, 61 insertions(+), 6 deletions(-)
-
-diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc
-index 4b20784f395..adb3cd9bc98 100644
---- a/proxy/http/HttpTunnel.cc
-+++ b/proxy/http/HttpTunnel.cc
-@@ -136,6 +136,7 @@ ChunkedHandler::read_size()
- {
-   int64_t bytes_used;
-   bool done = false;
-+  int cr = 0;
- 
-   while (chunked_reader->read_avail() > 0 && !done) {
-     const char *tmp = chunked_reader->start();
-@@ -174,6 +175,9 @@ ChunkedHandler::read_size()
-           done = true;
-           break;
-         } else {
-+          if (ParseRules::is_cr(*tmp)) {
-+            ++cr;
-+          }
-           state = CHUNK_READ_SIZE_CRLF; // now look for CRLF
-         }
-       }
-@@ -183,7 +187,15 @@
-         cur_chunk_bytes_left = (cur_chunk_size = running_sum);
-         state = (running_sum == 0) ? CHUNK_READ_TRAILER_BLANK : CHUNK_READ_CHUNK;
-         done = true;
-+        cr = 0;
-         break;
-+      } else if (ParseRules::is_cr(*tmp)) {
-+        if (cr != 0) {
-+          state = CHUNK_READ_ERROR;
-+          done = true;
-+          break;
-+        }
-+        ++cr;
-       }
-     } else if (state == CHUNK_READ_SIZE_START) {
-       if (ParseRules::is_cr(*tmp)) {
-diff --git a/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py b/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py
-index e92181ccdf7..f22cb9d2d39 100644
---- a/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py
-+++ b/tests/gold_tests/chunked_encoding/bad_chunked_encoding.test.py
-@@ -172,13 +172,13 @@ def runChunkedTraffic(self):
-         # code from the verifier client.
-         tr.Processes.Default.ReturnCode = 1
-         tr.Processes.Default.Streams.stdout += Testers.ContainsExpression(
--            r"(Unexpected chunked content for key 4: too small|Failed HTTP/1 transaction with key: 4)",
-+            r"(Unexpected chunked content for key 101: too small|Failed HTTP/1 transaction with key: 101)",
-             "Verify that ATS closed the forth transaction.")
-         tr.Processes.Default.Streams.stdout += Testers.ContainsExpression(
--            r"(Unexpected chunked content for key 5: too small|Failed HTTP/1 transaction with key: 5)",
-+            r"(Unexpected chunked content for key 102: too small|Failed HTTP/1 transaction with key: 102)",
-             "Verify that ATS closed the fifth transaction.")
-         tr.Processes.Default.Streams.stdout += Testers.ContainsExpression(
--            r"(Unexpected chunked content for key 6: too small|Failed HTTP/1 transaction with key: 6)",
-+            r"(Unexpected chunked content for key 103: too small|Failed HTTP/1 transaction with key: 103)",
-             "Verify that ATS closed the sixth transaction.")
- 
-         # ATS should close the connection before any body gets through. "def"
-diff --git a/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml b/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml
-index ae135d77ab7..5f136a7eeba 100644
---- a/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml
-+++ b/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml
-@@ -78,6 +78,26 @@ sessions:
-   server-response:
-     status: 200
- 
-+- transactions:
-+  - client-request:
-+      method: "POST"
-+      version: "1.1"
-+      url: /malformed/chunk/header3
-+      headers:
-+        fields:
-+        - [ Host, example.com ]
-+        - [ Transfer-Encoding, chunked ]
-+        - [ uuid, 4 ]
-+      content:
-+        transfer: plain
-+        encoding: uri
-+        # BWS cannot have CR
-+        data: 3%0D%0D%0Aabc%0D%0A0%0D%0A%0D%0A
-+
-+    # The connection will be dropped and this response will not go out.
-+    server-response:
-+      status: 200
-+
- #
- # Now repeat the above two malformed chunk header tests, but on the server
- # side.
-@@ -90,7 +110,7 @@
-       headers:
-         fields:
-         - [ Host, example.com ]
--        - [ uuid, 4 ]
-+        - [ uuid, 101 ]
- 
-   # The connection will be dropped and this response will not go out.
-   server-response:
-@@ -113,7 +133,7 @@
-       headers:
-         fields:
-         - [ Host, example.com ]
--        - [ uuid, 5 ]
-+        - [ uuid, 102 ]
- 
-   # The connection will be dropped and this response will not go out.
-   server-response:
-@@ -136,7 +156,7 @@
-       headers:
-         fields:
-         - [ Host, example.com ]
--        - [ uuid, 6 ]
-+        - [ uuid, 103 ]
- 
-   # The connection will be dropped and this response will not go out.
-   server-response:
-@@ -150,3 +170,26 @@
-         encoding: uri
-         # Super large chunk header, larger than will fit in an int.
-         data: 111111113%0D%0Adef%0D%0A0%0D%0A%0D%0A
-+
-+- transactions:
-+  - client-request:
-+      method: "GET"
-+      version: "1.1"
-+      url: /response/malformed/chunk/size2
-+      headers:
-+        fields:
-+        - [ Host, example.com ]
-+        - [ uuid, 104 ]
-+
-+    # The connection will be dropped and this response will not go out.
-+    server-response:
-+      status: 200
-+      reason: OK
-+      headers:
-+        fields:
-+        - [ Transfer-Encoding, chunked ]
-+      content:
-+        transfer: plain
-+        encoding: uri
-+        # BWS cannot have CR
-+        data: 3%0D%0D%0Adef%0D%0A0%0D%0A%0D%0A
diff --git a/CVE-2024-38311.patch b/CVE-2024-38311.patch
deleted file mode 100644
index 51d8a9305735698ad55ef658bd3cca8a1f863e90..0000000000000000000000000000000000000000
--- a/CVE-2024-38311.patch
+++ /dev/null
@@ -1,1580 +0,0 @@
-From a16c4b6bb0b126047c68dafbdf6311ac1586fc0b Mon Sep 17 00:00:00 2001
-From: Bryan Call
-Date: Tue, 4 Mar 2025 11:39:44 -0800
-Subject: [PATCH] Fix chunked pipelined requests (#12071)
-
-* Fix pipelined request for chunked bodies
-
-This addresses a variety of bugs concerning pipelined requests. In
-particular, the HttpTunnel logic had fundamentally assumed that it could
-consume all bytes available in the producer's reader. If a request was
-pipelined after a previous request that had a chunked body, this would
-result in the second request being unparsed and either sent along to the
-origin or dropped on the floor, depending on configuration. This adds an
-explicit autest for pipelined requests and addresses these issues.
-
-This patch largely does the following:
-
-1. Updates the copy_partial_post_data data to take the number of bytes
-   it consumes rather than consuming all bytes in the reader. It also
-   now returns the number of bytes it consumes, which the tunnel needs
-   to keep track of the number of bytes it processes.
-2. Previous to this patch, the HttpTunnel assumed that it could consume
-   all bytes in the reader originally passed to it (all bytes in
-   init_bytes_done). This simply will not work for pipelined requests.
-   This addresses this issue by adding a new variable to the tunnel:
-   bytes_consumed. This way the tunnel can keep track of how many bytes
-   it consumed while processing the request body, which allows the
-   HttpSM to later process just the right number of bytes from its
-   reader rather than eating into any pipelined requests that follow it.
-3. The HttpSM must not consume bytes from its client reader that are
-   pipelined requests. It now uses the tunnel's processing
-   bytes_consumed to process bytes from its reader rather than simply
-   consuming all read_available() bytes from it.
-
-* Fix bytes consumed chunk computation
-
-Fix a possible miscalculation of bytes consumed while parsing chunked
-content.
-
-* is_read_closed fix
-
-* Verify expected responses are received.
-
-* Updated formatting
-
----------
-
-Co-authored-by: Brian Neradt
----
- proxy/ProxyTransaction.cc                     |   6 +
- proxy/ProxyTransaction.h                      |   1 +
- proxy/http/HttpSM.cc                          |  70 ++++--
- proxy/http/HttpSM.h                           |  14 +-
- proxy/http/HttpTunnel.cc                      | 171 +++++++++------
- proxy/http/HttpTunnel.h                       | 151 +++++++++++--
- proxy/http2/Http2Stream.cc                    |   6 +
- proxy/http2/Http2Stream.h                     |   1 +
- src/traffic_server/FetchSM.cc                 |   3 +-
- tests/gold_tests/pipeline/pipeline.test.py    | 137 ++++++++++++
- tests/gold_tests/pipeline/pipeline_client.py  | 104 +++++++++
- tests/gold_tests/pipeline/pipeline_server.py  | 201 ++++++++++++++++++
- .../gold_tests/redirect/redirect_post.test.py |   1 +
- .../timeout/inactive_client_timeout.test.py   |   9 +-
- tests/gold_tests/timeout/slow_server.yaml     |  37 +++-
- 15 files changed, 796 insertions(+), 116 deletions(-)
- create mode 100644 tests/gold_tests/pipeline/pipeline.test.py
- create mode 100644 tests/gold_tests/pipeline/pipeline_client.py
- create mode 100644 tests/gold_tests/pipeline/pipeline_server.py
-
-diff --git a/proxy/ProxyTransaction.cc b/proxy/ProxyTransaction.cc
-index c56ddb348d3..dfe5e565130 100644
---- a/proxy/ProxyTransaction.cc
-+++ b/proxy/ProxyTransaction.cc
-@@ -238,6 +238,12 @@ ProxyTransaction::get_version(HTTPHdr &hdr) const
-   return hdr.version_get();
- }
- 
-+bool
-+ProxyTransaction::is_read_closed() const
-+{
-+  return false;
-+}
-+
- bool
- ProxyTransaction::allow_half_open() const
- {
-diff --git a/proxy/ProxyTransaction.h b/proxy/ProxyTransaction.h
-index 261af6829bd..1117880a04f 100644
---- a/proxy/ProxyTransaction.h
-+++ b/proxy/ProxyTransaction.h
-@@ -49,6 +49,7 @@ class ProxyTransaction : public VConnection
-   virtual void set_inactivity_timeout(ink_hrtime timeout_in);
-   virtual void cancel_inactivity_timeout();
-   virtual void cancel_active_timeout();
-+  virtual bool is_read_closed() const;
- 
-   // Implement VConnection interface.
-   VIO *do_io_read(Continuation *c, int64_t nbytes = INT64_MAX, MIOBuffer *buf = nullptr) override;
-diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc
-index 4e09795f036..cdc05461320 100644
---- a/proxy/http/HttpSM.cc
-+++ b/proxy/http/HttpSM.cc
-@@ -973,10 +973,8 @@ HttpSM::wait_for_full_body()
-   // Next order of business if copy the remaining data from the
-   // header buffer into new buffer
-   int64_t post_bytes = chunked ? INT64_MAX : t_state.hdr_info.request_content_length;
--  client_request_body_bytes =
--    post_buffer->write(ua_txn->get_remote_reader(), chunked ? ua_txn->get_remote_reader()->read_avail() : post_bytes);
-+  post_buffer->write(ua_txn->get_remote_reader(), chunked ? ua_txn->get_remote_reader()->read_avail() : post_bytes);
- 
--  ua_txn->get_remote_reader()->consume(client_request_body_bytes);
-   p = tunnel.add_producer(ua_entry->vc, post_bytes, buf_start, &HttpSM::tunnel_handler_post_ua, HT_BUFFER_READ, "ua post buffer");
-   if (chunked) {
-     bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1;
-@@ -3633,7 +3631,24 @@ int
- HttpSM::tunnel_handler_post_ua(int event, HttpTunnelProducer *p)
- {
-   STATE_ENTER(&HttpSM::tunnel_handler_post_ua, event);
--  client_request_body_bytes = p->init_bytes_done + p->bytes_read;
-+
-+  // Now that the tunnel is done, it can tell us how many bytes were in the
-+  // body.
-+  if (client_request_body_bytes == 0) {
-+    // This is invoked multiple times for a transaction when buffering request
-+    // body data, so we only call this the first time when
-+    // client_request_body_bytes is 0.
-+    client_request_body_bytes   = p->bytes_consumed;
-+    IOBufferReader *client_reader = ua_txn->get_remote_reader();
-+    // p->bytes_consumed represents the number of body bytes the tunnel parsed
-+    // and consumed from the client. However, not all those bytes may have been
-+    // written to our _ua client transaction reader. We must not consume past
-+    // the number of bytes available.
-+    int64_t const bytes_to_consume = std::min(p->bytes_consumed, client_reader->read_avail());
-+    SMDebug("http_tunnel", "Consuming %" PRId64 " bytes from client reader with p->bytes_consumed: %" PRId64 " available: %" PRId64,
-+            bytes_to_consume, p->bytes_consumed, client_reader->read_avail());
-+    client_reader->consume(bytes_to_consume);
-+  }
- 
-   switch (event) {
-   case VC_EVENT_INACTIVITY_TIMEOUT:
-@@ -6095,8 +6110,8 @@ HttpSM::do_setup_post_tunnel(HttpVC_t to_vc_type)
-     IOBufferReader *postdata_producer_reader = postdata_producer_buffer->alloc_reader();
- 
-     postdata_producer_buffer->write(this->_postbuf.postdata_copy_buffer_start);
--    int64_t post_bytes = postdata_producer_reader->read_avail();
--    transfered_bytes   = post_bytes;
-+    int64_t post_bytes = chunked ? INT64_MAX : t_state.hdr_info.request_content_length;
-+    transferred_bytes  = post_bytes;
-     p = tunnel.add_producer(HTTP_TUNNEL_STATIC_PRODUCER, post_bytes, postdata_producer_reader, (HttpProducerHandler) nullptr,
-                             HT_STATIC, "redirect static agent post");
-   } else {
-@@ -6125,12 +6140,27 @@
- 
-     // Next order of business if copy the remaining data from the
-     // header buffer into new buffer
--    client_request_body_bytes =
--      post_buffer->write(ua_txn->get_remote_reader(), chunked ? ua_txn->get_remote_reader()->read_avail() : post_bytes);
- 
--    ua_txn->get_remote_reader()->consume(client_request_body_bytes);
--    p = tunnel.add_producer(ua_entry->vc, post_bytes - transfered_bytes, buf_start, &HttpSM::tunnel_handler_post_ua, HT_HTTP_CLIENT,
--                            "user agent post");
-+    int64_t num_body_bytes = 0;
-+    // If is_using_post_buffer has been used, then client_request_body_bytes
-+    // will have already been sent in wait_for_full_body and there will be
-+    // zero bytes in this user agent buffer. We don't want to clobber
-+    // client_request_body_bytes with a zero value here in those cases.
-+    if (client_request_body_bytes > 0) {
-+      num_body_bytes = client_request_body_bytes;
-+    } else {
-+      num_body_bytes =
-+        post_buffer->write(ua_txn->get_remote_reader(), chunked ? ua_txn->get_remote_reader()->read_avail() : post_bytes);
-+    }
-+    // Don't consume post_bytes here from ua_txn->get_remote_reader() since
-+    // we are not sure how many bytes the tunnel will use yet. Wait until
-+    // HttpSM::tunnel_handler_post_ua to consume the bytes.
-+    // The user agent has already sent all it has
-+    if (ua_txn->is_read_closed()) {
-+      post_bytes = num_body_bytes;
-+    }
-+    p = tunnel.add_producer(ua_entry->vc, post_bytes - transferred_bytes, buf_start, &HttpSM::tunnel_handler_post_ua,
-+                            HT_HTTP_CLIENT, "user agent post");
-   }
-   ua_entry->in_tunnel = true;
- 
-@@ -6847,6 +6877,8 @@ HttpSM::server_transfer_init(MIOBuffer *buf, int hdr_size)
-     // we'll get is already in the buffer
-     nbytes = server_txn->get_remote_reader()->read_avail() + hdr_size;
-   } else if (t_state.hdr_info.response_content_length == HTTP_UNDEFINED_CL) {
-+    // Chunked or otherwise, no length is defined. Pass -1 to tell the
-+    // tunnel that the size is unknown.
-     nbytes = -1;
-   } else {
-     // Set to copy to the number of bytes we want to write as
-@@ -8564,16 +8596,18 @@ HttpSM::rewind_state_machine()
- 
- // YTS Team, yamsat Plugin
- // Function to copy the partial Post data while tunnelling
--void
--PostDataBuffers::copy_partial_post_data()
-+int64_t
-+PostDataBuffers::copy_partial_post_data(int64_t consumed_bytes)
- {
-   if (post_data_buffer_done) {
--    return;
-+    return 0;
-   }
--  Debug("http_redirect", "[PostDataBuffers::copy_partial_post_data] wrote %" PRId64 " bytes to buffers %" PRId64 "",
--        this->ua_buffer_reader->read_avail(), this->postdata_copy_buffer_start->read_avail());
--  this->postdata_copy_buffer->write(this->ua_buffer_reader);
--  this->ua_buffer_reader->consume(this->ua_buffer_reader->read_avail());
-+  int64_t const bytes_to_copy = std::min(consumed_bytes, this->ua_buffer_reader->read_avail());
-+  Debug("http_redirect", "given %" PRId64 " bytes consumed, copying %" PRId64 " bytes to buffers with %" PRId64 " available bytes",
-+        consumed_bytes, bytes_to_copy, this->ua_buffer_reader->read_avail());
-+  this->postdata_copy_buffer->write(this->ua_buffer_reader, bytes_to_copy);
-+  this->ua_buffer_reader->consume(bytes_to_copy);
-+  return bytes_to_copy;
- }
- 
- IOBufferReader *
-diff --git a/proxy/http/HttpSM.h b/proxy/http/HttpSM.h
-index 91e0504ce35..05e15f5a7f5 100644
---- a/proxy/http/HttpSM.h
-+++ b/proxy/http/HttpSM.h
-@@ -179,7 +179,7 @@ class PostDataBuffers
- 
-   void clear();
-   void init(IOBufferReader *ua_reader);
--  void copy_partial_post_data();
-+  int64_t copy_partial_post_data(int64_t consumed_bytes);
-   IOBufferReader *get_post_data_buffer_clone_reader();
-   void
-   set_post_data_buffer_done(bool done)
-@@ -313,8 +313,8 @@ class HttpSM : public Continuation, public PluginUserArgs
-   bool debug_on          = false;   // Transaction specific debug flag
-   char *redirect_url     = nullptr; // url for force redirect (provide users a functionality to redirect to another url when needed)
-   int redirect_url_len   = 0;
--  int redirection_tries  = 0;       // To monitor number of redirections
--  int64_t transfered_bytes = 0;     // Added to calculate POST data
-+  int redirection_tries  = 0;       // To monitor number of redirections
-+  int64_t transferred_bytes = 0;    // Added to calculate POST data
- 
-   // Tunneling request to plugin
-   HttpPluginTunnel_t plugin_tunnel_type = HTTP_NO_PLUGIN_TUNNEL;
-@@ -331,7 +331,7 @@
-   int64_t postbuf_buffer_avail();
-   void postbuf_clear();
-   void disable_redirect();
--  void postbuf_copy_partial_data();
-+  int64_t postbuf_copy_partial_data(int64_t consumed_bytes);
-   void postbuf_init(IOBufferReader *ua_reader);
-   void set_postbuf_done(bool done);
-   IOBufferReader *get_postbuf_clone_reader();
-@@ -750,10 +750,10 @@
-   this->_postbuf.clear();
- }
- 
--inline void
--HttpSM::postbuf_copy_partial_data()
-+inline int64_t
-+HttpSM::postbuf_copy_partial_data(int64_t consumed_bytes)
- {
--  this->_postbuf.copy_partial_post_data();
-+  return this->_postbuf.copy_partial_post_data(consumed_bytes);
- }
- 
- inline void
-diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc
-index adb3cd9bc98..1508179e6b5 100644
---- a/proxy/http/HttpTunnel.cc
-+++ b/proxy/http/HttpTunnel.cc
-@@ -38,6 +38,9 @@
- #include "tscore/ParseRules.h"
- #include "tscore/ink_memory.h"
- 
-+#include
-+#include
-+
- static const int min_block_transfer_bytes = 256;
- static const char *const CHUNK_HEADER_FMT = "%" PRIx64 "\r\n";
- // This should be as small as possible because it will only hold the
-@@ -131,19 +134,19 @@ ChunkedHandler::set_max_chunk_size(int64_t size)
-   max_chunk_header_len = snprintf(max_chunk_header, sizeof(max_chunk_header), CHUNK_HEADER_FMT, max_chunk_size);
- }
- 
--void
-+int64_t
- ChunkedHandler::read_size()
- {
--  int64_t bytes_used;
--  bool done = false;
--  int cr = 0;
-+  int64_t bytes_consumed = 0;
-+  bool done              = false;
-+  int cr                 = 0;
- 
--  while (chunked_reader->read_avail() > 0 && !done) {
-+  while (chunked_reader->is_read_avail_more_than(0) && !done) {
-     const char *tmp   = chunked_reader->start();
-     int64_t data_size = chunked_reader->block_read_avail();
- 
-     ink_assert(data_size > 0);
--    bytes_used = 0;
-+    int64_t bytes_used = 0;
- 
-     while (data_size > 0) {
-       bytes_used++;
-@@ -218,7 +221,9 @@
-       chunked_size += bytes_used;
-     }
-     chunked_reader->consume(bytes_used);
-+    bytes_consumed += bytes_used;
-   }
-+  return bytes_consumed;
- }
- 
- // int ChunkedHandler::transfer_bytes()
-@@ -240,7 +245,7 @@ ChunkedHandler::transfer_bytes()
-       chunked_size += moved;
-     }
-     chunked_reader->consume(moved);
--    cur_chunk_bytes_left = cur_chunk_bytes_left - moved;
-+    cur_chunk_bytes_left -= moved;
-     return moved;
-   }
- 
-@@ -274,10 +279,10 @@
-   return total_moved;
- }
- 
--void
-+int64_t
- ChunkedHandler::read_chunk()
- {
--  int64_t b = transfer_bytes();
-+  int64_t transferred_bytes = transfer_bytes();
- 
-   ink_assert(cur_chunk_bytes_left >= 0);
-   if (cur_chunk_bytes_left == 0) {
-@@ -285,21 +290,23 @@
- 
-     state = CHUNK_READ_SIZE_START;
-   } else if (cur_chunk_bytes_left > 0) {
--    Debug("http_chunk", "read %" PRId64 " bytes of an %" PRId64 " chunk", b, cur_chunk_size);
-+    Debug("http_chunk", "read %" PRId64 " bytes of an %" PRId64 " chunk", transferred_bytes, cur_chunk_size);
-   }
-+  return transferred_bytes;
- }
- 
--void
-+int64_t
- ChunkedHandler::read_trailer()
- {
--  int64_t bytes_used;
--  bool done = false;
-+  int64_t bytes_consumed = 0;
-+  bool done              = false;
- 
-   while (chunked_reader->is_read_avail_more_than(0) && !done) {
-     const char *tmp   = chunked_reader->start();
-     int64_t data_size = chunked_reader->block_read_avail();
- 
-     ink_assert(data_size > 0);
-+    int64_t bytes_used = 0;
-     for (bytes_used = 0; data_size > 0; data_size--) {
-       bytes_used++;
- 
-@@ -337,43 +344,48 @@
-       tmp++;
-     }
-     chunked_reader->consume(bytes_used);
-+    bytes_consumed += bytes_used;
-   }
-+  return bytes_consumed;
- }
- 
--bool
-+std::pair
- ChunkedHandler::process_chunked_content()
- {
-+  int64_t bytes_read = 0;
-   while (chunked_reader->is_read_avail_more_than(0) && state != CHUNK_READ_DONE && state != CHUNK_READ_ERROR) {
-     switch (state) {
-     case CHUNK_READ_SIZE:
-     case CHUNK_READ_SIZE_CRLF:
-     case CHUNK_READ_SIZE_START:
--      read_size();
-+      bytes_read += read_size();
-       break;
-     case CHUNK_READ_CHUNK:
--      read_chunk();
-+      bytes_read += read_chunk();
-       break;
-     case CHUNK_READ_TRAILER_BLANK:
-     case CHUNK_READ_TRAILER_CR:
-     case CHUNK_READ_TRAILER_LINE:
--      read_trailer();
-+      bytes_read += read_trailer();
-       break;
-     case CHUNK_FLOW_CONTROL:
--      return false;
-+      return std::make_pair(bytes_read, false);
-     default:
-       ink_release_assert(0);
-       break;
-     }
-   }
--  return (state == CHUNK_READ_DONE || state == CHUNK_READ_ERROR);
-+  auto const done = (state == CHUNK_READ_DONE || state == CHUNK_READ_ERROR);
-+  return std::make_pair(bytes_read, done);
- }
- 
--bool
-+std::pair
- ChunkedHandler::generate_chunked_content()
- {
-   char tmp[16];
-   bool server_done = false;
-   int64_t r_avail;
-+  int64_t consumed_bytes = 0;
- 
-   ink_assert(max_chunk_header_len);
- 
-@@ -414,6 +426,7 @@
-     chunked_buffer->write(dechunked_reader, write_val);
-     chunked_size += write_val;
-     dechunked_reader->consume(write_val);
-+    consumed_bytes += write_val;
- 
-     // Output the trailing CRLF.
-     chunked_buffer->write("\r\n", 2);
-@@ -426,9 +439,9 @@
-     // Add the chunked transfer coding trailer.
-     chunked_buffer->write("0\r\n\r\n", 5);
-     chunked_size += 5;
--    return true;
-+    return std::make_pair(consumed_bytes, true);
-   }
--  return false;
-+  return std::make_pair(consumed_bytes, false);
- }
- 
- HttpTunnelProducer::HttpTunnelProducer() : consumer_list() {}
-@@ -678,7 +691,7 @@ HttpTunnel::add_producer(VConnection *vc, int64_t nbytes_arg, IOBufferReader *re
-   ink_assert(reader_start->mbuf);
-   if ((p = alloc_producer()) != nullptr) {
-     p->vc           = vc;
--    p->nbytes       = nbytes_arg;
-+    p->total_bytes  = nbytes_arg;
-     p->buffer_start = reader_start;
-     p->read_buffer  = reader_start->mbuf;
-     p->vc_handler   = sm_handler;
-@@ -690,22 +703,23 @@
-     p->do_dechunking       = false;
-     p->do_chunked_passthru = false;
- 
--    p->init_bytes_done = reader_start->read_avail();
--    if (p->nbytes < 0) {
--      p->ntodo = p->nbytes;
-+    p->init_bytes_done = p->buffer_start->read_avail();
-+    if (p->total_bytes < 0 || p->total_bytes == INT64_MAX) {
-+      p->total_bytes = INT64_MAX; // A negative nbytes_arg is a synonym for INT64_MAX.
-+      p->ntodo       = INT64_MAX;
-     } else { // The byte count given us includes bytes
-              // that already may be in the buffer.
-              // ntodo represents the number of bytes
-              // the tunneling mechanism needs to read
-              // for the producer
--      p->ntodo = p->nbytes - p->init_bytes_done;
-+      p->ntodo = std::max(p->total_bytes - p->init_bytes_done, INT64_C(0));
-       ink_assert(p->ntodo >= 0);
-     }
- 
-     // We are static, the producer is never "alive"
-     // It just has data in the buffer
-     if (vc == HTTP_TUNNEL_STATIC_PRODUCER) {
--      ink_assert(p->ntodo == 0);
-+      ink_assert(p->ntodo >= 0);
-       p->alive        = false;
-       p->read_success = true;
-     } else {
-@@ -844,9 +858,15 @@ HttpTunnel::producer_run(HttpTunnelProducer *p)
-       }
-     }
-   }
-+  if (!p->is_handling_chunked_content()) {
-+    // If we are not handling chunked content, then we will be consuming all the
-+    // bytes available in the reader, up until the total bytes that the tunnel
-+    // will be processing.
-+    p->bytes_consumed += std::min(p->total_bytes, p->init_bytes_done);
-+  }
- 
--  int64_t consumer_n;
--  int64_t producer_n;
-+  int64_t consumer_n = 0;
-+  int64_t producer_n = 0;
- 
-   ink_assert(p->vc != nullptr);
-   active = true;
- 
-   IOBufferReader *chunked_buffer_start     = nullptr;
-   IOBufferReader *dechunked_buffer_start   = nullptr;
-   IOBufferReader *passthrough_buffer_start = nullptr;
--  if (p->do_chunking || p->do_dechunking || p->do_chunked_passthru) {
-+  if (p->is_handling_chunked_content()) {
-+    // For all the chunking cases, we must only copy bytes as we process them.
-+    body_bytes_to_copy = 0;
-+
-     p->chunked_handler.init(p->buffer_start, p, this->http_drop_chunked_trailers);
- 
-     // Copy the header into the chunked/dechunked buffers.
-@@ -888,17 +911,23 @@
- 
-   int64_t read_start_pos = 0;
-   if (p->vc_type == HT_CACHE_READ && sm->t_state.range_setup == HttpTransact::RANGE_NOT_TRANSFORM_REQUESTED) {
--    ink_assert(sm->t_state.num_range_fields == 1); // we current just support only one range entry
-+    ink_assert(sm->t_state.num_range_fields == 1); // we currently just support only one range entry
-     read_start_pos = sm->t_state.ranges[0]._start;
-     producer_n     = (sm->t_state.ranges[0]._end - sm->t_state.ranges[0]._start) + 1;
-     consumer_n     = (producer_n + sm->client_response_hdr_bytes);
--  } else if (p->nbytes >= 0) {
--    consumer_n = p->nbytes;
-+  } else if (p->total_bytes >= 0) {
-+    consumer_n = p->total_bytes;
-     producer_n = p->ntodo;
-   } else {
-     consumer_n = (producer_n = INT64_MAX);
-   }
- 
-+  if (!p->is_handling_chunked_content()) {
-+    // No chunking being handled, so the user specified a number of bytes
-+    // described by Content-Length. Use that value.
-+    body_bytes_to_copy = producer_n - body_bytes_copied;
-+  }
-+
-   // At least set up the consumer readers first so the data
-   // doesn't disappear out from under the tunnel
-   for (c = p->consumer_list.head; c; c = c->link.next) {
-@@ -933,9 +962,9 @@
-       c->buffer_reader = p->read_buffer->clone_reader(p->buffer_start);
-     }
- 
--    // Consume bytes of the reader if we skipping bytes
-+    // Consume bytes from the reader if we are skipping bytes.
-     if (c->skip_bytes > 0) {
--      ink_assert(c->skip_bytes <= c->buffer_reader->read_avail());
-+      ink_release_assert(c->skip_bytes <= c->buffer_reader->read_avail());
-       c->buffer_reader->consume(c->skip_bytes);
-     }
-   }
-@@ -958,7 +987,8 @@
-         return;
-       }
-     } else {
--      sm->postbuf_copy_partial_data();
-+      body_bytes_copied += sm->postbuf_copy_partial_data(body_bytes_to_copy);
-+      body_bytes_to_copy = 0;
-     }
-   } // end of added logic for partial POST
- 
-@@ -966,6 +996,7 @@
-     // remove the chunked reader marker so that it doesn't act like a buffer guard
-     p->chunked_handler.chunked_buffer->dealloc_reader(chunked_buffer_start);
-     p->chunked_handler.dechunked_reader->consume(p->chunked_handler.skip_bytes);
-+    p->bytes_consumed += p->chunked_handler.skip_bytes;
- 
-     // If there is data to process in the buffer, do it now
-     producer_handler(VC_EVENT_READ_READY, p);
-@@ -984,12 +1015,10 @@
-           p->chunked_handler.chunked_reader->read_avail());
-     if (!transform_consumer && (p->chunked_handler.chunked_reader->read_avail() >= p->chunked_handler.skip_bytes)) {
-       p->chunked_handler.chunked_reader->consume(p->chunked_handler.skip_bytes);
-+      p->bytes_consumed += p->chunked_handler.skip_bytes;
-       Debug("http_tunnel", "[producer_run] do_dechunking p->chunked_handler.skip_bytes = %" PRId64 "",
-             p->chunked_handler.skip_bytes);
-     }
--    // if(p->chunked_handler.chunked_reader->read_avail() > 0)
--    //   p->chunked_handler.chunked_reader->consume(
--    //     p->chunked_handler.skip_bytes);
- 
-     producer_handler(VC_EVENT_READ_READY, p);
-     if (sm->get_postbuf_done() && p->vc_type == HT_HTTP_CLIENT) { // read_avail() == 0
-@@ -1104,7 +1133,10 @@ HttpTunnel::producer_handler_dechunked(int event, HttpTunnelProducer *p)
-   /* fallthrough */
-   case VC_EVENT_READ_READY:
-     p->last_event = p->chunked_handler.last_server_event = event;
--    if (p->chunked_handler.generate_chunked_content()) { // We are done, make sure the consumer is activated
-+    auto const [consumed_bytes, done] = p->chunked_handler.generate_chunked_content();
-+    p->bytes_consumed += consumed_bytes;
-+    body_bytes_to_copy = consumed_bytes;
-+    if (done) { // We are done, make sure the consumer is activated
-       HttpTunnelConsumer *c;
-       for (c = p->consumer_list.head; c; c = c->link.next) {
-         if (c->alive) {
-@@ -1147,7 +1179,9 @@ HttpTunnel::producer_handler_chunked(int event, HttpTunnelProducer *p)
-   }
- 
-   p->last_event = p->chunked_handler.last_server_event = event;
--  bool done = p->chunked_handler.process_chunked_content();
-+  auto const [bytes_consumed, done] = p->chunked_handler.process_chunked_content();
-+  p->bytes_consumed += bytes_consumed;
-+  body_bytes_to_copy = bytes_consumed;
- 
-   // If we couldn't understand the encoding, return
-   // an error
-@@ -1201,16 +1235,10 @@ HttpTunnel::producer_handler(int event, HttpTunnelProducer *p)
- 
-   // Handle chunking/dechunking/chunked-passthrough if necessary.
-   if (p->do_chunking) {
-+    // This will update body_bytes_to_copy with the number of bytes copied.
-     event = producer_handler_dechunked(event, p);
--
--    // If we were in PRECOMPLETE when this function was called
--    // and we are doing chunking, then we just wrote the last
--    // chunk in the function call above. We are done with the
--    // tunnel.
--    if (event == HTTP_TUNNEL_EVENT_PRECOMPLETE) {
--      event = VC_EVENT_EOS;
--    }
-   } else if (p->do_dechunking || p->do_chunked_passthru) {
-+    // This will update body_bytes_to_copy with the number of bytes copied.
-     event = producer_handler_chunked(event, p);
-   } else {
-     p->last_event = event;
-@@ -1233,7 +1261,12 @@
-         event = VC_EVENT_ERROR;
-       }
-     } else {
--      sm->postbuf_copy_partial_data();
-+      if (!p->is_handling_chunked_content()) {
-+        // The chunk handlers didn't consume bytes. Pull bytes as needed.
-+        body_bytes_to_copy = p->total_bytes - body_bytes_copied;
-+      }
-+      body_bytes_copied += sm->postbuf_copy_partial_data(body_bytes_to_copy);
-+      body_bytes_to_copy = 0;
-      if (event == VC_EVENT_READ_COMPLETE || event == HTTP_TUNNEL_EVENT_PRECOMPLETE || event == VC_EVENT_EOS) {
-        sm->set_postbuf_done(true);
-      }
-@@ -1268,6 +1301,9 @@
-     p->alive = false;
-     if (p->read_vio) {
-       p->bytes_read = p->read_vio->ndone;
-+      if (!p->is_handling_chunked_content()) {
-+        p->bytes_consumed += p->bytes_read;
-+      }
-     } else {
-       // If we are chunked, we can receive the whole document
-       // along with the  header without knowing it (due to
-@@ -1305,6 +1341,9 @@
-     p->alive = false;
-     if (p->read_vio) {
-       p->bytes_read = p->read_vio->ndone;
-+      if (!p->is_handling_chunked_content()) {
-+        p->bytes_consumed += p->bytes_read;
-+      }
-     } else {
-       p->bytes_read = 0;
-     }
-@@ -1526,6 +1565,9 @@ HttpTunnel::chain_abort_all(HttpTunnelProducer *p)
-     p->alive = false;
-     if (p->read_vio) {
-       p->bytes_read = p->read_vio->ndone;
-+      if (!p->is_handling_chunked_content()) {
-+        p->bytes_consumed += p->bytes_read;
-+      }
-     }
-     if (p->self_consumer) {
-       p->self_consumer->alive = false;
-@@ -1543,8 +1585,8 @@
- int64_t
- HttpTunnel::final_consumer_bytes_to_write(HttpTunnelProducer *p, HttpTunnelConsumer *c)
- {
--  int64_t total_bytes = 0;
--  int64_t consumer_n  = 0;
-+  int64_t bytes_to_write = 0;
-+  int64_t consumer_n     = 0;
-   if (p->alive) {
-     consumer_n = INT64_MAX;
-   } else {
-@@ -1554,25 +1596,25 @@
-       switch (action) {
-       case TCA_CHUNK_CONTENT:
-       case TCA_PASSTHRU_DECHUNKED_CONTENT:
--        total_bytes = p->bytes_read + p->init_bytes_done;
-+        bytes_to_write = p->bytes_consumed;
-         break;
-       case TCA_DECHUNK_CONTENT:
-       case TCA_PASSTHRU_CHUNKED_CONTENT:
--        total_bytes = p->chunked_handler.skip_bytes + p->chunked_handler.dechunked_size;
-+        bytes_to_write = p->chunked_handler.skip_bytes + p->chunked_handler.dechunked_size;
-         break;
-       default:
-         break;
-       }
-     } else if (action == TCA_CHUNK_CONTENT) {
--      total_bytes = p->chunked_handler.skip_bytes + p->chunked_handler.chunked_size;
-+      bytes_to_write = p->chunked_handler.skip_bytes + p->chunked_handler.chunked_size;
-     } else if (action == TCA_DECHUNK_CONTENT) {
--      total_bytes = p->chunked_handler.skip_bytes + p->chunked_handler.dechunked_size;
-+      bytes_to_write = p->chunked_handler.skip_bytes + p->chunked_handler.dechunked_size;
-     } else if (action == TCA_PASSTHRU_CHUNKED_CONTENT) {
--      total_bytes = p->bytes_read + p->init_bytes_done;
-+      bytes_to_write = p->bytes_consumed;
-     } else {
--      total_bytes = p->bytes_read + p->init_bytes_done;
-+      bytes_to_write = p->bytes_consumed;
-     }
--    consumer_n = total_bytes - c->skip_bytes;
-+    consumer_n = bytes_to_write - c->skip_bytes;
-   }
-   }
-   return consumer_n;
-@@ -1594,12 +1636,10 @@ HttpTunnel::finish_all_internal(HttpTunnelProducer *p, bool chain)
-   TunnelChunkingAction_t action = p->chunking_action;
- 
-   if (action == TCA_PASSTHRU_CHUNKED_CONTENT) {
--    // if the only chunked data was in the initial read, make sure we don't consume too much
-+    // Verify that we consumed the number of bytes we accounted for via p->bytes_consumed.
-     if (p->bytes_read == 0 && p->buffer_start != nullptr) {
-       int num_read = p->buffer_start->read_avail() - p->chunked_handler.chunked_reader->read_avail();
--      if (num_read < p->init_bytes_done) {
--        p->init_bytes_done = num_read;
--      }
-+      ink_release_assert(num_read == p->bytes_consumed);
-     }
-   }
- 
-@@ -1694,6 +1734,9 @@ HttpTunnel::close_vc(HttpTunnelConsumer *c)
-     p->alive = false;
-     if (p->read_vio) {
-       p->bytes_read = p->read_vio->ndone;
-+      if (!p->is_handling_chunked_content()) {
-+        p->bytes_consumed += p->bytes_read;
-+      }
-     }
-   }
- 
-diff --git a/proxy/http/HttpTunnel.h b/proxy/http/HttpTunnel.h
-index 3ce994b11ed..3aac38aca68 100644
---- a/proxy/http/HttpTunnel.h
-+++ b/proxy/http/HttpTunnel.h
-@@ -112,7 +112,14 @@ struct ChunkedHandler {
-    */
-   bool drop_chunked_trailers = false;
- 
--  bool truncation = false;
-+  bool truncation = false;
-+
-+  /** The number of bytes to skip from the reader because they are not body bytes.
-+   *
-+   * These skipped bytes are generally header bytes. We copy these in for any
-+   * internal buffers we'll send to consumers, but skip them when parsing body
-+   * bytes.
-+   */
-   int64_t skip_bytes = 0;
- 
-   ChunkedState state = CHUNK_READ_CHUNK;
-@@ -145,14 +152,54 @@
-   /// If @a size is zero it is set to @c DEFAULT_MAX_CHUNK_SIZE.
-   void set_max_chunk_size(int64_t size);
- 
--  // Returns true if complete, false otherwise
--  bool process_chunked_content();
--  bool generate_chunked_content();
-+  /** Consumes and processes chunked content.
-+   *
-+   * This consumes data from @a chunked_reader and, if dechunking, writes the
-+   * dechunked body to @a dechunked_buffer.
-+   *
-+   * @return The number of bytes consumed from the chunked reader and true if
-+   * the entire chunked content is processed, false otherwise.
-+   */
-+  std::pair process_chunked_content();
-+
-+  /** Writes chunked content.
-+   *
-+   * This reads from @a dechunked_reader and writes chunked formatted content to
-+   * @a chunked_buffer.
-+   *
-+   * @return The number of bytes consumed from the dechunked reader and true if
-+   * the entire chunked content is written, false otherwise.
-+   */
-+  std::pair generate_chunked_content();
- 
-  private:
--  void read_size();
--  void read_chunk();
--  void read_trailer();
-+  /** Read a chunk header containing the size of the chunk.
-+   *
-+   * @return The number of bytes consumed from the chunked buffer reader.
-+   */
-+  int64_t read_size();
-+
-+  /** Read a chunk body.
-+   *
-+   * This is called after read_size so that the chunk size is known.
-+   *
-+   * @return The number of bytes consumed from the chunked buffer reader.
-+   */
-+  int64_t read_chunk();
-+
-+  /** Read a chunk trailer.
-+   *
-+   * @return The number of bytes consumed from the chunked buffer reader.
-+   */
-+  int64_t read_trailer();
-+
-+  /** Transfer body bytes from the chunked reader.
-+   *
-+   * This will either simply consume the chunked body bytes in the case of
-+   * chunked passthrough, or transfer the chunked body to the dechunked buffer.
-+   *
-+   * @return The number of bytes consumed from the chunked buffer reader.
-+   */
-   int64_t transfer_bytes();
- 
-   constexpr static std::string_view FINAL_CRLF = "\r\n";
-@@ -209,12 +256,51 @@ struct HttpTunnelProducer {
-   bool do_dechunking       = false;
-   bool do_chunked_passthru = false;
- 
--  int64_t init_bytes_done = 0; // bytes passed in buffer
--  int64_t nbytes          = 0; // total bytes (client's perspective)
--  int64_t ntodo           = 0; // what this vc needs to do
--  int64_t bytes_read      = 0; // total bytes read from the vc
--  int handler_state       = 0; // state used the handlers
--  int last_event          = 0; ///< Tracking for flow control restarts.
-+  /** The number of bytes available in the reader at the start of the tunnel.
-+   *
-+   * @note In the case of pipelined requests, not all these bytes should be used.
-+   */
-+  int64_t init_bytes_done = 0;
-+
-+  /** The total number of bytes read from the reader, including any @a skip_bytes.
-+   *
-+   * In straightforward cases where @a total_bytes is specified (non-INT64_MAX),
-+   * these should wind up being the same as @a total_bytes. For unspecified,
-+   * generally chunked content, this will be the number of bytes actually
-+   * consumed from the reader.
-+   *
-+   * @note that in the case of pipelined requests, this may be less than @a
-+   * init_bytes_done because some of those bytes might be for a future request
-+   * rather than for this body/tunnel.
-+   */
-+  int64_t bytes_consumed = 0;
-+
-+  /** The total number of bytes to be transferred through the tunnel.
-+   *
-+   * This will include any bytes skipped at the start of the tunnel.
-+   *
-+   * @note This is set by the creator of the tunnel and in the simple case the
-+   * value is precisely known via a Content-Length header value. However, a user
-+   * may set this to INT64_MAX or any negative value to indicate that the total
-+   * is unknown at the start of the tunnel (such as is the case with chunked
-+   * encoded content).
-+   */
-+  int64_t total_bytes = 0;
-+
-+  /** The number of bytes still to read after @a init_bytes_done to reach @a
-+   * total_bytes.
-+   *
-+   * A value of zero indicates that all the required bytes have already been
-+   * read off the socket. @a ntodo will be used to indicate how much more we
-+   * have to read.
-+   */
-+  int64_t ntodo = 0;
-+
-+  /** The number of bytes read from the vc since the start of the tunnel. */
-+  int64_t bytes_read = 0;
-+
-+  int handler_state = 0; // state used the handlers
-+  int last_event    = 0; ///< Tracking for flow control restarts.
- 
-   int num_consumers = 0;
- 
-@@ -232,6 +318,12 @@
-    */
-   uint64_t backlog(uint64_t limit = UINT64_MAX ///< More than this is irrelevant
-   );
-+  /** Indicate whether this producer is handling some kind of chunked content.
-+   *
-+   * @return True if this producer is handling chunked content, false
-+   * otherwise.
-+   */
-+  bool is_handling_chunked_content() const;
-   /// Check if producer is original (to ATS) source of data.
-   /// @return @c true if this producer is the source of bytes from outside ATS.
-   bool is_source() const;
-@@ -301,10 +393,12 @@ class HttpTunnel : public Continuation
-   /// A named variable for the @a drop_chunked_trailers parameter to @a set_producer_chunking_action.
-   static constexpr bool DROP_CHUNKED_TRAILERS = true;
- 
--  /** Configure how the producer should behave with chunked content.
--   * @param[in] p Producer to configure.
--   * @param[in] skip_bytes Number of bytes to skip at the beginning of the stream (typically the headers).
--   * @param[in] action Action to take with the chunked content.
-+  /** Designate chunking behavior to the producer.
-+   *
-+   * @param[in] producer The producer being configured.
-+   * @param[in] skip_bytes The number of bytes to consume off the stream before
-+   * any chunked data is encountered. These are generally header bytes, if any.
-+   * @param[in] action The chunking behavior to enact on incoming bytes.
-    * @param[in] drop_chunked_trailers If @c true, chunked trailers are filtered
-    * out. Logically speaking, this is only applicable when proxying chunked
-    * content, thus only when @a action is @c TCA_PASSTHRU_CHUNKED_CONTENT.
-@@ -388,6 +482,21 @@
- 
-   /// Corresponds to proxy.config.http.drop_chunked_trailers having a value of 1.
-   bool http_drop_chunked_trailers = false;
-+
-+  /** The number of body bytes processed in this last execution of the tunnel.
-+   *
-+   * This accounting is used to determine how many bytes to copy into the body
-+   * buffer via HttpSM::postbuf_copy_partial_data.
-+   */
-+  int64_t body_bytes_to_copy = 0;
-+
-+  /** The cumulative number of bytes that all calls to
-+   * HttpSM::post_copy_partial_data have copied.
-+   *
-+   * This is recorded so we don't copy more bytes than the creator of the tunnel
-+   * told us to via nbytes.
-+   */
-+  int64_t body_bytes_copied = 0;
- };
- 
- ////
-@@ -542,7 +651,7 @@ HttpTunnel::append_message_to_producer_buffer(HttpTunnelProducer *p, const char
-   }
- 
-   p->read_buffer->write(msg, msg_len);
--  p->nbytes     += msg_len;
-+  p->total_bytes += msg_len;
-   p->bytes_read += msg_len;
- }
- 
-@@ -609,6 +718,12 @@ HttpTunnelConsumer::is_sink() const
-   return HT_HTTP_CLIENT == vc_type || HT_CACHE_WRITE == vc_type;
- }
- 
-+inline bool
-+HttpTunnelProducer::is_handling_chunked_content() const
-+{
-+  return do_chunking || do_dechunking || do_chunked_passthru;
-+}
-+
- inline bool
- HttpTunnelProducer::is_source() const
- {
-diff --git a/proxy/http2/Http2Stream.cc b/proxy/http2/Http2Stream.cc
-index 6a4d29584a6..cd5f0a3704c 100644
---- a/proxy/http2/Http2Stream.cc
-+++ b/proxy/http2/Http2Stream.cc
-@@ -1033,6 +1033,12 @@ Http2Stream::get_transaction_priority_dependence() const
-   }
- }
- 
-+bool
-+Http2Stream::is_read_closed() const
-+{
-+  return this->recv_end_stream;
-+}
-+
- int64_t
- Http2Stream::read_vio_read_avail()
- {
-diff --git a/proxy/http2/Http2Stream.h b/proxy/http2/Http2Stream.h
-index 03d9decea09..e3e9f583232 100644
---- a/proxy/http2/Http2Stream.h
-+++ b/proxy/http2/Http2Stream.h
-@@ -106,6 +106,7 @@ class Http2Stream : public ProxyTransaction
-   int get_transaction_id() const override;
-   int get_transaction_priority_weight() const override;
-   int get_transaction_priority_dependence() const override;
-+  bool is_read_closed() const override;
- 
-   void clear_io_events();
- 
-diff --git a/src/traffic_server/FetchSM.cc b/src/traffic_server/FetchSM.cc
-index 788d5335ac8..19303b7e03d 100644
---- a/src/traffic_server/FetchSM.cc
-+++ b/src/traffic_server/FetchSM.cc
-@@ -23,6 +23,7 @@
- 
- #include "tscore/ink_config.h"
- #include "FetchSM.h"
-+#include
- #include
- #include "HTTP.h"
- #include "PluginVC.h"
-@@ -229,7 +230,7 @@ FetchSM::dechunk_body()
-   //   - TS_FETCH_EVENT_EXT_BODY_READY.
-   //   - TS_FETCH_EVENT_EXT_BODY_DONE.
-   //
--  if (chunked_handler.process_chunked_content()) {
-+  if (auto const [_, done] = chunked_handler.process_chunked_content(); done) {
-     return TS_FETCH_EVENT_EXT_BODY_DONE;
-   }
- 
-diff --git a/tests/gold_tests/pipeline/pipeline.test.py b/tests/gold_tests/pipeline/pipeline.test.py
-new file mode 100644
-index 00000000000..dea95511fb4
---- /dev/null
-+++ b/tests/gold_tests/pipeline/pipeline.test.py
-@@ -0,0 +1,137 @@
-+'''Test pipelined requests.'''
-+
-+# Licensed to the Apache Software Foundation (ASF) under one
-+# or more contributor license agreements.  See the NOTICE file
-+# distributed with this work for additional information
-+# regarding copyright ownership.  The ASF licenses this file
-+# to you under the Apache License, Version 2.0 (the
-+# "License"); you may not use this file except in compliance
-+# with the License.  You may obtain a copy of the License at
-+#
-+#     http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
-+
-+from ports import get_port
-+import sys
-+
-+Test.Summary = '''Test pipelined requests.'''
-+
-+IP_ALLOW_CONTENT = '''
-+ip_allow:
-+  - apply: in
-+    ip_addrs: 0/0
-+    action: deny
-+    methods:
-+    - DELETE
-+'''
-+
-+
-+class TestPipelining:
-+    """Verify that a set of pipelined requests is handled correctly."""
-+
-+    _client_script: str = 'pipeline_client.py'
-+    _server_script: str = 'pipeline_server.py'
-+
-+    _dns_counter: int = 0
-+    _server_counter: int = 0
-+    _ts_counter: int = 0
-+
-+    def __init__(self, buffer_requests: bool) -> None:
-+        """Configure the TestRun.
-+
-+        :param buffer_requests: Whether to configure ATS to buffer client requests.
-+        """
-+        tr = Test.AddTestRun('Test a pipelined chunked encoded request.')
-+        tr.TimeOut = 10
-+        self._configure_dns(tr)
-+        self._configure_server(tr)
-+        self._configure_traffic_server(tr, buffer_requests)
-+        self._configure_client(tr)
-+
-+    def _configure_dns(self, tr: 'TestRun') -> 'Process':
-+        """Configure the DNS.
-+
-+        :param tr: The test run to associate the DNS process with.
-+        :return: The DNS process.
-+        """
-+        name = f'dns_{self._dns_counter}'
-+        TestPipelining._dns_counter += 1
-+        dns = tr.MakeDNServer(name, default='127.0.0.1')
-+        self._dns = dns
-+        return dns
-+
-+    def _configure_server(self, tr: 'TestRun') -> 'Process':
-+        """Configure the origin server.
-+
-+        :param tr: The test run to associate the origin server with.
-+        :return: The origin server process.
-+        """
-+        name = f'server_{self._server_counter}'
-+        TestPipelining._server_counter += 1
-+        server = tr.Processes.Process(name)
-+        tr.Setup.Copy(self._server_script)
-+        port = get_port(server, "http_port")
-+        server.Command = f'{sys.executable} {self._server_script} 127.0.0.1 {port} '
-+        server.ReturnCode = 0
-+        server.Ready = When.PortOpenv4(port)
-+        server.Streams.All += Testers.ContainsExpression('/first', 'Should receive the first request')
-+        server.Streams.All += Testers.ContainsExpression('/second', 'Should receive the second request')
-+
-+        # The third request should be denied due to the ip_allow.yaml rule.
-+        server.Streams.All += Testers.ExcludesExpression('/third', 'Should not receive the third request')
-+        self._server = server
-+        return server
-+
-+    def _configure_traffic_server(self, tr: 'TestRun', buffer_requests: bool) -> 'Process':
-+        """Configure ATS.
-+
-+        :param tr: The test run to associate the ATS process with.
-+        :param buffer_requests: Whether to enable request_buffer_enabled.
-+        :return: The ATS process.
-+        """
-+        name = f'ts_{self._ts_counter}'
-+        TestPipelining._ts_counter += 1
-+        ts = tr.MakeATSProcess(name, enable_cache=False)
-+        self._ts = ts
-+        ts.Disk.remap_config.AddLine(f'map / http://backend.server.com:{self._server.Variables.http_port}')
-+        ts.Disk.records_config.update(
-+            {
-+                'proxy.config.diags.debug.enabled': 1,
-+                'proxy.config.diags.debug.tags': 'http|ip_allow',
-+                'proxy.config.dns.nameservers': f'127.0.0.1:{self._dns.Variables.Port}',
-+                'proxy.config.dns.resolv_conf': 'NULL',
-+            })
-+        if buffer_requests:
-+            ts.Disk.records_config.update({
-+                'proxy.config.http.request_buffer_enabled': 1,
-+            })
-+        ts.Disk.ip_allow_yaml.AddLines(IP_ALLOW_CONTENT.split("\n"))
-+        return ts
-+
-+    def _configure_client(self, tr: 'TestRun') -> 'Process':
-+        """Configure the client.
-+
-+        :param tr: The test run to associate the client process with.
-+        :return: The client process.
-+        """
-+        client = tr.Processes.Default
-+        tr.Setup.Copy(self._client_script)
-+        client.Command = (f'{sys.executable} {self._client_script} 127.0.0.1 {self._ts.Variables.port} '
-+                          'server.com server.com')
-+        client.ReturnCode = 0
-+        client.Streams.All += Testers.ContainsExpression('X-Response: first', "Should receive the origin's first response.")
-+        client.Streams.All += Testers.ContainsExpression('X-Response: second', "Should receive the origin's second response.")
-+        client.Streams.All += Testers.ExcludesExpression('X-Response: third', "Should not receive the origin's third response.")
-+        client.Streams.All += Testers.ContainsExpression('403', 'Should receive the ATS-generated rejection of the DELETE request.')
-+        client.StartBefore(self._dns)
-+        client.StartBefore(self._server)
-+        client.StartBefore(self._ts)
-+
-+
-+TestPipelining(buffer_requests=False)
-+TestPipelining(buffer_requests=True)
-diff --git a/tests/gold_tests/pipeline/pipeline_client.py b/tests/gold_tests/pipeline/pipeline_client.py
-new file mode 100644
-index 00000000000..6716d8a1ff4
---- /dev/null
-+++ b/tests/gold_tests/pipeline/pipeline_client.py
-@@ -0,0 +1,104 @@
-+#!/usr/bin/env python3
-+"""A client that sends three pipelined GET requests."""
-+
-+# Licensed to the Apache Software Foundation (ASF) under one
-+# or more contributor license agreements.  See the NOTICE file
-+# distributed with this work for additional information
-+# regarding copyright ownership.  The ASF licenses this file
-+# to you under the Apache License, Version 2.0 (the
-+# "License"); you may not use this file except in compliance
-+# with the License.  You may obtain a copy of the License at
-+#
-+#     http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
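(Illustrative aside, not part of the deleted patch: the client script below hand-frames its first request body with chunked transfer coding. A minimal Python sketch of that framing, with a hypothetical helper name:

def chunked_body(payload: bytes) -> bytes:
    """Frame a payload as one HTTP/1.1 chunk followed by the last-chunk."""
    return b"%x\r\n" % len(payload) + payload + b"\r\n" + b"0\r\n\r\n"

For example, chunked_body(b"abc") yields b"3\r\nabc\r\n0\r\n\r\n", the same byte shape the replay files earlier in this commit URI-encode as 3%0D%0Aabc%0D%0A0%0D%0A%0D%0A.)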
-+
-+import argparse
-+import socket
-+import sys
-+
-+
-+def parse_args() -> argparse.Namespace:
-+    """Parse the command line arguments."""
-+    parser = argparse.ArgumentParser(description=__doc__)
-+    parser.add_argument("proxy_address", metavar="proxy-address", help="Address of the proxy to connect to.")
-+    parser.add_argument("proxy_port", metavar="proxy-port", type=int, help="The port of the proxy to connect to.")
-+    parser.add_argument('first_hostname', metavar='first-hostname', help='The Host header field value of the first request.')
-+    parser.add_argument('second_hostname', metavar='second-hostname', help='The Host header field value of the second request.')
-+    return parser.parse_args()
-+
-+
-+def open_connection(address: str, port: int) -> socket.socket:
-+    """Open a connection to the desired host.
-+
-+    :param address: The address of the host to connect to.
-+    :param port: The port of the host to connect to.
-+    :return: The socket connected to the host.
-+    """
-+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-+    sock.connect((address, port))
-+    print(f'Connected to {address}:{port}')
-+    return sock
-+
-+
-+def write_pipelined_requests(sock: socket.socket, first_hostname: str, second_hostname: str) -> None:
-+    """Write three pipelined requests to the socket.
-+
-+    :param sock: The socket to write to.
-+    :param first_hostname: The Host header field value of the first request.
-+    :param second_hostname: The Host header field value of the second request.
-+    """
-+    first_request = f'GET /first HTTP/1.1\r\nHost: {first_hostname}\r\nTransfer-Encoding: chunked\r\n\r\n0\r\n\r\n'
-+    # For if we want to test CL first. Leave this out of the final commit.
-+    #first_request = f'GET /first HTTP/1.1\r\nHost: {first_hostname}\r\nContent-Length: 5\r\n\r\n67891'
-+    second_request = f'GET /second HTTP/1.1\r\nHost: {first_hostname}\r\nContent-Length: 5\r\n\r\n12345'
-+    third_request = f'DELETE /third HTTP/1.1\r\nHost: {second_hostname}\r\nContent-Length: 0\r\n\r\n'
-+    pipelined_requests = first_request + second_request + third_request
-+    total = len(first_request) + len(second_request) + len(third_request)
-+    print(
-+        f'Sending three pipelined requests: {len(first_request)} bytes, '
-+        f'{len(second_request)} bytes, and {len(third_request)} bytes: '
-+        f'{total} total bytes')
-+    print(pipelined_requests)
-+    sock.sendall(pipelined_requests.encode())
-+    print()
-+
-+
-+def wait_for_responses(sock: socket.socket, num_responses: int) -> bytes:
-+    """Wait for the responses to be complete.
-+
-+    :param sock: The socket to read from.
-+    :param num_responses: The number of responses to wait for.
-+    :returns: The bytes read off the socket.
-+    """
-+    responses = b""
-+    while True:
-+        data = sock.recv(1024)
-+        if not data:
-+            print("Socket closed.")
-+            break
-+        print(f'Received:\n{data}')
-+        responses += data
-+        if responses.count(b"\r\n\r\n") == num_responses:
-+            break
-+    return responses
-+
-+
-+def main() -> int:
-+    """Send the pipelined requests."""
-+    args = parse_args()
-+    with open_connection(args.proxy_address, args.proxy_port) as s:
-+        write_pipelined_requests(s, args.first_hostname, args.second_hostname)
-+        print("Waiting for responses...")
-+        responses = wait_for_responses(s, 3)
-+    print()
-+    print(f'Received responses:\n{responses.decode()}')
-+    return 0
-+
-+
-+if __name__ == "__main__":
-+    sys.exit(main())
-diff --git a/tests/gold_tests/pipeline/pipeline_server.py b/tests/gold_tests/pipeline/pipeline_server.py
-new file mode 100644
-index 00000000000..cf13fa5696d
---- /dev/null
-+++ b/tests/gold_tests/pipeline/pipeline_server.py
-@@ -0,0 +1,201 @@
-+#!/usr/bin/env python3
-+"""A server that receives possibly pipelined requests."""
-+
-+# Licensed to the Apache Software Foundation (ASF) under one
-+# or more contributor license agreements.  See the NOTICE file
-+# distributed with this work for additional information
-+# regarding copyright ownership.  The ASF licenses this file
-+# to you under the Apache License, Version 2.0 (the
-+# "License"); you may not use this file except in compliance
-+# with the License.  You may obtain a copy of the License at
-+#
-+#     http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
-+
-+import argparse
-+import socket
-+import sys
-+import time
-+"""A flag indicating whether all three requests have been received."""
-+received_third_request: bool = False
-+
-+
-+def parse_args() -> argparse.Namespace:
-+    """Parse the command line arguments.
-+
-+    :returns: The parsed arguments.
-+    """
-+    parser = argparse.ArgumentParser(description=__doc__)
-+    parser.add_argument("address", help="Address to listen on.")
-+    parser.add_argument("port", type=int, help="The port to listen on.")
-+    return parser.parse_args()
-+
-+
-+def get_listening_socket(address: str, port: int) -> socket.socket:
-+    """Create a listening socket.
-+
-+    :param address: The address to listen on.
-+    :param port: The port to listen on.
-+    :returns: A listening socket.
-+    """
-+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-+    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-+    sock.bind((address, port))
-+    sock.listen(1)
-+    return sock
-+
-+
-+def accept_connection(sock: socket.socket) -> socket.socket:
-+    """Accept a connection.
-+
-+    :param sock: The socket to accept a connection on.
-+    :returns: The accepted socket.
-+    """
-+    return sock.accept()[0]
-+
-+
-+def receive_requests(sock: socket.socket) -> None:
-+    """Receive three requests from the client.
-+
-+    :param sock: The socket to read from.
-+ """ -+ global received_third_request -+ -+ all_received_data: bytes = b"" -+ this_request: bytes = b"" -+ first_response_bytes: bytes = b'HTTP/1.1 200 OK\r\nX-Response: first\r\nContent-Length: 0\r\n\r\n' -+ second_response_bytes: bytes = b'HTTP/1.1 200 OK\r\nX-Response: second\r\nContent-Length: 0\r\n\r\n' -+ third_response_bytes: bytes = b'HTTP/1.1 200 OK\r\nX-Response: third\r\nContent-Length: 0\r\n\r\n' -+ processing_first_request: bool = True -+ processing_second_request: bool = False -+ processing_third_request: bool = False -+ -+ # Note that this is very ad-hoc. We expect the first request to be chunked, -+ # the second to have a Content-Length body, and the third, if we receive it, -+ # to have no body. -+ end_of_first_request: bytes = b'\r\n0\r\n\r\n' -+ #end_of_first_request: bytes = b'67891' # < revert this eventually. -+ end_of_second_request: bytes = b'12345' -+ end_of_third_request: bytes = b'\r\n\r\n' -+ while not received_third_request: -+ data = sock.recv(1024) -+ if not data: -+ print("Socket closed.") -+ break -+ print(f'Received:') -+ print(data) -+ this_request += data -+ all_received_data += data -+ while not received_third_request: -+ if processing_first_request: -+ end_of_request_index = this_request.find(end_of_first_request) -+ if end_of_request_index == -1: -+ # Need more data. -+ break -+ print(' Received the first request:') -+ print(f' {this_request[:end_of_request_index + len(end_of_first_request)]}') -+ processing_first_request = False -+ processing_second_request = True -+ # Remove the first request from the buffer. -+ this_request = this_request[end_of_request_index + len(end_of_first_request):] -+ print(' Sending response to the first request:') -+ print(f' {first_response_bytes}') -+ print() -+ time.sleep(0.01) -+ sock.sendall(first_response_bytes) -+ continue -+ -+ elif processing_second_request: -+ end_of_request_index = this_request.find(end_of_second_request) -+ if end_of_request_index == -1: -+ # Need more data. -+ break -+ print(' Received the second request:') -+ print(f' {this_request[:end_of_request_index + len(end_of_second_request)]}') -+ processing_second_request = False -+ processing_third_request = True -+ # Remove the second request from the buffer. -+ this_request = this_request[end_of_request_index + len(end_of_second_request):] -+ print(' Sending response to the second request:') -+ print(f' {second_response_bytes}') -+ print() -+ time.sleep(0.01) -+ sock.sendall(second_response_bytes) -+ continue -+ -+ elif processing_third_request: -+ end_of_request_index = this_request.find(end_of_third_request) -+ if end_of_request_index == -1: -+ # Need more data. -+ break -+ print(' Received the third request:') -+ print(f' {this_request[:end_of_request_index + len(end_of_third_request)]}') -+ processing_third_request = False -+ # Remove the third request from the buffer. -+ this_request = this_request[end_of_request_index + len(end_of_third_request):] -+ print(' Sending response to the third request:') -+ print(f' {third_response_bytes}') -+ print() -+ time.sleep(0.01) -+ sock.sendall(third_response_bytes) -+ received_third_request = True -+ break -+ return all_received_data -+ -+ -+def _run_server_inside_try(address: str, port: int) -> int: -+ """Run the server to handle the pipelined requests. -+ -+ :param address: The address to listen on. -+ :param port: The port to listen on. -+ :return: 0 on success, 1 on failure (appropriate for the command line return -+ code). 
-+ """ -+ with get_listening_socket(address, port) as listening_sock: -+ print(f"Listening on {address}:{port}") -+ -+ read_bytes: bytes = b"" -+ while len(read_bytes) == 0: -+ print('Waiting for a connection.') -+ with accept_connection(listening_sock) as sock: -+ read_bytes = receive_requests(sock) -+ if len(read_bytes) == 0: -+ # This is probably the PortOpenv4 test. Try again. -+ print("No bytes read on this connection. Trying again.") -+ sock.close() -+ continue -+ -+ -+def run_server(address: str, port: int) -> int: -+ """Run the server with exception handling. -+ -+ :param address: The address to listen on. -+ :param port: The port to listen on. -+ :return: 1 if the third request was received (this is bad, we expect it to -+ be denied), 0 if it wasn't. -+ """ -+ -+ try: -+ ret = _run_server_inside_try(address, port) -+ except KeyboardInterrupt: -+ print('Handling KeyboardInterrupt.') -+ -+ return 1 if received_third_request else 0 -+ -+ -+def main() -> int: -+ """Receive pipelined requests.""" -+ args = parse_args() -+ ret = run_server(args.address, args.port) -+ -+ print(f'Done. Third request was received: {received_third_request}') -+ return ret -+ -+ -+if __name__ == "__main__": -+ sys.exit(main()) -diff --git a/tests/gold_tests/redirect/redirect_post.test.py b/tests/gold_tests/redirect/redirect_post.test.py -index ea75a61c1b0..10b21c00b84 100644 ---- a/tests/gold_tests/redirect/redirect_post.test.py -+++ b/tests/gold_tests/redirect/redirect_post.test.py -@@ -79,6 +79,7 @@ - tr = Test.AddTestRun() - tr.Processes.Default.Command = 'touch largefile.txt && truncate largefile.txt -s 50M && curl -H "Expect: " -i http://127.0.0.1:{0}/redirect1 -F "filename=@./largefile.txt" && rm -f largefile.txt'.format( - ts.Variables.port) -+tr.TimeOut = 10 - tr.Processes.Default.StartBefore(ts) - tr.Processes.Default.StartBefore(redirect_serv1) - tr.Processes.Default.StartBefore(redirect_serv2) -diff --git a/tests/gold_tests/timeout/inactive_client_timeout.test.py b/tests/gold_tests/timeout/inactive_client_timeout.test.py -index ea99fd4deca..2dafd845600 100644 ---- a/tests/gold_tests/timeout/inactive_client_timeout.test.py -+++ b/tests/gold_tests/timeout/inactive_client_timeout.test.py -@@ -57,6 +57,13 @@ - # get applied after the request is sent. In other words, a slow to respond server should not - # trigger the client inactivity timeout. 
- tr = Test.AddTestRun("Verify that server delay does not trigger client activity timeout.") --tr.AddVerifierClientProcess("client", replay_file, http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port]) -+client = tr.AddVerifierClientProcess("client", replay_file, http_ports=[ts.Variables.port], https_ports=[ts.Variables.ssl_port]) - tr.Processes.Default.StartBefore(ts) - tr.Processes.Default.StartBefore(server) -+ -+client.Streams.All += Testers.ContainsExpression('x-response: 1', 'Verify that the first response is received') -+client.Streams.All += Testers.ContainsExpression('x-response: 2', 'Verify that the second response is received') -+client.Streams.All += Testers.ContainsExpression('x-response: 3', 'Verify that the third response is received') -+client.Streams.All += Testers.ContainsExpression('x-response: 4', 'Verify that the fourth response is received') -+client.Streams.All += Testers.ContainsExpression('x-response: 5', 'Verify that the fifth response is received') -+client.Streams.All += Testers.ContainsExpression('x-response: 6', 'Verify that the sixth response is received') -diff --git a/tests/gold_tests/timeout/slow_server.yaml b/tests/gold_tests/timeout/slow_server.yaml -index 7a1aeb17244..920b2fc1565 100644 ---- a/tests/gold_tests/timeout/slow_server.yaml -+++ b/tests/gold_tests/timeout/slow_server.yaml -@@ -25,7 +25,6 @@ meta: - - blocks: - - delayed_response: &delayed_response -- server-response: - delay: 3s - - status: 200 -@@ -50,7 +49,11 @@ sessions: - - [ Host, www.no_tls.com ] - - [ uuid, 1 ] - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 1 ] - - proxy-response: - status: 200 -@@ -69,7 +72,11 @@ sessions: - - [ Content-Length, 10 ] - - [ uuid, 2 ] - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 2 ] - - proxy-response: - status: 200 -@@ -92,7 +99,11 @@ sessions: - - [ Host, www.tls.com ] - - [ uuid, 3 ] - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 3 ] - - proxy-response: - status: 200 -@@ -115,7 +126,11 @@ sessions: - - [ Content-Length, 10 ] - - [ uuid, 4 ] - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 4 ] - - proxy-response: - status: 200 -@@ -139,7 +154,11 @@ sessions: - - [ :path, '/path/5' ] - - [ uuid, 5 ] - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 5 ] - - proxy-response: - status: 200 -@@ -165,7 +184,11 @@ sessions: - content: - size: 10 - -- <<: *delayed_response -+ server-response: -+ <<: *delayed_response -+ headers: -+ fields: -+ - [ X-Response, 6 ] - - proxy-response: - status: 200 diff --git a/CVE-2024-38479.patch b/CVE-2024-38479.patch deleted file mode 100644 index 0786d778dd56ff9fe29668680589839571d907dd..0000000000000000000000000000000000000000 --- a/CVE-2024-38479.patch +++ /dev/null @@ -1,129 +0,0 @@ -From b8861231702ac5df7d5de401e82440c1cf20b633 Mon Sep 17 00:00:00 2001 -From: Bryan Call -Date: Tue, 12 Nov 2024 09:51:49 -0800 -Subject: [PATCH] Add matrix params to the cachekey in the cachekey plugin - (#11856) - -Origin: https://github.com/apache/trafficserver/commit/b8861231702ac5df7d5de401e82440c1cf20b633 - ---- - plugins/cachekey/cachekey.cc | 21 +++++++++++++++++++++ - plugins/cachekey/cachekey.h | 1 + - plugins/cachekey/configs.cc | 14 ++++++++++++++ - plugins/cachekey/configs.h | 11 +++++++++++ 
- plugins/cachekey/plugin.cc | 4 ++++ - 5 files changed, 51 insertions(+) - -diff --git a/plugins/cachekey/cachekey.cc b/plugins/cachekey/cachekey.cc -index babc78cc999..38286e7eb28 100644 ---- a/plugins/cachekey/cachekey.cc -+++ b/plugins/cachekey/cachekey.cc -@@ -673,6 +673,27 @@ CacheKey::appendQuery(const ConfigQuery &config) - } - } - -+void -+CacheKey::appendMatrix(const ConfigMatrix &config) -+{ -+ if (config.toBeRemoved()) { -+ return; -+ } -+ -+ const char *matrix; -+ int length; -+ -+ matrix = TSUrlHttpParamsGet(_buf, _url, &length); -+ if (matrix == nullptr || length == 0) { -+ return; -+ } -+ -+ if (matrix && length) { -+ _key.append(";"); -+ _key.append(matrix, length); -+ } -+} -+ - /** - * @brief Append User-Agent header captures specified in the Pattern configuration object. - * -diff --git a/plugins/cachekey/cachekey.h b/plugins/cachekey/cachekey.h -index 0b47e85984d..dc208f93bb4 100644 ---- a/plugins/cachekey/cachekey.h -+++ b/plugins/cachekey/cachekey.h -@@ -63,6 +63,7 @@ class CacheKey - void appendPath(Pattern &pathCapture, Pattern &pathCaptureUri); - void appendHeaders(const ConfigHeaders &config); - void appendQuery(const ConfigQuery &config); -+ void appendMatrix(const ConfigMatrix &config); - void appendCookies(const ConfigCookies &config); - void appendUaCaptures(Pattern &config); - bool appendUaClass(Classifier &classifier); -diff --git a/plugins/cachekey/configs.cc b/plugins/cachekey/configs.cc -index b2bc42d5e70..d6ef13aea68 100644 ---- a/plugins/cachekey/configs.cc -+++ b/plugins/cachekey/configs.cc -@@ -208,6 +208,20 @@ ConfigQuery::name() const - return _NAME; - } - -+bool -+ConfigMatrix::finalize() -+{ -+ _remove = noIncludeExcludeRules(); -+ return true; -+} -+ -+const String ConfigMatrix::_NAME = "matrix parameter"; -+inline const String & -+ConfigMatrix::name() const -+{ -+ return _NAME; -+} -+ - /** - * @briefs finalizes the headers related configuration. - * -diff --git a/plugins/cachekey/configs.h b/plugins/cachekey/configs.h -index e98b69afd48..f5d24bdbe3c 100644 ---- a/plugins/cachekey/configs.h -+++ b/plugins/cachekey/configs.h -@@ -112,6 +112,16 @@ class ConfigQuery : public ConfigElements - static const String _NAME; - }; - -+class ConfigMatrix : public ConfigElements -+{ -+public: -+ bool finalize() override; -+ -+private: -+ const String &name() const override; -+ static const String _NAME; -+}; -+ - /** - * @brief Headers configuration class. - */ -@@ -210,6 +220,7 @@ class Configs - /* Make the following members public to avoid unnecessary accessors */ - ConfigQuery _query; /**< @brief query parameter related configuration */ - ConfigHeaders _headers; /**< @brief headers related configuration */ -+ ConfigMatrix _matrix; /**< @brief matrix parameter related configuration */ - ConfigCookies _cookies; /**< @brief cookies related configuration */ - Pattern _uaCapture; /**< @brief the capture groups and the replacement string used for the User-Agent header capture */ - String _prefix; /**< @brief cache key prefix string */ -diff --git a/plugins/cachekey/plugin.cc b/plugins/cachekey/plugin.cc -index d92c079271a..b863b94a0d5 100644 ---- a/plugins/cachekey/plugin.cc -+++ b/plugins/cachekey/plugin.cc -@@ -64,6 +64,10 @@ setCacheKey(TSHttpTxn txn, Configs *config, TSRemapRequestInfo *rri = nullptr) - if (!config->pathToBeRemoved()) { - cachekey.appendPath(config->_pathCapture, config->_pathCaptureUri); - } -+ -+ /* Append the matrix parameters to the cache key. 
*/ -+ cachekey.appendMatrix(config->_matrix); -+ - /* Append query parameters to the cache key. */ - cachekey.appendQuery(config->_query); - diff --git a/CVE-2024-50305.patch b/CVE-2024-50305.patch deleted file mode 100644 index fb54716988ae76193e5eb23b18289e944e4ee464..0000000000000000000000000000000000000000 --- a/CVE-2024-50305.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 5e39658f7c0bc91613468c9513ba22ede1739d7e Mon Sep 17 00:00:00 2001 -From: "Alan M. Carroll" -Date: Tue, 2 Nov 2021 11:47:09 -0500 -Subject: [PATCH] Tweak MimeHdr::get_host_port_values to not run over the end - of the TextView. (#8468) - -Origin: https://github.com/apache/trafficserver/commit/5e39658f7c0bc91613468c9513ba22ede1739d7e - -Fix for #8461 - -(cherry picked from commit 055ca11c2842a64bf7df8d547515670e1a04afc1) ---- - proxy/hdrs/MIME.cc | 11 +++-------- - src/tscpp/util/unit_tests/test_TextView.cc | 11 +++-------- - 2 files changed, 6 insertions(+), 16 deletions(-) - -diff --git a/proxy/hdrs/MIME.cc b/proxy/hdrs/MIME.cc -index 45c16c386dd..0a55dd06b4d 100644 ---- a/proxy/hdrs/MIME.cc -+++ b/proxy/hdrs/MIME.cc -@@ -2284,20 +2284,15 @@ MIMEHdr::get_host_port_values(const char **host_ptr, ///< Pointer to host. - if (b) { - if ('[' == *b) { - auto idx = b.find(']'); -- if (idx <= b.size() && b[idx + 1] == ':') { -+ if (idx < b.size() - 1 && b[idx + 1] == ':') { - host = b.take_prefix_at(idx + 1); - port = b; - } else { - host = b; - } - } else { -- auto x = b.split_prefix_at(':'); -- if (x) { -- host = x; -- port = b; -- } else { -- host = b; -- } -+ host = b.take_prefix_at(':'); -+ port = b; - } - - if (host) { -diff --git a/src/tscpp/util/unit_tests/test_TextView.cc b/src/tscpp/util/unit_tests/test_TextView.cc -index 8f71e0aa39d..7f365369082 100644 ---- a/src/tscpp/util/unit_tests/test_TextView.cc -+++ b/src/tscpp/util/unit_tests/test_TextView.cc -@@ -275,20 +275,15 @@ TEST_CASE("TextView Affixes", "[libts][TextView]") - auto f_host = [](TextView b, TextView &host, TextView &port) -> void { - if ('[' == *b) { - auto idx = b.find(']'); -- if (idx <= b.size() && b[idx + 1] == ':') { -+ if (idx < b.size() - 1 && b[idx + 1] == ':') { - host = b.take_prefix_at(idx + 1); - port = b; - } else { - host = b; - } - } else { -- auto x = b.split_prefix_at(':'); -- if (x) { -- host = x; -- port = b; -- } else { -- host = b; -- } -+ host = b.take_prefix_at(':'); -+ port = b; - } - }; - diff --git a/CVE-2024-50306.patch b/CVE-2024-50306.patch deleted file mode 100644 index a97a93347d10947ae3412058ff1ff9e40d3dd4d4..0000000000000000000000000000000000000000 --- a/CVE-2024-50306.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 27f504883547502b1f5e4e389edd7f26e3ab246f Mon Sep 17 00:00:00 2001 -From: Masakazu Kitajo -Date: Tue, 12 Nov 2024 11:13:59 -0700 -Subject: [PATCH] Fix unchecked return value of initgroups() (#11855) - -Origin: https://github.com/apache/trafficserver/commit/27f504883547502b1f5e4e389edd7f26e3ab246f - -* Fix unchecked return value of initgroups() - -Signed-off-by: Jeffrey Bencteux - -* clang-format - ---------- - -Signed-off-by: Jeffrey Bencteux -Co-authored-by: Jeffrey Bencteux -(cherry picked from commit ae638096e259121d92d46a9f57026a5ff5bc328b) ---- - src/tscore/ink_cap.cc | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/tscore/ink_cap.cc b/src/tscore/ink_cap.cc -index b4f0ecace5d..8a95d4b1329 100644 ---- a/src/tscore/ink_cap.cc -+++ b/src/tscore/ink_cap.cc -@@ -160,7 +160,9 @@ impersonate(const struct passwd *pwd, ImpersonationLevel level) - #endif - - // Always repopulate the 
supplementary group list for the new user. -- initgroups(pwd->pw_name, pwd->pw_gid); -+ if (initgroups(pwd->pw_name, pwd->pw_gid) != 0) { -+ Fatal("switching to user %s, failed to initialize supplementary groups ID %ld", pwd->pw_name, (long)pwd->pw_gid); -+ } - - switch (level) { - case IMPERSONATE_PERMANENT: diff --git a/CVE-2024-53868.patch b/CVE-2024-53868.patch deleted file mode 100644 index c2feb12c3cae03b4a5d6703def91d0ab8e8ac442..0000000000000000000000000000000000000000 --- a/CVE-2024-53868.patch +++ /dev/null @@ -1,550 +0,0 @@ -From 3d2f29c88f9b073cb0fd3b9c7f85430e2170acbb Mon Sep 17 00:00:00 2001 -From: Masakazu Kitajo -Date: Tue, 1 Apr 2025 12:15:16 -0600 -Subject: [PATCH] Require the use of CRLF in chunked message body (#12150) - -* Require the use of CRLF in chunked message body - -* Fix docs ---- - doc/admin-guide/files/records.config.en.rst | 9 +++ - .../functions/TSHttpOverridableConfig.en.rst | 1 + - .../api/types/TSOverridableConfigKey.en.rst | 1 + - include/ts/apidefs.h.in | 1 + - mgmt/RecordsConfig.cc | 2 + - plugins/lua/ts_lua_http_config.c | 2 + - proxy/http/HttpConfig.cc | 2 + - proxy/http/HttpConfig.h | 1 + - proxy/http/HttpSM.cc | 28 +++++--- - proxy/http/HttpTunnel.cc | 72 ++++++++++++------- - proxy/http/HttpTunnel.h | 15 +++- - src/shared/overridable_txn_vars.cc | 1 + - src/traffic_server/FetchSM.cc | 3 +- - src/traffic_server/InkAPI.cc | 3 + - src/traffic_server/InkAPITest.cc | 3 +- - .../malformed_chunked_header.replay.yaml | 44 ++++++++++++ - 16 files changed, 150 insertions(+), 38 deletions(-) - -diff --git a/doc/admin-guide/files/records.config.en.rst b/doc/admin-guide/files/records.config.en.rst -index b81510db69d..7db9e6a9f66 100644 ---- a/doc/admin-guide/files/records.config.en.rst -+++ b/doc/admin-guide/files/records.config.en.rst -@@ -987,6 +987,15 @@ mptcp - for details about chunked trailers. By default, this option is disabled - and therefore |TS| will not drop chunked trailers. - -+.. ts:cv:: CONFIG proxy.config.http.strict_chunk_parsing INT 1 -+ :reloadable: -+ :overridable: -+ -+ Specifies whether |TS| strictly checks errors in chunked message body. -+ If enabled (``1``), |TS| returns 400 Bad Request if chunked message body is -+ not compliant with RFC 9112. If disabled (``0``), |TS| allows using LF as -+ a line terminator. -+ - .. 
ts:cv:: CONFIG proxy.config.http.send_http11_requests INT 1 - :reloadable: - :overridable: -diff --git a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst -index 2ec29831532..b2b0e231502 100644 ---- a/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst -+++ b/doc/developer-guide/api/functions/TSHttpOverridableConfig.en.rst -@@ -111,6 +111,7 @@ TSOverridableConfigKey Value Config - :c:enumerator:`TS_CONFIG_HTTP_CACHE_WHEN_TO_REVALIDATE` :ts:cv:`proxy.config.http.cache.when_to_revalidate` - :c:enumerator:`TS_CONFIG_HTTP_CHUNKING_ENABLED` :ts:cv:`proxy.config.http.chunking_enabled` - :c:enumerator:`TS_CONFIG_HTTP_CHUNKING_SIZE` :ts:cv:`proxy.config.http.chunking.size` -+:c:enumerator:`TS_CONFIG_HTTP_STRICT_CHUNK_PARSING` :ts:cv:`proxy.config.http.strict_chunk_parsing` - :c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES_DEAD_SERVER` :ts:cv:`proxy.config.http.connect_attempts_max_retries_dead_server` - :c:enumerator:`TS_CONFIG_HTTP_DROP_CHUNKED_TRAILERS` :ts:cv:`proxy.config.http.drop_chunked_trailers` - :c:enumerator:`TS_CONFIG_HTTP_CONNECT_ATTEMPTS_MAX_RETRIES` :ts:cv:`proxy.config.http.connect_attempts_max_retries` -diff --git a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst -index 2d0941efde0..b4291b46579 100644 ---- a/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst -+++ b/doc/developer-guide/api/types/TSOverridableConfigKey.en.rst -@@ -91,6 +91,7 @@ Enumeration Members - .. c:enumerator:: TS_CONFIG_NET_SOCK_PACKET_TOS_OUT - .. c:enumerator:: TS_CONFIG_HTTP_INSERT_AGE_IN_RESPONSE - .. c:enumerator:: TS_CONFIG_HTTP_CHUNKING_SIZE -+.. c:enumerator:: TS_CONFIG_HTTP_STRICT_CHUNK_PARSING - .. c:enumerator:: TS_CONFIG_HTTP_DROP_CHUNKED_TRAILERS - .. c:enumerator:: TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED - .. 
c:enumerator:: TS_CONFIG_HTTP_FLOW_CONTROL_LOW_WATER_MARK -diff --git a/include/ts/apidefs.h.in b/include/ts/apidefs.h.in -index 1641565a1a9..893177c88b9 100644 ---- a/include/ts/apidefs.h.in -+++ b/include/ts/apidefs.h.in -@@ -875,6 +875,7 @@ typedef enum { - TS_CONFIG_HTTP_ENABLE_PARENT_TIMEOUT_MARKDOWNS, - TS_CONFIG_HTTP_DISABLE_PARENT_MARKDOWNS, - TS_CONFIG_HTTP_DROP_CHUNKED_TRAILERS, -+ TS_CONFIG_HTTP_STRICT_CHUNK_PARSING, - TS_CONFIG_LAST_ENTRY - } TSOverridableConfigKey; - -diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc -index ff7fdc0e3c8..e645bb6c6f1 100644 ---- a/mgmt/RecordsConfig.cc -+++ b/mgmt/RecordsConfig.cc -@@ -363,6 +363,8 @@ static const RecordElement RecordsConfig[] = - , - {RECT_CONFIG, "proxy.config.http.drop_chunked_trailers", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, "[0-1]", RECA_NULL} - , -+ {RECT_CONFIG, "proxy.config.http.strict_chunk_parsing", RECD_INT, "1", RECU_DYNAMIC, RR_NULL, RECC_NULL, "[0-1]", RECA_NULL} -+ , - {RECT_CONFIG, "proxy.config.http.flow_control.enabled", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} - , - {RECT_CONFIG, "proxy.config.http.flow_control.high_water", RECD_INT, "0", RECU_DYNAMIC, RR_NULL, RECC_NULL, nullptr, RECA_NULL} -diff --git a/plugins/lua/ts_lua_http_config.c b/plugins/lua/ts_lua_http_config.c -index a25d8ab8c8f..4b22ee94b50 100644 ---- a/plugins/lua/ts_lua_http_config.c -+++ b/plugins/lua/ts_lua_http_config.c -@@ -149,6 +149,7 @@ typedef enum { - TS_LUA_CONFIG_BODY_FACTORY_RESPONSE_SUPPRESSION_MODE = TS_CONFIG_BODY_FACTORY_RESPONSE_SUPPRESSION_MODE, - TS_LUA_CONFIG_ENABLE_PARENT_TIMEOUT_MARKDOWNS = TS_CONFIG_HTTP_ENABLE_PARENT_TIMEOUT_MARKDOWNS, - TS_LUA_CONFIG_DISABLE_PARENT_MARKDOWNS = TS_CONFIG_HTTP_DISABLE_PARENT_MARKDOWNS, -+ TS_LUA_CONFIG_HTTP_STRICT_CHUNK_PARSING = TS_CONFIG_HTTP_STRICT_CHUNK_PARSING, - TS_LUA_CONFIG_LAST_ENTRY = TS_CONFIG_LAST_ENTRY, - } TSLuaOverridableConfigKey; - -@@ -290,6 +291,7 @@ ts_lua_var_item ts_lua_http_config_vars[] = { - TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_BODY_FACTORY_RESPONSE_SUPPRESSION_MODE), - TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_ENABLE_PARENT_TIMEOUT_MARKDOWNS), - TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_DISABLE_PARENT_MARKDOWNS), -+ TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_HTTP_STRICT_CHUNK_PARSING), - TS_LUA_MAKE_VAR_ITEM(TS_LUA_CONFIG_LAST_ENTRY), - }; - -diff --git a/proxy/http/HttpConfig.cc b/proxy/http/HttpConfig.cc -index d5c1c00a283..ca2edee1ee7 100644 ---- a/proxy/http/HttpConfig.cc -+++ b/proxy/http/HttpConfig.cc -@@ -1190,6 +1190,7 @@ HttpConfig::startup() - HttpEstablishStaticConfigByte(c.oride.chunking_enabled, "proxy.config.http.chunking_enabled"); - HttpEstablishStaticConfigLongLong(c.oride.http_chunking_size, "proxy.config.http.chunking.size"); - HttpEstablishStaticConfigByte(c.oride.http_drop_chunked_trailers, "proxy.config.http.drop_chunked_trailers"); -+ HttpEstablishStaticConfigByte(c.oride.http_strict_chunk_parsing, "proxy.config.http.strict_chunk_parsing"); - HttpEstablishStaticConfigByte(c.oride.flow_control_enabled, "proxy.config.http.flow_control.enabled"); - HttpEstablishStaticConfigLongLong(c.oride.flow_high_water_mark, "proxy.config.http.flow_control.high_water"); - HttpEstablishStaticConfigLongLong(c.oride.flow_low_water_mark, "proxy.config.http.flow_control.low_water"); -@@ -1496,6 +1497,7 @@ HttpConfig::reconfigure() - params->oride.keep_alive_enabled_out = INT_TO_BOOL(m_master.oride.keep_alive_enabled_out); - params->oride.chunking_enabled = INT_TO_BOOL(m_master.oride.chunking_enabled); - 
params->oride.http_drop_chunked_trailers = m_master.oride.http_drop_chunked_trailers; -+ params->oride.http_strict_chunk_parsing = m_master.oride.http_strict_chunk_parsing; - params->oride.auth_server_session_private = INT_TO_BOOL(m_master.oride.auth_server_session_private); - - params->oride.http_chunking_size = m_master.oride.http_chunking_size; -diff --git a/proxy/http/HttpConfig.h b/proxy/http/HttpConfig.h -index 6c1763f84e8..53450bdbb25 100644 ---- a/proxy/http/HttpConfig.h -+++ b/proxy/http/HttpConfig.h -@@ -703,6 +703,7 @@ struct OverridableHttpConfigParams { - - MgmtInt http_chunking_size = 4096; // Maximum chunk size for chunked output. - MgmtByte http_drop_chunked_trailers = 0; ///< Whether to drop chunked trailers. -+ MgmtByte http_strict_chunk_parsing = 1; ///< Whether to parse chunked body strictly. - MgmtInt flow_high_water_mark = 0; ///< Flow control high water mark. - MgmtInt flow_low_water_mark = 0; ///< Flow control low water mark. - -diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc -index cdc05461320..c0ba82641e1 100644 ---- a/proxy/http/HttpSM.cc -+++ b/proxy/http/HttpSM.cc -@@ -978,7 +978,8 @@ HttpSM::wait_for_full_body() - p = tunnel.add_producer(ua_entry->vc, post_bytes, buf_start, &HttpSM::tunnel_handler_post_ua, HT_BUFFER_READ, "ua post buffer"); - if (chunked) { - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -- tunnel.set_producer_chunking_action(p, 0, TCA_PASSTHRU_CHUNKED_CONTENT, drop_chunked_trailers); -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, 0, TCA_PASSTHRU_CHUNKED_CONTENT, drop_chunked_trailers, parse_chunk_strictly); - } - ua_entry->in_tunnel = true; - ua_txn->set_inactivity_timeout(HRTIME_SECONDS(t_state.txn_conf->transaction_no_activity_timeout_in)); -@@ -6197,10 +6198,11 @@ HttpSM::do_setup_post_tunnel(HttpVC_t to_vc_type) - // In either case, the server will support chunked (HTTP/1.1) - if (chunked) { - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; - if (ua_txn->is_chunked_encoding_supported()) { -- tunnel.set_producer_chunking_action(p, 0, TCA_PASSTHRU_CHUNKED_CONTENT, drop_chunked_trailers); -+ tunnel.set_producer_chunking_action(p, 0, TCA_PASSTHRU_CHUNKED_CONTENT, drop_chunked_trailers, parse_chunk_strictly); - } else { -- tunnel.set_producer_chunking_action(p, 0, TCA_CHUNK_CONTENT, drop_chunked_trailers); -+ tunnel.set_producer_chunking_action(p, 0, TCA_CHUNK_CONTENT, drop_chunked_trailers, parse_chunk_strictly); - tunnel.set_producer_chunking_size(p, 0); - } - } -@@ -6609,7 +6611,9 @@ HttpSM::setup_cache_read_transfer() - // w/o providing a Content-Length header - if (t_state.client_info.receive_chunked_response) { - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -- tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT, drop_chunked_trailers); -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT, drop_chunked_trailers, -+ parse_chunk_strictly); - tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size); - } - ua_entry->in_tunnel = true; -@@ -6927,8 +6931,10 @@ 
HttpSM::setup_server_transfer_to_transform() - transform_info.entry->in_tunnel = true; - - if (t_state.current.server->transfer_encoding == HttpTransact::CHUNKED_ENCODING) { -- client_response_hdr_bytes = 0; // fixed by YTS Team, yamsat -- tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_DECHUNK_CONTENT, HttpTunnel::DROP_CHUNKED_TRAILERS); -+ client_response_hdr_bytes = 0; // fixed by YTS Team, yamsat -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_DECHUNK_CONTENT, HttpTunnel::DROP_CHUNKED_TRAILERS, -+ parse_chunk_strictly); - } - - return p; -@@ -6968,7 +6974,9 @@ HttpSM::setup_transfer_from_transform() - - if (t_state.client_info.receive_chunked_response) { - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -- tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT, drop_chunked_trailers); -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, TCA_CHUNK_CONTENT, drop_chunked_trailers, -+ parse_chunk_strictly); - tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size); - } - -@@ -7025,7 +7033,8 @@ HttpSM::setup_server_transfer_to_cache_only() - tunnel.add_producer(server_entry->vc, nbytes, buf_start, &HttpSM::tunnel_handler_server, HT_HTTP_SERVER, "http server"); - - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -- tunnel.set_producer_chunking_action(p, 0, action, drop_chunked_trailers); -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, 0, action, drop_chunked_trailers, parse_chunk_strictly); - tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size); - - setup_cache_write_transfer(&cache_sm, server_entry->vc, &t_state.cache_info.object_store, 0, "cache write"); -@@ -7114,7 +7123,8 @@ HttpSM::setup_server_transfer() - } - */ - bool const drop_chunked_trailers = t_state.http_config_param->oride.http_drop_chunked_trailers == 1; -- tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, action, drop_chunked_trailers); -+ bool const parse_chunk_strictly = t_state.http_config_param->oride.http_strict_chunk_parsing == 1; -+ tunnel.set_producer_chunking_action(p, client_response_hdr_bytes, action, drop_chunked_trailers, parse_chunk_strictly); - tunnel.set_producer_chunking_size(p, t_state.txn_conf->http_chunking_size); - return p; - } -diff --git a/proxy/http/HttpTunnel.cc b/proxy/http/HttpTunnel.cc -index 1508179e6b5..e9c0c6eafea 100644 ---- a/proxy/http/HttpTunnel.cc -+++ b/proxy/http/HttpTunnel.cc -@@ -51,27 +51,28 @@ static int const CHUNK_IOBUFFER_SIZE_INDEX = MIN_IOBUFFER_SIZE; - ChunkedHandler::ChunkedHandler() : max_chunk_size(DEFAULT_MAX_CHUNK_SIZE) {} - - void --ChunkedHandler::init(IOBufferReader *buffer_in, HttpTunnelProducer *p, bool drop_chunked_trailers) -+ChunkedHandler::init(IOBufferReader *buffer_in, HttpTunnelProducer *p, bool drop_chunked_trailers, bool parse_chunk_strictly) - { - if (p->do_chunking) { -- init_by_action(buffer_in, ACTION_DOCHUNK, drop_chunked_trailers); -+ init_by_action(buffer_in, ACTION_DOCHUNK, drop_chunked_trailers, parse_chunk_strictly); - } else if (p->do_dechunking) { -- init_by_action(buffer_in, ACTION_DECHUNK, 
drop_chunked_trailers); -+ init_by_action(buffer_in, ACTION_DECHUNK, drop_chunked_trailers, parse_chunk_strictly); - } else { -- init_by_action(buffer_in, ACTION_PASSTHRU, drop_chunked_trailers); -+ init_by_action(buffer_in, ACTION_PASSTHRU, drop_chunked_trailers, parse_chunk_strictly); - } - return; - } - - void --ChunkedHandler::init_by_action(IOBufferReader *buffer_in, Action action, bool drop_chunked_trailers) -+ChunkedHandler::init_by_action(IOBufferReader *buffer_in, Action action, bool drop_chunked_trailers, bool parse_chunk_strictly) - { -- running_sum = 0; -- num_digits = 0; -- cur_chunk_size = 0; -- cur_chunk_bytes_left = 0; -- truncation = false; -- this->action = action; -+ running_sum = 0; -+ num_digits = 0; -+ cur_chunk_size = 0; -+ cur_chunk_bytes_left = 0; -+ truncation = false; -+ this->action = action; -+ this->strict_chunk_parsing = parse_chunk_strictly; - - switch (action) { - case ACTION_DOCHUNK: -@@ -139,7 +140,6 @@ ChunkedHandler::read_size() - { - int64_t bytes_consumed = 0; - bool done = false; -- int cr = 0; - - while (chunked_reader->is_read_avail_more_than(0) && !done) { - const char *tmp = chunked_reader->start(); -@@ -178,36 +178,59 @@ ChunkedHandler::read_size() - done = true; - break; - } else { -- if (ParseRules::is_cr(*tmp)) { -- ++cr; -+ if ((prev_is_cr = ParseRules::is_cr(*tmp)) == true) { -+ ++num_cr; - } - state = CHUNK_READ_SIZE_CRLF; // now look for CRLF - } - } - } else if (state == CHUNK_READ_SIZE_CRLF) { // Scan for a linefeed - if (ParseRules::is_lf(*tmp)) { -+ if (!prev_is_cr) { -+ Debug("http_chunk", "Found an LF without a preceding CR (protocol violation)"); -+ if (strict_chunk_parsing) { -+ state = CHUNK_READ_ERROR; -+ done = true; -+ break; -+ } -+ } - Debug("http_chunk", "read chunk size of %d bytes", running_sum); - cur_chunk_bytes_left = (cur_chunk_size = running_sum); - state = (running_sum == 0) ? 
CHUNK_READ_TRAILER_BLANK : CHUNK_READ_CHUNK; - done = true; -- cr = 0; -+ num_cr = 0; - break; -- } else if (ParseRules::is_cr(*tmp)) { -- if (cr != 0) { -+ } else if ((prev_is_cr = ParseRules::is_cr(*tmp)) == true) { -+ if (num_cr != 0) { - state = CHUNK_READ_ERROR; - done = true; - break; - } -- ++cr; -+ ++num_cr; - } - } else if (state == CHUNK_READ_SIZE_START) { -- if (ParseRules::is_cr(*tmp)) { -- // Skip it -- } else if (ParseRules::is_lf(*tmp) && -- bytes_used <= 2) { // bytes_used should be 2 if it's CRLF, but permit a single LF as well -+ Debug("http_chunk", "CHUNK_READ_SIZE_START 0x%02x", *tmp); -+ if (ParseRules::is_lf(*tmp)) { -+ if (!prev_is_cr) { -+ Debug("http_chunk", "Found an LF without a preceding CR (protocol violation) before chunk size"); -+ if (strict_chunk_parsing) { -+ state = CHUNK_READ_ERROR; -+ done = true; -+ break; -+ } -+ } - running_sum = 0; - num_digits = 0; -+ num_cr = 0; - state = CHUNK_READ_SIZE; -+ } else if ((prev_is_cr = ParseRules::is_cr(*tmp)) == true) { -+ if (num_cr != 0) { -+ Debug("http_chunk", "Found multiple CRs before chunk size"); -+ state = CHUNK_READ_ERROR; -+ done = true; -+ break; -+ } -+ ++num_cr; - } else { // Unexpected character - state = CHUNK_READ_ERROR; - done = true; -@@ -651,9 +674,10 @@ HttpTunnel::deallocate_buffers() - - void - HttpTunnel::set_producer_chunking_action(HttpTunnelProducer *p, int64_t skip_bytes, TunnelChunkingAction_t action, -- bool drop_chunked_trailers) -+ bool drop_chunked_trailers, bool parse_chunk_strictly) - { - this->http_drop_chunked_trailers = drop_chunked_trailers; -+ this->http_strict_chunk_parsing = parse_chunk_strictly; - p->chunked_handler.skip_bytes = skip_bytes; - p->chunking_action = action; - -@@ -878,7 +902,7 @@ HttpTunnel::producer_run(HttpTunnelProducer *p) - // For all the chunking cases, we must only copy bytes as we process them. - body_bytes_to_copy = 0; - -- p->chunked_handler.init(p->buffer_start, p, this->http_drop_chunked_trailers); -+ p->chunked_handler.init(p->buffer_start, p, this->http_drop_chunked_trailers, this->http_strict_chunk_parsing); - - // Copy the header into the chunked/dechunked buffers. - if (p->do_chunking) { -diff --git a/proxy/http/HttpTunnel.h b/proxy/http/HttpTunnel.h -index 3aac38aca68..9b7d1876425 100644 ---- a/proxy/http/HttpTunnel.h -+++ b/proxy/http/HttpTunnel.h -@@ -112,6 +112,8 @@ struct ChunkedHandler { - */ - bool drop_chunked_trailers = false; - -+ bool strict_chunk_parsing = true; -+ - bool truncation = false; - - /** The number of bytes to skip from the reader because they are not body bytes. -@@ -130,6 +132,8 @@ struct ChunkedHandler { - // Chunked header size parsing info. - int running_sum = 0; - int num_digits = 0; -+ int num_cr = 0; -+ bool prev_is_cr = false; - - /// @name Output data. - //@{ -@@ -144,8 +148,8 @@ struct ChunkedHandler { - //@} - ChunkedHandler(); - -- void init(IOBufferReader *buffer_in, HttpTunnelProducer *p, bool drop_chunked_trailers); -- void init_by_action(IOBufferReader *buffer_in, Action action, bool drop_chunked_trailers); -+ void init(IOBufferReader *buffer_in, HttpTunnelProducer *p, bool drop_chunked_trailers, bool strict_parsing); -+ void init_by_action(IOBufferReader *buffer_in, Action action, bool drop_chunked_trailers, bool strict_parsing); - void clear(); - - /// Set the max chunk @a size. -@@ -392,6 +396,7 @@ class HttpTunnel : public Continuation - - /// A named variable for the @a drop_chunked_trailers parameter to @a set_producer_chunking_action. 
- static constexpr bool DROP_CHUNKED_TRAILERS = true; -+ static constexpr bool PARSE_CHUNK_STRICTLY = true; - - /** Designate chunking behavior to the producer. - * -@@ -402,9 +407,10 @@ class HttpTunnel : public Continuation - * @param[in] drop_chunked_trailers If @c true, chunked trailers are filtered - * out. Logically speaking, this is only applicable when proxying chunked - * content, thus only when @a action is @c TCA_PASSTHRU_CHUNKED_CONTENT. -+ * @param[in] parse_chunk_strictly If @c true, no parse error will be allowed - */ - void set_producer_chunking_action(HttpTunnelProducer *p, int64_t skip_bytes, TunnelChunkingAction_t action, -- bool drop_chunked_trailers); -+ bool drop_chunked_trailers, bool parse_chunk_strictly); - /// Set the maximum (preferred) chunk @a size of chunked output for @a producer. - void set_producer_chunking_size(HttpTunnelProducer *producer, int64_t size); - -@@ -483,6 +489,9 @@ class HttpTunnel : public Continuation - /// Corresponds to proxy.config.http.drop_chunked_trailers having a value of 1. - bool http_drop_chunked_trailers = false; - -+ /// Corresponds to proxy.config.http.strict_chunk_parsing having a value of 1. -+ bool http_strict_chunk_parsing = false; -+ - /** The number of body bytes processed in this last execution of the tunnel. - * - * This accounting is used to determine how many bytes to copy into the body -diff --git a/src/shared/overridable_txn_vars.cc b/src/shared/overridable_txn_vars.cc -index 1a5d740794a..f8c6e6e58ea 100644 ---- a/src/shared/overridable_txn_vars.cc -+++ b/src/shared/overridable_txn_vars.cc -@@ -31,6 +31,7 @@ const std::unordered_mapinit_by_action(resp_reader, ChunkedHandler::ACTION_DECHUNK, HttpTunnel::DROP_CHUNKED_TRAILERS); -+ ch->init_by_action(resp_reader, ChunkedHandler::ACTION_DECHUNK, HttpTunnel::DROP_CHUNKED_TRAILERS, -+ HttpTunnel::PARSE_CHUNK_STRICTLY); - ch->dechunked_reader = ch->dechunked_buffer->alloc_reader(); - ch->state = ChunkedHandler::CHUNK_READ_SIZE; - resp_reader->dealloc(); -diff --git a/src/traffic_server/InkAPI.cc b/src/traffic_server/InkAPI.cc -index 71adb94d0cc..40bc7608ffc 100644 ---- a/src/traffic_server/InkAPI.cc -+++ b/src/traffic_server/InkAPI.cc -@@ -8928,6 +8928,9 @@ _conf_to_memberp(TSOverridableConfigKey conf, OverridableHttpConfigParams *overr - case TS_CONFIG_HTTP_DROP_CHUNKED_TRAILERS: - ret = _memberp_to_generic(&overridableHttpConfig->http_drop_chunked_trailers, conv); - break; -+ case TS_CONFIG_HTTP_STRICT_CHUNK_PARSING: -+ ret = _memberp_to_generic(&overridableHttpConfig->http_strict_chunk_parsing, conv); -+ break; - case TS_CONFIG_HTTP_FLOW_CONTROL_ENABLED: - ret = _memberp_to_generic(&overridableHttpConfig->flow_control_enabled, conv); - break; -diff --git a/src/traffic_server/InkAPITest.cc b/src/traffic_server/InkAPITest.cc -index a6e7217291a..0e0cac586be 100644 ---- a/src/traffic_server/InkAPITest.cc -+++ b/src/traffic_server/InkAPITest.cc -@@ -8774,7 +8774,8 @@ std::array SDK_Overridable_Configs = { - "proxy.config.body_factory.response_suppression_mode", - "proxy.config.http.parent_proxy.enable_parent_timeout_markdowns", - "proxy.config.http.parent_proxy.disable_parent_markdowns", -- "proxy.config.http.drop_chunked_trailers"}}; -+ "proxy.config.http.drop_chunked_trailers", -+ "proxy.config.http.strict_chunk_parsing"}}; - - extern ClassAllocator httpSMAllocator; - -diff --git a/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml b/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml -index 1118036b3c8..7c0ccb9a47f 
100644 ---- a/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml -+++ b/tests/gold_tests/chunked_encoding/replays/malformed_chunked_header.replay.yaml -@@ -98,6 +98,27 @@ sessions: - server-response: - status: 200 - -+- transactions: -+ - client-request: -+ method: "POST" -+ version: "1.1" -+ url: /malformed/chunk/header3 -+ headers: -+ fields: -+ - [ Host, example.com ] -+ - [ Transfer-Encoding, chunked ] -+ - [ uuid, 5 ] -+ content: -+ transfer: plain -+ encoding: uri -+ # Chunk header must end with a sequence of CRLF. -+ data: 7;x%0Aabcwxyz%0D%0A0%0D%0A%0D%0A -+ -+ # The connection will be dropped and this response will not go out. -+ server-response: -+ status: 200 -+ -+ - # - # Now repeat the above two malformed chunk header tests, but on the server - # side. -@@ -193,3 +214,26 @@ sessions: - encoding: uri - # BWS cannot have CR - data: 3%0D%0D%0Adef%0D%0A0%0D%0A%0D%0A -+ -+- transactions: -+ - client-request: -+ method: "GET" -+ version: "1.1" -+ url: /response/malformed/chunk/size2 -+ headers: -+ fields: -+ - [ Host, example.com ] -+ - [ uuid, 105 ] -+ -+ # The connection will be dropped and this response will not go out. -+ server-response: -+ status: 200 -+ reason: OK -+ headers: -+ fields: -+ - [ Transfer-Encoding, chunked ] -+ content: -+ transfer: plain -+ encoding: uri -+ # Chunk header must end with a sequence of CRLF. -+ data: 3;x%0Adef%0D%0A0%0D%0A%0D%0A diff --git a/CVE-2024-56195.patch b/CVE-2024-56195.patch deleted file mode 100644 index 4289ed7fe115f17ce243ef49ae10f4c4c323ff76..0000000000000000000000000000000000000000 --- a/CVE-2024-56195.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 483f84ea4ae2511834abd90014770b27a5082a4c Mon Sep 17 00:00:00 2001 -From: Chris McFarlen -Date: Tue, 4 Mar 2025 13:33:06 -0600 -Subject: [PATCH] Fix intercept plugin ignoring ACL (#12077) - -(cherry picked from commit 8d678fa21e4676f8491e18094d1cd5fcb455d522) - -Co-authored-by: Chris McFarlen ---- - proxy/http/HttpTransact.cc | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc -index 0109f62dd1b..115e15f93e5 100644 ---- a/proxy/http/HttpTransact.cc -+++ b/proxy/http/HttpTransact.cc -@@ -1174,6 +1174,15 @@ HttpTransact::EndRemapRequest(State *s) - obj_describe(s->hdr_info.client_request.m_http, true); - } - -+ // If the client failed ACLs, send error response -+ // This extra condition was added to separate it from the logic below that might allow -+ // requests that use some types of plugins as that code was allowing requests that didn't -+ // pass ACL checks. ACL mismatches are also not counted as invalid client requests -+ if (!s->client_connection_enabled) { -+ TxnDebug("http_trans", "END HttpTransact::EndRemapRequest: connection not allowed"); -+ TRANSACT_RETURN(SM_ACTION_SEND_ERROR_CACHE_NOOP, nullptr); -+ } -+ - /* - if s->reverse_proxy == false, we can assume remapping failed in some way - -however- diff --git a/CVE-2024-56202.patch b/CVE-2024-56202.patch deleted file mode 100644 index 668eb73b775ad55cbd1815222c0ce3fd6fc7699e..0000000000000000000000000000000000000000 --- a/CVE-2024-56202.patch +++ /dev/null @@ -1,429 +0,0 @@ -From 1cca4a29520f9258be6c3fad5092939dbe9d3562 Mon Sep 17 00:00:00 2001 -From: Bryan Call -Date: Tue, 4 Mar 2025 11:39:32 -0800 -Subject: [PATCH] Fix send 100 Continue optimization for GET (#12075) - -This fixes a bug with the proxy.config.http.send_100_continue_response -feature for GET requests in which the following would happen: - -1. 
We do not send the optimized 100 Continue out of proxy for GET
- requests with Expect: 100-Continue. This is reasonable since the vast
- majority of 100-Continue requests will be POST.
-2. Later, we inspect the proxy.config.http.send_100_continue_response
- value and assume we did send a 100 Continue response and stripped the
- Expect: 100-Continue header from the proxied request. This is
- disastrous as it leaves the server waiting for a body which would
- never come because the client is waiting for a 100 Continue response
- which will never come.
-
-(cherry picked from commit 33b7c7c161c453d6b43c9aecbb7351ad8326c28d)
-
-Co-authored-by: Brian Neradt
----
- proxy/hdrs/HTTP.h | 1 +
- proxy/http/HttpSM.cc | 1 +
- proxy/http/HttpTransact.cc | 2 +-
- tests/gold_tests/post/expect_client.py | 110 ++++++++++++++++++
- tests/gold_tests/post/expect_tests.test.py | 88 ++++++++++++++
- tests/gold_tests/post/http_utils.py | 93 +++++++++++++++
- .../post/replay/expect-continue.replay.yaml | 42 +++++++
- 7 files changed, 336 insertions(+), 1 deletion(-)
- create mode 100644 tests/gold_tests/post/expect_client.py
- create mode 100644 tests/gold_tests/post/expect_tests.test.py
- create mode 100644 tests/gold_tests/post/http_utils.py
- create mode 100644 tests/gold_tests/post/replay/expect-continue.replay.yaml
-
-diff --git a/proxy/hdrs/HTTP.h b/proxy/hdrs/HTTP.h
-index 710fbaf00f4..3daa172f1c7 100644
---- a/proxy/hdrs/HTTP.h
-+++ b/proxy/hdrs/HTTP.h
-@@ -480,6 +480,7 @@ class HTTPHdr : public MIMEHdr
- mutable int m_port = 0; ///< Target port.
- mutable bool m_target_cached = false; ///< Whether host name and port are cached.
- mutable bool m_target_in_url = false; ///< Whether host name and port are in the URL.
-+ mutable bool m_100_continue_sent = false; ///< Whether ATS sent a 100 Continue optimized response.
- mutable bool m_100_continue_required = false; ///< Whether 100_continue is in the Expect header.
- /// Set if the port was effectively specified in the header.
- /// @c true if the target (in the URL or the HOST field) also specified -diff --git a/proxy/http/HttpSM.cc b/proxy/http/HttpSM.cc -index 4220e455af8..4e09795f036 100644 ---- a/proxy/http/HttpSM.cc -+++ b/proxy/http/HttpSM.cc -@@ -900,6 +900,7 @@ HttpSM::state_read_client_request_header(int event, void *data) - SMDebug("http_seq", "send 100 Continue response to client"); - int64_t nbytes = ua_entry->write_buffer->write(str_100_continue_response, len_100_continue_response); - ua_entry->write_vio = ua_txn->do_io_write(this, nbytes, buf_start); -+ t_state.hdr_info.client_request.m_100_continue_sent = true; - } else { - t_state.hdr_info.client_request.m_100_continue_required = true; - } -diff --git a/proxy/http/HttpTransact.cc b/proxy/http/HttpTransact.cc -index 115e15f93e5..31810e45d14 100644 ---- a/proxy/http/HttpTransact.cc -+++ b/proxy/http/HttpTransact.cc -@@ -7877,7 +7877,7 @@ HttpTransact::build_request(State *s, HTTPHdr *base_request, HTTPHdr *outgoing_r - } - } - -- if (s->http_config_param->send_100_continue_response) { -+ if (s->hdr_info.client_request.m_100_continue_sent) { - HttpTransactHeaders::remove_100_continue_headers(s, outgoing_request); - TxnDebug("http_trans", "request expect 100-continue headers removed"); - } -diff --git a/tests/gold_tests/post/expect_client.py b/tests/gold_tests/post/expect_client.py -new file mode 100644 -index 00000000000..d419f8c339b ---- /dev/null -+++ b/tests/gold_tests/post/expect_client.py -@@ -0,0 +1,110 @@ -+#!/usr/bin/env python3 -+"""Implements a client which tests Expect: 100-Continue.""" -+ -+# Licensed to the Apache Software Foundation (ASF) under one -+# or more contributor license agreements. See the NOTICE file -+# distributed with this work for additional information -+# regarding copyright ownership. The ASF licenses this file -+# to you under the Apache License, Version 2.0 (the -+# "License"); you may not use this file except in compliance -+# with the License. You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+from http_utils import (wait_for_headers_complete, determine_outstanding_bytes_to_read, drain_socket) -+ -+import argparse -+import socket -+import sys -+ -+ -+def parse_args() -> argparse.Namespace: -+ """Parse the command line arguments. -+ -+ :return: The parsed arguments. -+ """ -+ parser = argparse.ArgumentParser() -+ parser.add_argument("proxy_address", help="Address of the proxy to connect to.") -+ parser.add_argument("proxy_port", type=int, help="The port of the proxy to connect to.") -+ parser.add_argument( -+ '-s', -+ '--server-hostname', -+ dest="server_hostname", -+ default="some.server.com", -+ help="The hostname of the server to connect to.") -+ return parser.parse_args() -+ -+ -+def open_connection(address: str, port: int) -> socket.socket: -+ """Open a connection to the desired host. -+ -+ :param address: The address of the host to connect to. -+ :param port: The port of the host to connect to. -+ :return: The socket connected to the host. 
-+ """ -+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) -+ sock.connect((address, port)) -+ print(f'Connected to {address}:{port}') -+ return sock -+ -+ -+def send_expect_request(sock: socket.socket, server_hostname: str) -> None: -+ """Send an Expect: 100-Continue request. -+ -+ :param sock: The socket to send the request on. -+ :param server_hostname: The hostname of the server to connect to. -+ """ -+ # Send the POST request. -+ host_header: bytes = f'Host: {server_hostname}\r\n'.encode() -+ request: bytes = ( -+ b"GET /api/1 HTTP/1.1\r\n" + host_header + b"Connection: keep-alive\r\n" -+ b"Content-Length: 3\r\n" -+ b"uuid: expect\r\n" -+ b"Expect: 100-Continue\r\n" -+ b"\r\n") -+ sock.sendall(request) -+ print('Sent Expect: 100-Continue request:') -+ print(request.decode()) -+ drain_response(sock) -+ print('Got 100-Continue response.') -+ sock.sendall(b'rst') -+ print('Sent "rst" body.') -+ -+ -+def drain_response(sock: socket.socket) -> None: -+ """Drain the response from the server. -+ -+ :param sock: The socket to read the response from. -+ """ -+ print('Waiting for the response to complete.') -+ read_bytes: bytes = wait_for_headers_complete(sock) -+ try: -+ num_bytes_to_drain: int = determine_outstanding_bytes_to_read(read_bytes) -+ except ValueError: -+ print('No CL found') -+ return -+ if num_bytes_to_drain > 0: -+ drain_socket(sock, read_bytes, num_bytes_to_drain) -+ print('Response complete.') -+ -+ -+def main() -> int: -+ """Run the client.""" -+ args = parse_args() -+ print(args) -+ -+ with open_connection(args.proxy_address, args.proxy_port) as sock: -+ send_expect_request(sock, args.server_hostname) -+ drain_response(sock) -+ print('Done.') -+ return 0 -+ -+ -+if __name__ == '__main__': -+ sys.exit(main()) -diff --git a/tests/gold_tests/post/expect_tests.test.py b/tests/gold_tests/post/expect_tests.test.py -new file mode 100644 -index 00000000000..e6f85cd660c ---- /dev/null -+++ b/tests/gold_tests/post/expect_tests.test.py -@@ -0,0 +1,88 @@ -+''' -+''' -+# Licensed to the Apache Software Foundation (ASF) under one -+# or more contributor license agreements. See the NOTICE file -+# distributed with this work for additional information -+# regarding copyright ownership. The ASF licenses this file -+# to you under the Apache License, Version 2.0 (the -+# "License"); you may not use this file except in compliance -+# with the License. You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+import sys -+ -+ -+class ExpectTest: -+ -+ _expect_client: str = 'expect_client.py' -+ _http_utils: str = 'http_utils.py' -+ _replay_file: str = 'replay/expect-continue.replay.yaml' -+ -+ def __init__(self): -+ tr = Test.AddTestRun('Verify Expect: 100-Continue handling.') -+ self._setup_dns(tr) -+ self._setup_origin(tr) -+ self._setup_trafficserver(tr) -+ self._setup_client(tr) -+ -+ def _setup_dns(self, tr: 'TestRun') -> None: -+ '''Set up the DNS server. -+ -+ :param tr: The TestRun to which to add the DNS server. -+ ''' -+ dns = tr.MakeDNServer('dns', default='127.0.0.1') -+ self._dns = dns -+ -+ def _setup_origin(self, tr: 'TestRun') -> None: -+ '''Set up the origin server. 
-+ -+ :param tr: The TestRun to which to add the origin server. -+ ''' -+ server = tr.AddVerifierServerProcess("server", replay_path=self._replay_file) -+ self._server = server -+ -+ def _setup_trafficserver(self, tr: 'TestRun') -> None: -+ '''Set up the traffic server. -+ -+ :param tr: The TestRun to which to add the traffic server. -+ ''' -+ ts = tr.MakeATSProcess("ts", enable_cache=False) -+ self._ts = ts -+ ts.Disk.remap_config.AddLine(f'map / http://backend.example.com:{self._server.Variables.http_port}') -+ ts.Disk.records_config.update( -+ { -+ 'proxy.config.diags.debug.enabled': 1, -+ 'proxy.config.diags.debug.tags': 'http', -+ 'proxy.config.dns.nameservers': f"127.0.0.1:{self._dns.Variables.Port}", -+ 'proxy.config.dns.resolv_conf': 'NULL', -+ 'proxy.config.http.send_100_continue_response': 1, -+ }) -+ -+ def _setup_client(self, tr: 'TestRun') -> None: -+ '''Set up the client. -+ -+ :param tr: The TestRun to which to add the client. -+ ''' -+ tr.Setup.CopyAs(self._expect_client) -+ tr.Setup.CopyAs(self._http_utils) -+ tr.Processes.Default.Command = \ -+ f'{sys.executable} {self._expect_client} 127.0.0.1 {self._ts.Variables.port} -s example.com' -+ tr.Processes.Default.ReturnCode = 0 -+ tr.Processes.Default.StartBefore(self._dns) -+ tr.Processes.Default.StartBefore(self._server) -+ tr.Processes.Default.StartBefore(self._ts) -+ tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( -+ 'HTTP/1.1 100', 'Verify the 100 Continue response was received.') -+ tr.Processes.Default.Streams.stdout += Testers.ContainsExpression( -+ 'HTTP/1.1 200', 'Verify the 200 OK response was received.') -+ -+ -+Test.Summary = 'Verify Expect: 100-Continue handling.' -+ExpectTest() -diff --git a/tests/gold_tests/post/http_utils.py b/tests/gold_tests/post/http_utils.py -new file mode 100644 -index 00000000000..e1ad4e77fed ---- /dev/null -+++ b/tests/gold_tests/post/http_utils.py -@@ -0,0 +1,93 @@ -+#!/usr/bin/env python3 -+"""Common logic between the ad hoc client and server.""" -+ -+# Licensed to the Apache Software Foundation (ASF) under one -+# or more contributor license agreements. See the NOTICE file -+# distributed with this work for additional information -+# regarding copyright ownership. The ASF licenses this file -+# to you under the Apache License, Version 2.0 (the -+# "License"); you may not use this file except in compliance -+# with the License. You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+import socket -+ -+ -+def wait_for_headers_complete(sock: socket.socket) -> bytes: -+ """Wait for the headers to be complete. -+ -+ :param sock: The socket to read from. -+ :returns: The bytes read off the socket. -+ """ -+ headers = b"" -+ while True: -+ data = sock.recv(1024) -+ if not data: -+ print("Socket closed.") -+ break -+ print(f'Received:\n{data}') -+ headers += data -+ if b"\r\n\r\n" in headers: -+ break -+ return headers -+ -+ -+def determine_outstanding_bytes_to_read(read_bytes: bytes) -> int: -+ """Determine how many more bytes to read from the headers. -+ -+ This parses the Content-Length header to determine how many more bytes to -+ read. -+ -+ :param read_bytes: The bytes read so far. 
-+ :returns: The number of bytes to read, or -1 if it is chunked encoded. -+ """ -+ headers = read_bytes.decode("utf-8").split("\r\n") -+ content_length_value = None -+ for header in headers: -+ if header.lower().startswith("content-length:"): -+ content_length_value = int(header.split(":")[1].strip()) -+ elif header.lower().startswith("transfer-encoding: chunked"): -+ return -1 -+ if content_length_value is None: -+ raise ValueError("No Content-Length header found.") -+ -+ end_of_headers = read_bytes.find(b"\r\n\r\n") -+ if end_of_headers == -1: -+ raise ValueError("No end of headers found.") -+ -+ end_of_headers += 4 -+ return content_length_value - (len(read_bytes) - end_of_headers) -+ -+ -+def drain_socket(sock: socket.socket, previously_read_data: bytes, num_bytes_to_drain: int) -> None: -+ """Read the rest of the transaction. -+ -+ :param sock: The socket to drain. -+ :param num_bytes_to_drain: The number of bytes to drain. If -1, then drain -+ bytes until the final zero-length chunk is read. -+ """ -+ -+ read_data = previously_read_data -+ num_bytes_drained = 0 -+ while True: -+ if num_bytes_to_drain > 0: -+ if num_bytes_drained >= num_bytes_to_drain: -+ break -+ elif b'0\r\n\r\n' == read_data[-5:]: -+ print("Found end of chunked data.") -+ break -+ -+ data = sock.recv(1024) -+ print(f'Received:\n{data}') -+ if not data: -+ print("Socket closed.") -+ break -+ num_bytes_drained += len(data) -+ read_data += data -diff --git a/tests/gold_tests/post/replay/expect-continue.replay.yaml b/tests/gold_tests/post/replay/expect-continue.replay.yaml -new file mode 100644 -index 00000000000..e136b5dfda5 ---- /dev/null -+++ b/tests/gold_tests/post/replay/expect-continue.replay.yaml -@@ -0,0 +1,42 @@ -+# Licensed to the Apache Software Foundation (ASF) under one -+# or more contributor license agreements. See the NOTICE file -+# distributed with this work for additional information -+# regarding copyright ownership. The ASF licenses this file -+# to you under the Apache License, Version 2.0 (the -+# "License"); you may not use this file except in compliance -+# with the License. You may obtain a copy of the License at -+# -+# http://www.apache.org/licenses/LICENSE-2.0 -+# -+# Unless required by applicable law or agreed to in writing, software -+# distributed under the License is distributed on an "AS IS" BASIS, -+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -+# See the License for the specific language governing permissions and -+# limitations under the License. -+ -+# -+# This replay file assumes that caching is enabled and -+# proxy.config.http.cache.ignore_server_no_cache is set to 1(meaning the -+# cache-control directives in responses to bypass the cache is ignored) -+meta: -+ version: "1.0" -+ -+sessions: -+ - transactions: -+ # The client is actually the python script, not Proxy Verifier. 
-diff --git a/tests/gold_tests/post/replay/expect-continue.replay.yaml b/tests/gold_tests/post/replay/expect-continue.replay.yaml
-new file mode 100644
-index 00000000000..e136b5dfda5
---- /dev/null
-+++ b/tests/gold_tests/post/replay/expect-continue.replay.yaml
-@@ -0,0 +1,42 @@
-+# Licensed to the Apache Software Foundation (ASF) under one
-+# or more contributor license agreements. See the NOTICE file
-+# distributed with this work for additional information
-+# regarding copyright ownership. The ASF licenses this file
-+# to you under the Apache License, Version 2.0 (the
-+# "License"); you may not use this file except in compliance
-+# with the License. You may obtain a copy of the License at
-+#
-+# http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS,
-+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+# See the License for the specific language governing permissions and
-+# limitations under the License.
-+
-+#
-+# This replay file assumes that caching is enabled and
-+# proxy.config.http.cache.ignore_server_no_cache is set to 1 (meaning
-+# cache-control directives in responses that would bypass the cache are
-+# ignored).
-+meta:
-+  version: "1.0"
-+
-+sessions:
-+- transactions:
-+    # The client is actually the python script, not Proxy Verifier.
-+  - client-request:
-+      method: "GET"
-+      version: "1.1"
-+      headers:
-+        fields:
-+          - [uuid, expect]
-+          - [Expect, 100-continue]
-+
-+    server-response:
-+      status: 200
-+      reason: OK
-+      headers:
-+        fields:
-+          - [Content-Length, 4]
-+          - [Connection, keep-alive]
-+          - [X-Response, expect]
diff --git a/Invoke-initgroups-iff-we-got-enough-privileges.patch b/Invoke-initgroups-iff-we-got-enough-privileges.patch
deleted file mode 100644
index f0afd57ed9a45332894fae33bfc68d2d80d64161..0000000000000000000000000000000000000000
--- a/Invoke-initgroups-iff-we-got-enough-privileges.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From d4dda9b5583d19e2eee268fec59aa487d61fc079 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Valent=C3=ADn=20Guti=C3=A9rrez?=
-Date: Thu, 21 Nov 2024 03:54:03 +0100
-Subject: [PATCH] Invoke initgroups() iff we got enough privileges (#11869)
- (#11872)
-
-Follow up of #11855, which rendered ATS unusable as root when spawned via traffic_manager.
---
- src/tscore/ink_cap.cc | 6 ++++--
- 1 file changed, 4 insertions(+), 2 deletions(-)
-
-diff --git a/src/tscore/ink_cap.cc b/src/tscore/ink_cap.cc
-index 0f0d6f869e2..f464daad3b1 100644
---- a/src/tscore/ink_cap.cc
-+++ b/src/tscore/ink_cap.cc
-@@ -156,8 +156,10 @@ impersonate(const struct passwd *pwd, ImpersonationLevel level)
- #endif
-
-   // Always repopulate the supplementary group list for the new user.
--  if (initgroups(pwd->pw_name, pwd->pw_gid) != 0) {
--    Fatal("switching to user %s, failed to initialize supplementary groups ID %ld", pwd->pw_name, (long)pwd->pw_gid);
-+  if (geteuid() == 0) { // check that we have enough rights to call initgroups()
-+    if (initgroups(pwd->pw_name, pwd->pw_gid) != 0) {
-+      Fatal("switching to user %s, failed to initialize supplementary groups ID %ld", pwd->pw_name, (long)pwd->pw_gid);
-+    }
-   }
-
-   switch (level) {
diff --git a/add-riscv-support.patch b/add-riscv-support.patch
index c77a40ab3160b84be4eb40ae37d7a6808bce6d52..09950daaeac001104e5e8b0da4b964560b63c141 100644
--- a/add-riscv-support.patch
+++ b/add-riscv-support.patch
@@ -1,16 +1,3 @@
-diff -Nur a/build/_aux/config.guess b/build/_aux/config.guess
---- a/build/_aux/config.guess 2023-10-10 04:36:24.000000000 +0800
-+++ b/build/_aux/config.guess 2023-10-26 15:56:04.249689094 +0800
-@@ -978,6 +978,9 @@
-     aarch64:Linux:*:*)
-         GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
-         ;;
-+    riscv64:Linux:*:*)
-+        GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
-+        ;;
-     aarch64_be:Linux:*:*)
-         UNAME_MACHINE=aarch64_be
-         GUESS=$UNAME_MACHINE-unknown-linux-$LIBC
 diff -Nur a/include/tscore/ink_queue.h b/include/tscore/ink_queue.h
 --- a/include/tscore/ink_queue.h 2023-10-10 04:36:24.000000000 +0800
 +++ b/include/tscore/ink_queue.h 2023-10-26 16:00:51.693855882 +0800
diff --git a/trafficserver-9.2.11.tar.bz2 b/trafficserver-9.2.11.tar.bz2
new file mode 100644
index 0000000000000000000000000000000000000000..97bebd3347308d0bb0e8207a45d08ffab5585443
--- /dev/null
+++ b/trafficserver-9.2.11.tar.bz2
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd8aa6ae8827e21ebffe5036ce45e352128c343169694f1bcd5bda686a1ec22a
+size 10155348
diff --git a/trafficserver-9.2.5.tar.bz2 b/trafficserver-9.2.5.tar.bz2
deleted file mode 100644
index 3a615d015e559cfcd866eb9f50fc9fc4502126b9..0000000000000000000000000000000000000000
Binary files a/trafficserver-9.2.5.tar.bz2 and /dev/null differ
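Because the 9.2.11 tarball is tracked with Git LFS, the repository itself only records the object's SHA-256 and size in the pointer file shown above. A quick sketch for checking a fetched tarball against those pointer values; verify_lfs_object is a hypothetical helper, not part of the package.

import hashlib

# Values copied from the LFS pointer above.
EXPECTED_OID = "fd8aa6ae8827e21ebffe5036ce45e352128c343169694f1bcd5bda686a1ec22a"
EXPECTED_SIZE = 10155348

def verify_lfs_object(path: str) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        # Hash in 1 MiB chunks so large tarballs do not need to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and digest.hexdigest() == EXPECTED_OID

print(verify_lfs_object("trafficserver-9.2.11.tar.bz2"))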
diff --git a/trafficserver.spec b/trafficserver.spec
index e8e8f0b66e2cfc2a8893282501b810a7b68a42cd..e3f3d97fd4dc780aff01789671d97641d3b89280 100644
--- a/trafficserver.spec
+++ b/trafficserver.spec
@@ -1,8 +1,8 @@
 %define _hardened_build 1
 %global vendor %{?_vendor:%{_vendor}}%{!?_vendor:openEuler}
 Name: trafficserver
-Version: 9.2.5
-Release: 5
+Version: 9.2.11
+Release: 1
 Summary: Apache Traffic Server, a reverse, forward and transparent HTTP proxy cache
 License: Apache-2.0
 URL: https://trafficserver.apache.org/
@@ -14,21 +14,12 @@
 Patch0003: config-layout-openEuler.patch
 Patch0004: Modify-storage.config-for-traffic_cache_tool.patch
 Patch0005: add-riscv-support.patch
 Patch0006: add-loong64-support.patch
-Patch0007: CVE-2024-38479.patch
-Patch0008: CVE-2024-50305.patch
-Patch0009: CVE-2024-50306.patch
-Patch0010: Invoke-initgroups-iff-we-got-enough-privileges.patch
-Patch0011: CVE-2024-38311-pre-Do-not-allow-extra-CRs-in-chunks-11936-11942.patch
-Patch0012: CVE-2024-38311.patch
-Patch0013: CVE-2024-56195.patch
-Patch0014: CVE-2024-56202.patch
-Patch0015: CVE-2024-53868.patch
 BuildRequires: expat-devel hwloc-devel openssl-devel pcre-devel zlib-devel xz-devel
 BuildRequires: libcurl-devel ncurses-devel gcc gcc-c++ perl-ExtUtils-MakeMaker
 BuildRequires: libcap-devel cmake libunwind-devel automake chrpath
 Requires: expat hwloc openssl pcre zlib xz libcurl
 Requires: systemd ncurses pkgconfig libcap initscripts
-Requires(postun): systemd
+Requires(postun): systemd
 %description
 Apache Traffic Server is an OpenSource HTTP / HTTPS / HTTP/2 / QUIC reverse, forward and transparent proxy and cache.
@@ -143,9 +134,15 @@
 getent passwd ats >/dev/null || useradd -r -u 176 -g ats -d / -s /sbin/nologin -
 %{_datadir}/pkgconfig/trafficserver.pc
 
 %changelog
-* Mon Apr 07 2025 yaoxin <1024769339@qq.com> - 9.2.5-5
+* Mon Jun 30 2025 yaoxin <1024769339@qq.com> - 9.2.11-1
+- Update to 9.2.11 to fix CVE-2025-49763 and CVE-2025-31698
+
+* Mon Apr 07 2025 yaoxin <1024769339@qq.com> - 9.2.9-2
 - Fix CVE-2024-53868
 
+* Tue Mar 25 2025 wangkai <13474090681@163.com> - 9.2.9-1
+- Update to 9.2.9
+
 * Fri Mar 07 2025 yaoxin <1024769339@qq.com> - 9.2.5-4
 - Fix CVE-2024-38311, CVE-2024-56195 and CVE-2024-56202
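One of the backports dropped from the Patch list above, Invoke-initgroups-iff-we-got-enough-privileges.patch, is removed because 9.2.11 already carries the fix: initgroups() is only invoked when the effective UID is root, since an unprivileged call fails and would trip Fatal(). The same guard expressed in Python, purely as an illustration of the pattern; drop_privileges is a stand-in, not ATS code.

import os
import pwd

def drop_privileges(username: str) -> None:
    entry = pwd.getpwnam(username)
    if os.geteuid() == 0:
        # Only root may repopulate the supplementary group list; an
        # unprivileged initgroups() would fail outright, mirroring the
        # geteuid() check in the deleted patch.
        os.initgroups(entry.pw_name, entry.pw_gid)
        os.setgid(entry.pw_gid)
        os.setuid(entry.pw_uid)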