diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch new file mode 100644 index 0000000000000000000000000000000000000000..035f27c38193440fa2ca5380abd96043dfe06307 --- /dev/null +++ b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch @@ -0,0 +1,85 @@ +From 2040a6943df462ef3fafd220043204ecd08f29dc Mon Sep 17 00:00:00 2001 +From: Jim Jagielski +Date: Thu, 13 Jun 2019 11:08:29 +0000 +Subject: [PATCH 1/5] Merge r1860260 from trunk: + + * modules/http2: more copying of data to disentangle worker processing from main connection + +Submitted by: icing +Reviewed by: icing, covener, jim + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1861247 13f79535-47bb-0310-9956-ffa450edef68 +--- + modules/http2/h2_headers.c | 11 +++++++++-- + modules/http2/h2_headers.h | 8 +++++++- + modules/http2/h2_session.c | 1 + + 3 files changed, 17 insertions(+), 3 deletions(-) + +diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c +index f01ab88..2be9545 100644 +--- a/modules/http2/h2_headers.c ++++ b/modules/http2/h2_headers.c +@@ -101,8 +101,9 @@ apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam, + const apr_bucket *src) + { + if (H2_BUCKET_IS_HEADERS(src)) { +- h2_headers *r = ((h2_bucket_headers *)src->data)->headers; +- apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r); ++ h2_headers *src_headers = ((h2_bucket_headers *)src->data)->headers; ++ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, ++ h2_headers_clone(dest->p, src_headers)); + APR_BRIGADE_INSERT_TAIL(dest, b); + return b; + } +@@ -153,6 +154,12 @@ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h) + apr_table_copy(pool, h->notes), h->raw_bytes, pool); + } + ++h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h) ++{ ++ return h2_headers_create(h->status, apr_table_clone(pool, h->headers), ++ apr_table_clone(pool, h->notes), h->raw_bytes, pool); ++} ++ + h2_headers *h2_headers_die(apr_status_t type, + const h2_request *req, apr_pool_t *pool) + { +diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h +index 840e8c4..b7d95a1 100644 +--- a/modules/http2/h2_headers.h ++++ b/modules/http2/h2_headers.h +@@ -59,12 +59,18 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status, + apr_table_t *header, apr_pool_t *pool); + + /** +- * Clone the headers into another pool. This will not copy any ++ * Copy the headers into another pool. This will not copy any + * header strings. + */ + h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h); + + /** ++ * Clone the headers into another pool. This will also clone any ++ * header strings. ++ */ ++h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h); ++ ++/** + * Create the headers for the given error. 
+ * @param stream_id id of the stream to create the headers for + * @param type the error code +diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c +index a1b31d2..3f0e9c9 100644 +--- a/modules/http2/h2_session.c ++++ b/modules/http2/h2_session.c +@@ -1950,6 +1950,7 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream) + ev_stream_closed(session, stream); + break; + case H2_SS_CLEANUP: ++ nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL); + h2_mplx_stream_cleanup(session->mplx, stream); + break; + default: +-- +1.8.3.1 + diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch new file mode 100644 index 0000000000000000000000000000000000000000..8fb3e9b55a8c01e22a8a01ae94e6230e807ec036 --- /dev/null +++ b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch @@ -0,0 +1,121 @@ +From 04f21f8422dd763da2f09badac965ff03e59aca8 Mon Sep 17 00:00:00 2001 +From: Jim Jagielski +Date: Thu, 13 Jun 2019 11:09:12 +0000 +Subject: [PATCH 2/5] Merge r1707084, r1707093, r1707159, r1707362 from trunk: + +eor_bucket: don't destroy the request multiple times should any filter +do a copy (e.g. mod_bucketeer). + +eor_bucket: follow up to r1707084: fix comment. + +eor_bucket: follow up to r1707084: use an inner shared bucket. + +eor_bucket: follow up to r1707159. +We need an apr_bucket_refcount, as spotted by Ruediger. +Submitted by: ylavic +Reviewed by: icing, covener, jim + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1861248 13f79535-47bb-0310-9956-ffa450edef68 +--- + server/eor_bucket.c | 43 ++++++++++++++++++++++++++++--------------- + 1 file changed, 28 insertions(+), 15 deletions(-) + +diff --git a/server/eor_bucket.c b/server/eor_bucket.c +index 4d3e1ec..ecb809c 100644 +--- a/server/eor_bucket.c ++++ b/server/eor_bucket.c +@@ -19,17 +19,22 @@ + #include "http_protocol.h" + #include "scoreboard.h" + ++typedef struct { ++ apr_bucket_refcount refcount; ++ request_rec *data; ++} ap_bucket_eor; ++ + static apr_status_t eor_bucket_cleanup(void *data) + { +- apr_bucket *b = (apr_bucket *)data; +- request_rec *r = (request_rec *)b->data; ++ request_rec **rp = data; + +- if (r != NULL) { ++ if (*rp) { ++ request_rec *r = *rp; + /* + * If eor_bucket_destroy is called after us, this prevents + * eor_bucket_destroy from trying to destroy the pool again. 
+ */ +- b->data = NULL; ++ *rp = NULL; + /* Update child status and log the transaction */ + ap_update_child_status(r->connection->sbh, SERVER_BUSY_LOG, r); + ap_run_log_transaction(r); +@@ -50,11 +55,13 @@ static apr_status_t eor_bucket_read(apr_bucket *b, const char **str, + + AP_DECLARE(apr_bucket *) ap_bucket_eor_make(apr_bucket *b, request_rec *r) + { +- b->length = 0; +- b->start = 0; +- b->data = r; +- b->type = &ap_bucket_type_eor; ++ ap_bucket_eor *h; ++ ++ h = apr_bucket_alloc(sizeof(*h), b->list); ++ h->data = r; + ++ b = apr_bucket_shared_make(b, h, 0, 0); ++ b->type = &ap_bucket_type_eor; + return b; + } + +@@ -66,7 +73,9 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_create(apr_bucket_alloc_t *list, + APR_BUCKET_INIT(b); + b->free = apr_bucket_free; + b->list = list; ++ b = ap_bucket_eor_make(b, r); + if (r) { ++ ap_bucket_eor *h = b->data; + /* + * Register a cleanup for the request pool as the eor bucket could + * have been allocated from a different pool then the request pool +@@ -76,18 +85,22 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_create(apr_bucket_alloc_t *list, + * We need to use a pre-cleanup here because a module may create a + * sub-pool which is still needed during the log_transaction hook. + */ +- apr_pool_pre_cleanup_register(r->pool, (void *)b, eor_bucket_cleanup); ++ apr_pool_pre_cleanup_register(r->pool, &h->data, eor_bucket_cleanup); + } +- return ap_bucket_eor_make(b, r); ++ return b; + } + + static void eor_bucket_destroy(void *data) + { +- request_rec *r = (request_rec *)data; ++ ap_bucket_eor *h = data; + +- if (r) { +- /* eor_bucket_cleanup will be called when the pool gets destroyed */ +- apr_pool_destroy(r->pool); ++ if (apr_bucket_shared_destroy(h)) { ++ request_rec *r = h->data; ++ if (r) { ++ /* eor_bucket_cleanup will be called when the pool gets destroyed */ ++ apr_pool_destroy(r->pool); ++ } ++ apr_bucket_free(h); + } + } + +@@ -97,6 +110,6 @@ AP_DECLARE_DATA const apr_bucket_type_t ap_bucket_type_eor = { + eor_bucket_read, + apr_bucket_setaside_noop, + apr_bucket_split_notimpl, +- apr_bucket_simple_copy ++ apr_bucket_shared_copy + }; + +-- +1.8.3.1 + diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch new file mode 100644 index 0000000000000000000000000000000000000000..2ca3412fcca315b97c1c0a58e9cd21d4c39bce1f --- /dev/null +++ b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch @@ -0,0 +1,306 @@ +From 1125fc2240353c41db09eac8fedcc75dfdf44edb Mon Sep 17 00:00:00 2001 +From: Jim Jagielski +Date: Wed, 19 Sep 2018 12:55:26 +0000 +Subject: [PATCH 3/5] Merge r1835118 from trunk: + +On the trunk: + + * silencing gcc uninitialized warning + * refrainning from apr_table_addn() use since pool debug assumptions are in conflict + * adding more assertions + * copy-porting changes to base64 encoding code from mod_md + +Submitted by: icing +Reviewed by: icing, minfrin, jim + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1841330 13f79535-47bb-0310-9956-ffa450edef68 +--- + modules/http2/h2_bucket_beam.c | 2 +- + modules/http2/h2_from_h1.c | 4 +- + modules/http2/h2_h2.c | 2 +- + modules/http2/h2_headers.c | 7 ++-- + modules/http2/h2_mplx.c | 4 ++ + modules/http2/h2_proxy_session.c | 4 +- + modules/http2/h2_util.c | 86 +++++++++++++++++++++------------------- + 7 files changed, 58 insertions(+), 51 deletions(-) + +diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c +index 9f6fa82..f79cbe3 100644 +--- a/modules/http2/h2_bucket_beam.c ++++ 
b/modules/http2/h2_bucket_beam.c +@@ -775,7 +775,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam, + const char *data; + apr_size_t len; + apr_status_t status; +- int can_beam, check_len; ++ int can_beam = 0, check_len; + + if (beam->aborted) { + return APR_ECONNABORTED; +diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c +index ae264a9..dd6ad90 100644 +--- a/modules/http2/h2_from_h1.c ++++ b/modules/http2/h2_from_h1.c +@@ -164,7 +164,7 @@ static int copy_header(void *ctx, const char *name, const char *value) + { + apr_table_t *headers = ctx; + +- apr_table_addn(headers, name, value); ++ apr_table_add(headers, name, value); + return 1; + } + +@@ -250,7 +250,7 @@ static h2_headers *create_response(h2_task *task, request_rec *r) + if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) { + char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN); + ap_recent_rfc822_date(date, r->request_time); +- apr_table_addn(r->headers_out, "Expires", date); ++ apr_table_add(r->headers_out, "Expires", date); + } + + /* This is a hack, but I can't find anyway around it. The idea is that +diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c +index dfee6b5..5580cef 100644 +--- a/modules/http2/h2_h2.c ++++ b/modules/http2/h2_h2.c +@@ -694,7 +694,7 @@ static void check_push(request_rec *r, const char *tag) + tag, conf->push_list->nelts); + for (i = 0; i < conf->push_list->nelts; ++i) { + h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res); +- apr_table_addn(r->headers_out, "Link", ++ apr_table_add(r->headers_out, "Link", + apr_psprintf(r->pool, "<%s>; rel=preload%s", + push->uri_ref, push->critical? "; critical" : "")); + } +diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c +index 2be9545..49d9c0a 100644 +--- a/modules/http2/h2_headers.c ++++ b/modules/http2/h2_headers.c +@@ -117,9 +117,9 @@ h2_headers *h2_headers_create(int status, apr_table_t *headers_in, + { + h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers)); + headers->status = status; +- headers->headers = (headers_in? apr_table_copy(pool, headers_in) ++ headers->headers = (headers_in? apr_table_clone(pool, headers_in) + : apr_table_make(pool, 5)); +- headers->notes = (notes? apr_table_copy(pool, notes) ++ headers->notes = (notes? apr_table_clone(pool, notes) + : apr_table_make(pool, 5)); + return headers; + } +@@ -150,8 +150,7 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status, + + h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h) + { +- return h2_headers_create(h->status, apr_table_copy(pool, h->headers), +- apr_table_copy(pool, h->notes), h->raw_bytes, pool); ++ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool); + } + + h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h) +diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c +index 29f040c..db3cb63 100644 +--- a/modules/http2/h2_mplx.c ++++ b/modules/http2/h2_mplx.c +@@ -476,6 +476,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + h2_ihash_iter(m->shold, report_stream_iter, m); + } + } ++ ap_assert(m->tasks_active == 0); + m->join_wait = NULL; + + /* 4. 
close the h2_req_enginge shed */ +@@ -765,6 +766,9 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) + apr_status_t rv = APR_EOF; + + *ptask = NULL; ++ ap_assert(m); ++ ap_assert(m->lock); ++ + if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) { + return rv; + } +diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c +index a077ce1..8389c7c 100644 +--- a/modules/http2/h2_proxy_session.c ++++ b/modules/http2/h2_proxy_session.c +@@ -237,7 +237,7 @@ static int before_frame_send(nghttp2_session *ngh2, + + static int add_header(void *table, const char *n, const char *v) + { +- apr_table_addn(table, n, v); ++ apr_table_add(table, n, v); + return 1; + } + +@@ -361,7 +361,7 @@ static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream) + } + + /* create a "Via:" response header entry and merge it */ +- apr_table_addn(r->headers_out, "Via", ++ apr_table_add(r->headers_out, "Via", + (session->conf->viaopt == via_full) + ? apr_psprintf(p, "%d.%d %s%s (%s)", + HTTP_VERSION_MAJOR(r->proto_num), +diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c +index 3d7ba37..9dacd8b 100644 +--- a/modules/http2/h2_util.c ++++ b/modules/http2/h2_util.c +@@ -115,26 +115,28 @@ void h2_util_camel_case_header(char *s, size_t len) + + /* base64 url encoding ****************************************************************************/ + +-static const int BASE64URL_UINT6[] = { ++#define N6 (unsigned int)-1 ++ ++static const unsigned int BASE64URL_UINT6[] = { + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0 */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 1 */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, /* 2 */ +- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, /* 3 */ +- -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */ +- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, 63, /* 5 */ +- -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */ +- 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, /* 7 */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 8 */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 9 */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* a */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* b */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* c */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* d */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* e */ +- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 /* f */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */ ++ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */ ++ N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */ ++ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */ ++ N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */ ++ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, N6, N6, N6, N6, N6, /* 7 */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 8 */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 9 */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 
a */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* b */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* c */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* d */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* e */ ++ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6 /* f */ + }; +-static const char BASE64URL_CHARS[] = { ++static const unsigned char BASE64URL_CHARS[] = { + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */ + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */ + 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */ +@@ -144,21 +146,23 @@ static const char BASE64URL_CHARS[] = { + '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */ + }; + ++#define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ] ++ + apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, + apr_pool_t *pool) + { + const unsigned char *e = (const unsigned char *)encoded; + const unsigned char *p = e; + unsigned char *d; +- int n; +- apr_size_t len, mlen, remain, i; ++ unsigned int n; ++ long len, mlen, remain, i; + +- while (*p && BASE64URL_UINT6[ *p ] != -1) { ++ while (*p && BASE64URL_UINT6[ *p ] != N6) { + ++p; + } +- len = p - e; ++ len = (int)(p - e); + mlen = (len/4)*4; +- *decoded = apr_pcalloc(pool, len+1); ++ *decoded = apr_pcalloc(pool, (apr_size_t)len + 1); + + i = 0; + d = (unsigned char*)*decoded; +@@ -167,60 +171,60 @@ apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded, + (BASE64URL_UINT6[ e[i+1] ] << 12) + + (BASE64URL_UINT6[ e[i+2] ] << 6) + + (BASE64URL_UINT6[ e[i+3] ])); +- *d++ = n >> 16; +- *d++ = n >> 8 & 0xffu; +- *d++ = n & 0xffu; ++ *d++ = (unsigned char)(n >> 16); ++ *d++ = (unsigned char)(n >> 8 & 0xffu); ++ *d++ = (unsigned char)(n & 0xffu); + } + remain = len - mlen; + switch (remain) { + case 2: + n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) + + (BASE64URL_UINT6[ e[mlen+1] ] << 12)); +- *d++ = n >> 16; ++ *d++ = (unsigned char)(n >> 16); + remain = 1; + break; + case 3: + n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) + + (BASE64URL_UINT6[ e[mlen+1] ] << 12) + + (BASE64URL_UINT6[ e[mlen+2] ] << 6)); +- *d++ = n >> 16; +- *d++ = n >> 8 & 0xffu; ++ *d++ = (unsigned char)(n >> 16); ++ *d++ = (unsigned char)(n >> 8 & 0xffu); + remain = 2; + break; + default: /* do nothing */ + break; + } +- return mlen/4*3 + remain; ++ return (apr_size_t)(mlen/4*3 + remain); + } + + const char *h2_util_base64url_encode(const char *data, + apr_size_t dlen, apr_pool_t *pool) + { +- long i, len = (int)dlen; ++ int i, len = (int)dlen; + apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */ + const unsigned char *udata = (const unsigned char*)data; +- char *enc, *p = apr_pcalloc(pool, slen); ++ unsigned char *enc, *p = apr_pcalloc(pool, slen); + + enc = p; + for (i = 0; i < len-2; i+= 3) { +- *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ]; +- *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ]; +- *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) + (udata[i+2] >> 6)) & 0x3fu ]; +- *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ]; ++ *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); ++ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) ); ++ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) ); ++ *p++ = BASE64URL_CHAR( (udata[i+2]) ); + } + + if (i < len) { +- *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ]; ++ *p++ = BASE64URL_CHAR( (udata[i] >> 2) ); + if (i == (len - 1)) { +- 
*p++ = BASE64URL_CHARS[ (udata[i] << 4) & 0x3fu ]; ++ *p++ = BASE64URL_CHARS[ ((unsigned int)udata[i] << 4) & 0x3fu ]; + } + else { +- *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ]; +- *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) & 0x3fu ]; ++ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) ); ++ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) ); + } + } + *p++ = '\0'; +- return enc; ++ return (char *)enc; + } + + /******************************************************************************* +-- +1.8.3.1 + diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch new file mode 100644 index 0000000000000000000000000000000000000000..e0a47e800829489d835bccd00e3d8a03ae80afea --- /dev/null +++ b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch @@ -0,0 +1,4406 @@ +From a6c8c61510eea98f31961e8220bce4f07f928fd2 Mon Sep 17 00:00:00 2001 +From: Stefan Eissing +Date: Wed, 13 Mar 2019 15:00:57 +0000 +Subject: [PATCH 4/5] Merge of + 1849296,1852038,1852101,1852339,1853171,1853967,1854365,1854963,1854964,1855295,1855411 + from trunk: + + *) mod_http2: when SSL renegotiation is inhibited and a 403 ErrorDocument is + in play, the proper HTTP/2 stream reset did not trigger with H2_ERR_HTTP_1_1_REQUIRED. + Fixed. [Michael Kaufmann] + + *) mod_http2: new configuration directive: `H2Padding numbits` to control + padding of HTTP/2 payload frames. 'numbits' is a number from 0-8, + controlling the range of padding bytes added to a frame. The actual number + added is chosen randomly per frame. This applies to HEADERS, DATA and PUSH_PROMISE + frames equally. The default continues to be 0, e.g. no padding. [Stefan Eissing] + + *) mod_http2: ripping out all the h2_req_engine internal features now that mod_proxy_http2 + has no more need for it. Optional functions are still declared but no longer implemented. + While previous mod_proxy_http2 will work with this, it is recommeneded to run the matching + versions of both modules. [Stefan Eissing] + + *) mod_proxy_http2: changed mod_proxy_http2 implementation and fixed several bugs which + resolve PR63170. The proxy module does now a single h2 request on the (reused) + connection and returns. [Stefan Eissing] + + *) mod_http2/mod_proxy_http2: proxy_http2 checks correct master connection aborted status + to trigger immediate shutdown of backend connections. This is now always signalled + by mod_http2 when the the session is being released. + proxy_http2 now only sends a PING frame to the backend when there is not already one + in flight. [Stefan Eissing] + + *) mod_proxy_http2: fixed an issue where a proxy_http2 handler entered an infinite + loop when encountering certain errors on the backend connection. + See . [Stefan Eissing] + + *) mod_http2: Configuration directives H2Push and H2Upgrade can now be specified per + Location/Directory, e.g. disabling PUSH for a specific set of resources. [Stefan Eissing] + + *) mod_http2: HEAD requests to some module such as mod_cgid caused the stream to + terminate improperly and cause a HTTP/2 PROTOCOL_ERROR. + Fixes . 
[Michael Kaufmann] + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855431 13f79535-47bb-0310-9956-ffa450edef68 +--- + modules/http2/config2.m4 | 1 - + modules/http2/h2.h | 7 +- + modules/http2/h2_alt_svc.c | 12 +- + modules/http2/h2_config.c | 662 ++++++++++++++++++++++++++++----------- + modules/http2/h2_config.h | 63 ++-- + modules/http2/h2_conn.c | 30 +- + modules/http2/h2_conn.h | 8 +- + modules/http2/h2_conn_io.c | 24 +- + modules/http2/h2_conn_io.h | 3 +- + modules/http2/h2_ctx.c | 33 +- + modules/http2/h2_ctx.h | 13 +- + modules/http2/h2_filter.c | 43 ++- + modules/http2/h2_from_h1.c | 12 +- + modules/http2/h2_h2.c | 71 ++--- + modules/http2/h2_h2.h | 16 +- + modules/http2/h2_headers.c | 31 +- + modules/http2/h2_mplx.c | 235 ++------------ + modules/http2/h2_mplx.h | 32 +- + modules/http2/h2_ngn_shed.c | 392 ----------------------- + modules/http2/h2_ngn_shed.h | 79 ----- + modules/http2/h2_proxy_session.c | 57 +--- + modules/http2/h2_proxy_session.h | 3 - + modules/http2/h2_request.c | 33 +- + modules/http2/h2_session.c | 107 ++++--- + modules/http2/h2_session.h | 19 +- + modules/http2/h2_stream.c | 9 +- + modules/http2/h2_switch.c | 13 +- + modules/http2/h2_task.c | 84 +---- + modules/http2/h2_task.h | 14 +- + modules/http2/mod_http2.c | 49 +-- + modules/http2/mod_http2.h | 46 +-- + modules/http2/mod_proxy_http2.c | 384 ++++++----------------- + 32 files changed, 904 insertions(+), 1681 deletions(-) + +diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4 +index e8cefe3..5f49adf 100644 +--- a/modules/http2/config2.m4 ++++ b/modules/http2/config2.m4 +@@ -31,7 +31,6 @@ h2_from_h1.lo dnl + h2_h2.lo dnl + h2_headers.lo dnl + h2_mplx.lo dnl +-h2_ngn_shed.lo dnl + h2_push.lo dnl + h2_request.lo dnl + h2_session.lo dnl +diff --git a/modules/http2/h2.h b/modules/http2/h2.h +index 38b4019..e057d66 100644 +--- a/modules/http2/h2.h ++++ b/modules/http2/h2.h +@@ -48,12 +48,12 @@ extern const char *H2_MAGIC_TOKEN; + #define H2_HEADER_PATH_LEN 5 + #define H2_CRLF "\r\n" + +-/* Max data size to write so it fits inside a TLS record */ +-#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9) +- + /* Size of the frame header itself in HTTP/2 */ + #define H2_FRAME_HDR_LEN 9 + ++/* Max data size to write so it fits inside a TLS record */ ++#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN) ++ + /* Maximum number of padding bytes in a frame, rfc7540 */ + #define H2_MAX_PADLEN 256 + /* Initial default window size, RFC 7540 ch. 
6.5.2 */ +@@ -162,5 +162,6 @@ typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx); + #define H2_FILTER_DEBUG_NOTE "http2-debug" + #define H2_HDR_CONFORMANCE "http2-hdr-conformance" + #define H2_HDR_CONFORMANCE_UNSAFE "unsafe" ++#define H2_PUSH_MODE_NOTE "http2-push-mode" + + #endif /* defined(__mod_h2__h2__) */ +diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c +index 295a16d..2c3253c 100644 +--- a/modules/http2/h2_alt_svc.c ++++ b/modules/http2/h2_alt_svc.c +@@ -75,7 +75,7 @@ h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool) + + static int h2_alt_svc_handler(request_rec *r) + { +- const h2_config *cfg; ++ apr_array_header_t *alt_svcs; + int i; + + if (r->connection->keepalives > 0) { +@@ -87,8 +87,8 @@ static int h2_alt_svc_handler(request_rec *r) + return DECLINED; + } + +- cfg = h2_config_sget(r->server); +- if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) { ++ alt_svcs = h2_config_alt_svcs(r); ++ if (r->hostname && alt_svcs && alt_svcs->nelts > 0) { + const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used"); + if (!alt_svc_used) { + /* We have alt-svcs defined and client is not already using +@@ -99,7 +99,7 @@ static int h2_alt_svc_handler(request_rec *r) + const char *alt_svc = ""; + const char *svc_ma = ""; + int secure = h2_h2_is_tls(r->connection); +- int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE); ++ int ma = h2_config_rgeti(r, H2_CONF_ALT_SVC_MAX_AGE); + if (ma >= 0) { + svc_ma = apr_psprintf(r->pool, "; ma=%d", ma); + } +@@ -107,8 +107,8 @@ static int h2_alt_svc_handler(request_rec *r) + "h2_alt_svc: announce %s for %s:%d", + (secure? "secure" : "insecure"), + r->hostname, (int)r->server->port); +- for (i = 0; i < cfg->alt_svcs->nelts; ++i) { +- h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i); ++ for (i = 0; i < alt_svcs->nelts; ++i) { ++ h2_alt_svc *as = h2_alt_svc_IDX(alt_svcs, i); + const char *ahost = as->host; + if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) { + ahost = NULL; +diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c +index 8766355..29a0b55 100644 +--- a/modules/http2/h2_config.c ++++ b/modules/http2/h2_config.c +@@ -42,6 +42,55 @@ + #define H2_CONFIG_GET(a, b, n) \ + (((a)->n == DEF_VAL)? (b) : (a))->n + ++#define H2_CONFIG_SET(a, n, v) \ ++ ((a)->n = v) ++ ++#define CONFIG_CMD_SET(cmd,dir,var,val) \ ++ h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val) ++ ++#define CONFIG_CMD_SET64(cmd,dir,var,val) \ ++ h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val) ++ ++/* Apache httpd module configuration for h2. 
*/ ++typedef struct h2_config { ++ const char *name; ++ int h2_max_streams; /* max concurrent # streams (http2) */ ++ int h2_window_size; /* stream window size (http2) */ ++ int min_workers; /* min # of worker threads/child */ ++ int max_workers; /* max # of worker threads/child */ ++ int max_worker_idle_secs; /* max # of idle seconds for worker */ ++ int stream_max_mem_size; /* max # bytes held in memory/stream */ ++ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */ ++ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/ ++ int serialize_headers; /* Use serialized HTTP/1.1 headers for ++ processing, better compatibility */ ++ int h2_direct; /* if mod_h2 is active directly */ ++ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */ ++ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ ++ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */ ++ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ ++ int h2_push; /* if HTTP/2 server push is enabled */ ++ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */ ++ ++ int push_diary_size; /* # of entries in push diary */ ++ int copy_files; /* if files shall be copied vs setaside on output */ ++ apr_array_header_t *push_list;/* list of h2_push_res configurations */ ++ int early_hints; /* support status code 103 */ ++ int padding_bits; ++ int padding_always; ++} h2_config; ++ ++typedef struct h2_dir_config { ++ const char *name; ++ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */ ++ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/ ++ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ ++ int h2_push; /* if HTTP/2 server push is enabled */ ++ apr_array_header_t *push_list;/* list of h2_push_res configurations */ ++ int early_hints; /* support status code 103 */ ++} h2_dir_config; ++ ++ + static h2_config defconf = { + "default", + 100, /* max_streams */ +@@ -64,6 +113,18 @@ static h2_config defconf = { + 0, /* copy files across threads */ + NULL, /* push list */ + 0, /* early hints, http status 103 */ ++ 0, /* padding bits */ ++ 1, /* padding always */ ++}; ++ ++static h2_dir_config defdconf = { ++ "default", ++ NULL, /* no alt-svcs */ ++ -1, /* alt-svc max age */ ++ -1, /* HTTP/1 Upgrade support */ ++ -1, /* HTTP/2 server push enabled */ ++ NULL, /* push list */ ++ -1, /* early hints, http status 103 */ + }; + + void h2_config_init(apr_pool_t *pool) +@@ -71,12 +132,10 @@ void h2_config_init(apr_pool_t *pool) + (void)pool; + } + +-static void *h2_config_create(apr_pool_t *pool, +- const char *prefix, const char *x) ++void *h2_config_create_svr(apr_pool_t *pool, server_rec *s) + { + h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config)); +- const char *s = x? 
x : "unknown"; +- char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL); ++ char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL); + + conf->name = name; + conf->h2_max_streams = DEF_VAL; +@@ -98,19 +157,11 @@ static void *h2_config_create(apr_pool_t *pool, + conf->copy_files = DEF_VAL; + conf->push_list = NULL; + conf->early_hints = DEF_VAL; ++ conf->padding_bits = DEF_VAL; ++ conf->padding_always = DEF_VAL; + return conf; + } + +-void *h2_config_create_svr(apr_pool_t *pool, server_rec *s) +-{ +- return h2_config_create(pool, "srv", s->defn_name); +-} +- +-void *h2_config_create_dir(apr_pool_t *pool, char *x) +-{ +- return h2_config_create(pool, "dir", x); +-} +- + static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) + { + h2_config *base = (h2_config *)basev; +@@ -149,25 +200,52 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv) + n->push_list = add->push_list? add->push_list : base->push_list; + } + n->early_hints = H2_CONFIG_GET(add, base, early_hints); ++ n->padding_bits = H2_CONFIG_GET(add, base, padding_bits); ++ n->padding_always = H2_CONFIG_GET(add, base, padding_always); + return n; + } + +-void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv) ++void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv) + { + return h2_config_merge(pool, basev, addv); + } + +-void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv) ++void *h2_config_create_dir(apr_pool_t *pool, char *x) + { +- return h2_config_merge(pool, basev, addv); ++ h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config)); ++ const char *s = x? x : "unknown"; ++ char *name = apr_pstrcat(pool, "dir[", s, "]", NULL); ++ ++ conf->name = name; ++ conf->alt_svc_max_age = DEF_VAL; ++ conf->h2_upgrade = DEF_VAL; ++ conf->h2_push = DEF_VAL; ++ conf->early_hints = DEF_VAL; ++ return conf; + } + +-int h2_config_geti(const h2_config *conf, h2_config_var_t var) ++void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv) + { +- return (int)h2_config_geti64(conf, var); ++ h2_dir_config *base = (h2_dir_config *)basev; ++ h2_dir_config *add = (h2_dir_config *)addv; ++ h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config)); ++ ++ n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL); ++ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs; ++ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age); ++ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade); ++ n->h2_push = H2_CONFIG_GET(add, base, h2_push); ++ if (add->push_list && base->push_list) { ++ n->push_list = apr_array_append(pool, base->push_list, add->push_list); ++ } ++ else { ++ n->push_list = add->push_list? 
add->push_list : base->push_list; ++ } ++ n->early_hints = H2_CONFIG_GET(add, base, early_hints); ++ return n; + } + +-apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) ++static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var) + { + switch(var) { + case H2_CONF_MAX_STREAMS: +@@ -191,7 +269,8 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) + case H2_CONF_UPGRADE: + return H2_CONFIG_GET(conf, &defconf, h2_upgrade); + case H2_CONF_DIRECT: +- return H2_CONFIG_GET(conf, &defconf, h2_direct); ++ return 1; ++ /*return H2_CONFIG_GET(conf, &defconf, h2_direct);*/ + case H2_CONF_TLS_WARMUP_SIZE: + return H2_CONFIG_GET(conf, &defconf, tls_warmup_size); + case H2_CONF_TLS_COOLDOWN_SECS: +@@ -204,12 +283,93 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var) + return H2_CONFIG_GET(conf, &defconf, copy_files); + case H2_CONF_EARLY_HINTS: + return H2_CONFIG_GET(conf, &defconf, early_hints); ++ case H2_CONF_PADDING_BITS: ++ return H2_CONFIG_GET(conf, &defconf, padding_bits); ++ case H2_CONF_PADDING_ALWAYS: ++ return H2_CONFIG_GET(conf, &defconf, padding_always); + default: + return DEF_VAL; + } + } + +-const h2_config *h2_config_sget(server_rec *s) ++static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val) ++{ ++ switch(var) { ++ case H2_CONF_MAX_STREAMS: ++ H2_CONFIG_SET(conf, h2_max_streams, val); ++ break; ++ case H2_CONF_WIN_SIZE: ++ H2_CONFIG_SET(conf, h2_window_size, val); ++ break; ++ case H2_CONF_MIN_WORKERS: ++ H2_CONFIG_SET(conf, min_workers, val); ++ break; ++ case H2_CONF_MAX_WORKERS: ++ H2_CONFIG_SET(conf, max_workers, val); ++ break; ++ case H2_CONF_MAX_WORKER_IDLE_SECS: ++ H2_CONFIG_SET(conf, max_worker_idle_secs, val); ++ break; ++ case H2_CONF_STREAM_MAX_MEM: ++ H2_CONFIG_SET(conf, stream_max_mem_size, val); ++ break; ++ case H2_CONF_ALT_SVC_MAX_AGE: ++ H2_CONFIG_SET(conf, alt_svc_max_age, val); ++ break; ++ case H2_CONF_SER_HEADERS: ++ H2_CONFIG_SET(conf, serialize_headers, val); ++ break; ++ case H2_CONF_MODERN_TLS_ONLY: ++ H2_CONFIG_SET(conf, modern_tls_only, val); ++ break; ++ case H2_CONF_UPGRADE: ++ H2_CONFIG_SET(conf, h2_upgrade, val); ++ break; ++ case H2_CONF_DIRECT: ++ H2_CONFIG_SET(conf, h2_direct, val); ++ break; ++ case H2_CONF_TLS_WARMUP_SIZE: ++ H2_CONFIG_SET(conf, tls_warmup_size, val); ++ break; ++ case H2_CONF_TLS_COOLDOWN_SECS: ++ H2_CONFIG_SET(conf, tls_cooldown_secs, val); ++ break; ++ case H2_CONF_PUSH: ++ H2_CONFIG_SET(conf, h2_push, val); ++ break; ++ case H2_CONF_PUSH_DIARY_SIZE: ++ H2_CONFIG_SET(conf, push_diary_size, val); ++ break; ++ case H2_CONF_COPY_FILES: ++ H2_CONFIG_SET(conf, copy_files, val); ++ break; ++ case H2_CONF_EARLY_HINTS: ++ H2_CONFIG_SET(conf, early_hints, val); ++ break; ++ case H2_CONF_PADDING_BITS: ++ H2_CONFIG_SET(conf, padding_bits, val); ++ break; ++ case H2_CONF_PADDING_ALWAYS: ++ H2_CONFIG_SET(conf, padding_always, val); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val) ++{ ++ switch(var) { ++ case H2_CONF_TLS_WARMUP_SIZE: ++ H2_CONFIG_SET(conf, tls_warmup_size, val); ++ break; ++ default: ++ h2_srv_config_seti(conf, var, (int)val); ++ break; ++ } ++} ++ ++static h2_config *h2_config_sget(server_rec *s) + { + h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config, + &http2_module); +@@ -217,9 +377,162 @@ const h2_config *h2_config_sget(server_rec *s) + return cfg; + } + +-const struct h2_priority 
*h2_config_get_priority(const h2_config *conf, +- const char *content_type) ++static const h2_dir_config *h2_config_rget(request_rec *r) ++{ ++ h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config, ++ &http2_module); ++ ap_assert(cfg); ++ return cfg; ++} ++ ++static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var) ++{ ++ switch(var) { ++ case H2_CONF_ALT_SVC_MAX_AGE: ++ return H2_CONFIG_GET(conf, &defdconf, alt_svc_max_age); ++ case H2_CONF_UPGRADE: ++ return H2_CONFIG_GET(conf, &defdconf, h2_upgrade); ++ case H2_CONF_PUSH: ++ return H2_CONFIG_GET(conf, &defdconf, h2_push); ++ case H2_CONF_EARLY_HINTS: ++ return H2_CONFIG_GET(conf, &defdconf, early_hints); ++ ++ default: ++ return DEF_VAL; ++ } ++} ++ ++static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val) ++{ ++ int set_srv = !dconf; ++ if (dconf) { ++ switch(var) { ++ case H2_CONF_ALT_SVC_MAX_AGE: ++ H2_CONFIG_SET(dconf, alt_svc_max_age, val); ++ break; ++ case H2_CONF_UPGRADE: ++ H2_CONFIG_SET(dconf, h2_upgrade, val); ++ break; ++ case H2_CONF_PUSH: ++ H2_CONFIG_SET(dconf, h2_push, val); ++ break; ++ case H2_CONF_EARLY_HINTS: ++ H2_CONFIG_SET(dconf, early_hints, val); ++ break; ++ default: ++ /* not handled in dir_conf */ ++ set_srv = 1; ++ break; ++ } ++ } ++ ++ if (set_srv) { ++ h2_srv_config_seti(conf, var, val); ++ } ++} ++ ++static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val) ++{ ++ int set_srv = !dconf; ++ if (dconf) { ++ switch(var) { ++ default: ++ /* not handled in dir_conf */ ++ set_srv = 1; ++ break; ++ } ++ } ++ ++ if (set_srv) { ++ h2_srv_config_seti64(conf, var, val); ++ } ++} ++ ++static const h2_config *h2_config_get(conn_rec *c) ++{ ++ h2_ctx *ctx = h2_ctx_get(c, 0); ++ ++ if (ctx) { ++ if (ctx->config) { ++ return ctx->config; ++ } ++ else if (ctx->server) { ++ ctx->config = h2_config_sget(ctx->server); ++ return ctx->config; ++ } ++ } ++ ++ return h2_config_sget(c->base_server); ++} ++ ++int h2_config_cgeti(conn_rec *c, h2_config_var_t var) ++{ ++ return (int)h2_srv_config_geti64(h2_config_get(c), var); ++} ++ ++apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var) ++{ ++ return h2_srv_config_geti64(h2_config_get(c), var); ++} ++ ++int h2_config_sgeti(server_rec *s, h2_config_var_t var) ++{ ++ return (int)h2_srv_config_geti64(h2_config_sget(s), var); ++} ++ ++apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var) ++{ ++ return h2_srv_config_geti64(h2_config_sget(s), var); ++} ++ ++int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var) ++{ ++ return (int)h2_config_geti64(r, s, var); ++} ++ ++apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var) ++{ ++ apr_int64_t mode = r? (int)h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL; ++ return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var); ++} ++ ++int h2_config_rgeti(request_rec *r, h2_config_var_t var) ++{ ++ return h2_config_geti(r, r->server, var); ++} ++ ++apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var) ++{ ++ return h2_config_geti64(r, r->server, var); ++} ++ ++apr_array_header_t *h2_config_push_list(request_rec *r) ++{ ++ const h2_config *sconf; ++ const h2_dir_config *conf = h2_config_rget(r); ++ ++ if (conf && conf->push_list) { ++ return conf->push_list; ++ } ++ sconf = h2_config_sget(r->server); ++ return sconf? 
sconf->push_list : NULL; ++} ++ ++apr_array_header_t *h2_config_alt_svcs(request_rec *r) + { ++ const h2_config *sconf; ++ const h2_dir_config *conf = h2_config_rget(r); ++ ++ if (conf && conf->alt_svcs) { ++ return conf->alt_svcs; ++ } ++ sconf = h2_config_sget(r->server); ++ return sconf? sconf->alt_svcs : NULL; ++} ++ ++const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type) ++{ ++ const h2_config *conf = h2_config_get(c); + if (content_type && conf->priorities) { + size_t len = strcspn(content_type, "; \t"); + h2_priority *prio = apr_hash_get(conf->priorities, content_type, len); +@@ -228,166 +541,156 @@ const struct h2_priority *h2_config_get_priority(const h2_config *conf, + return NULL; + } + +-static const char *h2_conf_set_max_streams(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_max_streams(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->h2_max_streams = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->h2_max_streams < 1) { ++ apr_int64_t ival = (int)apr_atoi64(value); ++ if (ival < 1) { + return "value must be > 0"; + } ++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival); + return NULL; + } + +-static const char *h2_conf_set_window_size(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_window_size(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->h2_window_size = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->h2_window_size < 1024) { ++ int val = (int)apr_atoi64(value); ++ if (val < 1024) { + return "value must be >= 1024"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val); + return NULL; + } + +-static const char *h2_conf_set_min_workers(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_min_workers(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->min_workers = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->min_workers < 1) { ++ int val = (int)apr_atoi64(value); ++ if (val < 1) { + return "value must be > 0"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val); + return NULL; + } + +-static const char *h2_conf_set_max_workers(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_max_workers(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->max_workers = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->max_workers < 1) { ++ int val = (int)apr_atoi64(value); ++ if (val < 1) { + return "value must be > 0"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val); + return NULL; + } + +-static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->max_worker_idle_secs = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->max_worker_idle_secs < 1) { ++ int val = (int)apr_atoi64(value); ++ if (val < 1) { + return "value must be > 0"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_SECS, val); + return NULL; + } + +-static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms, +- void *arg, const char *value) ++static const char 
*h2_conf_set_stream_max_mem_size(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- +- +- cfg->stream_max_mem_size = (int)apr_atoi64(value); +- (void)arg; +- if (cfg->stream_max_mem_size < 1024) { ++ int val = (int)apr_atoi64(value); ++ if (val < 1024) { + return "value must be >= 1024"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val); + return NULL; + } + +-static const char *h2_add_alt_svc(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_add_alt_svc(cmd_parms *cmd, ++ void *dirconf, const char *value) + { + if (value && *value) { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool); ++ h2_alt_svc *as = h2_alt_svc_parse(value, cmd->pool); + if (!as) { + return "unable to parse alt-svc specifier"; + } +- if (!cfg->alt_svcs) { +- cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*)); ++ ++ if (cmd->path) { ++ h2_dir_config *dcfg = (h2_dir_config *)dirconf; ++ if (!dcfg->alt_svcs) { ++ dcfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*)); ++ } ++ APR_ARRAY_PUSH(dcfg->alt_svcs, h2_alt_svc*) = as; ++ } ++ else { ++ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server); ++ if (!cfg->alt_svcs) { ++ cfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*)); ++ } ++ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as; + } +- APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as; + } +- (void)arg; + return NULL; + } + +-static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_alt_svc_max_age(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->alt_svc_max_age = (int)apr_atoi64(value); +- (void)arg; ++ int val = (int)apr_atoi64(value); ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_ALT_SVC_MAX_AGE, val); + return NULL; + } + +-static const char *h2_conf_set_session_extra_files(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_session_extra_files(cmd_parms *cmd, ++ void *dirconf, const char *value) + { + /* deprecated, ignore */ +- (void)arg; ++ (void)dirconf; + (void)value; +- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */ ++ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */ + "H2SessionExtraFiles is obsolete and will be ignored"); + return NULL; + } + +-static const char *h2_conf_set_serialize_headers(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_serialize_headers(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { +- cfg->serialize_headers = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { +- cfg->serialize_headers = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +-static const char *h2_conf_set_direct(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_direct(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { +- cfg->h2_direct = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) 
{ +- cfg->h2_direct = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +-static const char *h2_conf_set_push(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { +- cfg->h2_push = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { +- cfg->h2_push = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +@@ -447,100 +750,88 @@ static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg, + return NULL; + } + +-static const char *h2_conf_set_modern_tls_only(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { +- cfg->modern_tls_only = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { +- cfg->modern_tls_only = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +-static const char *h2_conf_set_upgrade(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_upgrade(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); + if (!strcasecmp(value, "On")) { +- cfg->h2_upgrade = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { +- cfg->h2_upgrade = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +-static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->tls_warmup_size = apr_atoi64(value); +- (void)arg; ++ apr_int64_t val = apr_atoi64(value); ++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val); + return NULL; + } + +-static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- cfg->tls_cooldown_secs = (int)apr_atoi64(value); +- (void)arg; ++ apr_int64_t val = (int)apr_atoi64(value); ++ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val); + return NULL; + } + +-static const char *h2_conf_set_push_diary_size(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_push_diary_size(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- (void)arg; +- cfg->push_diary_size = (int)apr_atoi64(value); +- if (cfg->push_diary_size < 0) { ++ int val = (int)apr_atoi64(value); ++ if (val < 0) { + return "value must be >= 0"; + } +- if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) { ++ if (val > 0 && (val & (val-1))) { + return 
"value must a power of 2"; + } +- if (cfg->push_diary_size > (1 << 15)) { ++ if (val > (1 << 15)) { + return "value must <= 65536"; + } ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val); + return NULL; + } + +-static const char *h2_conf_set_copy_files(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_copy_files(cmd_parms *cmd, ++ void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)arg; + if (!strcasecmp(value, "On")) { +- cfg->copy_files = 1; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1); + return NULL; + } + else if (!strcasecmp(value, "Off")) { +- cfg->copy_files = 0; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0); + return NULL; + } +- +- (void)arg; + return "value must be On or Off"; + } + +-static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push) ++static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push) + { + h2_push_res *new; +- if (!conf->push_list) { +- conf->push_list = apr_array_make(pool, 10, sizeof(*push)); ++ if (!*plist) { ++ *plist = apr_array_make(pool, 10, sizeof(*push)); + } +- new = apr_array_push(conf->push_list); ++ new = apr_array_push(*plist); + new->uri_ref = push->uri_ref; + new->critical = push->critical; + } +@@ -549,8 +840,6 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf, + const char *arg1, const char *arg2, + const char *arg3) + { +- h2_config *dconf = (h2_config*)dirconf ; +- h2_config *sconf = (h2_config*)h2_config_sget(cmd->server); + h2_push_res push; + const char *last = arg3; + +@@ -575,42 +864,54 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf, + } + } + +- /* server command? set both */ +- if (cmd->path == NULL) { +- add_push(cmd->pool, sconf, &push); +- add_push(cmd->pool, dconf, &push); ++ if (cmd->path) { ++ add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push); + } + else { +- add_push(cmd->pool, dconf, &push); ++ add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push); + } ++ return NULL; ++} ++ ++static const char *h2_conf_set_early_hints(cmd_parms *cmd, ++ void *dirconf, const char *value) ++{ ++ int val; + ++ if (!strcasecmp(value, "On")) val = 1; ++ else if (!strcasecmp(value, "Off")) val = 0; ++ else return "value must be On or Off"; ++ ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val); ++ if (cmd->path) { ++ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, ++ "H2EarlyHints = %d on path %s", val, cmd->path); ++ } + return NULL; + } + +-static const char *h2_conf_set_early_hints(cmd_parms *parms, +- void *arg, const char *value) ++static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value) + { +- h2_config *cfg = (h2_config *)h2_config_sget(parms->server); +- if (!strcasecmp(value, "On")) { +- cfg->early_hints = 1; +- return NULL; ++ int val; ++ ++ val = (int)apr_atoi64(value); ++ if (val < 0) { ++ return "number of bits must be >= 0"; + } +- else if (!strcasecmp(value, "Off")) { +- cfg->early_hints = 0; +- return NULL; ++ if (val > 8) { ++ return "number of bits must be <= 8"; + } +- +- (void)arg; +- return "value must be On or Off"; ++ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val); ++ return NULL; + } + ++ + void h2_get_num_workers(server_rec *s, int *minw, int *maxw) + { + int threads_per_child = 0; +- const h2_config *config = h2_config_sget(s); + +- *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS); +- *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS); ++ *minw = 
h2_config_sgeti(s, H2_CONF_MIN_WORKERS); ++ *maxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS); + ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child); + + if (*minw <= 0) { +@@ -652,7 +953,7 @@ const command_rec h2_cmds[] = { + AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL, + RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"), + AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL, +- RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"), ++ RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"), + AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL, + RSRC_CONF, "on to enable direct HTTP/2 mode"), + AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL, +@@ -662,7 +963,7 @@ const command_rec h2_cmds[] = { + AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL, + RSRC_CONF, "seconds of idle time on TLS before shrinking writes"), + AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL, +- RSRC_CONF, "off to disable HTTP/2 server push"), ++ RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"), + AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL, + RSRC_CONF, "define priority of PUSHed resources per content type"), + AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL, +@@ -670,33 +971,12 @@ const command_rec h2_cmds[] = { + AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL, + OR_FILEINFO, "on to perform copy of file data"), + AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL, +- OR_FILEINFO, "add a resource to be pushed in this location/on this server."), ++ OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."), + AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL, + RSRC_CONF, "on to enable interim status 103 responses"), ++ AP_INIT_TAKE1("H2Padding", h2_conf_set_padding, NULL, ++ RSRC_CONF, "set payload padding"), + AP_END_CMD + }; + + +-const h2_config *h2_config_rget(request_rec *r) +-{ +- h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config, +- &http2_module); +- return cfg? cfg : h2_config_sget(r->server); +-} +- +-const h2_config *h2_config_get(conn_rec *c) +-{ +- h2_ctx *ctx = h2_ctx_get(c, 0); +- +- if (ctx) { +- if (ctx->config) { +- return ctx->config; +- } +- else if (ctx->server) { +- ctx->config = h2_config_sget(ctx->server); +- return ctx->config; +- } +- } +- +- return h2_config_sget(c->base_server); +-} +diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h +index 17d75d6..e940c8a 100644 +--- a/modules/http2/h2_config.h ++++ b/modules/http2/h2_config.h +@@ -42,6 +42,8 @@ typedef enum { + H2_CONF_PUSH_DIARY_SIZE, + H2_CONF_COPY_FILES, + H2_CONF_EARLY_HINTS, ++ H2_CONF_PADDING_BITS, ++ H2_CONF_PADDING_ALWAYS, + } h2_config_var_t; + + struct apr_hash_t; +@@ -53,33 +55,6 @@ typedef struct h2_push_res { + int critical; + } h2_push_res; + +-/* Apache httpd module configuration for h2. 
*/ +-typedef struct h2_config { +- const char *name; +- int h2_max_streams; /* max concurrent # streams (http2) */ +- int h2_window_size; /* stream window size (http2) */ +- int min_workers; /* min # of worker threads/child */ +- int max_workers; /* max # of worker threads/child */ +- int max_worker_idle_secs; /* max # of idle seconds for worker */ +- int stream_max_mem_size; /* max # bytes held in memory/stream */ +- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */ +- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/ +- int serialize_headers; /* Use serialized HTTP/1.1 headers for +- processing, better compatibility */ +- int h2_direct; /* if mod_h2 is active directly */ +- int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */ +- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */ +- apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */ +- int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */ +- int h2_push; /* if HTTP/2 server push is enabled */ +- struct apr_hash_t *priorities;/* map of content-type to h2_priority records */ +- +- int push_diary_size; /* # of entries in push diary */ +- int copy_files; /* if files shall be copied vs setaside on output */ +- apr_array_header_t *push_list;/* list of h2_push_res configurations */ +- int early_hints; /* support status code 103 */ +-} h2_config; +- + + void *h2_config_create_dir(apr_pool_t *pool, char *x); + void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv); +@@ -88,19 +63,37 @@ void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv); + + extern const command_rec h2_cmds[]; + +-const h2_config *h2_config_get(conn_rec *c); +-const h2_config *h2_config_sget(server_rec *s); +-const h2_config *h2_config_rget(request_rec *r); ++int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var); ++apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var); + +-int h2_config_geti(const h2_config *conf, h2_config_var_t var); +-apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var); ++/** ++ * Get the configured value for variable at the given connection. ++ */ ++int h2_config_cgeti(conn_rec *c, h2_config_var_t var); ++apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var); + +-void h2_get_num_workers(server_rec *s, int *minw, int *maxw); ++/** ++ * Get the configured value for variable at the given server. ++ */ ++int h2_config_sgeti(server_rec *s, h2_config_var_t var); ++apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var); + ++/** ++ * Get the configured value for variable at the given request, ++ * if configured for the request location. ++ * Fallback to request server config otherwise. 
++ */ ++int h2_config_rgeti(request_rec *r, h2_config_var_t var); ++apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var); ++ ++apr_array_header_t *h2_config_push_list(request_rec *r); ++apr_array_header_t *h2_config_alt_svcs(request_rec *r); ++ ++ ++void h2_get_num_workers(server_rec *s, int *minw, int *maxw); + void h2_config_init(apr_pool_t *pool); + +-const struct h2_priority *h2_config_get_priority(const h2_config *conf, +- const char *content_type); ++const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type); + + #endif /* __mod_h2__h2_config_h__ */ + +diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c +index dc2081e..9ef0ea0 100644 +--- a/modules/http2/h2_conn.c ++++ b/modules/http2/h2_conn.c +@@ -18,6 +18,7 @@ + #include + + #include ++#include + + #include + #include +@@ -109,7 +110,6 @@ static void check_modules(int force) + + apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) + { +- const h2_config *config = h2_config_sget(s); + apr_status_t status = APR_SUCCESS; + int minw, maxw; + int max_threads_per_child = 0; +@@ -129,7 +129,7 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s) + + h2_get_num_workers(s, &minw, &maxw); + +- idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS); ++ idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS); + ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s, + "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d", + minw, maxw, max_threads_per_child, idle_secs); +@@ -172,9 +172,10 @@ static module *h2_conn_mpm_module(void) + return mpm_module; + } + +-apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) ++apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s) + { + h2_session *session; ++ h2_ctx *ctx; + apr_status_t status; + + if (!workers) { +@@ -183,24 +184,19 @@ apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r) + return APR_EGENERAL; + } + +- if (r) { +- status = h2_session_rcreate(&session, r, ctx, workers); +- } +- else { +- status = h2_session_create(&session, c, ctx, workers); +- } +- +- if (status == APR_SUCCESS) { ++ if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) { ++ ctx = h2_ctx_get(c, 1); + h2_ctx_session_set(ctx, session); + } ++ + return status; + } + +-apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) ++apr_status_t h2_conn_run(conn_rec *c) + { + apr_status_t status; + int mpm_state = 0; +- h2_session *session = h2_ctx_session_get(ctx); ++ h2_session *session = h2_ctx_get_session(c); + + ap_assert(session); + do { +@@ -249,7 +245,7 @@ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c) + + apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c) + { +- h2_session *session = h2_ctx_session_get(ctx); ++ h2_session *session = h2_ctx_get_session(c); + if (session) { + apr_status_t status = h2_session_pre_close(session, async_mpm); + return (status == APR_SUCCESS)? DONE : status; +@@ -310,8 +306,10 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent) + c->filter_conn_ctx = NULL; + #endif + c->bucket_alloc = apr_bucket_alloc_create(pool); +- c->data_in_input_filters = 0; +- c->data_in_output_filters = 0; ++#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1) ++ c->data_in_input_filters = 0; ++ c->data_in_output_filters = 0; ++#endif + /* prevent mpm_event from making wrong assumptions about this connection, + * like e.g. using its socket for an async read check. 
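Note (illustrative, not part of the patch): the reworked lookup helpers declared above replace the old h2_config_geti(conf, var) calls. A minimal sketch of how a caller inside mod_http2 might use them, assuming the module's internal headers are available; the hook function name is hypothetical:

#include "httpd.h"
#include "http_config.h"
#include "h2_config.h"

static int example_fixup(request_rec *r)
{
    /* per-request lookup: prefers the merged <Location>/<Directory> config
     * of r and falls back to r->server's settings */
    int copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);

    /* per-server lookup: consults only the server_rec */
    apr_int64_t max_mem = h2_config_sgeti64(r->server, H2_CONF_STREAM_MAX_MEM);

    (void)copy_files;
    (void)max_mem;
    return DECLINED;
}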
*/ + c->clogging_input_filters = 1; +diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h +index e45ff31..c560405 100644 +--- a/modules/http2/h2_conn.h ++++ b/modules/http2/h2_conn.h +@@ -23,21 +23,21 @@ struct h2_task; + /** + * Setup the connection and our context for HTTP/2 processing + * +- * @param ctx the http2 context to setup + * @param c the connection HTTP/2 is starting on + * @param r the upgrade request that still awaits an answer, optional ++ * @param s the server selected for this connection (can be != c->base_server) + */ +-apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r); ++apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s); + + /** + * Run the HTTP/2 connection in synchronous fashion. + * Return when the HTTP/2 session is done + * and the connection will close or a fatal error occurred. + * +- * @param ctx the http2 context to run ++ * @param c the http2 connection to run + * @return APR_SUCCESS when session is done. + */ +-apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c); ++apr_status_t h2_conn_run(conn_rec *c); + + /** + * The connection is about to close. If we have not send a GOAWAY +diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c +index eb6ec92..5f17e85 100644 +--- a/modules/http2/h2_conn_io.c ++++ b/modules/http2/h2_conn_io.c +@@ -40,12 +40,17 @@ + * ~= 1300 bytes */ + #define WRITE_SIZE_INITIAL 1300 + +-/* Calculated like this: max TLS record size 16*1024 +- * - 40 (IP) - 20 (TCP) - 40 (TCP options) +- * - TLS overhead (60-100) +- * which seems to create less TCP packets overall ++/* The maximum we'd like to write in one chunk is ++ * the max size of a TLS record. When pushing ++ * many frames down the h2 connection, this might ++ * align differently because of headers and other ++ * frames or simply as not sufficient data is ++ * in a response body. ++ * However keeping frames at or below this limit ++ * should make optimizations at the layer that writes ++ * to TLS easier. + */ +-#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100) ++#define WRITE_SIZE_MAX (TLS_DATA_MAX) + + + static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, +@@ -123,21 +128,20 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level, + + } + +-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, +- const h2_config *cfg) ++apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s) + { + io->c = c; + io->output = apr_brigade_create(c->pool, c->bucket_alloc); + io->is_tls = h2_h2_is_tls(c); + io->buffer_output = io->is_tls; +- io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM); ++ io->flush_threshold = (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM); + + if (io->is_tls) { + /* This is what we start with, + * see https://issues.apache.org/jira/browse/TS-2503 + */ +- io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE); +- io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS) ++ io->warmup_size = h2_config_sgeti64(s, H2_CONF_TLS_WARMUP_SIZE); ++ io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS) + * APR_USEC_PER_SEC); + io->write_size = (io->cooldown_usecs > 0? 
+ WRITE_SIZE_INITIAL : WRITE_SIZE_MAX); +diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h +index 2c3be1c..e96203c 100644 +--- a/modules/http2/h2_conn_io.h ++++ b/modules/http2/h2_conn_io.h +@@ -48,8 +48,7 @@ typedef struct { + apr_size_t slen; + } h2_conn_io; + +-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, +- const struct h2_config *cfg); ++apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s); + + /** + * Append data to the buffered output. +diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c +index d5ccc24..095f355 100644 +--- a/modules/http2/h2_ctx.c ++++ b/modules/http2/h2_ctx.c +@@ -29,8 +29,8 @@ static h2_ctx *h2_ctx_create(const conn_rec *c) + { + h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx)); + ap_assert(ctx); ++ h2_ctx_server_update(ctx, c->base_server); + ap_set_module_config(c->conn_config, &http2_module, ctx); +- h2_ctx_server_set(ctx, c->base_server); + return ctx; + } + +@@ -79,8 +79,9 @@ h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto) + return ctx; + } + +-h2_session *h2_ctx_session_get(h2_ctx *ctx) ++h2_session *h2_ctx_get_session(conn_rec *c) + { ++ h2_ctx *ctx = h2_ctx_get(c, 0); + return ctx? ctx->session : NULL; + } + +@@ -89,33 +90,17 @@ void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session) + ctx->session = session; + } + +-server_rec *h2_ctx_server_get(h2_ctx *ctx) ++h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s) + { +- return ctx? ctx->server : NULL; +-} +- +-h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s) +-{ +- ctx->server = s; ++ if (ctx->server != s) { ++ ctx->server = s; ++ } + return ctx; + } + +-int h2_ctx_is_task(h2_ctx *ctx) +-{ +- return ctx && ctx->task; +-} +- +-h2_task *h2_ctx_get_task(h2_ctx *ctx) ++h2_task *h2_ctx_get_task(conn_rec *c) + { ++ h2_ctx *ctx = h2_ctx_get(c, 0); + return ctx? ctx->task : NULL; + } + +-h2_task *h2_ctx_cget_task(conn_rec *c) +-{ +- return h2_ctx_get_task(h2_ctx_get(c, 0)); +-} +- +-h2_task *h2_ctx_rget_task(request_rec *r) +-{ +- return h2_ctx_get_task(h2_ctx_rget(r)); +-} +diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h +index cb111c9..417ef36 100644 +--- a/modules/http2/h2_ctx.h ++++ b/modules/http2/h2_ctx.h +@@ -56,12 +56,11 @@ h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task); + */ + h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto); + +-/* Set the server_rec relevant for this context. ++/* Update the server_rec relevant for this context. A server for ++ * a connection may change during SNI handling, for example. 
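Note (illustrative, not part of the patch): with the slimmed-down h2_ctx API changed here, callers hand in the conn_rec directly instead of fetching an h2_ctx first. A minimal sketch, assuming mod_http2's internal headers; the tracing function is hypothetical:

#include "httpd.h"
#include "http_log.h"
#include "h2_ctx.h"

static void example_trace(conn_rec *c)
{
    /* NULL when the connection carries no HTTP/2 session */
    struct h2_session *session = h2_ctx_get_session(c);
    /* NULL unless c is a slave connection running a stream's task */
    struct h2_task *task = h2_ctx_get_task(c);

    ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
                  "example: session=%s, task=%s",
                  session? "present" : "none", task? "present" : "none");
}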
+ */ +-h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s); +-server_rec *h2_ctx_server_get(h2_ctx *ctx); ++h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s); + +-struct h2_session *h2_ctx_session_get(h2_ctx *ctx); + void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session); + + /** +@@ -69,10 +68,8 @@ void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session); + */ + const char *h2_ctx_protocol_get(const conn_rec *c); + +-int h2_ctx_is_task(h2_ctx *ctx); ++struct h2_session *h2_ctx_get_session(conn_rec *c); ++struct h2_task *h2_ctx_get_task(conn_rec *c); + +-struct h2_task *h2_ctx_get_task(h2_ctx *ctx); +-struct h2_task *h2_ctx_cget_task(conn_rec *c); +-struct h2_task *h2_ctx_rget_task(request_rec *r); + + #endif /* defined(__mod_h2__h2_ctx__) */ +diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c +index 8b254b1..5fd237f 100644 +--- a/modules/http2/h2_filter.c ++++ b/modules/http2/h2_filter.c +@@ -54,6 +54,7 @@ static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin, + const char *data; + ssize_t n; + ++ (void)c; + status = apr_bucket_read(b, &data, &len, block); + + while (status == APR_SUCCESS && len > 0) { +@@ -71,10 +72,10 @@ static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin, + } + else { + session->io.bytes_read += n; +- if (len <= n) { ++ if ((apr_ssize_t)len <= n) { + break; + } +- len -= n; ++ len -= (apr_size_t)n; + data += n; + } + } +@@ -277,6 +278,7 @@ apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam, + apr_bucket_brigade *dest, + const apr_bucket *src) + { ++ (void)beam; + if (H2_BUCKET_IS_OBSERVER(src)) { + h2_bucket_observer *l = (h2_bucket_observer *)src->data; + apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc, +@@ -311,8 +313,7 @@ static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last) + bbout(bb, " \"settings\": {\n"); + bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams); + bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024); +- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", +- h2_config_geti(s->config, H2_CONF_WIN_SIZE)); ++ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", h2_config_sgeti(s->s, H2_CONF_WIN_SIZE)); + bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s)); + bbout(bb, " }%s\n", last? 
"" : ","); + } +@@ -431,41 +432,38 @@ static void add_stats(apr_bucket_brigade *bb, h2_session *s, + + static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b) + { +- conn_rec *c = task->c->master; +- h2_ctx *h2ctx = h2_ctx_get(c, 0); +- h2_session *session; +- h2_stream *stream; ++ h2_mplx *m = task->mplx; ++ h2_stream *stream = h2_mplx_stream_get(m, task->stream_id); ++ h2_session *s; ++ conn_rec *c; ++ + apr_bucket_brigade *bb; + apr_bucket *e; + int32_t connFlowIn, connFlowOut; + +- +- if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) { +- return APR_SUCCESS; +- } +- +- stream = h2_session_stream_get(session, task->stream_id); + if (!stream) { + /* stream already done */ + return APR_SUCCESS; + } ++ s = stream->session; ++ c = s->c; + + bb = apr_brigade_create(stream->pool, c->bucket_alloc); + +- connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2); +- connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2); ++ connFlowIn = nghttp2_session_get_effective_local_window_size(s->ngh2); ++ connFlowOut = nghttp2_session_get_remote_window_size(s->ngh2); + + bbout(bb, "{\n"); + bbout(bb, " \"version\": \"draft-01\",\n"); +- add_settings(bb, session, 0); +- add_peer_settings(bb, session, 0); ++ add_settings(bb, s, 0); ++ add_peer_settings(bb, s, 0); + bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn); + bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut); +- bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown); ++ bbout(bb, " \"sentGoAway\": %d,\n", s->local.shutdown); + +- add_streams(bb, session, 0); ++ add_streams(bb, s, 0); + +- add_stats(bb, session, stream, 1); ++ add_stats(bb, s, stream, 1); + bbout(bb, "}\n"); + + while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) { +@@ -497,7 +495,6 @@ static apr_status_t status_event(void *ctx, h2_bucket_event event, + + int h2_filter_h2_status_handler(request_rec *r) + { +- h2_ctx *ctx = h2_ctx_rget(r); + conn_rec *c = r->connection; + h2_task *task; + apr_bucket_brigade *bb; +@@ -511,7 +508,7 @@ int h2_filter_h2_status_handler(request_rec *r) + return DECLINED; + } + +- task = ctx? h2_ctx_get_task(ctx) : NULL; ++ task = h2_ctx_get_task(r->connection); + if (task) { + + if ((status = ap_discard_request_body(r)) != OK) { +diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c +index dd6ad90..c3e3352 100644 +--- a/modules/http2/h2_from_h1.c ++++ b/modules/http2/h2_from_h1.c +@@ -586,18 +586,20 @@ apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb) + } + } + +- if (r->header_only) { ++ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c, +- "h2_task(%s): header_only, cleanup output brigade", ++ "h2_task(%s): headers only, cleanup output brigade", + task->id); + b = body_bucket? 
body_bucket : APR_BRIGADE_FIRST(bb); + while (b != APR_BRIGADE_SENTINEL(bb)) { + next = APR_BUCKET_NEXT(b); + if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) { + break; +- } +- APR_BUCKET_REMOVE(b); +- apr_bucket_destroy(b); ++ } ++ if (!H2_BUCKET_IS_HEADERS(b)) { ++ APR_BUCKET_REMOVE(b); ++ apr_bucket_destroy(b); ++ } + b = next; + } + } +diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c +index 5580cef..4ff1d51 100644 +--- a/modules/http2/h2_h2.c ++++ b/modules/http2/h2_h2.c +@@ -463,19 +463,18 @@ int h2_h2_is_tls(conn_rec *c) + return opt_ssl_is_https && opt_ssl_is_https(c); + } + +-int h2_is_acceptable_connection(conn_rec *c, int require_all) ++int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all) + { + int is_tls = h2_h2_is_tls(c); +- const h2_config *cfg = h2_config_get(c); + +- if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) { ++ if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) { + /* Check TLS connection for modern TLS parameters, as defined in + * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + */ + apr_pool_t *pool = c->pool; + server_rec *s = c->base_server; + char *val; +- ++ + if (!opt_ssl_var_lookup) { + /* unable to check */ + return 0; +@@ -521,26 +520,22 @@ int h2_is_acceptable_connection(conn_rec *c, int require_all) + return 1; + } + +-int h2_allows_h2_direct(conn_rec *c) ++static int h2_allows_h2_direct(conn_rec *c) + { +- const h2_config *cfg = h2_config_get(c); + int is_tls = h2_h2_is_tls(c); + const char *needed_protocol = is_tls? "h2" : "h2c"; +- int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT); ++ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT); + + if (h2_direct < 0) { + h2_direct = is_tls? 0 : 1; + } +- return (h2_direct +- && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol)); ++ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol)); + } + +-int h2_allows_h2_upgrade(conn_rec *c) ++int h2_allows_h2_upgrade(request_rec *r) + { +- const h2_config *cfg = h2_config_get(c); +- int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE); +- +- return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c)); ++ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE); ++ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(r->connection)); + } + + /******************************************************************************* +@@ -581,14 +576,17 @@ int h2_h2_process_conn(conn_rec* c) + { + apr_status_t status; + h2_ctx *ctx; ++ server_rec *s; + + if (c->master) { + return DECLINED; + } + + ctx = h2_ctx_get(c, 0); ++ s = ctx? ctx->server : c->base_server; ++ + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn"); +- if (h2_ctx_is_task(ctx)) { ++ if (ctx && ctx->task) { + /* our stream pseudo connection */ + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined"); + return DECLINED; +@@ -601,19 +599,19 @@ int h2_h2_process_conn(conn_rec* c) + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, " + "new connection using protocol '%s', direct=%d, " + "tls acceptable=%d", proto, h2_allows_h2_direct(c), +- h2_is_acceptable_connection(c, 1)); ++ h2_is_acceptable_connection(c, NULL, 1)); + } + + if (!strcmp(AP_PROTOCOL_HTTP1, proto) + && h2_allows_h2_direct(c) +- && h2_is_acceptable_connection(c, 1)) { ++ && h2_is_acceptable_connection(c, NULL, 1)) { + /* Fresh connection still is on http/1.1 and H2Direct is enabled. + * Otherwise connection is in a fully acceptable state. 
+ * -> peek at the first 24 incoming bytes + */ + apr_bucket_brigade *temp; +- char *s = NULL; +- apr_size_t slen; ++ char *peek = NULL; ++ apr_size_t peeklen; + + temp = apr_brigade_create(c->pool, c->bucket_alloc); + status = ap_get_brigade(c->input_filters, temp, +@@ -626,8 +624,8 @@ int h2_h2_process_conn(conn_rec* c) + return DECLINED; + } + +- apr_brigade_pflatten(temp, &s, &slen, c->pool); +- if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) { ++ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool); ++ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, direct mode detected"); + if (!ctx) { +@@ -638,7 +636,7 @@ int h2_h2_process_conn(conn_rec* c) + else if (APLOGctrace2(c)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_h2, not detected in %d bytes(base64): %s", +- (int)slen, h2_util_base64url_encode(s, slen, c->pool)); ++ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool)); + } + + apr_brigade_destroy(temp); +@@ -647,15 +645,16 @@ int h2_h2_process_conn(conn_rec* c) + + if (ctx) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn"); +- if (!h2_ctx_session_get(ctx)) { +- status = h2_conn_setup(ctx, c, NULL); ++ ++ if (!h2_ctx_get_session(c)) { ++ status = h2_conn_setup(c, NULL, s); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup"); + if (status != APR_SUCCESS) { + h2_ctx_clear(c); + return !OK; + } + } +- h2_conn_run(ctx, c); ++ h2_conn_run(c); + return OK; + } + +@@ -684,16 +683,17 @@ static int h2_h2_pre_close_conn(conn_rec *c) + + static void check_push(request_rec *r, const char *tag) + { +- const h2_config *conf = h2_config_rget(r); +- if (!r->expecting_100 +- && conf && conf->push_list && conf->push_list->nelts > 0) { ++ apr_array_header_t *push_list = h2_config_push_list(r); ++ ++ if (!r->expecting_100 && push_list && push_list->nelts > 0) { + int i, old_status; + const char *old_line; ++ + ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, + "%s, early announcing %d resources for push", +- tag, conf->push_list->nelts); +- for (i = 0; i < conf->push_list->nelts; ++i) { +- h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res); ++ tag, push_list->nelts); ++ for (i = 0; i < push_list->nelts; ++i) { ++ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res); + apr_table_add(r->headers_out, "Link", + apr_psprintf(r->pool, "<%s>; rel=preload%s", + push->uri_ref, push->critical? "; critical" : "")); +@@ -712,8 +712,7 @@ static int h2_h2_post_read_req(request_rec *r) + { + /* slave connection? */ + if (r->connection->master) { +- h2_ctx *ctx = h2_ctx_rget(r); +- struct h2_task *task = h2_ctx_get_task(ctx); ++ struct h2_task *task = h2_ctx_get_task(r->connection); + /* This hook will get called twice on internal redirects. Take care + * that we manipulate filters only once. */ + if (task && !task->filters_set) { +@@ -746,12 +745,10 @@ static int h2_h2_late_fixups(request_rec *r) + { + /* slave connection? */ + if (r->connection->master) { +- h2_ctx *ctx = h2_ctx_rget(r); +- struct h2_task *task = h2_ctx_get_task(ctx); ++ struct h2_task *task = h2_ctx_get_task(r->connection); + if (task) { + /* check if we copy vs. 
setaside files in this location */ +- task->output.copy_files = h2_config_geti(h2_config_rget(r), +- H2_CONF_COPY_FILES); ++ task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES); + if (task->output.copy_files) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, + "h2_slave_out(%s): copy_files on", task->id); +diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h +index 367823d..339e898 100644 +--- a/modules/http2/h2_h2.h ++++ b/modules/http2/h2_h2.h +@@ -57,23 +57,15 @@ void h2_h2_register_hooks(void); + * the handshake is still ongoing. + * @return != 0 iff connection requirements are met + */ +-int h2_is_acceptable_connection(conn_rec *c, int require_all); +- +-/** +- * Check if the "direct" HTTP/2 mode of protocol handling is enabled +- * for the given connection. +- * @param c the connection to check +- * @return != 0 iff direct mode is enabled +- */ +-int h2_allows_h2_direct(conn_rec *c); ++int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all); + + /** + * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled +- * for the given connection. +- * @param c the connection to check ++ * for the given request. ++ * @param r the request to check + * @return != 0 iff Upgrade switching is enabled + */ +-int h2_allows_h2_upgrade(conn_rec *c); ++int h2_allows_h2_upgrade(request_rec *r); + + + #endif /* defined(__mod_h2__h2_h2__) */ +diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c +index 49d9c0a..6d43290 100644 +--- a/modules/http2/h2_headers.c ++++ b/modules/http2/h2_headers.c +@@ -28,6 +28,7 @@ + + #include "h2_private.h" + #include "h2_h2.h" ++#include "h2_config.h" + #include "h2_util.h" + #include "h2_request.h" + #include "h2_headers.h" +@@ -129,21 +130,27 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status, + { + h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool); + if (headers->status == HTTP_FORBIDDEN) { +- const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden"); +- if (cause) { +- /* This request triggered a TLS renegotiation that is now allowed +- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. +- */ +- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r, +- APLOGNO(03061) +- "h2_headers(%ld): renegotiate forbidden, cause: %s", +- (long)r->connection->id, cause); +- headers->status = H2_ERR_HTTP_1_1_REQUIRED; ++ request_rec *r_prev; ++ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) { ++ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden"); ++ if (cause) { ++ /* This request triggered a TLS renegotiation that is not allowed ++ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this. 
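Note (illustrative, not part of the patch): the surrounding loop in h2_headers_rcreate() walks the chain of internal redirects via r->prev to find a note recorded on an earlier request_rec. The same pattern as a stand-alone sketch; the helper name is hypothetical:

#include "httpd.h"
#include "apr_tables.h"

/* hypothetical helper mirroring the r->prev walk used by the patch */
static const char *find_note_in_chain(request_rec *r, const char *key)
{
    request_rec *r_prev;
    for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
        const char *val = apr_table_get(r_prev->notes, key);
        if (val) {
            return val;
        }
    }
    return NULL;
}

With such a helper, the renegotiation check amounts to find_note_in_chain(r, "ssl-renegotiate-forbidden").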
++ */ ++ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r, ++ APLOGNO(03061) ++ "h2_headers(%ld): renegotiate forbidden, cause: %s", ++ (long)r->connection->id, cause); ++ headers->status = H2_ERR_HTTP_1_1_REQUIRED; ++ break; ++ } + } + } + if (is_unsafe(r->server)) { +- apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, +- H2_HDR_CONFORMANCE_UNSAFE); ++ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE); ++ } ++ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) { ++ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0"); + } + return headers; + } +diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c +index db3cb63..fae77c7 100644 +--- a/modules/http2/h2_mplx.c ++++ b/modules/http2/h2_mplx.c +@@ -40,7 +40,6 @@ + #include "h2_ctx.h" + #include "h2_h2.h" + #include "h2_mplx.h" +-#include "h2_ngn_shed.h" + #include "h2_request.h" + #include "h2_stream.h" + #include "h2_session.h" +@@ -83,12 +82,6 @@ static void check_data_for(h2_mplx *m, h2_stream *stream, int lock); + static void stream_output_consumed(void *ctx, + h2_bucket_beam *beam, apr_off_t length) + { +- h2_stream *stream = ctx; +- h2_task *task = stream->task; +- +- if (length > 0 && task && task->assigned) { +- h2_req_engine_out_consumed(task->assigned, task->c, length); +- } + } + + static void stream_input_ev(void *ctx, h2_bucket_beam *beam) +@@ -136,7 +129,6 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) + } + else if (stream->task) { + stream->task->c->aborted = 1; +- apr_thread_cond_broadcast(m->task_thawed); + } + } + +@@ -151,25 +143,19 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) + * their HTTP/1 cousins, the separate allocator seems to work better + * than protecting a shared h2_session one with an own lock. + */ +-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, +- const h2_config *conf, ++h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, + h2_workers *workers) + { + apr_status_t status = APR_SUCCESS; + apr_allocator_t *allocator; + apr_thread_mutex_t *mutex; + h2_mplx *m; +- h2_ctx *ctx = h2_ctx_get(c, 0); +- ap_assert(conf); + + m = apr_pcalloc(parent, sizeof(h2_mplx)); + if (m) { + m->id = c->id; + m->c = c; +- m->s = (ctx? h2_ctx_server_get(ctx) : NULL); +- if (!m->s) { +- m->s = c->base_server; +- } ++ m->s = s; + + /* We create a pool with its own allocator to be used for + * processing slave connections. 
This is the only way to have the +@@ -204,14 +190,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, + return NULL; + } + +- status = apr_thread_cond_create(&m->task_thawed, m->pool); +- if (status != APR_SUCCESS) { +- apr_pool_destroy(m->pool); +- return NULL; +- } +- +- m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS); +- m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM); ++ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS); ++ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM); + + m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id)); +@@ -232,10 +212,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, + m->limit_change_interval = apr_time_from_msec(100); + + m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); +- +- m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, +- m->stream_max_mem); +- h2_ngn_shed_set_ctx(m->ngn_shed , m); + } + return m; + } +@@ -394,10 +370,10 @@ static int report_stream_iter(void *ctx, void *val) { + if (task) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ + H2_STRM_MSG(stream, "->03198: %s %s %s" +- "[started=%d/done=%d/frozen=%d]"), ++ "[started=%d/done=%d]"), + task->request->method, task->request->authority, + task->request->path, task->worker_started, +- task->worker_done, task->frozen); ++ task->worker_done); + } + else { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */ +@@ -436,7 +412,7 @@ static int stream_cancel_iter(void *ctx, void *val) { + void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + { + apr_status_t status; +- int i, wait_secs = 60; ++ int i, wait_secs = 60, old_aborted; + + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%ld): start release", m->id); +@@ -447,15 +423,19 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + + H2_MPLX_ENTER_ALWAYS(m); + ++ /* While really terminating any slave connections, treat the master ++ * connection as aborted. It's not as if we could send any more data ++ * at this point. */ ++ old_aborted = m->c->aborted; ++ m->c->aborted = 1; ++ + /* How to shut down a h2 connection: + * 1. cancel all streams still active */ + while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) { + /* until empty */ + } + +- /* 2. terminate ngn_shed, no more streams +- * should be scheduled or in the active set */ +- h2_ngn_shed_abort(m->ngn_shed); ++ /* 2. no more streams should be scheduled or in the active set */ + ap_assert(h2_ihash_empty(m->streams)); + ap_assert(h2_iq_empty(m->q)); + +@@ -479,10 +459,6 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + ap_assert(m->tasks_active == 0); + m->join_wait = NULL; + +- /* 4. close the h2_req_enginge shed */ +- h2_ngn_shed_destroy(m->ngn_shed); +- m->ngn_shed = NULL; +- + /* 4. 
With all workers done, all streams should be in spurge */ + if (!h2_ihash_empty(m->shold)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516) +@@ -491,6 +467,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + h2_ihash_iter(m->shold, unexpected_stream_iter, m); + } + ++ m->c->aborted = old_aborted; + H2_MPLX_LEAVE(m); + + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +@@ -787,47 +764,14 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask) + return rv; + } + +-static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) ++static void task_done(h2_mplx *m, h2_task *task) + { + h2_stream *stream; + +- if (task->frozen) { +- /* this task was handed over to an engine for processing +- * and the original worker has finished. That means the +- * engine may start processing now. */ +- h2_task_thaw(task); +- apr_thread_cond_broadcast(m->task_thawed); +- return; +- } +- + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, + "h2_mplx(%ld): task(%s) done", m->id, task->id); + out_close(m, task); + +- if (ngn) { +- apr_off_t bytes = 0; +- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ); +- bytes += h2_beam_get_buffered(task->output.beam); +- if (bytes > 0) { +- /* we need to report consumed and current buffered output +- * to the engine. The request will be streamed out or cancelled, +- * no more data is coming from it and the engine should update +- * its calculations before we destroy this information. */ +- h2_req_engine_out_consumed(ngn, task->c, bytes); +- } +- } +- +- if (task->engine) { +- if (!m->aborted && !task->c->aborted +- && !h2_req_engine_is_shutdown(task->engine)) { +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022) +- "h2_mplx(%ld): task(%s) has not-shutdown " +- "engine(%s)", m->id, task->id, +- h2_req_engine_get_id(task->engine)); +- } +- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine); +- } +- + task->worker_done = 1; + task->done_at = apr_time_now(); + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, +@@ -849,18 +793,24 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) + m->id, m->limit_active); + } + } +- ++ ++ ap_assert(task->done_done == 0); ++ + stream = h2_ihash_get(m->streams, task->stream_id); + if (stream) { + /* stream not done yet. */ + if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) { + /* reset and schedule again */ ++ task->worker_done = 0; + h2_task_redo(task); + h2_ihash_remove(m->sredo, stream->id); + h2_iq_add(m->q, stream->id, NULL, NULL); ++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c, ++ H2_STRM_MSG(stream, "redo, added to q")); + } + else { + /* stream not cleaned up, stay around */ ++ task->done_done = 1; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "task_done, stream open")); + if (stream->input) { +@@ -873,6 +823,7 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn) + } + else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) { + /* stream is done, was just waiting for this. 
*/ ++ task->done_done = 1; + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + H2_STRM_MSG(stream, "task_done, in hold")); + if (stream->input) { +@@ -897,7 +848,7 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) + { + H2_MPLX_ENTER_ALWAYS(m); + +- task_done(m, task, NULL); ++ task_done(m, task); + --m->tasks_active; + + if (m->join_wait) { +@@ -1091,142 +1042,6 @@ apr_status_t h2_mplx_idle(h2_mplx *m) + } + + /******************************************************************************* +- * HTTP/2 request engines +- ******************************************************************************/ +- +-typedef struct { +- h2_mplx * m; +- h2_req_engine *ngn; +- int streams_updated; +-} ngn_update_ctx; +- +-static int ngn_update_window(void *ctx, void *val) +-{ +- ngn_update_ctx *uctx = ctx; +- h2_stream *stream = val; +- if (stream->task && stream->task->assigned == uctx->ngn +- && output_consumed_signal(uctx->m, stream->task)) { +- ++uctx->streams_updated; +- } +- return 1; +-} +- +-static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn) +-{ +- ngn_update_ctx ctx; +- +- ctx.m = m; +- ctx.ngn = ngn; +- ctx.streams_updated = 0; +- h2_ihash_iter(m->streams, ngn_update_window, &ctx); +- +- return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN; +-} +- +-apr_status_t h2_mplx_req_engine_push(const char *ngn_type, +- request_rec *r, +- http2_req_engine_init *einit) +-{ +- apr_status_t status; +- h2_mplx *m; +- h2_task *task; +- h2_stream *stream; +- +- task = h2_ctx_rget_task(r); +- if (!task) { +- return APR_ECONNABORTED; +- } +- m = task->mplx; +- +- H2_MPLX_ENTER(m); +- +- stream = h2_ihash_get(m->streams, task->stream_id); +- if (stream) { +- status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit); +- } +- else { +- status = APR_ECONNABORTED; +- } +- +- H2_MPLX_LEAVE(m); +- return status; +-} +- +-apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn, +- apr_read_type_e block, +- int capacity, +- request_rec **pr) +-{ +- h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn); +- h2_mplx *m = h2_ngn_shed_get_ctx(shed); +- apr_status_t status; +- int want_shutdown; +- +- H2_MPLX_ENTER(m); +- +- want_shutdown = (block == APR_BLOCK_READ); +- +- /* Take this opportunity to update output consummation +- * for this engine */ +- ngn_out_update_windows(m, ngn); +- +- if (want_shutdown && !h2_iq_empty(m->q)) { +- /* For a blocking read, check first if requests are to be +- * had and, if not, wait a short while before doing the +- * blocking, and if unsuccessful, terminating read. 
+- */ +- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); +- if (APR_STATUS_IS_EAGAIN(status)) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +- "h2_mplx(%ld): start block engine pull", m->id); +- apr_thread_cond_timedwait(m->task_thawed, m->lock, +- apr_time_from_msec(20)); +- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr); +- } +- } +- else { +- status = h2_ngn_shed_pull_request(shed, ngn, capacity, +- want_shutdown, pr); +- } +- +- H2_MPLX_LEAVE(m); +- return status; +-} +- +-void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, +- apr_status_t status) +-{ +- h2_task *task = h2_ctx_cget_task(r_conn); +- +- if (task) { +- h2_mplx *m = task->mplx; +- h2_stream *stream; +- +- H2_MPLX_ENTER_ALWAYS(m); +- +- stream = h2_ihash_get(m->streams, task->stream_id); +- +- ngn_out_update_windows(m, ngn); +- h2_ngn_shed_done_task(m->ngn_shed, ngn, task); +- +- if (status != APR_SUCCESS && stream +- && h2_task_can_redo(task) +- && !h2_ihash_get(m->sredo, stream->id)) { +- h2_ihash_add(m->sredo, stream); +- } +- +- if (task->engine) { +- /* cannot report that as done until engine returns */ +- } +- else { +- task_done(m, task, ngn); +- } +- +- H2_MPLX_LEAVE(m); +- } +-} +- +-/******************************************************************************* + * mplx master events dispatching + ******************************************************************************/ + +diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h +index 2890b98..575ccaf 100644 +--- a/modules/http2/h2_mplx.h ++++ b/modules/http2/h2_mplx.h +@@ -47,8 +47,6 @@ struct h2_request; + struct apr_thread_cond_t; + struct h2_workers; + struct h2_iqueue; +-struct h2_ngn_shed; +-struct h2_req_engine; + + #include + +@@ -86,7 +84,6 @@ struct h2_mplx { + + apr_thread_mutex_t *lock; + struct apr_thread_cond_t *added_output; +- struct apr_thread_cond_t *task_thawed; + struct apr_thread_cond_t *join_wait; + + apr_size_t stream_max_mem; +@@ -95,8 +92,6 @@ struct h2_mplx { + apr_array_header_t *spare_slaves; /* spare slave connections */ + + struct h2_workers *workers; +- +- struct h2_ngn_shed *ngn_shed; + }; + + +@@ -111,8 +106,7 @@ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s); + * Create the multiplexer for the given HTTP2 session. + * Implicitly has reference count 1. 
+ */ +-h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master, +- const struct h2_config *conf, ++h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master, + struct h2_workers *workers); + + /** +@@ -303,28 +297,4 @@ APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \ + */ + apr_status_t h2_mplx_idle(h2_mplx *m); + +-/******************************************************************************* +- * h2_req_engine handling +- ******************************************************************************/ +- +-typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); +-typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine, +- const char *id, +- const char *type, +- apr_pool_t *pool, +- apr_size_t req_buffer_size, +- request_rec *r, +- h2_output_consumed **pconsumed, +- void **pbaton); +- +-apr_status_t h2_mplx_req_engine_push(const char *ngn_type, +- request_rec *r, +- h2_mplx_req_engine_init *einit); +-apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn, +- apr_read_type_e block, +- int capacity, +- request_rec **pr); +-void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn, +- apr_status_t status); +- + #endif /* defined(__mod_h2__h2_mplx__) */ +diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c +index fb85776..e69de29 100644 +--- a/modules/http2/h2_ngn_shed.c ++++ b/modules/http2/h2_ngn_shed.c +@@ -1,392 +0,0 @@ +-/* Licensed to the Apache Software Foundation (ASF) under one or more +- * contributor license agreements. See the NOTICE file distributed with +- * this work for additional information regarding copyright ownership. +- * The ASF licenses this file to You under the Apache License, Version 2.0 +- * (the "License"); you may not use this file except in compliance with +- * the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +- +-#include +-#include +-#include +- +-#include +-#include +-#include +-#include +- +-#include +-#include +-#include +- +-#include "mod_http2.h" +- +-#include "h2_private.h" +-#include "h2.h" +-#include "h2_config.h" +-#include "h2_conn.h" +-#include "h2_ctx.h" +-#include "h2_h2.h" +-#include "h2_mplx.h" +-#include "h2_request.h" +-#include "h2_task.h" +-#include "h2_util.h" +-#include "h2_ngn_shed.h" +- +- +-typedef struct h2_ngn_entry h2_ngn_entry; +-struct h2_ngn_entry { +- APR_RING_ENTRY(h2_ngn_entry) link; +- h2_task *task; +- request_rec *r; +-}; +- +-#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link) +-#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link) +-#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link) +- +-#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link) +-#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link) +-#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b) +-#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b) +- +-#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \ +-h2_ngn_entry *ap__b = (e); \ +-APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \ +-} while (0) +- +-#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \ +-h2_ngn_entry *ap__b = (e); \ +-APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \ +-} while (0) +- +-struct h2_req_engine { +- const char *id; /* identifier */ +- const char *type; /* name of the engine type */ +- apr_pool_t *pool; /* pool for engine specific allocations */ +- conn_rec *c; /* connection this engine is assigned to */ +- h2_task *task; /* the task this engine is based on, running in */ +- h2_ngn_shed *shed; +- +- unsigned int shutdown : 1; /* engine is being shut down */ +- unsigned int done : 1; /* engine has finished */ +- +- APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries; +- int capacity; /* maximum concurrent requests */ +- int no_assigned; /* # of assigned requests */ +- int no_live; /* # of live */ +- int no_finished; /* # of finished */ +- +- h2_output_consumed *out_consumed; +- void *out_consumed_ctx; +-}; +- +-const char *h2_req_engine_get_id(h2_req_engine *engine) +-{ +- return engine->id; +-} +- +-int h2_req_engine_is_shutdown(h2_req_engine *engine) +-{ +- return engine->shutdown; +-} +- +-void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, +- apr_off_t bytes) +-{ +- if (engine->out_consumed) { +- engine->out_consumed(engine->out_consumed_ctx, c, bytes); +- } +-} +- +-h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, +- int default_capacity, +- apr_size_t req_buffer_size) +-{ +- h2_ngn_shed *shed; +- +- shed = apr_pcalloc(pool, sizeof(*shed)); +- shed->c = c; +- shed->pool = pool; +- shed->default_capacity = default_capacity; +- shed->req_buffer_size = req_buffer_size; +- shed->ngns = apr_hash_make(pool); +- +- return shed; +-} +- +-void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx) +-{ +- shed->user_ctx = user_ctx; +-} +- +-void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed) +-{ +- return shed->user_ctx; +-} +- +-h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn) +-{ +- return ngn->shed; +-} +- +-void h2_ngn_shed_abort(h2_ngn_shed *shed) +-{ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394) +- "h2_ngn_shed(%ld): abort", shed->c->id); +- shed->aborted = 1; +-} +- +-static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r) +-{ +- h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry)); +- APR_RING_ELEM_INIT(entry, link); +- entry->task = task; +- entry->r = r; +- 
H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry); +- ngn->no_assigned++; +-} +- +- +-apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, +- request_rec *r, +- http2_req_engine_init *einit) +-{ +- h2_req_engine *ngn; +- h2_task *task = h2_ctx_rget_task(r); +- +- ap_assert(task); +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id, +- task->id); +- if (task->request->serialize) { +- /* Max compatibility, deny processing of this */ +- return APR_EOF; +- } +- +- if (task->assigned) { +- --task->assigned->no_assigned; +- --task->assigned->no_live; +- task->assigned = NULL; +- } +- +- if (task->engine) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, +- "h2_ngn_shed(%ld): push task(%s) hosting engine %s " +- "already with %d tasks", +- shed->c->id, task->id, task->engine->id, +- task->engine->no_assigned); +- task->assigned = task->engine; +- ngn_add_task(task->engine, task, r); +- return APR_SUCCESS; +- } +- +- ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING); +- if (ngn && !ngn->shutdown) { +- /* this task will be processed in another thread, +- * freeze any I/O for the time being. */ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c, +- "h2_ngn_shed(%ld): pushing request %s to %s", +- shed->c->id, task->id, ngn->id); +- if (!h2_task_has_thawed(task)) { +- h2_task_freeze(task); +- } +- ngn_add_task(ngn, task, r); +- return APR_SUCCESS; +- } +- +- /* no existing engine or being shut down, start a new one */ +- if (einit) { +- apr_status_t status; +- apr_pool_t *pool = task->pool; +- h2_req_engine *newngn; +- +- newngn = apr_pcalloc(pool, sizeof(*ngn)); +- newngn->pool = pool; +- newngn->id = apr_psprintf(pool, "ngn-%s", task->id); +- newngn->type = apr_pstrdup(pool, ngn_type); +- newngn->c = task->c; +- newngn->shed = shed; +- newngn->capacity = shed->default_capacity; +- newngn->no_assigned = 1; +- newngn->no_live = 1; +- APR_RING_INIT(&newngn->entries, h2_ngn_entry, link); +- +- status = einit(newngn, newngn->id, newngn->type, newngn->pool, +- shed->req_buffer_size, r, +- &newngn->out_consumed, &newngn->out_consumed_ctx); +- +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395) +- "h2_ngn_shed(%ld): create engine %s (%s)", +- shed->c->id, newngn->id, newngn->type); +- if (status == APR_SUCCESS) { +- newngn->task = task; +- task->engine = newngn; +- task->assigned = newngn; +- apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn); +- } +- return status; +- } +- return APR_EOF; +-} +- +-static h2_ngn_entry *pop_detached(h2_req_engine *ngn) +-{ +- h2_ngn_entry *entry; +- for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); +- entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); +- entry = H2_NGN_ENTRY_NEXT(entry)) { +- if (h2_task_has_thawed(entry->task) +- || (entry->task->engine == ngn)) { +- /* The task hosting this engine can always be pulled by it. +- * For other task, they need to become detached, e.g. no longer +- * assigned to another worker. 
*/ +- H2_NGN_ENTRY_REMOVE(entry); +- return entry; +- } +- } +- return NULL; +-} +- +-apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, +- h2_req_engine *ngn, +- int capacity, +- int want_shutdown, +- request_rec **pr) +-{ +- h2_ngn_entry *entry; +- +- ap_assert(ngn); +- *pr = NULL; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396) +- "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d", +- shed->c->id, ngn->id, want_shutdown); +- if (shed->aborted) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397) +- "h2_ngn_shed(%ld): abort while pulling requests %s", +- shed->c->id, ngn->id); +- ngn->shutdown = 1; +- return APR_ECONNABORTED; +- } +- +- ngn->capacity = capacity; +- if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { +- if (want_shutdown) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s", +- shed->c->id, ngn->id); +- ngn->shutdown = 1; +- } +- return ngn->shutdown? APR_EOF : APR_EAGAIN; +- } +- +- if ((entry = pop_detached(ngn))) { +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398) +- "h2_ngn_shed(%ld): pulled request %s for engine %s", +- shed->c->id, entry->task->id, ngn->id); +- ngn->no_live++; +- *pr = entry->r; +- entry->task->assigned = ngn; +- /* task will now run in ngn's own thread. Modules like lua +- * seem to require the correct thread set in the conn_rec. +- * See PR 59542. */ +- if (entry->task->c && ngn->c) { +- entry->task->c->current_thread = ngn->c->current_thread; +- } +- if (entry->task->engine == ngn) { +- /* If an engine pushes its own base task, and then pulls +- * it back to itself again, it needs to be thawed. +- */ +- h2_task_thaw(entry->task); +- } +- return APR_SUCCESS; +- } +- +- if (1) { +- h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399) +- "h2_ngn_shed(%ld): pull task, nothing, first task %s", +- shed->c->id, entry->task->id); +- } +- return APR_EAGAIN; +-} +- +-static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn, +- h2_task *task, int waslive, int aborted) +-{ +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400) +- "h2_ngn_shed(%ld): task %s %s by %s", +- shed->c->id, task->id, aborted? 
"aborted":"done", ngn->id); +- ngn->no_finished++; +- if (waslive) ngn->no_live--; +- ngn->no_assigned--; +- task->assigned = NULL; +- +- return APR_SUCCESS; +-} +- +-apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, +- struct h2_req_engine *ngn, h2_task *task) +-{ +- return ngn_done_task(shed, ngn, task, 1, 0); +-} +- +-void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn) +-{ +- if (ngn->done) { +- return; +- } +- +- if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) { +- h2_ngn_entry *entry; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): exit engine %s (%s), " +- "has still requests queued, shutdown=%d," +- "assigned=%ld, live=%ld, finished=%ld", +- shed->c->id, ngn->id, ngn->type, +- ngn->shutdown, +- (long)ngn->no_assigned, (long)ngn->no_live, +- (long)ngn->no_finished); +- for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries); +- entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries); +- entry = H2_NGN_ENTRY_NEXT(entry)) { +- h2_task *task = entry->task; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): engine %s has queued task %s, " +- "frozen=%d, aborting", +- shed->c->id, ngn->id, task->id, task->frozen); +- ngn_done_task(shed, ngn, task, 0, 1); +- task->engine = task->assigned = NULL; +- } +- } +- if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): exit engine %s (%s), " +- "assigned=%ld, live=%ld, finished=%ld", +- shed->c->id, ngn->id, ngn->type, +- (long)ngn->no_assigned, (long)ngn->no_live, +- (long)ngn->no_finished); +- } +- else { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, +- "h2_ngn_shed(%ld): exit engine %s", +- shed->c->id, ngn->id); +- } +- +- apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL); +- ngn->done = 1; +-} +- +-void h2_ngn_shed_destroy(h2_ngn_shed *shed) +-{ +- ap_assert(apr_hash_count(shed->ngns) == 0); +-} +- +diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h +index 7764c18..e69de29 100644 +--- a/modules/http2/h2_ngn_shed.h ++++ b/modules/http2/h2_ngn_shed.h +@@ -1,79 +0,0 @@ +-/* Licensed to the Apache Software Foundation (ASF) under one or more +- * contributor license agreements. See the NOTICE file distributed with +- * this work for additional information regarding copyright ownership. +- * The ASF licenses this file to You under the Apache License, Version 2.0 +- * (the "License"); you may not use this file except in compliance with +- * the License. You may obtain a copy of the License at +- * +- * http://www.apache.org/licenses/LICENSE-2.0 +- * +- * Unless required by applicable law or agreed to in writing, software +- * distributed under the License is distributed on an "AS IS" BASIS, +- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +- * See the License for the specific language governing permissions and +- * limitations under the License. 
+- */ +- +-#ifndef h2_req_shed_h +-#define h2_req_shed_h +- +-struct h2_req_engine; +-struct h2_task; +- +-typedef struct h2_ngn_shed h2_ngn_shed; +-struct h2_ngn_shed { +- conn_rec *c; +- apr_pool_t *pool; +- apr_hash_t *ngns; +- void *user_ctx; +- +- unsigned int aborted : 1; +- +- int default_capacity; +- apr_size_t req_buffer_size; /* preferred buffer size for responses */ +-}; +- +-const char *h2_req_engine_get_id(h2_req_engine *engine); +-int h2_req_engine_is_shutdown(h2_req_engine *engine); +- +-void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c, +- apr_off_t bytes); +- +-typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine, +- const char *id, +- const char *type, +- apr_pool_t *pool, +- apr_size_t req_buffer_size, +- request_rec *r, +- h2_output_consumed **pconsumed, +- void **pbaton); +- +-h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c, +- int default_capactiy, +- apr_size_t req_buffer_size); +- +-void h2_ngn_shed_destroy(h2_ngn_shed *shed); +- +-void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx); +-void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed); +- +-h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn); +- +-void h2_ngn_shed_abort(h2_ngn_shed *shed); +- +-apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type, +- request_rec *r, +- h2_shed_ngn_init *init_cb); +- +-apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn, +- int capacity, +- int want_shutdown, request_rec **pr); +- +-apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed, +- struct h2_req_engine *ngn, +- struct h2_task *task); +- +-void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn); +- +- +-#endif /* h2_req_shed_h */ +diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c +index 8389c7c..3a2718f 100644 +--- a/modules/http2/h2_proxy_session.c ++++ b/modules/http2/h2_proxy_session.c +@@ -429,12 +429,6 @@ static int stream_response_data(nghttp2_session *ngh2, uint8_t flags, + stream_id, NGHTTP2_STREAM_CLOSED); + return NGHTTP2_ERR_STREAM_CLOSING; + } +- if (stream->standalone) { +- nghttp2_session_consume(ngh2, stream_id, len); +- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r, +- "h2_proxy_session(%s): stream %d, win_update %d bytes", +- session->id, stream_id, (int)len); +- } + return 0; + } + +@@ -641,7 +635,7 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, + + nghttp2_option_new(&option); + nghttp2_option_set_peer_max_concurrent_streams(option, 100); +- nghttp2_option_set_no_auto_window_update(option, 1); ++ nghttp2_option_set_no_auto_window_update(option, 0); + + nghttp2_session_client_new2(&session->ngh2, cbs, session, option); + +@@ -653,10 +647,12 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn, + } + else { + h2_proxy_session *session = p_conn->data; +- apr_interval_time_t age = apr_time_now() - session->last_frame_received; +- if (age > apr_time_from_sec(1)) { +- session->check_ping = 1; +- nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup"); ++ if (!session->check_ping) { ++ apr_interval_time_t age = apr_time_now() - session->last_frame_received; ++ if (age > apr_time_from_sec(1)) { ++ session->check_ping = 1; ++ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup"); ++ } + } + } + return p_conn->data; +@@ -1543,42 +1539,3 @@ typedef struct { + int updated; + } win_update_ctx; + +-static int win_update_iter(void *udata, void *val) +-{ +- 
win_update_ctx *ctx = udata; +- h2_proxy_stream *stream = val; +- +- if (stream->r && stream->r->connection == ctx->c) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c, +- "h2_proxy_session(%s-%d): win_update %ld bytes", +- ctx->session->id, (int)stream->id, (long)ctx->bytes); +- nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes); +- ctx->updated = 1; +- return 0; +- } +- return 1; +-} +- +- +-void h2_proxy_session_update_window(h2_proxy_session *session, +- conn_rec *c, apr_off_t bytes) +-{ +- if (!h2_proxy_ihash_empty(session->streams)) { +- win_update_ctx ctx; +- ctx.session = session; +- ctx.c = c; +- ctx.bytes = bytes; +- ctx.updated = 0; +- h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx); +- +- if (!ctx.updated) { +- /* could not find the stream any more, possibly closed, update +- * the connection window at least */ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, +- "h2_proxy_session(%s): win_update conn %ld bytes", +- session->id, (long)bytes); +- nghttp2_session_consume_connection(session->ngh2, (size_t)bytes); +- } +- } +-} +- +diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h +index ecebb61..1d0750b 100644 +--- a/modules/http2/h2_proxy_session.h ++++ b/modules/http2/h2_proxy_session.h +@@ -120,9 +120,6 @@ void h2_proxy_session_cancel_all(h2_proxy_session *s); + + void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done); + +-void h2_proxy_session_update_window(h2_proxy_session *s, +- conn_rec *c, apr_off_t bytes); +- + #define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url" + + #endif /* h2_proxy_session_h */ +diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c +index 5ee88e9..5893c8b 100644 +--- a/modules/http2/h2_request.c ++++ b/modules/http2/h2_request.c +@@ -17,6 +17,7 @@ + #include + + #include ++#include + + #include + #include +@@ -84,8 +85,7 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool, + req->path = path; + req->headers = apr_table_make(pool, 10); + if (r->server) { +- req->serialize = h2_config_geti(h2_config_sget(r->server), +- H2_CONF_SER_HEADERS); ++ req->serialize = h2_config_rgeti(r, H2_CONF_SER_HEADERS); + } + + x.pool = pool; +@@ -206,13 +206,11 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src) + return dst; + } + +-request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) ++#if !AP_MODULE_MAGIC_AT_LEAST(20150222, 13) ++static request_rec *my_ap_create_request(conn_rec *c) + { +- int access_status = HTTP_OK; +- const char *rpath; + apr_pool_t *p; + request_rec *r; +- const char *s; + + apr_pool_create(&p, c->pool); + apr_pool_tag(p, "request"); +@@ -226,8 +224,8 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) + r->ap_auth_type = NULL; + + r->allowed_methods = ap_make_method_list(p, 2); +- +- r->headers_in = apr_table_clone(r->pool, req->headers); ++ ++ r->headers_in = apr_table_make(r->pool, 5); + r->trailers_in = apr_table_make(r->pool, 5); + r->subprocess_env = apr_table_make(r->pool, 25); + r->headers_out = apr_table_make(r->pool, 12); +@@ -262,6 +260,24 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) + r->useragent_addr = c->client_addr; + r->useragent_ip = c->client_ip; + ++ return r; ++} ++#endif ++ ++request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c) ++{ ++ int access_status = HTTP_OK; ++ const char *rpath; ++ const char *s; ++ ++#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13) ++ request_rec *r = 
ap_create_request(c); ++#else ++ request_rec *r = my_ap_create_request(c); ++#endif ++ ++ r->headers_in = apr_table_clone(r->pool, req->headers); ++ + ap_run_pre_read_request(r, c); + + /* Time to populate r with the data we have. */ +@@ -337,3 +353,4 @@ traceout: + } + + ++ +diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c +index 3f0e9c9..f153422 100644 +--- a/modules/http2/h2_session.c ++++ b/modules/http2/h2_session.c +@@ -495,9 +495,7 @@ static int on_send_data_cb(nghttp2_session *ngh2, + return NGHTTP2_ERR_WOULDBLOCK; + } + +- if (frame->data.padlen > H2_MAX_PADLEN) { +- return NGHTTP2_ERR_PROTO; +- } ++ ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1)); + padlen = (unsigned char)frame->data.padlen; + + stream = h2_session_stream_get(session, stream_id); +@@ -513,8 +511,9 @@ static int on_send_data_cb(nghttp2_session *ngh2, + H2_STRM_MSG(stream, "send_data_cb for %ld bytes"), + (long)length); + +- status = h2_conn_io_write(&session->io, (const char *)framehd, 9); ++ status = h2_conn_io_write(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN); + if (padlen && status == APR_SUCCESS) { ++ --padlen; + status = h2_conn_io_write(&session->io, (const char *)&padlen, 1); + } + +@@ -622,6 +621,39 @@ static int on_invalid_header_cb(nghttp2_session *ngh2, + } + #endif + ++static ssize_t select_padding_cb(nghttp2_session *ngh2, ++ const nghttp2_frame *frame, ++ size_t max_payloadlen, void *user_data) ++{ ++ h2_session *session = user_data; ++ ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */ ++ ssize_t padded_len = frame_len; ++ ++ /* Determine # of padding bytes to append to frame. Unless session->padding_always ++ * the number my be capped by the ui.write_size that currently applies. ++ */ ++ if (session->padding_max) { ++ int n = ap_random_pick(0, session->padding_max); ++ padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n); ++ } ++ ++ if (padded_len != frame_len) { ++ if (!session->padding_always && session->io.write_size ++ && (padded_len > session->io.write_size) ++ && (frame_len <= session->io.write_size)) { ++ padded_len = session->io.write_size; ++ } ++ if (APLOGctrace2(session->c)) { ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c, ++ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)", ++ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN, ++ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size); ++ } ++ return padded_len - H2_FRAME_HDR_LEN; ++ } ++ return frame->hd.length; ++} ++ + #define NGH2_SET_CALLBACK(callbacks, name, fn)\ + nghttp2_session_callbacks_set_##name##_callback(callbacks, fn) + +@@ -647,6 +679,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb) + #ifdef H2_NG2_INVALID_HEADER_CB + NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb); + #endif ++ NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb); + return APR_SUCCESS; + } + +@@ -757,9 +790,8 @@ static apr_status_t session_pool_cleanup(void *data) + { + conn_rec *c = data; + h2_session *session; +- h2_ctx *ctx = h2_ctx_get(c, 0); + +- if (ctx && (session = h2_ctx_session_get(ctx))) { ++ if ((session = h2_ctx_get_session(c))) { + /* if the session is still there, now is the last chance + * to perform cleanup. Normally, cleanup should have happened + * earlier in the connection pre_close. 
Main reason is that +@@ -775,11 +807,8 @@ static apr_status_t session_pool_cleanup(void *data) + return APR_SUCCESS; + } + +-static apr_status_t h2_session_create_int(h2_session **psession, +- conn_rec *c, +- request_rec *r, +- h2_ctx *ctx, +- h2_workers *workers) ++apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r, ++ server_rec *s, h2_workers *workers) + { + nghttp2_session_callbacks *callbacks = NULL; + nghttp2_option *options = NULL; +@@ -820,19 +849,16 @@ static apr_status_t h2_session_create_int(h2_session **psession, + session->id = c->id; + session->c = c; + session->r = r; +- session->s = h2_ctx_server_get(ctx); ++ session->s = s; + session->pool = pool; +- session->config = h2_config_sget(session->s); + session->workers = workers; + + session->state = H2_SESSION_ST_INIT; + session->local.accepting = 1; + session->remote.accepting = 1; + +- session->max_stream_count = h2_config_geti(session->config, +- H2_CONF_MAX_STREAMS); +- session->max_stream_mem = h2_config_geti(session->config, +- H2_CONF_STREAM_MAX_MEM); ++ session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS); ++ session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM); + + status = apr_thread_cond_create(&session->iowait, session->pool); + if (status != APR_SUCCESS) { +@@ -862,14 +888,18 @@ static apr_status_t h2_session_create_int(h2_session **psession, + session->monitor->on_state_event = on_stream_state_event; + session->monitor->on_event = on_stream_event; + +- session->mplx = h2_mplx_create(c, session->pool, session->config, +- workers); ++ session->mplx = h2_mplx_create(c, s, session->pool, workers); + + /* connection input filter that feeds the session */ + session->cin = h2_filter_cin_create(session); + ap_add_input_filter("H2_IN", session->cin, r, c); + +- h2_conn_io_init(&session->io, c, session->config); ++ h2_conn_io_init(&session->io, c, s); ++ session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS); ++ if (session->padding_max) { ++ session->padding_max = (0x01 << session->padding_max) - 1; ++ } ++ session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS); + session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc); + + status = init_callbacks(c, &callbacks); +@@ -888,8 +918,7 @@ static apr_status_t h2_session_create_int(h2_session **psession, + apr_pool_destroy(pool); + return status; + } +- nghttp2_option_set_peer_max_concurrent_streams( +- options, (uint32_t)session->max_stream_count); ++ nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count); + /* We need to handle window updates ourself, otherwise we + * get flooded by nghttp2. 
*/ + nghttp2_option_set_no_auto_window_update(options, 1); +@@ -907,7 +936,7 @@ static apr_status_t h2_session_create_int(h2_session **psession, + return APR_ENOMEM; + } + +- n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE); ++ n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE); + session->push_diary = h2_push_diary_create(session->pool, n); + + if (APLOGcdebug(c)) { +@@ -924,22 +953,11 @@ static apr_status_t h2_session_create_int(h2_session **psession, + (int)session->push_diary->N); + } + +- apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup); ++ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup); ++ + return APR_SUCCESS; + } + +-apr_status_t h2_session_create(h2_session **psession, +- conn_rec *c, h2_ctx *ctx, h2_workers *workers) +-{ +- return h2_session_create_int(psession, c, NULL, ctx, workers); +-} +- +-apr_status_t h2_session_rcreate(h2_session **psession, +- request_rec *r, h2_ctx *ctx, h2_workers *workers) +-{ +- return h2_session_create_int(psession, r->connection, r, ctx, workers); +-} +- + static apr_status_t h2_session_start(h2_session *session, int *rv) + { + apr_status_t status = APR_SUCCESS; +@@ -1004,7 +1022,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv) + settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS; + settings[slen].value = (uint32_t)session->max_stream_count; + ++slen; +- win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE); ++ win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE); + if (win_size != H2_INITIAL_WINDOW_SIZE) { + settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE; + settings[slen].value = win_size; +@@ -1276,7 +1294,7 @@ int h2_session_push_enabled(h2_session *session) + { + /* iff we can and they can and want */ + return (session->remote.accepting /* remote GOAWAY received */ +- && h2_config_geti(session->config, H2_CONF_PUSH) ++ && h2_config_sgeti(session->s, H2_CONF_PUSH) + && nghttp2_session_get_remote_settings(session->ngh2, + NGHTTP2_SETTINGS_ENABLE_PUSH)); + } +@@ -1320,6 +1338,7 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, + int eos) + { + apr_status_t status = APR_SUCCESS; ++ const char *s; + int rv = 0; + + ap_assert(session); +@@ -1387,8 +1406,12 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, + && (headers->status < 400) + && (headers->status != 304) + && h2_session_push_enabled(session)) { +- +- h2_stream_submit_pushes(stream, headers); ++ /* PUSH is possibe and enabled on server, unless the request ++ * denies it, submit resources to push */ ++ s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE); ++ if (!s || strcmp(s, "0")) { ++ h2_stream_submit_pushes(stream, headers); ++ } + } + + if (!stream->pref_priority) { +@@ -1410,7 +1433,7 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream, + } + + if (headers->status == 103 +- && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) { ++ && !h2_config_sgeti(session->s, H2_CONF_EARLY_HINTS)) { + /* suppress sending this to the client, it might have triggered + * pushes and served its purpose nevertheless */ + rv = 0; +@@ -2086,7 +2109,7 @@ apr_status_t h2_session_process(h2_session *session, int async) + switch (session->state) { + case H2_SESSION_ST_INIT: + ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c); +- if (!h2_is_acceptable_connection(c, 1)) { ++ if (!h2_is_acceptable_connection(c, session->r, 1)) { + update_child_status(session, SERVER_BUSY_READ, + "inadequate security"); + 
h2_session_shutdown(session, +diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h +index df2a862..1bf6f05 100644 +--- a/modules/http2/h2_session.h ++++ b/modules/http2/h2_session.h +@@ -80,12 +80,13 @@ typedef struct h2_session { + request_rec *r; /* the request that started this in case + * of 'h2c', NULL otherwise */ + server_rec *s; /* server/vhost we're starting on */ +- const struct h2_config *config; /* Relevant config for this session */ + apr_pool_t *pool; /* pool to use in session */ + struct h2_mplx *mplx; /* multiplexer for stream data */ + struct h2_workers *workers; /* for executing stream tasks */ + struct h2_filter_cin *cin; /* connection input filter context */ + h2_conn_io io; /* io on httpd conn filters */ ++ int padding_max; /* max number of padding bytes */ ++ int padding_always; /* padding has precedence over I/O optimizations */ + struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */ + + h2_session_state state; /* state session is in */ +@@ -142,27 +143,15 @@ const char *h2_session_state_str(h2_session_state state); + * The session will apply the configured parameter. + * @param psession pointer receiving the created session on success or NULL + * @param c the connection to work on ++ * @param r optional request when protocol was upgraded + * @param cfg the module config to apply + * @param workers the worker pool to use + * @return the created session + */ + apr_status_t h2_session_create(h2_session **psession, +- conn_rec *c, struct h2_ctx *ctx, ++ conn_rec *c, request_rec *r, server_rec *, + struct h2_workers *workers); + +-/** +- * Create a new h2_session for the given request. +- * The session will apply the configured parameter. +- * @param psession pointer receiving the created session on success or NULL +- * @param r the request that was upgraded +- * @param cfg the module config to apply +- * @param workers the worker pool to use +- * @return the created session +- */ +-apr_status_t h2_session_rcreate(h2_session **psession, +- request_rec *r, struct h2_ctx *ctx, +- struct h2_workers *workers); +- + void h2_session_event(h2_session *session, h2_session_event_t ev, + int err, const char *msg); + +diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c +index 22c5902..b5763ac 100644 +--- a/modules/http2/h2_stream.c ++++ b/modules/http2/h2_stream.c +@@ -365,9 +365,8 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev) + static void set_policy_for(h2_stream *stream, h2_request *r) + { + int enabled = h2_session_push_enabled(stream->session); +- stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, +- enabled); +- r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS); ++ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled); ++ r->serialize = h2_config_sgeti(stream->session->s, H2_CONF_SER_HEADERS); + } + + apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len) +@@ -855,7 +854,7 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen, + * is requested. But we can reduce the size in case the master + * connection operates in smaller chunks. (TSL warmup) */ + if (stream->session->io.write_size > 0) { +- max_chunk = stream->session->io.write_size - 9; /* header bits */ ++ max_chunk = stream->session->io.write_size - H2_FRAME_HDR_LEN; + } + requested = (*plen > 0)? 
H2MIN(*plen, max_chunk) : max_chunk; + +@@ -987,7 +986,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream, + const char *ctype = apr_table_get(response->headers, "content-type"); + if (ctype) { + /* FIXME: Not good enough, config needs to come from request->server */ +- return h2_config_get_priority(stream->session->config, ctype); ++ return h2_cconfig_get_priority(stream->session->c, ctype); + } + } + return NULL; +diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c +index 5e73568..07a30cc 100644 +--- a/modules/http2/h2_switch.c ++++ b/modules/http2/h2_switch.c +@@ -55,7 +55,6 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, + int is_tls = h2_h2_is_tls(c); + const char **protos = is_tls? h2_tls_protos : h2_clear_protos; + +- (void)s; + if (!h2_mpm_supported()) { + return DECLINED; + } +@@ -68,7 +67,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, + return DECLINED; + } + +- if (!h2_is_acceptable_connection(c, 0)) { ++ if (!h2_is_acceptable_connection(c, r, 0)) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084) + "protocol propose: connection requirements not met"); + return DECLINED; +@@ -81,7 +80,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r, + */ + const char *p; + +- if (!h2_allows_h2_upgrade(c)) { ++ if (!h2_allows_h2_upgrade(r)) { + return DECLINED; + } + +@@ -150,7 +149,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "switching protocol to '%s'", protocol); + h2_ctx_protocol_set(ctx, protocol); +- h2_ctx_server_set(ctx, s); ++ h2_ctx_server_update(ctx, s); + + if (r != NULL) { + apr_status_t status; +@@ -164,8 +163,8 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, + ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER"); + + /* Ok, start an h2_conn on this one. */ +- h2_ctx_server_set(ctx, r->server); +- status = h2_conn_setup(ctx, r->connection, r); ++ status = h2_conn_setup(c, r, s); ++ + if (status != APR_SUCCESS) { + ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088) + "session setup"); +@@ -173,7 +172,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s, + return !OK; + } + +- h2_conn_run(ctx, c); ++ h2_conn_run(c); + } + return OK; + } +diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c +index f4c875c..a395807 100644 +--- a/modules/http2/h2_task.c ++++ b/modules/http2/h2_task.c +@@ -97,7 +97,7 @@ static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block) + apr_brigade_length(bb, 0, &written); + H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out"); + h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)"); +- /* engines send unblocking */ ++ + status = h2_beam_send(task->output.beam, bb, + block? 
APR_BLOCK_READ : APR_NONBLOCK_READ); + h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)"); +@@ -133,26 +133,9 @@ static apr_status_t slave_out(h2_task *task, ap_filter_t* f, + apr_status_t rv = APR_SUCCESS; + int flush = 0, blocking; + +- if (task->frozen) { +- h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2, +- "frozen task output write, ignored", bb); +- while (!APR_BRIGADE_EMPTY(bb)) { +- b = APR_BRIGADE_FIRST(bb); +- if (AP_BUCKET_IS_EOR(b)) { +- APR_BUCKET_REMOVE(b); +- task->eor = b; +- } +- else { +- apr_bucket_delete(b); +- } +- } +- return APR_SUCCESS; +- } +- + send: +- /* we send block once we opened the output, so someone is there +- * reading it *and* the task is not assigned to a h2_req_engine */ +- blocking = (!task->assigned && task->output.opened); ++ /* we send block once we opened the output, so someone is there reading it */ ++ blocking = task->output.opened; + for (b = APR_BRIGADE_FIRST(bb); + b != APR_BRIGADE_SENTINEL(bb); + b = APR_BUCKET_NEXT(b)) { +@@ -236,7 +219,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, + apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)? + (apr_size_t)readbytes : APR_SIZE_MAX); + +- task = h2_ctx_cget_task(f->c); ++ task = h2_ctx_get_task(f->c); + ap_assert(task); + + if (trace1) { +@@ -379,7 +362,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f, + static apr_status_t h2_filter_slave_output(ap_filter_t* filter, + apr_bucket_brigade* brigade) + { +- h2_task *task = h2_ctx_cget_task(filter->c); ++ h2_task *task = h2_ctx_get_task(filter->c); + apr_status_t status; + + ap_assert(task); +@@ -392,7 +375,7 @@ static apr_status_t h2_filter_slave_output(ap_filter_t* filter, + + static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb) + { +- h2_task *task = h2_ctx_cget_task(f->c); ++ h2_task *task = h2_ctx_get_task(f->c); + apr_status_t status; + + ap_assert(task); +@@ -502,7 +485,7 @@ static int h2_task_pre_conn(conn_rec* c, void *arg) + + ctx = h2_ctx_get(c, 0); + (void)arg; +- if (h2_ctx_is_task(ctx)) { ++ if (ctx->task) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, + "h2_slave(%s), pre_connection, adding filters", c->log_id); + ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c); +@@ -525,6 +508,7 @@ h2_task *h2_task_create(conn_rec *slave, int stream_id, + ap_assert(req); + + apr_pool_create(&pool, slave->pool); ++ apr_pool_tag(pool, "h2_task"); + task = apr_pcalloc(pool, sizeof(h2_task)); + if (task == NULL) { + return NULL; +@@ -633,18 +617,9 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) + task->c->current_thread = thread; + ap_run_process_connection(c); + +- if (task->frozen) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "h2_task(%s): process_conn returned frozen task", +- task->id); +- /* cleanup delayed */ +- return APR_EAGAIN; +- } +- else { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "h2_task(%s): processing done", task->id); +- return output_finish(task); +- } ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, ++ "h2_task(%s): processing done", task->id); ++ return output_finish(task); + } + + static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c) +@@ -682,14 +657,8 @@ static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c) + + ap_process_request(r); + +- if (task->frozen) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "h2_task(%s): process_request frozen", task->id); +- } +- else { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "h2_task(%s): process_request done", task->id); 
+- } ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, ++ "h2_task(%s): process_request done", task->id); + + /* After the call to ap_process_request, the + * request pool may have been deleted. We set +@@ -724,7 +693,7 @@ static int h2_task_process_conn(conn_rec* c) + } + + ctx = h2_ctx_get(c, 0); +- if (h2_ctx_is_task(ctx)) { ++ if (ctx->task) { + if (!ctx->task->request->serialize) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, + "h2_h2, processing request directly"); +@@ -741,28 +710,3 @@ static int h2_task_process_conn(conn_rec* c) + return DECLINED; + } + +-apr_status_t h2_task_freeze(h2_task *task) +-{ +- if (!task->frozen) { +- task->frozen = 1; +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406) +- "h2_task(%s), frozen", task->id); +- } +- return APR_SUCCESS; +-} +- +-apr_status_t h2_task_thaw(h2_task *task) +-{ +- if (task->frozen) { +- task->frozen = 0; +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407) +- "h2_task(%s), thawed", task->id); +- } +- task->thawed = 1; +- return APR_SUCCESS; +-} +- +-int h2_task_has_thawed(h2_task *task) +-{ +- return task->thawed; +-} +diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h +index ab6a746..20be429 100644 +--- a/modules/http2/h2_task.h ++++ b/modules/http2/h2_task.h +@@ -42,7 +42,6 @@ struct h2_bucket_beam; + struct h2_conn; + struct h2_mplx; + struct h2_task; +-struct h2_req_engine; + struct h2_request; + struct h2_response_parser; + struct h2_stream; +@@ -80,17 +79,14 @@ struct h2_task { + struct h2_mplx *mplx; + + unsigned int filters_set : 1; +- unsigned int frozen : 1; +- unsigned int thawed : 1; + unsigned int worker_started : 1; /* h2_worker started processing */ +- unsigned int worker_done : 1; /* h2_worker finished */ ++ ++ int worker_done; /* h2_worker finished */ ++ int done_done; /* task_done has been handled */ + + apr_time_t started_at; /* when processing started */ + apr_time_t done_at; /* when processing was done */ + apr_bucket *eor; +- +- struct h2_req_engine *engine; /* engine hosted by this task */ +- struct h2_req_engine *assigned; /* engine that task has been assigned to */ + }; + + h2_task *h2_task_create(conn_rec *slave, int stream_id, +@@ -120,8 +116,4 @@ apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s); + extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in; + extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out; + +-apr_status_t h2_task_freeze(h2_task *task); +-apr_status_t h2_task_thaw(h2_task *task); +-int h2_task_has_thawed(h2_task *task); +- + #endif /* defined(__mod_h2__h2_task__) */ +diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c +index 3d278e9..5664f39 100644 +--- a/modules/http2/mod_http2.c ++++ b/modules/http2/mod_http2.c +@@ -172,27 +172,6 @@ static char *http2_var_lookup(apr_pool_t *, server_rec *, + conn_rec *, request_rec *, char *name); + static int http2_is_h2(conn_rec *); + +-static apr_status_t http2_req_engine_push(const char *ngn_type, +- request_rec *r, +- http2_req_engine_init *einit) +-{ +- return h2_mplx_req_engine_push(ngn_type, r, einit); +-} +- +-static apr_status_t http2_req_engine_pull(h2_req_engine *ngn, +- apr_read_type_e block, +- int capacity, +- request_rec **pr) +-{ +- return h2_mplx_req_engine_pull(ngn, block, capacity, pr); +-} +- +-static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn, +- apr_status_t status) +-{ +- h2_mplx_req_engine_done(ngn, r_conn, status); +-} +- + static void http2_get_num_workers(server_rec *s, int 
*minw, int *maxw) + { + h2_get_num_workers(s, minw, maxw); +@@ -220,9 +199,6 @@ static void h2_hooks(apr_pool_t *pool) + + APR_REGISTER_OPTIONAL_FN(http2_is_h2); + APR_REGISTER_OPTIONAL_FN(http2_var_lookup); +- APR_REGISTER_OPTIONAL_FN(http2_req_engine_push); +- APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull); +- APR_REGISTER_OPTIONAL_FN(http2_req_engine_done); + APR_REGISTER_OPTIONAL_FN(http2_get_num_workers); + + ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks"); +@@ -260,9 +236,8 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, + { + if (ctx) { + if (r) { +- h2_task *task = h2_ctx_get_task(ctx); +- if (task) { +- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); ++ if (ctx->task) { ++ h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id); + if (stream && stream->push_policy != H2_PUSH_NONE) { + return "on"; + } +@@ -273,8 +248,7 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s, + } + } + else if (s) { +- const h2_config *cfg = h2_config_sget(s); +- if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) { ++ if (h2_config_geti(r, s, H2_CONF_PUSH)) { + return "on"; + } + } +@@ -285,8 +259,7 @@ static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) + { + if (ctx) { +- h2_task *task = h2_ctx_get_task(ctx); +- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { ++ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) { + return "PUSHED"; + } + } +@@ -297,9 +270,8 @@ static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) + { + if (ctx) { +- h2_task *task = h2_ctx_get_task(ctx); +- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) { +- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id); ++ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) { ++ h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id); + if (stream) { + return apr_itoa(p, stream->initiated_on); + } +@@ -312,9 +284,8 @@ static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s, + conn_rec *c, request_rec *r, h2_ctx *ctx) + { + if (ctx) { +- h2_task *task = h2_ctx_get_task(ctx); +- if (task) { +- return task->id; ++ if (ctx->task) { ++ return ctx->task->id; + } + } + return ""; +@@ -366,7 +337,7 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s, + for (i = 0; i < H2_ALEN(H2_VARS); ++i) { + h2_var_def *vdef = &H2_VARS[i]; + if (!strcmp(vdef->name, name)) { +- h2_ctx *ctx = (r? h2_ctx_rget(r) : ++ h2_ctx *ctx = (r? h2_ctx_get(c, 0) : + h2_ctx_get(c->master? 
c->master : c, 0)); + return (char *)vdef->lookup(p, s, c, r, ctx); + } +@@ -377,7 +348,7 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s, + static int h2_h2_fixups(request_rec *r) + { + if (r->connection->master) { +- h2_ctx *ctx = h2_ctx_rget(r); ++ h2_ctx *ctx = h2_ctx_get(r->connection, 0); + int i; + + for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) { +diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h +index 7a1b49a..ba5e6dd 100644 +--- a/modules/http2/mod_http2.h ++++ b/modules/http2/mod_http2.h +@@ -30,22 +30,20 @@ APR_DECLARE_OPTIONAL_FN(int, + + + /******************************************************************************* +- * HTTP/2 request engines ++ * START HTTP/2 request engines (DEPRECATED) + ******************************************************************************/ ++ ++/* The following functions were introduced for the experimental mod_proxy_http2 ++ * support, but have been abandoned since. ++ * They are still declared here for backward compatibiliy, in case someone ++ * tries to build an old mod_proxy_http2 against it, but will disappear ++ * completely sometime in the future. ++ */ + + struct apr_thread_cond_t; +- + typedef struct h2_req_engine h2_req_engine; +- + typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed); + +-/** +- * Initialize a h2_req_engine. The structure will be passed in but +- * only the name and master are set. The function should initialize +- * all fields. +- * @param engine the allocated, partially filled structure +- * @param r the first request to process, or NULL +- */ + typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, + const char *id, + const char *type, +@@ -55,35 +53,11 @@ typedef apr_status_t http2_req_engine_init(h2_req_engine *engine, + http2_output_consumed **pconsumed, + void **pbaton); + +-/** +- * Push a request to an engine with the specified name for further processing. +- * If no such engine is available, einit is not NULL, einit is called +- * with a new engine record and the caller is responsible for running the +- * new engine instance. +- * @param engine_type the type of the engine to add the request to +- * @param r the request to push to an engine for processing +- * @param einit an optional initialization callback for a new engine +- * of the requested type, should no instance be available. +- * By passing a non-NULL callback, the caller is willing +- * to init and run a new engine itself. +- * @return APR_SUCCESS iff slave was successfully added to an engine +- */ + APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_push, (const char *engine_type, + request_rec *r, + http2_req_engine_init *einit)); + +-/** +- * Get a new request for processing in this engine. 
+- * @param engine the engine which is done processing the slave +- * @param block if call should block waiting for request to come +- * @param capacity how many parallel requests are acceptable +- * @param pr the request that needs processing or NULL +- * @return APR_SUCCESS if new request was assigned +- * APR_EAGAIN if no new request is available +- * APR_EOF if engine may shut down, as no more request will be scheduled +- * APR_ECONNABORTED if the engine needs to shut down immediately +- */ + APR_DECLARE_OPTIONAL_FN(apr_status_t, + http2_req_engine_pull, (h2_req_engine *engine, + apr_read_type_e block, +@@ -98,4 +72,8 @@ APR_DECLARE_OPTIONAL_FN(void, + http2_get_num_workers, (server_rec *s, + int *minw, int *max)); + ++/******************************************************************************* ++ * END HTTP/2 request engines (DEPRECATED) ++ ******************************************************************************/ ++ + #endif +diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c +index a7e0dcd..95336f7 100644 +--- a/modules/http2/mod_proxy_http2.c ++++ b/modules/http2/mod_proxy_http2.c +@@ -16,13 +16,14 @@ + + #include + ++#include + #include + #include + #include "mod_http2.h" + + + #include "mod_proxy_http2.h" +-#include "h2_request.h" ++#include "h2.h" + #include "h2_proxy_util.h" + #include "h2_version.h" + #include "h2_proxy_session.h" +@@ -46,19 +47,12 @@ AP_DECLARE_MODULE(proxy_http2) = { + + /* Optional functions from mod_http2 */ + static int (*is_h2)(conn_rec *c); +-static apr_status_t (*req_engine_push)(const char *name, request_rec *r, +- http2_req_engine_init *einit); +-static apr_status_t (*req_engine_pull)(h2_req_engine *engine, +- apr_read_type_e block, +- int capacity, +- request_rec **pr); +-static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn, +- apr_status_t status); +- ++ + typedef struct h2_proxy_ctx { ++ const char *id; ++ conn_rec *master; + conn_rec *owner; + apr_pool_t *pool; +- request_rec *rbase; + server_rec *server; + const char *proxy_func; + char server_portstr[32]; +@@ -66,19 +60,16 @@ typedef struct h2_proxy_ctx { + proxy_worker *worker; + proxy_server_conf *conf; + +- h2_req_engine *engine; +- const char *engine_id; +- const char *engine_type; +- apr_pool_t *engine_pool; + apr_size_t req_buffer_size; +- h2_proxy_fifo *requests; + int capacity; + +- unsigned standalone : 1; + unsigned is_ssl : 1; + unsigned flushall : 1; + +- apr_status_t r_status; /* status of our first request work */ ++ request_rec *r; /* the request processed in this ctx */ ++ apr_status_t r_status; /* status of request work */ ++ int r_done; /* request was processed, not necessarily successfully */ ++ int r_may_retry; /* request may be retried */ + h2_proxy_session *session; /* current http2 session against backend */ + } h2_proxy_ctx; + +@@ -104,16 +95,6 @@ static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog, + MOD_HTTP2_VERSION, ngh2? 
ngh2->version_str : "unknown"); + + is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2); +- req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push); +- req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull); +- req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done); +- +- /* we need all of them */ +- if (!req_engine_push || !req_engine_pull || !req_engine_done) { +- req_engine_push = NULL; +- req_engine_pull = NULL; +- req_engine_done = NULL; +- } + + return status; + } +@@ -204,45 +185,6 @@ static int proxy_http2_canon(request_rec *r, char *url) + return OK; + } + +-static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes) +-{ +- h2_proxy_ctx *ctx = baton; +- +- if (ctx->session) { +- h2_proxy_session_update_window(ctx->session, c, bytes); +- } +-} +- +-static apr_status_t proxy_engine_init(h2_req_engine *engine, +- const char *id, +- const char *type, +- apr_pool_t *pool, +- apr_size_t req_buffer_size, +- request_rec *r, +- http2_output_consumed **pconsumed, +- void **pctx) +-{ +- h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config, +- &proxy_http2_module); +- if (!ctx) { +- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368) +- "h2_proxy_session, engine init, no ctx found"); +- return APR_ENOTIMPL; +- } +- +- ctx->pool = pool; +- ctx->engine = engine; +- ctx->engine_id = id; +- ctx->engine_type = type; +- ctx->engine_pool = pool; +- ctx->req_buffer_size = req_buffer_size; +- ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests)); +- +- *pconsumed = out_consumed; +- *pctx = ctx; +- return APR_SUCCESS; +-} +- + static apr_status_t add_request(h2_proxy_session *session, request_rec *r) + { + h2_proxy_ctx *ctx = session->user_data; +@@ -252,7 +194,7 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r) + url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE); + apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu", + ctx->p_conn->connection->local_addr->port)); +- status = h2_proxy_session_submit(session, url, r, ctx->standalone); ++ status = h2_proxy_session_submit(session, url, r, 1); + if (status != APR_SUCCESS) { + ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351) + "pass request body failed to %pI (%s) from %s (%s)", +@@ -266,43 +208,15 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r) + static void request_done(h2_proxy_ctx *ctx, request_rec *r, + apr_status_t status, int touched) + { +- const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE); +- +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, +- "h2_proxy_session(%s): request done %s, touched=%d", +- ctx->engine_id, task_id, touched); +- if (status != APR_SUCCESS) { +- if (!touched) { +- /* untouched request, need rescheduling */ +- status = h2_proxy_fifo_push(ctx->requests, r); +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, +- APLOGNO(03369) +- "h2_proxy_session(%s): rescheduled request %s", +- ctx->engine_id, task_id); +- return; +- } +- else { +- const char *uri; +- uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0); +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, +- APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s " +- "not complete, cannot repeat", +- ctx->engine_id, task_id, uri); +- } +- } +- +- if (r == ctx->rbase) { ++ if (r == ctx->r) { ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection, ++ "h2_proxy_session(%s): request done, touched=%d", ++ ctx->id, touched); ++ 
ctx->r_done = 1; ++ if (touched) ctx->r_may_retry = 0; + ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS + : HTTP_SERVICE_UNAVAILABLE); + } +- +- if (req_engine_done && ctx->engine) { +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection, +- APLOGNO(03370) +- "h2_proxy_session(%s): finished request %s", +- ctx->engine_id, task_id); +- req_engine_done(ctx->engine, r->connection, status); +- } + } + + static void session_req_done(h2_proxy_session *session, request_rec *r, +@@ -311,43 +225,15 @@ static void session_req_done(h2_proxy_session *session, request_rec *r, + request_done(session->user_data, r, status, touched); + } + +-static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave) +-{ +- if (h2_proxy_fifo_count(ctx->requests) > 0) { +- return APR_SUCCESS; +- } +- else if (req_engine_pull && ctx->engine) { +- apr_status_t status; +- request_rec *r = NULL; +- +- status = req_engine_pull(ctx->engine, before_leave? +- APR_BLOCK_READ: APR_NONBLOCK_READ, +- ctx->capacity, &r); +- if (status == APR_SUCCESS && r) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner, +- "h2_proxy_engine(%s): pulled request (%s) %s", +- ctx->engine_id, +- before_leave? "before leave" : "regular", +- r->the_request); +- h2_proxy_fifo_push(ctx->requests, r); +- } +- return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status; +- } +- return APR_EOF; +-} +- +-static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) { ++static apr_status_t ctx_run(h2_proxy_ctx *ctx) { + apr_status_t status = OK; + int h2_front; +- request_rec *r; + + /* Step Four: Send the Request in a new HTTP/2 stream and + * loop until we got the response or encounter errors. + */ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner, +- "eng(%s): setup session", ctx->engine_id); + h2_front = is_h2? is_h2(ctx->owner) : 0; +- ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf, ++ ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf, + h2_front, 30, + h2_proxy_log2((int)ctx->req_buffer_size), + session_req_done); +@@ -358,105 +244,45 @@ static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) { + } + + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373) +- "eng(%s): run session %s", ctx->engine_id, ctx->session->id); ++ "eng(%s): run session %s", ctx->id, ctx->session->id); + ctx->session->user_data = ctx; + +- while (!ctx->owner->aborted) { +- if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { +- add_request(ctx->session, r); +- } +- ++ ctx->r_done = 0; ++ add_request(ctx->session, ctx->r); ++ ++ while (!ctx->master->aborted && !ctx->r_done) { ++ + status = h2_proxy_session_process(ctx->session); +- +- if (status == APR_SUCCESS) { +- apr_status_t s2; +- /* ongoing processing, call again */ +- if (ctx->session->remote_max_concurrent > 0 +- && ctx->session->remote_max_concurrent != ctx->capacity) { +- ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent, +- h2_proxy_fifo_capacity(ctx->requests)); +- } +- s2 = next_request(ctx, 0); +- if (s2 == APR_ECONNABORTED) { +- /* master connection gone */ +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner, +- APLOGNO(03374) "eng(%s): pull request", +- ctx->engine_id); +- /* give notice that we're leaving and cancel all ongoing +- * streams. 
*/ +- next_request(ctx, 1); +- h2_proxy_session_cancel_all(ctx->session); +- h2_proxy_session_process(ctx->session); +- status = ctx->r_status = APR_SUCCESS; +- break; +- } +- if ((h2_proxy_fifo_count(ctx->requests) == 0) +- && h2_proxy_ihash_empty(ctx->session->streams)) { +- break; +- } +- } +- else { +- /* end of processing, maybe error */ ++ if (status != APR_SUCCESS) { ++ /* Encountered an error during session processing */ + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + APLOGNO(03375) "eng(%s): end of session %s", +- ctx->engine_id, ctx->session->id); +- /* +- * Any open stream of that session needs to ++ ctx->id, ctx->session->id); ++ /* Any open stream of that session needs to + * a) be reopened on the new session iff safe to do so + * b) reported as done (failed) otherwise + */ + h2_proxy_session_cleanup(ctx->session, session_req_done); +- break; ++ goto out; + } + } + +- ctx->session->user_data = NULL; +- ctx->session = NULL; +- +- return status; +-} +- +-static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r) +-{ +- conn_rec *c = ctx->owner; +- const char *engine_type, *hostname; +- +- hostname = (ctx->p_conn->ssl_hostname? +- ctx->p_conn->ssl_hostname : ctx->p_conn->hostname); +- engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname, +- ctx->server_portstr); +- +- if (c->master && req_engine_push && r && is_h2 && is_h2(c)) { +- /* If we are have req_engine capabilities, push the handling of this +- * request (e.g. slave connection) to a proxy_http2 engine which +- * uses the same backend. We may be called to create an engine +- * ourself. */ +- if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) { +- if (ctx->engine == NULL) { +- /* request has been assigned to an engine in another thread */ +- return SUSPENDED; +- } ++out: ++ if (ctx->master->aborted) { ++ /* master connection gone */ ++ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, ++ APLOGNO(03374) "eng(%s): master connection gone", ctx->id); ++ /* cancel all ongoing requests */ ++ h2_proxy_session_cancel_all(ctx->session); ++ h2_proxy_session_process(ctx->session); ++ if (!ctx->master->aborted) { ++ status = ctx->r_status = APR_SUCCESS; + } + } + +- if (!ctx->engine) { +- /* No engine was available or has been initialized, handle this +- * request just by ourself. 
*/ +- ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id); +- ctx->engine_type = engine_type; +- ctx->engine_pool = ctx->pool; +- ctx->req_buffer_size = (32*1024); +- ctx->standalone = 1; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "h2_proxy_http2(%ld): setup standalone engine for type %s", +- c->id, engine_type); +- } +- else { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, +- "H2: hosting engine %s", ctx->engine_id); +- } +- +- return h2_proxy_fifo_push(ctx->requests, r); ++ ctx->session->user_data = NULL; ++ ctx->session = NULL; ++ return status; + } + + static int proxy_http2_handler(request_rec *r, +@@ -466,7 +292,7 @@ static int proxy_http2_handler(request_rec *r, + const char *proxyname, + apr_port_t proxyport) + { +- const char *proxy_func; ++ const char *proxy_func, *task_id; + char *locurl = url, *u; + apr_size_t slen; + int is_ssl = 0; +@@ -498,29 +324,36 @@ static int proxy_http2_handler(request_rec *r, + default: + return DECLINED; + } ++ ++ task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE); + + ctx = apr_pcalloc(r->pool, sizeof(*ctx)); +- ctx->owner = r->connection; +- ctx->pool = r->pool; +- ctx->rbase = r; +- ctx->server = r->server; ++ ctx->master = r->connection->master? r->connection->master : r->connection; ++ ctx->id = task_id? task_id : apr_psprintf(r->pool, "%ld", (long)ctx->master->id); ++ ctx->owner = r->connection; ++ ctx->pool = r->pool; ++ ctx->server = r->server; + ctx->proxy_func = proxy_func; +- ctx->is_ssl = is_ssl; +- ctx->worker = worker; +- ctx->conf = conf; +- ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0; +- ctx->r_status = HTTP_SERVICE_UNAVAILABLE; +- +- h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100); ++ ctx->is_ssl = is_ssl; ++ ctx->worker = worker; ++ ctx->conf = conf; ++ ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0; ++ ctx->req_buffer_size = (32*1024); ++ ctx->r = r; ++ ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE; ++ ctx->r_done = 0; ++ ctx->r_may_retry = 1; + + ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx); + + /* scheme says, this is for us. */ +- apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url); +- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase, ++ apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url); ++ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r, + "H2: serving URL %s", url); + + run_connect: ++ if (ctx->master->aborted) goto cleanup; ++ + /* Get a proxy_conn_rec from the worker, might be a new one, might + * be one still open from another request, or it might fail if the + * worker is stopped or in error. */ +@@ -530,25 +363,11 @@ run_connect: + } + + ctx->p_conn->is_ssl = ctx->is_ssl; +- if (ctx->is_ssl && ctx->p_conn->connection) { +- /* If there are some metadata on the connection (e.g. TLS alert), +- * let mod_ssl detect them, and create a new connection below. +- */ +- apr_bucket_brigade *tmp_bb; +- tmp_bb = apr_brigade_create(ctx->rbase->pool, +- ctx->rbase->connection->bucket_alloc); +- status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb, +- AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1); +- if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) { +- ctx->p_conn->close = 1; +- } +- apr_brigade_cleanup(tmp_bb); +- } + + /* Step One: Determine the URL to connect to (might be a proxy), + * initialize the backend accordingly and determine the server + * port string we can expect in responses. 
*/ +- if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker, ++ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker, + ctx->p_conn, &uri, &locurl, + proxyname, proxyport, + ctx->server_portstr, +@@ -556,17 +375,6 @@ run_connect: + goto cleanup; + } + +- /* If we are not already hosting an engine, try to push the request +- * to an already existing engine or host a new engine here. */ +- if (r && !ctx->engine) { +- ctx->r_status = push_request_somewhere(ctx, r); +- r = NULL; +- if (ctx->r_status == SUSPENDED) { +- /* request was pushed to another thread, leave processing here */ +- goto cleanup; +- } +- } +- + /* Step Two: Make the Connection (or check that an already existing + * socket is still usable). On success, we have a socket connected to + * backend->hostname. */ +@@ -575,70 +383,58 @@ run_connect: + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352) + "H2: failed to make connection to backend: %s", + ctx->p_conn->hostname); +- goto reconnect; ++ goto cleanup; + } + + /* Step Three: Create conn_rec for the socket we have open now. */ + if (!ctx->p_conn->connection) { +- status = ap_proxy_connection_create_ex(ctx->proxy_func, +- ctx->p_conn, ctx->rbase); ++ status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r); + if (status != OK) { + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353) + "setup new connection: is_ssl=%d %s %s %s", + ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname, + locurl, ctx->p_conn->hostname); +- goto reconnect; ++ ctx->r_status = status; ++ goto cleanup; + } + +- if (!ctx->p_conn->data) { +- /* New conection: set a note on the connection what CN is +- * requested and what protocol we want */ ++ if (!ctx->p_conn->data && ctx->is_ssl) { ++ /* New SSL connection: set a note on the connection about what ++ * protocol we want. 
++ */ ++ apr_table_setn(ctx->p_conn->connection->notes, ++ "proxy-request-alpn-protos", "h2"); + if (ctx->p_conn->ssl_hostname) { +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner, ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner, + "set SNI to %s for (%s)", + ctx->p_conn->ssl_hostname, + ctx->p_conn->hostname); + apr_table_setn(ctx->p_conn->connection->notes, + "proxy-request-hostname", ctx->p_conn->ssl_hostname); + } +- if (ctx->is_ssl) { +- apr_table_setn(ctx->p_conn->connection->notes, +- "proxy-request-alpn-protos", "h2"); +- } + } + } + +-run_session: +- status = proxy_engine_run(ctx); +- if (status == APR_SUCCESS) { +- /* session and connection still ok */ +- if (next_request(ctx, 1) == APR_SUCCESS) { +- /* more requests, run again */ +- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376) +- "run_session, again"); +- goto run_session; +- } +- /* done */ +- ctx->engine = NULL; +- } ++ if (ctx->master->aborted) goto cleanup; ++ status = ctx_run(ctx); + +-reconnect: +- if (next_request(ctx, 1) == APR_SUCCESS) { +- /* Still more to do, tear down old conn and start over */ ++ if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) { ++ /* Not successfully processed, but may retry, tear down old conn and start over */ + if (ctx->p_conn) { + ctx->p_conn->close = 1; +- /*only in trunk so far */ +- /*proxy_run_detach_backend(r, ctx->p_conn);*/ ++#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2) ++ proxy_run_detach_backend(r, ctx->p_conn); ++#endif + ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); + ctx->p_conn = NULL; + } + ++reconnects; +- if (reconnects < 5 && !ctx->owner->aborted) { ++ if (reconnects < 5) { + goto run_connect; + } + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023) +- "giving up after %d reconnects, %d requests todo", +- reconnects, h2_proxy_fifo_count(ctx->requests)); ++ "giving up after %d reconnects, request-done=%d", ++ reconnects, ctx->r_done); + } + + cleanup: +@@ -647,17 +443,13 @@ cleanup: + /* close socket when errors happened or session shut down (EOF) */ + ctx->p_conn->close = 1; + } +- /*only in trunk so far */ +- /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/ ++#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2) ++ proxy_run_detach_backend(ctx->r, ctx->p_conn); ++#endif + ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server); + ctx->p_conn = NULL; + } + +- /* Any requests will still have need to fail */ +- while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) { +- request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1); +- } +- + ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL); + ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, + APLOGNO(03377) "leaving handler"); +-- +1.8.3.1 + diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch new file mode 100644 index 0000000000000000000000000000000000000000..6aff2aa2bd9be0128f7bb90e4b38440a5ea9dbf2 --- /dev/null +++ b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch @@ -0,0 +1,860 @@ +From 94de05dacf17a60a8c3b34b5ded37fc4dc04709b Mon Sep 17 00:00:00 2001 +From: Stefan Eissing +Date: Thu, 1 Aug 2019 08:18:03 +0000 +Subject: [PATCH 5/5] Merge of r1861338,1862475,1862583,1862865,1863221,1863276 + from trunk: + + *) mod_http2: core setting "LimitRequestFieldSize" is not additionally checked on + merged header fields, just as HTTP/1.1 does. 
[Stefan Eissing, Michael Kaufmann] + + *) mod_http2: fixed a bug that prevented proper stream cleanup when connection + throttling was in place. Stream resets by clients on streams initiated by them + are counted as possible trigger for throttling. [Stefan Eissing] + + *) mod_http2/mpm_event: Fixes the behaviour when a HTTP/2 connection has nothing + more to write with streams ongoing (flow control block). The timeout waiting + for the client to send WINODW_UPDATE was incorrectly KeepAliveTimeout and not + Timeout as it should be. Fixes PR 63534. [Yann Ylavic, Stefan Eissing] + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864126 13f79535-47bb-0310-9956-ffa450edef68 +--- + modules/http2/h2_conn.c | 7 ++ + modules/http2/h2_filter.c | 52 +++++++++- + modules/http2/h2_mplx.c | 249 +++++++++++++++++++++++++++------------------ + modules/http2/h2_mplx.h | 11 +- + modules/http2/h2_session.c | 19 +++- + modules/http2/h2_stream.c | 80 +++++++++++---- + modules/http2/h2_stream.h | 4 + + modules/http2/h2_task.c | 8 +- + modules/http2/h2_task.h | 2 + + server/mpm/event/event.c | 7 +- + 10 files changed, 304 insertions(+), 135 deletions(-) + +diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c +index 9ef0ea0..0b78a84 100644 +--- a/modules/http2/h2_conn.c ++++ b/modules/http2/h2_conn.c +@@ -231,6 +231,13 @@ apr_status_t h2_conn_run(conn_rec *c) + case H2_SESSION_ST_BUSY: + case H2_SESSION_ST_WAIT: + c->cs->state = CONN_STATE_WRITE_COMPLETION; ++ if (c->cs && (session->open_streams || !session->remote.emitted_count)) { ++ /* let the MPM know that we are not done and want ++ * the Timeout behaviour instead of a KeepAliveTimeout ++ * See PR 63534. ++ */ ++ c->cs->sense = CONN_SENSE_WANT_READ; ++ } + break; + case H2_SESSION_ST_CLEANUP: + case H2_SESSION_ST_DONE: +diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c +index 5fd237f..2fc5e12 100644 +--- a/modules/http2/h2_filter.c ++++ b/modules/http2/h2_filter.c +@@ -493,6 +493,52 @@ static apr_status_t status_event(void *ctx, h2_bucket_event event, + return APR_SUCCESS; + } + ++static apr_status_t discard_body(request_rec *r, apr_off_t maxlen) ++{ ++ apr_bucket_brigade *bb; ++ int seen_eos; ++ apr_status_t rv; ++ ++ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); ++ seen_eos = 0; ++ do { ++ apr_bucket *bucket; ++ ++ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, ++ APR_BLOCK_READ, HUGE_STRING_LEN); ++ ++ if (rv != APR_SUCCESS) { ++ apr_brigade_destroy(bb); ++ return rv; ++ } ++ ++ for (bucket = APR_BRIGADE_FIRST(bb); ++ bucket != APR_BRIGADE_SENTINEL(bb); ++ bucket = APR_BUCKET_NEXT(bucket)) ++ { ++ const char *data; ++ apr_size_t len; ++ ++ if (APR_BUCKET_IS_EOS(bucket)) { ++ seen_eos = 1; ++ break; ++ } ++ if (bucket->length == 0) { ++ continue; ++ } ++ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ); ++ if (rv != APR_SUCCESS) { ++ apr_brigade_destroy(bb); ++ return rv; ++ } ++ maxlen -= bucket->length; ++ } ++ apr_brigade_cleanup(bb); ++ } while (!seen_eos && maxlen >= 0); ++ ++ return APR_SUCCESS; ++} ++ + int h2_filter_h2_status_handler(request_rec *r) + { + conn_rec *c = r->connection; +@@ -510,8 +556,10 @@ int h2_filter_h2_status_handler(request_rec *r) + + task = h2_ctx_get_task(r->connection); + if (task) { +- +- if ((status = ap_discard_request_body(r)) != OK) { ++ /* In this handler, we do some special sauce to send footers back, ++ * IFF we received footers in the request. This is used in our test ++ * cases, since CGI has no way of handling those. 
*/ ++ if ((status = discard_body(r, 1024)) != OK) { + return status; + } + +diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c +index fae77c7..9b504a5 100644 +--- a/modules/http2/h2_mplx.c ++++ b/modules/http2/h2_mplx.c +@@ -53,8 +53,12 @@ typedef struct { + h2_mplx *m; + h2_stream *stream; + apr_time_t now; ++ apr_size_t count; + } stream_iter_ctx; + ++static apr_status_t mplx_be_happy(h2_mplx *m); ++static apr_status_t mplx_be_annoyed(h2_mplx *m); ++ + apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s) + { + return APR_SUCCESS; +@@ -98,7 +102,7 @@ static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t len + + static void stream_joined(h2_mplx *m, h2_stream *stream) + { +- ap_assert(!stream->task || stream->task->worker_done); ++ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done); + + h2_ihash_remove(m->shold, stream->id); + h2_ihash_add(m->spurge, stream); +@@ -124,7 +128,7 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream) + h2_ififo_remove(m->readyq, stream->id); + h2_ihash_add(m->shold, stream); + +- if (!stream->task || stream->task->worker_done) { ++ if (!h2_task_has_started(stream->task) || stream->task->done_done) { + stream_joined(m, stream); + } + else if (stream->task) { +@@ -194,7 +198,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, + m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM); + + m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id)); +- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id)); + m->q = h2_iq_create(m->pool, m->max_streams); +@@ -208,8 +211,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent, + m->workers = workers; + m->max_active = workers->max_workers; + m->limit_active = 6; /* the original h1 max parallel connections */ +- m->last_limit_change = m->last_idle_block = apr_time_now(); +- m->limit_change_interval = apr_time_from_msec(100); ++ m->last_mood_change = apr_time_now(); ++ m->mood_update_interval = apr_time_from_msec(100); + + m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*)); + } +@@ -431,6 +434,10 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + + /* How to shut down a h2 connection: + * 1. cancel all streams still active */ ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, ++ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks", ++ m->id, (int)h2_ihash_count(m->streams), ++ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active); + while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) { + /* until empty */ + } +@@ -456,10 +463,10 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + h2_ihash_iter(m->shold, report_stream_iter, m); + } + } +- ap_assert(m->tasks_active == 0); + m->join_wait = NULL; +- ++ + /* 4. 
With all workers done, all streams should be in spurge */ ++ ap_assert(m->tasks_active == 0); + if (!h2_ihash_empty(m->shold)) { + ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516) + "h2_mplx(%ld): unexpected %d streams in hold", +@@ -470,8 +477,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait) + m->c->aborted = old_aborted; + H2_MPLX_LEAVE(m); + +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +- "h2_mplx(%ld): released", m->id); ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id); + } + + apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream) +@@ -709,7 +715,6 @@ static h2_task *next_stream_task(h2_mplx *m) + } + + if (!stream->task) { +- + if (sid > m->max_stream_started) { + m->max_stream_started = sid; + } +@@ -728,9 +733,9 @@ static h2_task *next_stream_task(h2_mplx *m) + "create task")); + return NULL; + } +- + } + ++ stream->task->started_at = apr_time_now(); + ++m->tasks_active; + return stream->task; + } +@@ -778,32 +783,18 @@ static void task_done(h2_mplx *m, h2_task *task) + "h2_mplx(%s): request done, %f ms elapsed", task->id, + (task->done_at - task->started_at) / 1000.0); + +- if (task->started_at > m->last_idle_block) { +- /* this task finished without causing an 'idle block', e.g. +- * a block by flow control. +- */ +- if (task->done_at- m->last_limit_change >= m->limit_change_interval +- && m->limit_active < m->max_active) { +- /* Well behaving stream, allow it more workers */ +- m->limit_active = H2MIN(m->limit_active * 2, +- m->max_active); +- m->last_limit_change = task->done_at; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +- "h2_mplx(%ld): increase worker limit to %d", +- m->id, m->limit_active); +- } ++ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) { ++ mplx_be_happy(m); + } +- ++ + ap_assert(task->done_done == 0); + + stream = h2_ihash_get(m->streams, task->stream_id); + if (stream) { + /* stream not done yet. 
*/ +- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) { ++ if (!m->aborted && task->redo) { + /* reset and schedule again */ +- task->worker_done = 0; + h2_task_redo(task); +- h2_ihash_remove(m->sredo, stream->id); + h2_iq_add(m->q, stream->id, NULL, NULL); + ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c, + H2_STRM_MSG(stream, "redo, added to q")); +@@ -848,8 +839,8 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) + { + H2_MPLX_ENTER_ALWAYS(m); + +- task_done(m, task); + --m->tasks_active; ++ task_done(m, task); + + if (m->join_wait) { + apr_thread_cond_signal(m->join_wait); +@@ -867,94 +858,161 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask) + * h2_mplx DoS protection + ******************************************************************************/ + +-static int latest_repeatable_unsubmitted_iter(void *data, void *val) ++static int timed_out_busy_iter(void *data, void *val) + { + stream_iter_ctx *ctx = data; + h2_stream *stream = val; +- +- if (stream->task && !stream->task->worker_done +- && h2_task_can_redo(stream->task) +- && !h2_ihash_get(ctx->m->sredo, stream->id)) { +- if (!h2_stream_is_ready(stream)) { +- /* this task occupies a worker, the response has not been submitted +- * yet, not been cancelled and it is a repeatable request +- * -> it can be re-scheduled later */ +- if (!ctx->stream +- || (ctx->stream->task->started_at < stream->task->started_at)) { +- /* we did not have one or this one was started later */ +- ctx->stream = stream; +- } +- } ++ if (h2_task_has_started(stream->task) && !stream->task->worker_done ++ && (ctx->now - stream->task->started_at) > stream->task->timeout) { ++ /* timed out stream occupying a worker, found */ ++ ctx->stream = stream; ++ return 0; + } + return 1; + } + +-static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m) ++static h2_stream *get_timed_out_busy_stream(h2_mplx *m) + { + stream_iter_ctx ctx; + ctx.m = m; + ctx.stream = NULL; +- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx); ++ ctx.now = apr_time_now(); ++ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx); + return ctx.stream; + } + +-static int timed_out_busy_iter(void *data, void *val) ++static int latest_repeatable_unsubmitted_iter(void *data, void *val) + { + stream_iter_ctx *ctx = data; + h2_stream *stream = val; +- if (stream->task && !stream->task->worker_done +- && (ctx->now - stream->task->started_at) > stream->task->timeout) { +- /* timed out stream occupying a worker, found */ +- ctx->stream = stream; +- return 0; ++ ++ if (!stream->task) goto leave; ++ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave; ++ if (h2_stream_is_ready(stream)) goto leave; ++ if (stream->task->redo) { ++ ++ctx->count; ++ goto leave; ++ } ++ if (h2_task_can_redo(stream->task)) { ++ /* this task occupies a worker, the response has not been submitted ++ * yet, not been cancelled and it is a repeatable request ++ * -> we could redo it later */ ++ if (!ctx->stream ++ || (ctx->stream->task->started_at < stream->task->started_at)) { ++ /* we did not have one or this one was started later */ ++ ctx->stream = stream; ++ } + } ++leave: + return 1; + } + +-static h2_stream *get_timed_out_busy_stream(h2_mplx *m) ++static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m) + { + stream_iter_ctx ctx; ++ ++ /* count the running tasks already marked for redo and get one that could ++ * be throttled */ ++ *ptask = NULL; + ctx.m = m; + ctx.stream = NULL; +- ctx.now = apr_time_now(); 
+- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx); +- return ctx.stream; ++ ctx.count = 0; ++ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx); ++ if (m->tasks_active - ctx.count > m->limit_active) { ++ /* we are above the limit of running tasks, accounting for the ones ++ * already throttled. */ ++ if (ctx.stream && ctx.stream->task) { ++ *ptask = ctx.stream->task; ++ return APR_EAGAIN; ++ } ++ /* above limit, be seeing no candidate for easy throttling */ ++ if (get_timed_out_busy_stream(m)) { ++ /* Too many busy workers, unable to cancel enough streams ++ * and with a busy, timed out stream, we tell the client ++ * to go away... */ ++ return APR_TIMEUP; ++ } ++ } ++ return APR_SUCCESS; + } + + static apr_status_t unschedule_slow_tasks(h2_mplx *m) + { +- h2_stream *stream; +- int n; ++ h2_task *task; ++ apr_status_t rv; + + /* Try to get rid of streams that occupy workers. Look for safe requests + * that are repeatable. If none found, fail the connection. + */ +- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo)); +- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) { ++ while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c, + "h2_mplx(%s): unschedule, resetting task for redo later", +- stream->task->id); +- h2_task_rst(stream->task, H2_ERR_CANCEL); +- h2_ihash_add(m->sredo, stream); +- --n; ++ task->id); ++ task->redo = 1; ++ h2_task_rst(task, H2_ERR_CANCEL); + } + +- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) { +- h2_stream *stream = get_timed_out_busy_stream(m); +- if (stream) { +- /* Too many busy workers, unable to cancel enough streams +- * and with a busy, timed out stream, we tell the client +- * to go away... 
*/ +- return APR_TIMEUP; +- } ++ return rv; ++} ++ ++static apr_status_t mplx_be_happy(h2_mplx *m) ++{ ++ apr_time_t now; ++ ++ --m->irritations_since; ++ now = apr_time_now(); ++ if (m->limit_active < m->max_active ++ && (now - m->last_mood_change >= m->mood_update_interval ++ || m->irritations_since < -m->limit_active)) { ++ m->limit_active = H2MIN(m->limit_active * 2, m->max_active); ++ m->last_mood_change = now; ++ m->irritations_since = 0; ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, ++ "h2_mplx(%ld): mood update, increasing worker limit to %d", ++ m->id, m->limit_active); + } + return APR_SUCCESS; + } + +-apr_status_t h2_mplx_idle(h2_mplx *m) ++static apr_status_t mplx_be_annoyed(h2_mplx *m) + { + apr_status_t status = APR_SUCCESS; + apr_time_t now; ++ ++ ++m->irritations_since; ++ now = apr_time_now(); ++ if (m->limit_active > 2 && ++ ((now - m->last_mood_change >= m->mood_update_interval) ++ || (m->irritations_since >= m->limit_active))) { ++ ++ if (m->limit_active > 16) { ++ m->limit_active = 16; ++ } ++ else if (m->limit_active > 8) { ++ m->limit_active = 8; ++ } ++ else if (m->limit_active > 4) { ++ m->limit_active = 4; ++ } ++ else if (m->limit_active > 2) { ++ m->limit_active = 2; ++ } ++ m->last_mood_change = now; ++ m->irritations_since = 0; ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, ++ "h2_mplx(%ld): mood update, decreasing worker limit to %d", ++ m->id, m->limit_active); ++ } ++ ++ if (m->tasks_active > m->limit_active) { ++ status = unschedule_slow_tasks(m); ++ } ++ return status; ++} ++ ++apr_status_t h2_mplx_idle(h2_mplx *m) ++{ ++ apr_status_t status = APR_SUCCESS; + apr_size_t scount; + + H2_MPLX_ENTER(m); +@@ -974,31 +1032,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m) + * of busy workers we allow for this connection until it + * well behaves. 
+ */ +- now = apr_time_now(); +- m->last_idle_block = now; +- if (m->limit_active > 2 +- && now - m->last_limit_change >= m->limit_change_interval) { +- if (m->limit_active > 16) { +- m->limit_active = 16; +- } +- else if (m->limit_active > 8) { +- m->limit_active = 8; +- } +- else if (m->limit_active > 4) { +- m->limit_active = 4; +- } +- else if (m->limit_active > 2) { +- m->limit_active = 2; +- } +- m->last_limit_change = now; +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +- "h2_mplx(%ld): decrease worker limit to %d", +- m->id, m->limit_active); +- } +- +- if (m->tasks_active > m->limit_active) { +- status = unschedule_slow_tasks(m); +- } ++ status = mplx_be_annoyed(m); + } + else if (!h2_iq_empty(m->q)) { + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, +@@ -1093,11 +1127,24 @@ int h2_mplx_awaits_data(h2_mplx *m) + if (h2_ihash_empty(m->streams)) { + waiting = 0; + } +- else if (!m->tasks_active && !h2_ififo_count(m->readyq) +- && h2_iq_empty(m->q)) { ++ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) { + waiting = 0; + } + + H2_MPLX_LEAVE(m); + return waiting; + } ++ ++apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id) ++{ ++ h2_stream *stream; ++ apr_status_t status = APR_SUCCESS; ++ ++ H2_MPLX_ENTER_ALWAYS(m); ++ stream = h2_ihash_get(m->streams, stream_id); ++ if (stream && stream->task) { ++ status = mplx_be_annoyed(m); ++ } ++ H2_MPLX_LEAVE(m); ++ return status; ++} +diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h +index 575ccaf..8a4f63f 100644 +--- a/modules/http2/h2_mplx.h ++++ b/modules/http2/h2_mplx.h +@@ -63,7 +63,6 @@ struct h2_mplx { + unsigned int is_registered; /* is registered at h2_workers */ + + struct h2_ihash_t *streams; /* all streams currently processing */ +- struct h2_ihash_t *sredo; /* all streams that need to be re-started */ + struct h2_ihash_t *shold; /* all streams done with task ongoing */ + struct h2_ihash_t *spurge; /* all streams done, ready for destroy */ + +@@ -77,10 +76,10 @@ struct h2_mplx { + int tasks_active; /* # of tasks being processed from this mplx */ + int limit_active; /* current limit on active tasks, dynamic */ + int max_active; /* max, hard limit # of active tasks in a process */ +- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while +- * streams were ready */ +- apr_time_t last_limit_change; /* last time, worker limit changed */ +- apr_interval_time_t limit_change_interval; ++ ++ apr_time_t last_mood_change; /* last time, we worker limit changed */ ++ apr_interval_time_t mood_update_interval; /* how frequent we update at most */ ++ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */ + + apr_thread_mutex_t *lock; + struct apr_thread_cond_t *added_output; +@@ -205,6 +204,8 @@ typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx); + + apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx); + ++apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id); ++ + /******************************************************************************* + * Output handling of streams. 
+ ******************************************************************************/ +diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c +index f153422..43d26d3 100644 +--- a/modules/http2/h2_session.c ++++ b/modules/http2/h2_session.c +@@ -390,9 +390,14 @@ static int on_frame_recv_cb(nghttp2_session *ng2s, + (int)frame->rst_stream.error_code); + stream = h2_session_stream_get(session, frame->hd.stream_id); + if (stream && stream->initiated_on) { ++ /* A stream reset on a request we sent it. Normal, when the ++ * client does not want it. */ + ++session->pushes_reset; + } + else { ++ /* A stream reset on a request it sent us. Could happen in a browser ++ * when the user navigates away or cancels loading - maybe. */ ++ h2_mplx_client_rst(session->mplx, frame->hd.stream_id); + ++session->streams_reset; + } + break; +@@ -1699,7 +1704,7 @@ static void transit(h2_session *session, const char *action, h2_session_state ns + * that already served requests - not fair. */ + session->idle_sync_until = apr_time_now() + apr_time_from_sec(1); + s = "timeout"; +- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout); ++ timeout = session->s->timeout; + update_child_status(session, SERVER_BUSY_READ, "idle"); + ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, + H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"), +@@ -1707,8 +1712,8 @@ static void transit(h2_session *session, const char *action, h2_session_state ns + } + else if (session->open_streams) { + s = "timeout"; +- timeout = session->s->keep_alive_timeout; +- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle"); ++ timeout = session->s->timeout; ++ update_child_status(session, SERVER_BUSY_READ, "idle"); + } + else { + /* normal keepalive setup */ +@@ -2166,6 +2171,14 @@ apr_status_t h2_session_process(h2_session *session, int async) + session->have_read = 1; + } + else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) { ++ status = h2_mplx_idle(session->mplx); ++ if (status == APR_EAGAIN) { ++ break; ++ } ++ else if (status != APR_SUCCESS) { ++ dispatch_event(session, H2_SESSION_EV_CONN_ERROR, ++ H2_ERR_ENHANCE_YOUR_CALM, "less is more"); ++ } + status = APR_EAGAIN; + goto out; + } +diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c +index b5763ac..8c3d305 100644 +--- a/modules/http2/h2_stream.c ++++ b/modules/http2/h2_stream.c +@@ -397,13 +397,8 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_ + /* start pushed stream */ + ap_assert(stream->request == NULL); + ap_assert(stream->rtmp != NULL); +- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0); +- if (status != APR_SUCCESS) { +- return status; +- } +- set_policy_for(stream, stream->rtmp); +- stream->request = stream->rtmp; +- stream->rtmp = NULL; ++ status = h2_stream_end_headers(stream, 1, 0); ++ if (status != APR_SUCCESS) goto leave; + break; + + default: +@@ -415,6 +410,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_ + if (status == APR_SUCCESS && eos) { + status = transit(stream, on_event(stream, H2_SEV_CLOSED_L)); + } ++leave: + return status; + } + +@@ -455,13 +451,8 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ + * to abort the connection here, since this is clearly a protocol error */ + return APR_EINVAL; + } +- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len); +- if (status != APR_SUCCESS) { +- return status; +- } +- set_policy_for(stream, stream->rtmp); +- 
stream->request = stream->rtmp; +- stream->rtmp = NULL; ++ status = h2_stream_end_headers(stream, eos, frame_len); ++ if (status != APR_SUCCESS) goto leave; + } + break; + +@@ -472,6 +463,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_ + if (status == APR_SUCCESS && eos) { + status = transit(stream, on_event(stream, H2_SEV_CLOSED_R)); + } ++leave: + return status; + } + +@@ -683,6 +675,8 @@ static apr_status_t add_trailer(h2_stream *stream, + hvalue = apr_pstrndup(stream->pool, value, vlen); + h2_util_camel_case_header(hname, nlen); + apr_table_mergen(stream->trailers, hname, hvalue); ++ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, ++ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue); + + return APR_SUCCESS; + } +@@ -702,15 +696,19 @@ apr_status_t h2_stream_add_header(h2_stream *stream, + if (name[0] == ':') { + if ((vlen) > session->s->limit_req_line) { + /* pseudo header: approximation of request line size check */ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, +- H2_STRM_MSG(stream, "pseudo %s too long"), name); ++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, ++ H2_STRM_LOG(APLOGNO(10178), stream, ++ "Request pseudo header exceeds " ++ "LimitRequestFieldSize: %s"), name); + error = HTTP_REQUEST_URI_TOO_LARGE; + } + } + else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) { + /* header too long */ +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, +- H2_STRM_MSG(stream, "header %s too long"), name); ++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, ++ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds " ++ "LimitRequestFieldSize: %.*s"), ++ (int)H2MIN(nlen, 80), name); + error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + } + +@@ -722,8 +720,9 @@ apr_status_t h2_stream_add_header(h2_stream *stream, + h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM); + return APR_ECONNRESET; + } +- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c, +- H2_STRM_MSG(stream, "too many header lines")); ++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c, ++ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers " ++ "exceeds LimitRequestFields")); + error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE; + } + +@@ -754,6 +753,47 @@ apr_status_t h2_stream_add_header(h2_stream *stream, + return status; + } + ++typedef struct { ++ apr_size_t maxlen; ++ const char *failed_key; ++} val_len_check_ctx; ++ ++static int table_check_val_len(void *baton, const char *key, const char *value) ++{ ++ val_len_check_ctx *ctx = baton; ++ ++ if (strlen(value) <= ctx->maxlen) return 1; ++ ctx->failed_key = key; ++ return 0; ++} ++ ++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes) ++{ ++ apr_status_t status; ++ val_len_check_ctx ctx; ++ ++ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes); ++ if (APR_SUCCESS == status) { ++ set_policy_for(stream, stream->rtmp); ++ stream->request = stream->rtmp; ++ stream->rtmp = NULL; ++ ++ ctx.maxlen = stream->session->s->limit_req_fieldsize; ++ ctx.failed_key = NULL; ++ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL); ++ if (ctx.failed_key) { ++ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c, ++ H2_STRM_LOG(APLOGNO(), stream,"Request header exceeds " ++ "LimitRequestFieldSize: %.*s"), ++ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key); ++ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE); ++ /* keep on returning APR_SUCCESS, so that we send a HTTP response and ++ * do not RST the 
stream. */ ++ } ++ } ++ return status; ++} ++ + static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb) + { + if (bb) { +diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h +index 7ecc0ad..79cb39d 100644 +--- a/modules/http2/h2_stream.h ++++ b/modules/http2/h2_stream.h +@@ -198,6 +198,10 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream, + apr_status_t h2_stream_add_header(h2_stream *stream, + const char *name, size_t nlen, + const char *value, size_t vlen); ++ ++/* End the contruction of request headers */ ++apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes); ++ + + apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len); + apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len); +diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c +index a395807..c312459 100644 +--- a/modules/http2/h2_task.c ++++ b/modules/http2/h2_task.c +@@ -408,8 +408,15 @@ int h2_task_can_redo(h2_task *task) { + || !strcmp("OPTIONS", task->request->method)); + } + ++int h2_task_has_started(h2_task *task) ++{ ++ return task && task->started_at != 0; ++} ++ + void h2_task_redo(h2_task *task) + { ++ task->started_at = 0; ++ task->worker_done = 0; + task->rst_error = 0; + } + +@@ -548,7 +555,6 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id) + ap_assert(task); + c = task->c; + task->worker_started = 1; +- task->started_at = apr_time_now(); + + if (c->master) { + /* Each conn_rec->id is supposed to be unique at a point in time. Since +diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h +index 20be429..9a7ad68 100644 +--- a/modules/http2/h2_task.h ++++ b/modules/http2/h2_task.h +@@ -80,6 +80,7 @@ struct h2_task { + + unsigned int filters_set : 1; + unsigned int worker_started : 1; /* h2_worker started processing */ ++ unsigned int redo : 1; /* was throttled, should be restarted later */ + + int worker_done; /* h2_worker finished */ + int done_done; /* task_done has been handled */ +@@ -101,6 +102,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id); + + void h2_task_redo(h2_task *task); + int h2_task_can_redo(h2_task *task); ++int h2_task_has_started(h2_task *task); + + /** + * Reset the task with the given error code, resets all input/output. +diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c +index 048ae61..7a8a197 100644 +--- a/server/mpm/event/event.c ++++ b/server/mpm/event/event.c +@@ -1112,10 +1112,11 @@ read_request: + "network write failure in core output filter"); + cs->pub.state = CONN_STATE_LINGER; + } +- else if (c->data_in_output_filters) { ++ else if (c->data_in_output_filters || ++ cs->pub.sense == CONN_SENSE_WANT_READ) { + /* Still in WRITE_COMPLETION_STATE: +- * Set a write timeout for this connection, and let the +- * event thread poll for writeability. ++ * Set a read/write timeout for this connection, and let the ++ * event thread poll for read/writeability. 
+ */ + cs->queue_timestamp = apr_time_now(); + notify_suspend(cs); +-- +1.8.3.1 + diff --git a/CVE-2020-1927-1.patch b/CVE-2020-1927-1.patch new file mode 100644 index 0000000000000000000000000000000000000000..35fe491bcd4ffb902b8d68efaf7a59f6f2a0514d --- /dev/null +++ b/CVE-2020-1927-1.patch @@ -0,0 +1,73 @@ +From f11d5830759eb50ed366fc0690f9f4f491064ea3 Mon Sep 17 00:00:00 2001 +From: Jim Jagielski +Date: Tue, 11 Feb 2020 13:16:38 +0000 +Subject: [PATCH 1/2] Merge r1873747 from trunk: + +factor out default regex flags + +Submitted by: covener +Reviewed by: covener, minfrin, jorton + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1873905 13f79535-47bb-0310-9956-ffa450edef68 +--- + include/ap_mmn.h | 1 + + include/ap_regex.h | 2 ++ + server/core.c | 2 +- + server/util_pcre.c | 3 +-- + 4 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/include/ap_mmn.h b/include/ap_mmn.h +index 839228e..f5043ef 100644 +--- a/include/ap_mmn.h ++++ b/include/ap_mmn.h +@@ -515,6 +515,7 @@ + * 20120211.77 (2.4.34-dev) Add ap_exists_directive() + * 20120211.78 (2.4.34-dev) Add response_field_size to proxy_worker_shared + * 20120211.79 (2.4.34-dev) Add AP_GETLINE_NOSPC_EOL flag to http_protocol.h ++ * 20120211.90 (2.4.42-dev) AP_REG_DEFAULT macro in ap_regex.h + */ + + #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */ +diff --git a/include/ap_regex.h b/include/ap_regex.h +index 7d8df79..e651eea 100644 +--- a/include/ap_regex.h ++++ b/include/ap_regex.h +@@ -86,6 +86,8 @@ extern "C" { + + #define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */ + ++#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY) ++ + /* Error values: */ + enum { + AP_REG_ASSERT = 1, /** internal error ? */ +diff --git a/server/core.c b/server/core.c +index e892c87..a8772a3 100644 +--- a/server/core.c ++++ b/server/core.c +@@ -4938,7 +4938,7 @@ static int core_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptem + apr_pool_cleanup_register(pconf, NULL, reset_config_defines, + apr_pool_cleanup_null); + +- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY); ++ ap_regcomp_set_default_cflags(AP_REG_DEFAULT); + + mpm_common_pre_config(pconf); + +diff --git a/server/util_pcre.c b/server/util_pcre.c +index 35831f5..74722b4 100644 +--- a/server/util_pcre.c ++++ b/server/util_pcre.c +@@ -120,8 +120,7 @@ AP_DECLARE(void) ap_regfree(ap_regex_t *preg) + * Compile a regular expression * + *************************************************/ + +-static int default_cflags = AP_REG_DOTALL | +- AP_REG_DOLLAR_ENDONLY; ++static int default_cflags = AP_REG_DEFAULT; + + AP_DECLARE(int) ap_regcomp_get_default_cflags(void) + { +-- +1.8.3.1 + diff --git a/CVE-2020-1927-2.patch b/CVE-2020-1927-2.patch new file mode 100644 index 0000000000000000000000000000000000000000..33b072ef587c73f7237dbe237bf60bf51b8870c7 --- /dev/null +++ b/CVE-2020-1927-2.patch @@ -0,0 +1,99 @@ +From ff36010963d1c2f2e6b331aa6d7d7d879e3975f6 Mon Sep 17 00:00:00 2001 +From: Eric Covener +Date: Wed, 19 Feb 2020 12:26:31 +0000 +Subject: [PATCH 2/2] add AP_REG_NO_DEFAULT to allow opt-out of pcre defaults + +... 
and use it in mod_substitute to avoid DOTALL + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1874191 13f79535-47bb-0310-9956-ffa450edef68 +--- + include/ap_mmn.h | 1 + + include/ap_regex.h | 4 +++- + modules/filters/mod_substitute.c | 6 ++++-- + server/util_pcre.c | 4 +++- + server/util_regex.c | 3 ++- + 5 files changed, 13 insertions(+), 5 deletions(-) + +diff --git a/include/ap_mmn.h b/include/ap_mmn.h +index f5043ef..4c74e56 100644 +--- a/include/ap_mmn.h ++++ b/include/ap_mmn.h +@@ -516,6 +516,7 @@ + * 20120211.78 (2.4.34-dev) Add response_field_size to proxy_worker_shared + * 20120211.79 (2.4.34-dev) Add AP_GETLINE_NOSPC_EOL flag to http_protocol.h + * 20120211.90 (2.4.42-dev) AP_REG_DEFAULT macro in ap_regex.h ++ * 20120211.92 (2.4.42-dev) AP_REG_NO_DEFAULT macro in ap_regex.h + */ + + #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */ +diff --git a/include/ap_regex.h b/include/ap_regex.h +index e651eea..7af2f99 100644 +--- a/include/ap_regex.h ++++ b/include/ap_regex.h +@@ -84,7 +84,9 @@ extern "C" { + + #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */ + +-#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */ ++#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitely add AP_REG_DEFAULT options */ ++ ++#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */ + + #define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY) + +diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c +index b7d5296..e976c51 100644 +--- a/modules/filters/mod_substitute.c ++++ b/modules/filters/mod_substitute.c +@@ -667,8 +667,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line) + + /* first see if we can compile the regex */ + if (!is_pattern) { +- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED | +- (ignore_case ? AP_REG_ICASE : 0)); ++ int flags = AP_REG_NO_DEFAULT ++ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY) ++ | (ignore_case ? 
AP_REG_ICASE : 0); ++ r = ap_pregcomp(cmd->pool, from, flags); + if (!r) + return "Substitute could not compile regex"; + } +diff --git a/server/util_pcre.c b/server/util_pcre.c +index 74722b4..8819871 100644 +--- a/server/util_pcre.c ++++ b/server/util_pcre.c +@@ -168,7 +168,9 @@ AP_DECLARE(int) ap_regcomp(ap_regex_t * preg, const char *pattern, int cflags) + int errcode = 0; + int options = PCRE_DUPNAMES; + +- cflags |= default_cflags; ++ if ((cflags & AP_REG_NO_DEFAULT) == 0) ++ cflags |= default_cflags; ++ + if ((cflags & AP_REG_ICASE) != 0) + options |= PCRE_CASELESS; + if ((cflags & AP_REG_NEWLINE) != 0) +diff --git a/server/util_regex.c b/server/util_regex.c +index 2a30d68..5405f8d 100644 +--- a/server/util_regex.c ++++ b/server/util_regex.c +@@ -94,6 +94,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + } + + /* anything after the current delimiter is flags */ ++ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY; + while (*++endp) { + switch (*endp) { + case 'i': ret->flags |= AP_REG_ICASE; break; +@@ -106,7 +107,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool, + default: break; /* we should probably be stricter here */ + } + } +- if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) { ++ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) { + apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup, + apr_pool_cleanup_null); + } +-- +1.8.3.1 + diff --git a/CVE-2020-1934.patch b/CVE-2020-1934.patch new file mode 100644 index 0000000000000000000000000000000000000000..5c00ecadd41ecaddcf8dac75d96708c60000ee3e --- /dev/null +++ b/CVE-2020-1934.patch @@ -0,0 +1,88 @@ +From 0b59e8ce2d978dfd6b74473df4e1309a5c226498 Mon Sep 17 00:00:00 2001 +From: Jim Jagielski +Date: Tue, 11 Feb 2020 13:14:42 +0000 +Subject: [PATCH] Merge r1873745 from trunk: + +trap bad FTP responses + +Submitted by: covener +Reviewed by: covener, minfrin, jorton + +git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1873904 13f79535-47bb-0310-9956-ffa450edef68 +--- + modules/proxy/mod_proxy_ftp.c | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c +index 1557301..6318102 100644 +--- a/modules/proxy/mod_proxy_ftp.c ++++ b/modules/proxy/mod_proxy_ftp.c +@@ -218,7 +218,7 @@ static int ftp_check_string(const char *x) + * (EBCDIC) machines either. 
+ */ + static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, +- char *buff, apr_size_t bufflen, int *eos) ++ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen) + { + apr_bucket *e; + apr_status_t rv; +@@ -230,6 +230,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, + /* start with an empty string */ + buff[0] = 0; + *eos = 0; ++ *outlen = 0; + + /* loop through each brigade */ + while (!found) { +@@ -273,6 +274,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb, + if (len > 0) { + memcpy(pos, response, len); + pos += len; ++ *outlen += len; + } + } + apr_bucket_delete(e); +@@ -385,28 +387,36 @@ static int ftp_getrc_msg(conn_rec *ftp_ctrl, apr_bucket_brigade *bb, char *msgbu + char buff[5]; + char *mb = msgbuf, *me = &msgbuf[msglen]; + apr_status_t rv; ++ apr_size_t nread; ++ + int eos; + +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + /* + ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233) + "<%s", response); + */ ++ if (nread < 4) { ++ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response); ++ *mb = '\0'; ++ return -1; ++ } ++ + if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) || +- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) ++ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-')) + status = 0; + else + status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0'; + + mb = apr_cpystrn(mb, response + 4, me - mb); + +- if (response[3] == '-') { ++ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */ + memcpy(buff, response, 3); + buff[3] = ' '; + do { +- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) { ++ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) { + return -1; + } + mb = apr_cpystrn(mb, response + (' ' == response[0] ? 
1 : 4), me - mb); +-- +1.8.3.1 + diff --git a/httpd.spec b/httpd.spec index 26da9b630fe81d8faf26178da67421b2e0132cd9..4a92e67339141463c9864e584a5a20073f29abfc 100644 --- a/httpd.spec +++ b/httpd.spec @@ -8,7 +8,7 @@ Name: httpd Summary: Apache HTTP Server Version: 2.4.34 -Release: 16 +Release: 17 License: ASL 2.0 URL: https://httpd.apache.org/ Source0: https://www.apache.org/dist/httpd/httpd-%{version}.tar.bz2 @@ -104,6 +104,14 @@ Patch6029: CVE-2019-10098.patch Patch6030: CVE-2019-0196.patch Patch6031: CVE-2019-0197.patch Patch6032: CVE-2019-10097.patch +Patch6033: CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch +Patch6034: CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch +Patch6035: CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch +Patch6036: CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch +Patch6037: CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch +Patch6038: CVE-2020-1927-1.patch +Patch6039: CVE-2020-1927-2.patch +Patch6040: CVE-2020-1934.patch Patch9000: layout_add_openEuler.patch @@ -542,6 +550,12 @@ exit $rv %{_rpmconfigdir}/macros.d/macros.httpd %changelog +* Thu Apr 23 2020 openEuler Buildteam - 2.4.34-17 +- Type:cves +- ID:CVE-2019-9517 CVE-2019-10081 CVE-2019-10082 CVE-2020-1927 CVE-2020-1934 +- SUG:restart +- DESC:fix CVE-2019-9517 CVE-2019-10081 CVE-2019-10082 CVE-2020-1927 CVE-2020-1934 + * Wed Apr 15 2020 chenzhen - 2.4.34-16 - Type:cves - ID:CVE-2019-10092 CVE-2019-10097 CVE-2019-10098 CVE-2019-0196 CVE-2019-0197
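
Editorial note, not part of the patches above: the CVE-2020-1934 change to mod_proxy_ftp rejects FTP control responses shorter than four bytes (the new nread < 4 check) before parsing the three-digit status code, so short or empty replies from a hostile backend no longer get parsed from bytes that were never filled in. The standalone sketch below mirrors only that validation step under those assumptions; it uses plain C instead of APR, and the function name and sample strings are invented for illustration, not taken from httpd.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Return the 3-digit FTP status code, or -1 if the line is malformed.
 * Loosely mirrors the checks in ftp_getrc_msg() after the patch: the
 * line must be at least 4 bytes and look like "NNN " or "NNN-".
 * (The real code distinguishes the failure modes; this sketch collapses
 * them to -1.) */
static int ftp_status_from_line(const char *line, size_t len)
{
    if (len < 4)
        return -1;
    if (!isdigit((unsigned char)line[0]) ||
        !isdigit((unsigned char)line[1]) ||
        !isdigit((unsigned char)line[2]) ||
        (line[3] != ' ' && line[3] != '-'))
        return -1;
    return (line[0] - '0') * 100 + (line[1] - '0') * 10 + (line[2] - '0');
}

int main(void)
{
    const char *samples[] = { "220 ready", "230-multi line reply", "ok", "22" };
    size_t i;

    for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        printf("%-22s -> %d\n", samples[i],
               ftp_status_from_line(samples[i], strlen(samples[i])));
    }
    return 0;
}

Checking the length before indexing line[3] is the point of the fix: without it, a reply shorter than four bytes would be interpreted from uninitialized buffer contents.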