diff --git a/00-base.conf b/00-base.conf
index 28dacb3bafcd31582acc48e77ed2df65f66bfabb..f673d78af55da62dd33088e8a48c64a4e2cc6392 100644
--- a/00-base.conf
+++ b/00-base.conf
@@ -55,6 +55,7 @@ LoadModule slotmem_plain_module modules/mod_slotmem_plain.so
LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule socache_dbm_module modules/mod_socache_dbm.so
LoadModule socache_memcache_module modules/mod_socache_memcache.so
+LoadModule socache_redis_module modules/mod_socache_redis.so
LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
LoadModule status_module modules/mod_status.so
LoadModule substitute_module modules/mod_substitute.so
diff --git a/CVE-2018-11763.patch b/CVE-2018-11763.patch
deleted file mode 100644
index fc2de14e373af6625093ed8017730e1ef5fd1ec5..0000000000000000000000000000000000000000
--- a/CVE-2018-11763.patch
+++ /dev/null
@@ -1,460 +0,0 @@
-diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
-index 814d2a96..a1b31d2b 100644
---- a/modules/http2/h2_session.c
-+++ b/modules/http2/h2_session.c
-@@ -235,6 +235,7 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
- stream = h2_session_stream_get(session, stream_id);
- if (stream) {
- status = h2_stream_recv_DATA(stream, flags, data, len);
-+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream data rcvd");
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
-@@ -317,9 +318,9 @@ static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
- }
-
- /**
-- * nghttp2 session has received a complete frame. Most, it uses
-- * for processing of internal state. HEADER and DATA frames however
-- * we need to handle ourself.
-+ * nghttp2 session has received a complete frame. Most are used by nghttp2
-+ * for processing of internal state. Some, like HEADER and DATA frames,
-+ * we need to act on.
- */
- static int on_frame_recv_cb(nghttp2_session *ng2s,
- const nghttp2_frame *frame,
-@@ -378,6 +379,9 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
- "h2_stream(%ld-%d): WINDOW_UPDATE incr=%d",
- session->id, (int)frame->hd.stream_id,
- frame->window_update.window_size_increment);
-+ if (nghttp2_session_want_write(session->ngh2)) {
-+ dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update");
-+ }
- break;
- case NGHTTP2_RST_STREAM:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
-@@ -404,6 +408,12 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
- frame->goaway.error_code, NULL);
- }
- break;
-+ case NGHTTP2_SETTINGS:
-+ if (APLOGctrace2(session->c)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-+ H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
-+ }
-+ break;
- default:
- if (APLOGctrace2(session->c)) {
- char buffer[256];
-@@ -415,7 +425,40 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
- }
- break;
- }
-- return (APR_SUCCESS == rv)? 0 : NGHTTP2_ERR_PROTO;
-+
-+ if (session->state == H2_SESSION_ST_IDLE) {
-+ /* We received a frame, but session is in state IDLE. That means the frame
-+ * did not really progress any of the (possibly) open streams. It was a meta
-+ * frame, e.g. SETTINGS/WINDOW_UPDATE/unknown/etc.
-+ * Remember: IDLE means we cannot send because either there are no streams open or
-+ * all open streams are blocked on exhausted WINDOWs for outgoing data.
-+ * The more frames we receive that do not change this, the less interested we
-+ * become in serving this connection. This is expressed in increasing "idle_delays".
-+ * Eventually, the connection will timeout and we'll close it. */
-+ session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received);
-+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c,
-+ H2_SSSN_MSG(session, "session has %ld idle frames"),
-+ (long)session->idle_frames);
-+ if (session->idle_frames > 10) {
-+ apr_size_t busy_frames = H2MAX(session->frames_received - session->idle_frames, 1);
-+ int idle_ratio = (int)(session->idle_frames / busy_frames);
-+ if (idle_ratio > 100) {
-+ session->idle_delay = apr_time_from_msec(H2MIN(1000, idle_ratio));
-+ }
-+ else if (idle_ratio > 10) {
-+ session->idle_delay = apr_time_from_msec(10);
-+ }
-+ else if (idle_ratio > 1) {
-+ session->idle_delay = apr_time_from_msec(1);
-+ }
-+ else {
-+ session->idle_delay = 0;
-+ }
-+ }
-+ }
-+
-+ if (APR_SUCCESS != rv) return NGHTTP2_ERR_PROTO;
-+ return 0;
- }
-
- static int h2_session_continue_data(h2_session *session) {
-@@ -1603,23 +1646,57 @@ static void update_child_status(h2_session *session, int status, const char *msg
-
- static void transit(h2_session *session, const char *action, h2_session_state nstate)
- {
-+ apr_time_t timeout;
-+ int ostate, loglvl;
-+ const char *s;
-+
- if (session->state != nstate) {
-- int loglvl = APLOG_DEBUG;
-- if ((session->state == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
-- || (session->state == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
-+ ostate = session->state;
-+ session->state = nstate;
-+
-+ loglvl = APLOG_DEBUG;
-+ if ((ostate == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
-+ || (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
- loglvl = APLOG_TRACE1;
- }
- ap_log_cerror(APLOG_MARK, loglvl, 0, session->c,
- H2_SSSN_LOG(APLOGNO(03078), session,
- "transit [%s] -- %s --> [%s]"),
-- h2_session_state_str(session->state), action,
-+ h2_session_state_str(ostate), action,
- h2_session_state_str(nstate));
-- session->state = nstate;
-+
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
-- update_child_status(session, (session->open_streams == 0?
-- SERVER_BUSY_KEEPALIVE
-- : SERVER_BUSY_READ), "idle");
-+ if (!session->remote.emitted_count) {
-+ /* on fresh connections, with async mpm, do not return
-+ * to mpm for a second. This gives the first request a better
-+ * chance to arrive (und connection leaving IDLE state).
-+ * If we return to mpm right away, this connection has the
-+ * same chance of being cleaned up by the mpm as connections
-+ * that already served requests - not fair. */
-+ session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
-+ s = "timeout";
-+ timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
-+ update_child_status(session, SERVER_BUSY_READ, "idle");
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-+ H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
-+ (int)apr_time_sec(H2MAX(session->s->timeout, session->s->keep_alive_timeout)));
-+ }
-+ else if (session->open_streams) {
-+ s = "timeout";
-+ timeout = session->s->keep_alive_timeout;
-+ update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
-+ }
-+ else {
-+ /* normal keepalive setup */
-+ s = "keepalive";
-+ timeout = session->s->keep_alive_timeout;
-+ update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
-+ }
-+ session->idle_until = apr_time_now() + timeout;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-+ H2_SSSN_LOG("", session, "enter idle, %s = %d sec"),
-+ s, (int)apr_time_sec(timeout));
- break;
- case H2_SESSION_ST_DONE:
- update_child_status(session, SERVER_CLOSING, "done");
-@@ -1726,8 +1803,6 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
- * This means we only wait for WINDOW_UPDATE from the
- * client and can block on READ. */
- transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
-- session->idle_until = apr_time_now() + session->s->timeout;
-- session->keep_sync_until = session->idle_until;
- /* Make sure we have flushed all previously written output
- * so that the client will react. */
- if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
-@@ -1738,12 +1813,7 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
- }
- else if (session->local.accepting) {
- /* When we have no streams, but accept new, switch to idle */
-- apr_time_t now = apr_time_now();
- transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
-- session->idle_until = (session->remote.emitted_count?
-- session->s->keep_alive_timeout :
-- session->s->timeout) + now;
-- session->keep_sync_until = now + apr_time_from_sec(1);
- }
- else {
- /* We are no longer accepting new streams and there are
-@@ -1758,12 +1828,25 @@ static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
- }
- }
-
--static void h2_session_ev_data_read(h2_session *session, int arg, const char *msg)
-+static void h2_session_ev_frame_rcvd(h2_session *session, int arg, const char *msg)
-+{
-+ switch (session->state) {
-+ case H2_SESSION_ST_IDLE:
-+ case H2_SESSION_ST_WAIT:
-+ transit(session, "frame received", H2_SESSION_ST_BUSY);
-+ break;
-+ default:
-+ /* nop */
-+ break;
-+ }
-+}
-+
-+static void h2_session_ev_stream_change(h2_session *session, int arg, const char *msg)
- {
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_WAIT:
-- transit(session, "data read", H2_SESSION_ST_BUSY);
-+ transit(session, "stream change", H2_SESSION_ST_BUSY);
- break;
- default:
- /* nop */
-@@ -1803,16 +1886,6 @@ static void h2_session_ev_pre_close(h2_session *session, int arg, const char *ms
- static void ev_stream_open(h2_session *session, h2_stream *stream)
- {
- h2_iq_append(session->in_process, stream->id);
-- switch (session->state) {
-- case H2_SESSION_ST_IDLE:
-- if (session->open_streams == 1) {
-- /* enter timeout, since we have a stream again */
-- session->idle_until = (session->s->timeout + apr_time_now());
-- }
-- break;
-- default:
-- break;
-- }
- }
-
- static void ev_stream_closed(h2_session *session, h2_stream *stream)
-@@ -1825,11 +1898,6 @@ static void ev_stream_closed(h2_session *session, h2_stream *stream)
- }
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
-- if (session->open_streams == 0) {
-- /* enter keepalive timeout, since we no longer have streams */
-- session->idle_until = (session->s->keep_alive_timeout
-- + apr_time_now());
-- }
- break;
- default:
- break;
-@@ -1887,6 +1955,7 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream)
- default:
- break;
- }
-+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream state change");
- }
-
- static void on_stream_event(void *ctx, h2_stream *stream,
-@@ -1945,8 +2014,8 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
- case H2_SESSION_EV_NO_IO:
- h2_session_ev_no_io(session, arg, msg);
- break;
-- case H2_SESSION_EV_DATA_READ:
-- h2_session_ev_data_read(session, arg, msg);
-+ case H2_SESSION_EV_FRAME_RCVD:
-+ h2_session_ev_frame_rcvd(session, arg, msg);
- break;
- case H2_SESSION_EV_NGH2_DONE:
- h2_session_ev_ngh2_done(session, arg, msg);
-@@ -1957,6 +2026,9 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
- case H2_SESSION_EV_PRE_CLOSE:
- h2_session_ev_pre_close(session, arg, msg);
- break;
-+ case H2_SESSION_EV_STREAM_CHANGE:
-+ h2_session_ev_stream_change(session, arg, msg);
-+ break;
- default:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_SSSN_MSG(session, "unknown event %d"), ev);
-@@ -1990,13 +2062,15 @@ apr_status_t h2_session_process(h2_session *session, int async)
- apr_status_t status = APR_SUCCESS;
- conn_rec *c = session->c;
- int rv, mpm_state, trace = APLOGctrace3(c);
--
-+ apr_time_t now;
-+
- if (trace) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session, "process start, async=%d"), async);
- }
-
- while (session->state != H2_SESSION_ST_DONE) {
-+ now = apr_time_now();
- session->have_read = session->have_written = 0;
-
- if (session->local.accepting
-@@ -2034,39 +2108,42 @@ apr_status_t h2_session_process(h2_session *session, int async)
- break;
-
- case H2_SESSION_ST_IDLE:
-- /* We trust our connection into the default timeout/keepalive
-- * handling of the core filters/mpm iff:
-- * - keep_sync_until is not set
-- * - we have an async mpm
-- * - we have no open streams to process
-- * - we are not sitting on a Upgrade: request
-- * - we already have seen at least one request
-- */
-- if (!session->keep_sync_until && async && !session->open_streams
-- && !session->r && session->remote.emitted_count) {
-+ if (session->idle_until && (apr_time_now() + session->idle_delay) > session->idle_until) {
-+ ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
-+ H2_SSSN_MSG(session, "idle, timeout reached, closing"));
-+ if (session->idle_delay) {
-+ apr_table_setn(session->c->notes, "short-lingering-close", "1");
-+ }
-+ dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
-+ goto out;
-+ }
-+
-+ if (session->idle_delay) {
-+ /* we are less interested in spending time on this connection */
-+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c,
-+ H2_SSSN_MSG(session, "session is idle (%ld ms), idle wait %ld sec left"),
-+ (long)apr_time_as_msec(session->idle_delay),
-+ (long)apr_time_sec(session->idle_until - now));
-+ apr_sleep(session->idle_delay);
-+ session->idle_delay = 0;
-+ }
-+
-+ h2_conn_io_flush(&session->io);
-+ if (async && !session->r && (now > session->idle_sync_until)) {
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session,
- "nonblock read, %d streams open"),
- session->open_streams);
- }
-- h2_conn_io_flush(&session->io);
- status = h2_session_read(session, 0);
-
- if (status == APR_SUCCESS) {
- session->have_read = 1;
-- dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
- }
-- else if (APR_STATUS_IS_EAGAIN(status)
-- || APR_STATUS_IS_TIMEUP(status)) {
-- if (apr_time_now() > session->idle_until) {
-- dispatch_event(session,
-- H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
-- }
-- else {
-- status = APR_EAGAIN;
-- goto out;
-- }
-+ else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
-+ status = APR_EAGAIN;
-+ goto out;
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
-@@ -2078,7 +2155,6 @@ apr_status_t h2_session_process(h2_session *session, int async)
- }
- else {
- /* make certain, we send everything before we idle */
-- h2_conn_io_flush(&session->io);
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session,
-@@ -2090,7 +2166,6 @@ apr_status_t h2_session_process(h2_session *session, int async)
- */
- status = h2_mplx_idle(session->mplx);
- if (status == APR_EAGAIN) {
-- dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
- break;
- }
- else if (status != APR_SUCCESS) {
-@@ -2101,33 +2176,11 @@ apr_status_t h2_session_process(h2_session *session, int async)
- status = h2_session_read(session, 1);
- if (status == APR_SUCCESS) {
- session->have_read = 1;
-- dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
- }
- else if (status == APR_EAGAIN) {
- /* nothing to read */
- }
- else if (APR_STATUS_IS_TIMEUP(status)) {
-- apr_time_t now = apr_time_now();
-- if (now > session->keep_sync_until) {
-- /* if we are on an async mpm, now is the time that
-- * we may dare to pass control to it. */
-- session->keep_sync_until = 0;
-- }
-- if (now > session->idle_until) {
-- if (trace) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
-- H2_SSSN_MSG(session,
-- "keepalive timeout"));
-- }
-- dispatch_event(session,
-- H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
-- }
-- else if (trace) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
-- H2_SSSN_MSG(session,
-- "keepalive, %f sec left"),
-- (session->idle_until - now) / 1000000.0f);
-- }
- /* continue reading handling */
- }
- else if (APR_STATUS_IS_ECONNABORTED(status)
-@@ -2145,6 +2198,18 @@ apr_status_t h2_session_process(h2_session *session, int async)
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
- }
- }
-+ if (nghttp2_session_want_write(session->ngh2)) {
-+ ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
-+ status = h2_session_send(session);
-+ if (status == APR_SUCCESS) {
-+ status = h2_conn_io_flush(&session->io);
-+ }
-+ if (status != APR_SUCCESS) {
-+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
-+ H2_ERR_INTERNAL_ERROR, "writing");
-+ break;
-+ }
-+ }
- break;
-
- case H2_SESSION_ST_BUSY:
-@@ -2154,7 +2219,6 @@ apr_status_t h2_session_process(h2_session *session, int async)
- status = h2_session_read(session, 0);
- if (status == APR_SUCCESS) {
- session->have_read = 1;
-- dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
- }
- else if (status == APR_EAGAIN) {
- /* nothing to read */
-@@ -2218,7 +2282,7 @@ apr_status_t h2_session_process(h2_session *session, int async)
- session->iowait);
- if (status == APR_SUCCESS) {
- session->wait_us = 0;
-- dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
-+ dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, NULL);
- }
- else if (APR_STATUS_IS_TIMEUP(status)) {
- /* go back to checking all inputs again */
-diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
-index 486938b0..df2a8624 100644
---- a/modules/http2/h2_session.h
-+++ b/modules/http2/h2_session.h
-@@ -66,10 +66,11 @@ typedef enum {
- H2_SESSION_EV_PROTO_ERROR, /* protocol error */
- H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
- H2_SESSION_EV_NO_IO, /* nothing has been read or written */
-- H2_SESSION_EV_DATA_READ, /* connection data has been read */
-+ H2_SESSION_EV_FRAME_RCVD, /* a frame has been received */
- H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
- H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
- H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
-+ H2_SESSION_EV_STREAM_CHANGE, /* a stream (state/input/output) changed */
- } h2_session_event_t;
-
- typedef struct h2_session {
-@@ -118,7 +119,9 @@ typedef struct h2_session {
- apr_size_t max_stream_mem; /* max buffer memory for a single stream */
-
- apr_time_t idle_until; /* Time we shut down due to sheer boredom */
-- apr_time_t keep_sync_until; /* Time we sync wait until passing to async mpm */
-+ apr_time_t idle_sync_until; /* Time we sync wait until keepalive handling kicks in */
-+ apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */
-+ apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
-
- apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
- struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
diff --git a/CVE-2018-17189.patch b/CVE-2018-17189.patch
deleted file mode 100644
index 02aaf24af5b945b0ef13648fd3a2e4584be458f5..0000000000000000000000000000000000000000
--- a/CVE-2018-17189.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
-index 2e95659..f7f81be 100644
---- a/modules/http2/h2_conn.c
-+++ b/modules/http2/h2_conn.c
-@@ -354,6 +354,15 @@ apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
- * (Not necessarily in pre_connection, but later. Set it here, so it
- * is in place.) */
- slave->keepalives = 1;
-+ /* We signal that this connection will be closed after the request.
-+ * Which is true in that sense that we throw away all traffic data
-+ * on this slave connection after each requests. Although we might
-+ * reuse internal structures like memory pools.
-+ * The wanted effect of this is that httpd does not try to clean up
-+ * any dangling data on this connection when a request is done. Which
-+ * is unneccessary on a h2 stream.
-+ */
-+ slave->keepalive = AP_CONN_CLOSE;
- return ap_run_pre_connection(slave, csd);
- }
- return APR_SUCCESS;
diff --git a/CVE-2018-17199.patch b/CVE-2018-17199.patch
deleted file mode 100644
index e42f56b7ff2710a373fc895212633efdc76564dd..0000000000000000000000000000000000000000
--- a/CVE-2018-17199.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From 34f58ae20d9a85f2a1508a9a732874239491d456 Mon Sep 17 00:00:00 2001
-From: Hank Ibell
-Date: Tue, 15 Jan 2019 19:54:41 +0000
-Subject: [PATCH] mod_session: Always decode session attributes early.
-
-Backport r1850947 from trunk
-Submitted by: hwibell
-Reviewed by: hwibell, covener, wrowe
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1851409 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 2 ++
- STATUS | 5 -----
- modules/session/mod_session.c | 25 ++++++++++++++-----------
- 3 files changed, 16 insertions(+), 16 deletions(-)
-
-#diff --git a/CHANGES b/CHANGES
-#index c4d9f6c2ea8..4b0a07fdcf5 100644
-#--- a/CHANGES
-#+++ b/CHANGES
-#@@ -9,6 +9,8 @@ Changes with Apache 2.4.38
-# and we should just set the value for the environment variable
-# like in the pattern case. [Ruediger Pluem]
-#
-#+ *) mod_session: Always decode session attributes early. [Hank Ibell]
-#+
-# *) core: Incorrect values for environment variables are substituted when
-# multiple environment variables are specified in a directive. [Hank Ibell]
-#
-#diff --git a/STATUS b/STATUS
-#index 00070f9f247..45a92ba4d81 100644
-#--- a/STATUS
-#+++ b/STATUS
-#@@ -125,11 +125,6 @@ RELEASE SHOWSTOPPERS:
-# PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
-# [ start all new proposals below, under PATCHES PROPOSED. ]
-#
-#- *) mod_session: Always decode session attributes early.
-#- trunk patch: http://svn.apache.org/r1850947
-#- 2.4.x patch: svn merge -c 1850947 ^/httpd/httpd/trunk .
-#- +1: hwibell, covener, wrowe
-#-
-# *) mod_ssl (ssl_engine_io.c: bio_filter_out_write, bio_filter_in_read)
-# Clear retry flags before aborting on client-initiated reneg. [Joe Orton]
-# PR: 63052
-diff --git a/modules/session/mod_session.c b/modules/session/mod_session.c
-index d517020d995..64e6e4a8132 100644
---- a/modules/session/mod_session.c
-+++ b/modules/session/mod_session.c
-@@ -126,20 +126,23 @@ static apr_status_t ap_session_load(request_rec * r, session_rec ** z)
-
- /* found a session that hasn't expired? */
- now = apr_time_now();
-+
- if (zz) {
-- if (zz->expiry && zz->expiry < now) {
-+ /* load the session attibutes */
-+ rv = ap_run_session_decode(r, zz);
-+
-+ /* having a session we cannot decode is just as good as having
-+ none at all */
-+ if (OK != rv) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01817)
-+ "error while decoding the session, "
-+ "session not loaded: %s", r->uri);
- zz = NULL;
- }
-- else {
-- /* having a session we cannot decode is just as good as having
-- none at all */
-- rv = ap_run_session_decode(r, zz);
-- if (OK != rv) {
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01817)
-- "error while decoding the session, "
-- "session not loaded: %s", r->uri);
-- zz = NULL;
-- }
-+
-+ /* invalidate session if session is expired */
-+ if (zz && zz->expiry && zz->expiry < now) {
-+ zz = NULL;
- }
- }
-
diff --git a/CVE-2019-0196.patch b/CVE-2019-0196.patch
deleted file mode 100644
index 2b3220612dbecaa22590ebe4343effa01b54a36e..0000000000000000000000000000000000000000
--- a/CVE-2019-0196.patch
+++ /dev/null
@@ -1,28 +0,0 @@
-From 0ab8232f93d1861a6d63ffcfc127c9fe7f701d78 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Tue, 5 Feb 2019 11:52:28 +0000
-Subject: [PATCH] Merge of r1852986 from trunk:
-
-mod_http2: disentangelment of stream and request method.
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1852989 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_request.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
-index 8899c4f..5ee88e9 100644
---- a/modules/http2/h2_request.c
-+++ b/modules/http2/h2_request.c
-@@ -266,7 +266,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-
- /* Time to populate r with the data we have. */
- r->request_time = req->request_time;
-- r->method = req->method;
-+ r->method = apr_pstrdup(r->pool, req->method);
- /* Provide quick information about the request method as soon as known */
- r->method_number = ap_method_number_of(r->method);
- if (r->method_number == M_GET && r->method[0] == 'H') {
---
-1.8.3.1
-
diff --git a/CVE-2019-0197.patch b/CVE-2019-0197.patch
deleted file mode 100644
index 226e70a0f8647d3c2955e437f84c540fddb7fca2..0000000000000000000000000000000000000000
--- a/CVE-2019-0197.patch
+++ /dev/null
@@ -1,120 +0,0 @@
-From 610b78f35a5dd12f953aac23d867c890c92c46d1 Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Wed, 13 Mar 2019 12:30:20 +0000
-Subject: [PATCH] Merge r1852038, r1852101 from trunk:
-
-mod_http2: enable re-use of slave connections again.
-
-mod_http2: fixed slave connection keepalives counter.
-
-Submitted by: icing
-Reviewed by: icing, ylavic, jim
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855406 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_conn.c | 14 +++++++++-----
- modules/http2/h2_mplx.c | 8 +++++++-
- modules/http2/h2_task.c | 3 +--
- 3 files changed, 17 insertions(+), 8 deletions(-)
-
-diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
-index f7f81be..dc2081e 100644
---- a/modules/http2/h2_conn.c
-+++ b/modules/http2/h2_conn.c
-@@ -305,6 +305,10 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
- c->notes = apr_table_make(pool, 5);
- c->input_filters = NULL;
- c->output_filters = NULL;
-+ c->keepalives = 0;
-+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
-+ c->filter_conn_ctx = NULL;
-+#endif
- c->bucket_alloc = apr_bucket_alloc_create(pool);
- c->data_in_input_filters = 0;
- c->data_in_output_filters = 0;
-@@ -332,16 +336,15 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
- ap_set_module_config(c->conn_config, mpm, cfg);
- }
-
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-- "h2_stream(%ld-%d): created slave", master->id, slave_id);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
-+ "h2_slave(%s): created", c->log_id);
- return c;
- }
-
- void h2_slave_destroy(conn_rec *slave)
- {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, slave,
-- "h2_stream(%s): destroy slave",
-- apr_table_get(slave->notes, H2_TASK_ID_NOTE));
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, slave,
-+ "h2_slave(%s): destroy", slave->log_id);
- slave->sbh = NULL;
- apr_pool_destroy(slave->pool);
- }
-@@ -365,6 +368,7 @@ apr_status_t h2_slave_run_pre_connection(conn_rec *slave, apr_socket_t *csd)
- slave->keepalive = AP_CONN_CLOSE;
- return ap_run_pre_connection(slave, csd);
- }
-+ ap_assert(slave->output_filters);
- return APR_SUCCESS;
- }
-
-diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
-index 05667ab..29f040c 100644
---- a/modules/http2/h2_mplx.c
-+++ b/modules/http2/h2_mplx.c
-@@ -327,7 +327,8 @@ static int stream_destroy_iter(void *ctx, void *val)
- && !task->rst_error);
- }
-
-- if (reuse_slave && slave->keepalive == AP_CONN_KEEPALIVE) {
-+ task->c = NULL;
-+ if (reuse_slave) {
- h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
- APLOGNO(03385) "h2_task_destroy, reuse slave");
- h2_task_destroy(task);
-@@ -437,6 +438,8 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- apr_status_t status;
- int i, wait_secs = 60;
-
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ "h2_mplx(%ld): start release", m->id);
- /* How to shut down a h2 connection:
- * 0. abort and tell the workers that no more tasks will come from us */
- m->aborted = 1;
-@@ -973,6 +976,9 @@ static apr_status_t unschedule_slow_tasks(h2_mplx *m)
- */
- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-+ "h2_mplx(%s): unschedule, resetting task for redo later",
-+ stream->task->id);
- h2_task_rst(stream->task, H2_ERR_CANCEL);
- h2_ihash_add(m->sredo, stream);
- --n;
-diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
-index 86fb026..f4c875c 100644
---- a/modules/http2/h2_task.c
-+++ b/modules/http2/h2_task.c
-@@ -504,7 +504,7 @@ static int h2_task_pre_conn(conn_rec* c, void *arg)
- (void)arg;
- if (h2_ctx_is_task(ctx)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-- "h2_h2, pre_connection, found stream task");
-+ "h2_slave(%s), pre_connection, adding filters", c->log_id);
- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
- ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
- ap_add_output_filter("H2_SLAVE_OUT", NULL, NULL, c);
-@@ -545,7 +545,6 @@ h2_task *h2_task_create(conn_rec *slave, int stream_id,
- void h2_task_destroy(h2_task *task)
- {
- if (task->output.beam) {
-- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
- h2_beam_destroy(task->output.beam);
- task->output.beam = NULL;
- }
---
-1.8.3.1
-
diff --git a/CVE-2019-0211.patch b/CVE-2019-0211.patch
deleted file mode 100644
index e117f521ff881de9f0f51774e02b0ec251f39214..0000000000000000000000000000000000000000
--- a/CVE-2019-0211.patch
+++ /dev/null
@@ -1,220 +0,0 @@
-MPMs unix: bind the bucket number of each child to its slot number
-
-We need not remember each child's bucket number in SHM for restarts, for the
-lifetime of the httpd main process the bucket number can be bound to the slot
-number such that: bucket = slot % num_buckets.
-
-This both simplifies the logic and helps children maintenance per bucket in
-threaded MPMs, where previously perform_idle_server_maintenance() could create
-or kill children processes for the buckets it was not in charge of.
-
-diff --git a/include/scoreboard.h b/include/scoreboard.h
-index 57cf3df..b714a8c 100644
---- a/include/scoreboard.h
-+++ b/include/scoreboard.h
-@@ -143,7 +143,9 @@ struct process_score {
- apr_uint32_t lingering_close; /* async connections in lingering close */
- apr_uint32_t keep_alive; /* async connections in keep alive */
- apr_uint32_t suspended; /* connections suspended by some module */
-- int bucket; /* Listener bucket used by this child */
-+ int bucket; /* Listener bucket used by this child; this field is DEPRECATED
-+ * and no longer updated by the MPMs (i.e. always zero).
-+ */
- };
-
- /* Scoreboard is now in 'local' memory, since it isn't updated once created,
-diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
-index ffe8a23..048ae61 100644
---- a/server/mpm/event/event.c
-+++ b/server/mpm/event/event.c
-@@ -2695,7 +2695,6 @@ static int make_child(server_rec * s, int slot, int bucket)
-
- ap_scoreboard_image->parent[slot].quiescing = 0;
- ap_scoreboard_image->parent[slot].not_accepting = 0;
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- event_note_child_started(slot, pid);
- active_daemons++;
- retained->total_daemons++;
-@@ -2734,6 +2733,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- * that threads_per_child is always > 0 */
- int status = SERVER_DEAD;
- int child_threads_active = 0;
-+ int bucket = i % num_buckets;
-
- if (i >= retained->max_daemons_limit &&
- free_length == retained->idle_spawn_rate[child_bucket]) {
-@@ -2757,7 +2757,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- */
- if (status <= SERVER_READY && !ps->quiescing && !ps->not_accepting
- && ps->generation == retained->mpm->my_generation
-- && ps->bucket == child_bucket)
-+ && bucket == child_bucket)
- {
- ++idle_thread_count;
- }
-@@ -2768,7 +2768,9 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- last_non_dead = i;
- }
- active_thread_count += child_threads_active;
-- if (!ps->pid && free_length < retained->idle_spawn_rate[child_bucket])
-+ if (!ps->pid
-+ && bucket == child_bucket
-+ && free_length < retained->idle_spawn_rate[child_bucket])
- free_slots[free_length++] = i;
- else if (child_threads_active == threads_per_child)
- had_healthy_child = 1;
-@@ -2951,13 +2953,14 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
- retained->total_daemons--;
- if (processed_status == APEXIT_CHILDSICK) {
- /* resource shortage, minimize the fork rate */
-- retained->idle_spawn_rate[ps->bucket] = 1;
-+ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
- }
- else if (remaining_children_to_start) {
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot, ps->bucket);
-+ make_child(ap_server_conf, child_slot,
-+ child_slot % num_buckets);
- --remaining_children_to_start;
- }
- }
-diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c
-index 8efda72..7c00625 100644
---- a/server/mpm/prefork/prefork.c
-+++ b/server/mpm/prefork/prefork.c
-@@ -637,8 +637,9 @@ static void child_main(int child_num_arg, int child_bucket)
- }
-
-
--static int make_child(server_rec *s, int slot, int bucket)
-+static int make_child(server_rec *s, int slot)
- {
-+ int bucket = slot % retained->mpm->num_buckets;
- int pid;
-
- if (slot + 1 > retained->max_daemons_limit) {
-@@ -716,7 +717,6 @@ static int make_child(server_rec *s, int slot, int bucket)
- child_main(slot, bucket);
- }
-
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- prefork_note_child_started(slot, pid);
-
- return 0;
-@@ -732,7 +732,7 @@ static void startup_children(int number_to_start)
- if (ap_scoreboard_image->servers[i][0].status != SERVER_DEAD) {
- continue;
- }
-- if (make_child(ap_server_conf, i, i % retained->mpm->num_buckets) < 0) {
-+ if (make_child(ap_server_conf, i) < 0) {
- break;
- }
- --number_to_start;
-@@ -741,8 +741,6 @@ static void startup_children(int number_to_start)
-
- static void perform_idle_server_maintenance(apr_pool_t *p)
- {
-- static int bucket_make_child_record = -1;
-- static int bucket_kill_child_record = -1;
- int i;
- int idle_count;
- worker_score *ws;
-@@ -789,6 +787,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
- }
- retained->max_daemons_limit = last_non_dead + 1;
- if (idle_count > ap_daemons_max_free) {
-+ static int bucket_kill_child_record = -1;
- /* kill off one child... we use the pod because that'll cause it to
- * shut down gracefully, in case it happened to pick up a request
- * while we were counting
-@@ -819,10 +818,7 @@ static void perform_idle_server_maintenance(apr_pool_t *p)
- idle_count, total_non_dead);
- }
- for (i = 0; i < free_length; ++i) {
-- bucket_make_child_record++;
-- bucket_make_child_record %= retained->mpm->num_buckets;
-- make_child(ap_server_conf, free_slots[i],
-- bucket_make_child_record);
-+ make_child(ap_server_conf, free_slots[i]);
- }
- /* the next time around we want to spawn twice as many if this
- * wasn't good enough, but not if we've just done a graceful
-@@ -867,7 +863,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
-
- if (one_process) {
- AP_MONCONTROL(1);
-- make_child(ap_server_conf, 0, 0);
-+ make_child(ap_server_conf, 0);
- /* NOTREACHED */
- ap_assert(0);
- return !OK;
-@@ -976,8 +972,7 @@ static int prefork_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot,
-- ap_get_scoreboard_process(child_slot)->bucket);
-+ make_child(ap_server_conf, child_slot);
- --remaining_children_to_start;
- }
- #if APR_HAS_OTHER_CHILD
-diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c
-index 8012fe2..a927942 100644
---- a/server/mpm/worker/worker.c
-+++ b/server/mpm/worker/worker.c
-@@ -1339,7 +1339,6 @@ static int make_child(server_rec *s, int slot, int bucket)
- worker_note_child_lost_slot(slot, pid);
- }
- ap_scoreboard_image->parent[slot].quiescing = 0;
-- ap_scoreboard_image->parent[slot].bucket = bucket;
- worker_note_child_started(slot, pid);
- return 0;
- }
-@@ -1388,6 +1387,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- int any_dead_threads = 0;
- int all_dead_threads = 1;
- int child_threads_active = 0;
-+ int bucket = i % num_buckets;
-
- if (i >= retained->max_daemons_limit &&
- totally_free_length == retained->idle_spawn_rate[child_bucket]) {
-@@ -1420,7 +1420,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- if (status <= SERVER_READY &&
- !ps->quiescing &&
- ps->generation == retained->mpm->my_generation &&
-- ps->bucket == child_bucket) {
-+ bucket == child_bucket) {
- ++idle_thread_count;
- }
- if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
-@@ -1430,6 +1430,7 @@ static void perform_idle_server_maintenance(int child_bucket, int num_buckets)
- }
- active_thread_count += child_threads_active;
- if (any_dead_threads
-+ && bucket == child_bucket
- && totally_free_length < retained->idle_spawn_rate[child_bucket]
- && free_length < MAX_SPAWN_RATE / num_buckets
- && (!ps->pid /* no process in the slot */
-@@ -1615,14 +1616,15 @@ static void server_main_loop(int remaining_children_to_start, int num_buckets)
- ps->quiescing = 0;
- if (processed_status == APEXIT_CHILDSICK) {
- /* resource shortage, minimize the fork rate */
-- retained->idle_spawn_rate[ps->bucket] = 1;
-+ retained->idle_spawn_rate[child_slot % num_buckets] = 1;
- }
- else if (remaining_children_to_start
- && child_slot < ap_daemons_limit) {
- /* we're still doing a 1-for-1 replacement of dead
- * children with new children
- */
-- make_child(ap_server_conf, child_slot, ps->bucket);
-+ make_child(ap_server_conf, child_slot,
-+ child_slot % num_buckets);
- --remaining_children_to_start;
- }
- }
---
-2.19.1
-
diff --git a/CVE-2019-0215.patch b/CVE-2019-0215.patch
deleted file mode 100644
index 88ff2ba83d0ea92938bb07b13df50e5e8b38ceea..0000000000000000000000000000000000000000
--- a/CVE-2019-0215.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c
-index cf841b0..7459503 100644
---- a/modules/ssl/ssl_engine_kernel.c
-+++ b/modules/ssl/ssl_engine_kernel.c
-@@ -1154,6 +1154,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon
- ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);
- apr_table_setn(r->notes, "error-notes",
- "Reason: Cannot perform Post-Handshake Authentication.
");
-+ SSL_set_verify(ssl, vmode_inplace, NULL);
- return HTTP_FORBIDDEN;
- }
-
-@@ -1175,6 +1176,7 @@ static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirCon
- * Finally check for acceptable renegotiation results
- */
- if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) {
-+ SSL_set_verify(ssl, vmode_inplace, NULL);
- return rc;
- }
- }
diff --git a/CVE-2019-0220-1.patch b/CVE-2019-0220-1.patch
deleted file mode 100644
index faf9aadad1ebac65581a91583d0dacec311f6604..0000000000000000000000000000000000000000
--- a/CVE-2019-0220-1.patch
+++ /dev/null
@@ -1,203 +0,0 @@
-From 9bc1917a27a2323e535aadb081e38172ae0e3fc2 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Mon, 18 Mar 2019 08:49:59 +0000
-Subject: [PATCH] Merge of r1855705 from trunk:
-
-core: merge consecutive slashes in the path
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855737 13f79535-47bb-0310-9956-ffa450edef68
----
- include/http_core.h | 2 +-
- include/httpd.h | 14 ++++++++++++--
- server/core.c | 13 +++++++++++++
- server/request.c | 25 +++++++++----------------
- server/util.c | 10 +++++++---
- 5 files changed, 43 insertions(+), 21 deletions(-)
-
-diff --git a/include/http_core.h b/include/http_core.h
-index 35df5dc9601..8e109882244 100644
---- a/include/http_core.h
-+++ b/include/http_core.h
-@@ -740,7 +740,7 @@ typedef struct {
- #define AP_HTTP_METHODS_LENIENT 1
- #define AP_HTTP_METHODS_REGISTERED 2
- char http_methods;
--
-+ unsigned int merge_slashes;
- } core_server_config;
-
- /* for AddOutputFiltersByType in core.c */
-diff --git a/include/httpd.h b/include/httpd.h
-index 65392f83546..99f7f041aea 100644
---- a/include/httpd.h
-+++ b/include/httpd.h
-@@ -1697,11 +1697,21 @@ AP_DECLARE(int) ap_unescape_url_keep2f(char *url, int decode_slashes);
- AP_DECLARE(int) ap_unescape_urlencoded(char *query);
-
- /**
-- * Convert all double slashes to single slashes
-- * @param name The string to convert
-+ * Convert all double slashes to single slashes, except where significant
-+ * to the filesystem on the current platform.
-+ * @param name The string to convert, assumed to be a filesystem path
- */
- AP_DECLARE(void) ap_no2slash(char *name);
-
-+/**
-+ * Convert all double slashes to single slashes, except where significant
-+ * to the filesystem on the current platform.
-+ * @param name The string to convert
-+ * @param is_fs_path if set to 0, the significance of any double-slashes is
-+ * ignored.
-+ */
-+AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path);
-+
- /**
- * Remove all ./ and xx/../ substrings from a file name. Also remove
- * any leading ../ or /../ substrings.
-diff --git a/server/core.c b/server/core.c
-index e2a91c7a0c6..eacb54fecec 100644
---- a/server/core.c
-+++ b/server/core.c
-@@ -490,6 +490,7 @@ static void *create_core_server_config(apr_pool_t *a, server_rec *s)
-
- conf->protocols = apr_array_make(a, 5, sizeof(const char *));
- conf->protocols_honor_order = -1;
-+ conf->merge_slashes = AP_CORE_CONFIG_UNSET;
-
- return (void *)conf;
- }
-@@ -555,6 +556,7 @@ static void *merge_core_server_configs(apr_pool_t *p, void *basev, void *virtv)
- conf->protocols_honor_order = ((virt->protocols_honor_order < 0)?
- base->protocols_honor_order :
- virt->protocols_honor_order);
-+ AP_CORE_MERGE_FLAG(merge_slashes, conf, base, virt);
-
- return conf;
- }
-@@ -1863,6 +1865,13 @@ static const char *set_qualify_redirect_url(cmd_parms *cmd, void *d_, int flag)
- return NULL;
- }
-
-+static const char *set_core_server_flag(cmd_parms *cmd, void *s_, int flag)
-+{
-+ core_server_config *conf =
-+ ap_get_core_module_config(cmd->server->module_config);
-+ return ap_set_flag_slot(cmd, conf, flag);
-+}
-+
- static const char *set_override_list(cmd_parms *cmd, void *d_, int argc, char *const argv[])
- {
- core_dir_config *d = d_;
-@@ -4562,6 +4571,10 @@ AP_INIT_ITERATE("HttpProtocolOptions", set_http_protocol_options, NULL, RSRC_CON
- "'Unsafe' or 'Strict' (default). Sets HTTP acceptance rules"),
- AP_INIT_ITERATE("RegisterHttpMethod", set_http_method, NULL, RSRC_CONF,
- "Registers non-standard HTTP methods"),
-+AP_INIT_FLAG("MergeSlashes", set_core_server_flag,
-+ (void *)APR_OFFSETOF(core_server_config, merge_slashes),
-+ RSRC_CONF,
-+ "Controls whether consecutive slashes in the URI path are merged"),
- { NULL }
- };
-
-diff --git a/server/request.c b/server/request.c
-index dbe3e07f150..1ce8908824b 100644
---- a/server/request.c
-+++ b/server/request.c
-@@ -167,6 +167,8 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- int file_req = (r->main && r->filename);
- int access_status;
- core_dir_config *d;
-+ core_server_config *sconf =
-+ ap_get_core_module_config(r->server->module_config);
-
- /* Ignore embedded %2F's in path for proxy requests */
- if (!r->proxyreq && r->parsed_uri.path) {
-@@ -191,6 +193,10 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- }
-
- ap_getparents(r->uri); /* OK --- shrinking transformations... */
-+ if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
-+ ap_no2slash(r->uri);
-+ ap_no2slash(r->parsed_uri.path);
-+ }
-
- /* All file subrequests are a huge pain... they cannot bubble through the
- * next several steps. Only file subrequests are allowed an empty uri,
-@@ -1411,20 +1417,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
-
- cache = prep_walk_cache(AP_NOTE_LOCATION_WALK, r);
- cached = (cache->cached != NULL);
--
-- /* Location and LocationMatch differ on their behaviour w.r.t. multiple
-- * slashes. Location matches multiple slashes with a single slash,
-- * LocationMatch doesn't. An exception, for backwards brokenness is
-- * absoluteURIs... in which case neither match multiple slashes.
-- */
-- if (r->uri[0] != '/') {
-- entry_uri = r->uri;
-- }
-- else {
-- char *uri = apr_pstrdup(r->pool, r->uri);
-- ap_no2slash(uri);
-- entry_uri = uri;
-- }
-+ entry_uri = r->uri;
-
- /* If we have an cache->cached location that matches r->uri,
- * and the vhost's list of locations hasn't changed, we can skip
-@@ -1491,7 +1484,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
- pmatch = apr_palloc(rxpool, nmatch*sizeof(ap_regmatch_t));
- }
-
-- if (ap_regexec(entry_core->r, r->uri, nmatch, pmatch, 0)) {
-+ if (ap_regexec(entry_core->r, entry_uri, nmatch, pmatch, 0)) {
- continue;
- }
-
-@@ -1501,7 +1494,7 @@ AP_DECLARE(int) ap_location_walk(request_rec *r)
- apr_table_setn(r->subprocess_env,
- ((const char **)entry_core->refs->elts)[i],
- apr_pstrndup(r->pool,
-- r->uri + pmatch[i].rm_so,
-+ entry_uri + pmatch[i].rm_so,
- pmatch[i].rm_eo - pmatch[i].rm_so));
- }
- }
-diff --git a/server/util.c b/server/util.c
-index fd7a0a14763..607c4850d86 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -561,16 +561,16 @@ AP_DECLARE(void) ap_getparents(char *name)
- name[l] = '\0';
- }
- }
--
--AP_DECLARE(void) ap_no2slash(char *name)
-+AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
- {
-+
- char *d, *s;
-
- s = d = name;
-
- #ifdef HAVE_UNC_PATHS
- /* Check for UNC names. Leave leading two slashes. */
-- if (s[0] == '/' && s[1] == '/')
-+ if (is_fs_path && s[0] == '/' && s[1] == '/')
- *d++ = *s++;
- #endif
-
-@@ -587,6 +587,10 @@ AP_DECLARE(void) ap_no2slash(char *name)
- *d = '\0';
- }
-
-+AP_DECLARE(void) ap_no2slash(char *name)
-+{
-+ ap_no2slash_ex(name, 1);
-+}
-
- /*
- * copy at most n leading directories of s into d
diff --git a/CVE-2019-0220-2.patch b/CVE-2019-0220-2.patch
deleted file mode 100644
index 0204259c8e7d98e74d0debd017d3a23d1cbab3e0..0000000000000000000000000000000000000000
--- a/CVE-2019-0220-2.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From c4ef468b25718a26f2b92cbea3ca093729b79331 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Mon, 18 Mar 2019 12:10:15 +0000
-Subject: [PATCH] merge 1855743,1855744 ^/httpd/httpd/trunk .
-
-r->parsed_uri.path safety in recent backport
-
-*) core: fix SEGFAULT in CONNECT with recent change
- 2.4.x: svn merge -c 1855743,1855744 ^/httpd/httpd/trunk .
- +1: rpluem, icing, covener
-
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855751 13f79535-47bb-0310-9956-ffa450edef68
----
- server/request.c | 4 +++-
- server/util.c | 4 ++++
- 2 files changed, 7 insertions(+), 1 deletion(-)
-
-diff --git a/server/request.c b/server/request.c
-index 1ce8908824b..d5c558afa30 100644
---- a/server/request.c
-+++ b/server/request.c
-@@ -195,7 +195,9 @@ AP_DECLARE(int) ap_process_request_internal(request_rec *r)
- ap_getparents(r->uri); /* OK --- shrinking transformations... */
- if (sconf->merge_slashes != AP_CORE_CONFIG_OFF) {
- ap_no2slash(r->uri);
-- ap_no2slash(r->parsed_uri.path);
-+ if (r->parsed_uri.path) {
-+ ap_no2slash(r->parsed_uri.path);
-+ }
- }
-
- /* All file subrequests are a huge pain... they cannot bubble through the
-diff --git a/server/util.c b/server/util.c
-index 607c4850d86..f3b17f1581e 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -566,6 +566,10 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
-
- char *d, *s;
-
-+ if (!name || !*name) {
-+ return;
-+ }
-+
- s = d = name;
-
- #ifdef HAVE_UNC_PATHS
diff --git a/CVE-2019-0220-3.patch b/CVE-2019-0220-3.patch
deleted file mode 100644
index 7b3ff6fb1cfb328c5a43375b25572edd587cd390..0000000000000000000000000000000000000000
--- a/CVE-2019-0220-3.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 3451fc2bf8708b0dc8cd6a7d0ac0fe5b6401befc Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 19 Mar 2019 18:01:21 +0000
-Subject: [PATCH] *) maintainer mode fix for util.c no2slash_ex trunk
- patch: http://svn.apache.org/r1855755 2.4.x patch svn merge -c 1855755
- ^/httpd/httpd/trunk . +1: covener, rpluem, jim, ylavic
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855853 13f79535-47bb-0310-9956-ffa450edef68
----
- STATUS | 6 ------
- server/util.c | 2 +-
- 2 files changed, 1 insertion(+), 7 deletions(-)
-
-#diff --git a/STATUS b/STATUS
-#index ffe5d22550c..1f8cb2f7884 100644
-#--- a/STATUS
-#+++ b/STATUS
-#@@ -126,12 +126,6 @@ RELEASE SHOWSTOPPERS:
-# PATCHES ACCEPTED TO BACKPORT FROM TRUNK:
-# [ start all new proposals below, under PATCHES PROPOSED. ]
-#
-#- *) maintainer mode fix for util.c no2slash_ex
-#- trunk patch: http://svn.apache.org/r1855755
-#- 2.4.x patch svn merge -c 1855755 ^/httpd/httpd/trunk .
-#- +1: covener, rpluem, jim, ylavic
-#-
-#-
-# PATCHES PROPOSED TO BACKPORT FROM TRUNK:
-# [ New proposals should be added at the end of the list ]
-#
-diff --git a/server/util.c b/server/util.c
-index f3b17f1581e..e0c558cee2d 100644
---- a/server/util.c
-+++ b/server/util.c
-@@ -566,7 +566,7 @@ AP_DECLARE(void) ap_no2slash_ex(char *name, int is_fs_path)
-
- char *d, *s;
-
-- if (!name || !*name) {
-+ if (!*name) {
- return;
- }
-
diff --git a/CVE-2019-10092-1.patch b/CVE-2019-10092-1.patch
deleted file mode 100644
index 44e3bfd98d2462cb47eb55f1219e9986619fd787..0000000000000000000000000000000000000000
--- a/CVE-2019-10092-1.patch
+++ /dev/null
@@ -1,211 +0,0 @@
-From d656b2c1f4a152c5050f4a154461c4f4dbf3952b Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Fri, 2 Aug 2019 09:10:06 +0000
-Subject: [PATCH 1/2] Merge of r1864191 from trunk:
-
- *) core, proxy: remove request URL and headers from error docs (CVE-2019-10092)
- [Eric Covener]
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864207 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http/http_protocol.c | 86 +++++++++++++------------------------------
- modules/proxy/mod_proxy.c | 7 ++--
- modules/proxy/mod_proxy_ftp.c | 5 ++-
- modules/proxy/proxy_util.c | 5 +--
- 4 files changed, 34 insertions(+), 69 deletions(-)
-
-diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
-index 8543fd1..b85e2f7 100644
---- a/modules/http/http_protocol.c
-+++ b/modules/http/http_protocol.c
-@@ -1133,13 +1133,10 @@ static const char *get_canned_error_string(int status,
-                           "\">here</a>.</p>\n",
- NULL));
- case HTTP_USE_PROXY:
-- return(apr_pstrcat(p,
-- "This resource is only accessible "
-- "through the proxy\n",
-- ap_escape_html(r->pool, location),
-- "
\nYou will need to configure "
-- "your client to use that proxy.
\n",
-- NULL));
-+ return("This resource is only accessible "
-+ "through the proxy\n"
-+ "
\nYou will need to configure "
-+ "your client to use that proxy.
\n");
- case HTTP_PROXY_AUTHENTICATION_REQUIRED:
- case HTTP_UNAUTHORIZED:
- return("This server could not verify that you\n"
-@@ -1155,34 +1152,20 @@ static const char *get_canned_error_string(int status,
-                                   "error-notes",
-                                   "</p>\n"));
- case HTTP_FORBIDDEN:
-- s1 = apr_pstrcat(p,
-- "You don't have permission to access ",
-- ap_escape_html(r->pool, r->uri),
-- "\non this server.
\n",
-- NULL);
-- return(add_optional_notes(r, s1, "error-notes", "
\n"));
-+ return(add_optional_notes(r, "You don't have permission to access this resource.", "error-notes", "
\n"));
- case HTTP_NOT_FOUND:
-- return(apr_pstrcat(p,
-- "The requested URL ",
-- ap_escape_html(r->pool, r->uri),
-- " was not found on this server.
\n",
-- NULL));
-+ return("The requested URL was not found on this server.
\n");
- case HTTP_METHOD_NOT_ALLOWED:
- return(apr_pstrcat(p,
- "The requested method ",
- ap_escape_html(r->pool, r->method),
-- " is not allowed for the URL ",
-- ap_escape_html(r->pool, r->uri),
-- ".
\n",
-+ " is not allowed for this URL.\n",
- NULL));
- case HTTP_NOT_ACCEPTABLE:
-- s1 = apr_pstrcat(p,
-- "An appropriate representation of the "
-- "requested resource ",
-- ap_escape_html(r->pool, r->uri),
-- " could not be found on this server.
\n",
-- NULL);
-- return(add_optional_notes(r, s1, "variant-list", ""));
-+ return(add_optional_notes(r,
-+ "An appropriate representation of the requested resource "
-+ "could not be found on this server.
\n",
-+ "variant-list", ""));
- case HTTP_MULTIPLE_CHOICES:
- return(add_optional_notes(r, "", "variant-list", ""));
- case HTTP_LENGTH_REQUIRED:
-@@ -1193,18 +1176,13 @@ static const char *get_canned_error_string(int status,
- NULL);
- return(add_optional_notes(r, s1, "error-notes", "\n"));
- case HTTP_PRECONDITION_FAILED:
-- return(apr_pstrcat(p,
-- "The precondition on the request "
-- "for the URL ",
-- ap_escape_html(r->pool, r->uri),
-- " evaluated to false.
\n",
-- NULL));
-+ return("The precondition on the request "
-+ "for this URL evaluated to false.
\n");
- case HTTP_NOT_IMPLEMENTED:
- s1 = apr_pstrcat(p,
- "",
-- ap_escape_html(r->pool, r->method), " to ",
-- ap_escape_html(r->pool, r->uri),
-- " not supported.
\n",
-+ ap_escape_html(r->pool, r->method), " ",
-+ " not supported for current URL.
\n",
- NULL);
- return(add_optional_notes(r, s1, "error-notes", "
\n"));
- case HTTP_BAD_GATEWAY:
-@@ -1212,29 +1190,19 @@ static const char *get_canned_error_string(int status,
-             "response from an upstream server.<br />" CRLF;
- return(add_optional_notes(r, s1, "error-notes", "\n"));
- case HTTP_VARIANT_ALSO_VARIES:
-- return(apr_pstrcat(p,
-- "A variant for the requested "
-- "resource\n
\n",
-- ap_escape_html(r->pool, r->uri),
-- "\n
\nis itself a negotiable resource. "
-- "This indicates a configuration error.\n",
-- NULL));
-+ return("A variant for the requested "
-+ "resource\n
\n"
-+ "\n
\nis itself a negotiable resource. "
-+ "This indicates a configuration error.\n");
- case HTTP_REQUEST_TIME_OUT:
- return("Server timeout waiting for the HTTP request from the client.
\n");
- case HTTP_GONE:
-- return(apr_pstrcat(p,
-- "The requested resource
",
-- ap_escape_html(r->pool, r->uri),
-- "
\nis no longer available on this server "
-- "and there is no forwarding address.\n"
-- "Please remove all references to this "
-- "resource.
\n",
-- NULL));
-+ return("The requested resource is no longer available on this server"
-+ " and there is no forwarding address.\n"
-+ "Please remove all references to this resource.
\n");
- case HTTP_REQUEST_ENTITY_TOO_LARGE:
- return(apr_pstrcat(p,
-- "The requested resource
",
-- ap_escape_html(r->pool, r->uri), "
\n",
-- "does not allow request data with ",
-+ "The requested resource does not allow request data with ",
- ap_escape_html(r->pool, r->method),
- " requests, or the amount of data provided in\n"
- "the request exceeds the capacity limit.\n",
-@@ -1318,11 +1286,9 @@ static const char *get_canned_error_string(int status,
-                "the Server Name Indication (SNI) in use for this\n"
-                "connection.\n");
-     case HTTP_UNAVAILABLE_FOR_LEGAL_REASONS:
--        s1 = apr_pstrcat(p,
--                         "Access to ", ap_escape_html(r->pool, r->uri),
--                         "\nhas been denied for legal reasons.<br />\n",
--                         NULL);
--        return(add_optional_notes(r, s1, "error-notes", "</p>\n"));
-+        return(add_optional_notes(r,
-+                                  "Access to this URL has been denied for legal reasons.<br />\n",
-+                                  "error-notes", "</p>\n"));
- default: /* HTTP_INTERNAL_SERVER_ERROR */
- /*
- * This comparison to expose error-notes could be modified to
-diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
-index 9e5de3d..af56af9 100644
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -1055,9 +1055,10 @@ static int proxy_handler(request_rec *r)
- char *end;
- maxfwd = apr_strtoi64(str, &end, 10);
- if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
-- return ap_proxyerror(r, HTTP_BAD_REQUEST,
-- apr_psprintf(r->pool,
-- "Max-Forwards value '%s' could not be parsed", str));
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
-+ "Max-Forwards value '%s' could not be parsed", str);
-+ return ap_proxyerror(r, HTTP_BAD_REQUEST,
-+ "Max-Forwards request header could not be parsed");
- }
- else if (maxfwd == 0) {
- switch (r->method_number) {
-diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
-index 4a10987..8f6f853 100644
---- a/modules/proxy/mod_proxy_ftp.c
-+++ b/modules/proxy/mod_proxy_ftp.c
-@@ -1024,8 +1024,9 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
- /* We break the URL into host, port, path-search */
- if (r->parsed_uri.hostname == NULL) {
- if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
-- return ap_proxyerror(r, HTTP_BAD_REQUEST,
-- apr_psprintf(p, "URI cannot be parsed: %s", url));
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
-+ "URI cannot be parsed: %s", url);
-+ return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
- }
- connectname = uri.hostname;
- connectport = uri.port;
-diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
-index 8cc9673..f6aef84 100644
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -364,12 +364,9 @@ PROXY_DECLARE(char *)
-
- PROXY_DECLARE(int) ap_proxyerror(request_rec *r, int statuscode, const char *message)
- {
-- const char *uri = ap_escape_html(r->pool, r->uri);
- apr_table_setn(r->notes, "error-notes",
- apr_pstrcat(r->pool,
-- "The proxy server could not handle the request ", ap_escape_html(r->pool, r->method), " ", uri,
-- ".\n"
-+ "The proxy server could not handle the request
"
- "Reason: ", ap_escape_html(r->pool, message),
- "
",
- NULL));
---
-1.8.3.1
-
diff --git a/CVE-2019-10092-2.patch b/CVE-2019-10092-2.patch
deleted file mode 100644
index 2dca5a8b08eb088dca819c714a82bb1f4fa4ebc4..0000000000000000000000000000000000000000
--- a/CVE-2019-10092-2.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From b66f9e4fdae9a75955a478da83a5637afaa6cf38 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Thu, 8 Aug 2019 13:09:10 +0000
-Subject: [PATCH 2/2] Merge r1864699 from trunk:
-
-lognos
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864702 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/proxy/mod_proxy.c | 2 +-
- modules/proxy/mod_proxy_ftp.c | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/modules/proxy/mod_proxy.c b/modules/proxy/mod_proxy.c
-index af56af9..eee6a0f 100644
---- a/modules/proxy/mod_proxy.c
-+++ b/modules/proxy/mod_proxy.c
-@@ -1055,7 +1055,7 @@ static int proxy_handler(request_rec *r)
- char *end;
- maxfwd = apr_strtoi64(str, &end, 10);
- if (maxfwd < 0 || maxfwd == APR_INT64_MAX || *end) {
-- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10188)
- "Max-Forwards value '%s' could not be parsed", str);
- return ap_proxyerror(r, HTTP_BAD_REQUEST,
- "Max-Forwards request header could not be parsed");
-diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
-index 8f6f853..1557301 100644
---- a/modules/proxy/mod_proxy_ftp.c
-+++ b/modules/proxy/mod_proxy_ftp.c
-@@ -1024,7 +1024,7 @@ static int proxy_ftp_handler(request_rec *r, proxy_worker *worker,
- /* We break the URL into host, port, path-search */
- if (r->parsed_uri.hostname == NULL) {
- if (APR_SUCCESS != apr_uri_parse(p, url, &uri)) {
-- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO()
-+ ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10189)
- "URI cannot be parsed: %s", url);
- return ap_proxyerror(r, HTTP_BAD_REQUEST, "URI cannot be parsed");
- }
---
-1.8.3.1
-
diff --git a/CVE-2019-10097.patch b/CVE-2019-10097.patch
deleted file mode 100644
index f0a976561cbd7bb7079990b283a1b21d33cfae08..0000000000000000000000000000000000000000
--- a/CVE-2019-10097.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 1c598076631973877437a91fcb37753bd93112eb Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Wed, 7 Aug 2019 11:14:58 +0000
-Subject: [PATCH] Merge r1864526 from trunk:
-
-* modules/metadata/mod_remoteip.c (remoteip_process_v2_header,
- remoteip_input_filter): Add sanity checks.
-
-Submitted by: jorton, Daniel McCarney
-
-Submitted by: jorton
-Reviewed by: jorton, covener, jim
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864613 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/metadata/mod_remoteip.c | 36 +++++++++++++++++++++++++++---------
- 1 file changed, 27 insertions(+), 9 deletions(-)
-
-diff --git a/modules/metadata/mod_remoteip.c b/modules/metadata/mod_remoteip.c
-index 4572ce1..a0cbc0f 100644
---- a/modules/metadata/mod_remoteip.c
-+++ b/modules/metadata/mod_remoteip.c
-@@ -987,15 +987,13 @@ static remoteip_parse_status_t remoteip_process_v2_header(conn_rec *c,
- return HDR_ERROR;
- #endif
- default:
-- /* unsupported protocol, keep local connection address */
-- return HDR_DONE;
-+ /* unsupported protocol */
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(10183)
-+ "RemoteIPProxyProtocol: unsupported protocol %.2hx",
-+ (unsigned short)hdr->v2.fam);
-+ return HDR_ERROR;
- }
- break; /* we got a sockaddr now */
--
-- case 0x00: /* LOCAL command */
-- /* keep local connection address for LOCAL */
-- return HDR_DONE;
--
- default:
- /* not a supported command */
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(03507)
-@@ -1087,11 +1085,24 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f,
- /* try to read a header's worth of data */
- while (!ctx->done) {
- if (APR_BRIGADE_EMPTY(ctx->bb)) {
-- ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block,
-- ctx->need - ctx->rcvd);
-+ apr_off_t got, want = ctx->need - ctx->rcvd;
-+
-+ ret = ap_get_brigade(f->next, ctx->bb, ctx->mode, block, want);
- if (ret != APR_SUCCESS) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10184)
-+ "failed reading input");
- return ret;
- }
-+
-+ ret = apr_brigade_length(ctx->bb, 1, &got);
-+ if (ret || got > want) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, ret, f->c, APLOGNO(10185)
-+ "RemoteIPProxyProtocol header too long, "
-+ "got %" APR_OFF_T_FMT " expected %" APR_OFF_T_FMT,
-+ got, want);
-+ f->c->aborted = 1;
-+ return APR_ECONNABORTED;
-+ }
- }
- if (APR_BRIGADE_EMPTY(ctx->bb)) {
- return block == APR_NONBLOCK_READ ? APR_SUCCESS : APR_EOF;
-@@ -1139,6 +1150,13 @@ static apr_status_t remoteip_input_filter(ap_filter_t *f,
- if (ctx->rcvd >= MIN_V2_HDR_LEN) {
- ctx->need = MIN_V2_HDR_LEN +
- remoteip_get_v2_len((proxy_header *) ctx->header);
-+ if (ctx->need > sizeof(proxy_v2)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, f->c, APLOGNO(10186)
-+ "RemoteIPProxyProtocol protocol header length too long");
-+ f->c->aborted = 1;
-+ apr_brigade_destroy(ctx->bb);
-+ return APR_ECONNABORTED;
-+ }
- }
- if (ctx->rcvd >= ctx->need) {
- psts = remoteip_process_v2_header(f->c, conn_conf,
---
-1.8.3.1
-
diff --git a/CVE-2019-10098.patch b/CVE-2019-10098.patch
deleted file mode 100644
index 874cf9df792600cb9ac9293d02cd9a5a64fe7687..0000000000000000000000000000000000000000
--- a/CVE-2019-10098.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 950e6da61ffb4a2a616fe4d99550ba664bdeaf17 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Fri, 2 Aug 2019 09:24:58 +0000
-Subject: [PATCH] Merge of r1864192 from trunk:
-
- *) core, rewrite: Set PCRE_DOTALL by default
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864213 13f79535-47bb-0310-9956-ffa450edef68
----
- server/util_pcre.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
-diff --git a/server/util_pcre.c b/server/util_pcre.c
-index f2cb1bb..35831f5 100644
---- a/server/util_pcre.c
-+++ b/server/util_pcre.c
-@@ -120,7 +120,8 @@ AP_DECLARE(void) ap_regfree(ap_regex_t *preg)
- * Compile a regular expression *
- *************************************************/
-
--static int default_cflags = AP_REG_DOLLAR_ENDONLY;
-+static int default_cflags = AP_REG_DOTALL |
-+ AP_REG_DOLLAR_ENDONLY;
-
- AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
- {
---
-1.8.3.1
-
diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch
deleted file mode 100644
index 035f27c38193440fa2ca5380abd96043dfe06307..0000000000000000000000000000000000000000
--- a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-1.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From 2040a6943df462ef3fafd220043204ecd08f29dc Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Thu, 13 Jun 2019 11:08:29 +0000
-Subject: [PATCH 1/5] Merge r1860260 from trunk:
-
- * modules/http2: more copying of data to disentangle worker processing from main connection
-
-Submitted by: icing
-Reviewed by: icing, covener, jim
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1861247 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_headers.c | 11 +++++++++--
- modules/http2/h2_headers.h | 8 +++++++-
- modules/http2/h2_session.c | 1 +
- 3 files changed, 17 insertions(+), 3 deletions(-)
-
-diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
-index f01ab88..2be9545 100644
---- a/modules/http2/h2_headers.c
-+++ b/modules/http2/h2_headers.c
-@@ -101,8 +101,9 @@ apr_bucket *h2_bucket_headers_beam(struct h2_bucket_beam *beam,
- const apr_bucket *src)
- {
- if (H2_BUCKET_IS_HEADERS(src)) {
-- h2_headers *r = ((h2_bucket_headers *)src->data)->headers;
-- apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc, r);
-+ h2_headers *src_headers = ((h2_bucket_headers *)src->data)->headers;
-+ apr_bucket *b = h2_bucket_headers_create(dest->bucket_alloc,
-+ h2_headers_clone(dest->p, src_headers));
- APR_BRIGADE_INSERT_TAIL(dest, b);
- return b;
- }
-@@ -153,6 +154,12 @@ h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
- apr_table_copy(pool, h->notes), h->raw_bytes, pool);
- }
-
-+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
-+{
-+ return h2_headers_create(h->status, apr_table_clone(pool, h->headers),
-+ apr_table_clone(pool, h->notes), h->raw_bytes, pool);
-+}
-+
- h2_headers *h2_headers_die(apr_status_t type,
- const h2_request *req, apr_pool_t *pool)
- {
-diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h
-index 840e8c4..b7d95a1 100644
---- a/modules/http2/h2_headers.h
-+++ b/modules/http2/h2_headers.h
-@@ -59,12 +59,18 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status,
- apr_table_t *header, apr_pool_t *pool);
-
- /**
-- * Clone the headers into another pool. This will not copy any
-+ * Copy the headers into another pool. This will not copy any
- * header strings.
- */
- h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
-
- /**
-+ * Clone the headers into another pool. This will also clone any
-+ * header strings.
-+ */
-+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
-+
-+/**
- * Create the headers for the given error.
- * @param stream_id id of the stream to create the headers for
- * @param type the error code
-diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
-index a1b31d2..3f0e9c9 100644
---- a/modules/http2/h2_session.c
-+++ b/modules/http2/h2_session.c
-@@ -1950,6 +1950,7 @@ static void on_stream_state_enter(void *ctx, h2_stream *stream)
- ev_stream_closed(session, stream);
- break;
- case H2_SS_CLEANUP:
-+ nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
- h2_mplx_stream_cleanup(session->mplx, stream);
- break;
- default:
---
-1.8.3.1
-
diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch
deleted file mode 100644
index 8fb3e9b55a8c01e22a8a01ae94e6230e807ec036..0000000000000000000000000000000000000000
--- a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-2.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From 04f21f8422dd763da2f09badac965ff03e59aca8 Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Thu, 13 Jun 2019 11:09:12 +0000
-Subject: [PATCH 2/5] Merge r1707084, r1707093, r1707159, r1707362 from trunk:
-
-eor_bucket: don't destroy the request multiple times should any filter
-do a copy (e.g. mod_bucketeer).
-
-eor_bucket: follow up to r1707084: fix comment.
-
-eor_bucket: follow up to r1707084: use an inner shared bucket.
-
-eor_bucket: follow up to r1707159.
-We need an apr_bucket_refcount, as spotted by Ruediger.
-Submitted by: ylavic
-Reviewed by: icing, covener, jim
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1861248 13f79535-47bb-0310-9956-ffa450edef68
----
- server/eor_bucket.c | 43 ++++++++++++++++++++++++++++---------------
- 1 file changed, 28 insertions(+), 15 deletions(-)
-
-diff --git a/server/eor_bucket.c b/server/eor_bucket.c
-index 4d3e1ec..ecb809c 100644
---- a/server/eor_bucket.c
-+++ b/server/eor_bucket.c
-@@ -19,17 +19,22 @@
- #include "http_protocol.h"
- #include "scoreboard.h"
-
-+typedef struct {
-+ apr_bucket_refcount refcount;
-+ request_rec *data;
-+} ap_bucket_eor;
-+
- static apr_status_t eor_bucket_cleanup(void *data)
- {
-- apr_bucket *b = (apr_bucket *)data;
-- request_rec *r = (request_rec *)b->data;
-+ request_rec **rp = data;
-
-- if (r != NULL) {
-+ if (*rp) {
-+ request_rec *r = *rp;
- /*
- * If eor_bucket_destroy is called after us, this prevents
- * eor_bucket_destroy from trying to destroy the pool again.
- */
-- b->data = NULL;
-+ *rp = NULL;
- /* Update child status and log the transaction */
- ap_update_child_status(r->connection->sbh, SERVER_BUSY_LOG, r);
- ap_run_log_transaction(r);
-@@ -50,11 +55,13 @@ static apr_status_t eor_bucket_read(apr_bucket *b, const char **str,
-
- AP_DECLARE(apr_bucket *) ap_bucket_eor_make(apr_bucket *b, request_rec *r)
- {
-- b->length = 0;
-- b->start = 0;
-- b->data = r;
-- b->type = &ap_bucket_type_eor;
-+ ap_bucket_eor *h;
-+
-+ h = apr_bucket_alloc(sizeof(*h), b->list);
-+ h->data = r;
-
-+ b = apr_bucket_shared_make(b, h, 0, 0);
-+ b->type = &ap_bucket_type_eor;
- return b;
- }
-
-@@ -66,7 +73,9 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_create(apr_bucket_alloc_t *list,
- APR_BUCKET_INIT(b);
- b->free = apr_bucket_free;
- b->list = list;
-+ b = ap_bucket_eor_make(b, r);
- if (r) {
-+ ap_bucket_eor *h = b->data;
- /*
- * Register a cleanup for the request pool as the eor bucket could
- * have been allocated from a different pool then the request pool
-@@ -76,18 +85,22 @@ AP_DECLARE(apr_bucket *) ap_bucket_eor_create(apr_bucket_alloc_t *list,
- * We need to use a pre-cleanup here because a module may create a
- * sub-pool which is still needed during the log_transaction hook.
- */
-- apr_pool_pre_cleanup_register(r->pool, (void *)b, eor_bucket_cleanup);
-+ apr_pool_pre_cleanup_register(r->pool, &h->data, eor_bucket_cleanup);
- }
-- return ap_bucket_eor_make(b, r);
-+ return b;
- }
-
- static void eor_bucket_destroy(void *data)
- {
-- request_rec *r = (request_rec *)data;
-+ ap_bucket_eor *h = data;
-
-- if (r) {
-- /* eor_bucket_cleanup will be called when the pool gets destroyed */
-- apr_pool_destroy(r->pool);
-+ if (apr_bucket_shared_destroy(h)) {
-+ request_rec *r = h->data;
-+ if (r) {
-+ /* eor_bucket_cleanup will be called when the pool gets destroyed */
-+ apr_pool_destroy(r->pool);
-+ }
-+ apr_bucket_free(h);
- }
- }
-
-@@ -97,6 +110,6 @@ AP_DECLARE_DATA const apr_bucket_type_t ap_bucket_type_eor = {
- eor_bucket_read,
- apr_bucket_setaside_noop,
- apr_bucket_split_notimpl,
-- apr_bucket_simple_copy
-+ apr_bucket_shared_copy
- };
-
---
-1.8.3.1
-
diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch
deleted file mode 100644
index 2ca3412fcca315b97c1c0a58e9cd21d4c39bce1f..0000000000000000000000000000000000000000
--- a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-3.patch
+++ /dev/null
@@ -1,306 +0,0 @@
-From 1125fc2240353c41db09eac8fedcc75dfdf44edb Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Wed, 19 Sep 2018 12:55:26 +0000
-Subject: [PATCH 3/5] Merge r1835118 from trunk:
-
-On the trunk:
-
- * silencing gcc uninitialized warning
- * refrainning from apr_table_addn() use since pool debug assumptions are in conflict
- * adding more assertions
- * copy-porting changes to base64 encoding code from mod_md
-
-Submitted by: icing
-Reviewed by: icing, minfrin, jim
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1841330 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_bucket_beam.c | 2 +-
- modules/http2/h2_from_h1.c | 4 +-
- modules/http2/h2_h2.c | 2 +-
- modules/http2/h2_headers.c | 7 ++--
- modules/http2/h2_mplx.c | 4 ++
- modules/http2/h2_proxy_session.c | 4 +-
- modules/http2/h2_util.c | 86 +++++++++++++++++++++-------------------
- 7 files changed, 58 insertions(+), 51 deletions(-)
-
-diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
-index 9f6fa82..f79cbe3 100644
---- a/modules/http2/h2_bucket_beam.c
-+++ b/modules/http2/h2_bucket_beam.c
-@@ -775,7 +775,7 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
- const char *data;
- apr_size_t len;
- apr_status_t status;
-- int can_beam, check_len;
-+ int can_beam = 0, check_len;
-
- if (beam->aborted) {
- return APR_ECONNABORTED;
-diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
-index ae264a9..dd6ad90 100644
---- a/modules/http2/h2_from_h1.c
-+++ b/modules/http2/h2_from_h1.c
-@@ -164,7 +164,7 @@ static int copy_header(void *ctx, const char *name, const char *value)
- {
- apr_table_t *headers = ctx;
-
-- apr_table_addn(headers, name, value);
-+ apr_table_add(headers, name, value);
- return 1;
- }
-
-@@ -250,7 +250,7 @@ static h2_headers *create_response(h2_task *task, request_rec *r)
- if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
- char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
-- apr_table_addn(r->headers_out, "Expires", date);
-+ apr_table_add(r->headers_out, "Expires", date);
- }
-
- /* This is a hack, but I can't find anyway around it. The idea is that
-diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
-index dfee6b5..5580cef 100644
---- a/modules/http2/h2_h2.c
-+++ b/modules/http2/h2_h2.c
-@@ -694,7 +694,7 @@ static void check_push(request_rec *r, const char *tag)
- tag, conf->push_list->nelts);
- for (i = 0; i < conf->push_list->nelts; ++i) {
- h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
-- apr_table_addn(r->headers_out, "Link",
-+ apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
- }
-diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
-index 2be9545..49d9c0a 100644
---- a/modules/http2/h2_headers.c
-+++ b/modules/http2/h2_headers.c
-@@ -117,9 +117,9 @@ h2_headers *h2_headers_create(int status, apr_table_t *headers_in,
- {
- h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
- headers->status = status;
-- headers->headers = (headers_in? apr_table_copy(pool, headers_in)
-+ headers->headers = (headers_in? apr_table_clone(pool, headers_in)
- : apr_table_make(pool, 5));
-- headers->notes = (notes? apr_table_copy(pool, notes)
-+ headers->notes = (notes? apr_table_clone(pool, notes)
- : apr_table_make(pool, 5));
- return headers;
- }
-@@ -150,8 +150,7 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status,
-
- h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
- {
-- return h2_headers_create(h->status, apr_table_copy(pool, h->headers),
-- apr_table_copy(pool, h->notes), h->raw_bytes, pool);
-+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
- }
-
- h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
-diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
-index 29f040c..db3cb63 100644
---- a/modules/http2/h2_mplx.c
-+++ b/modules/http2/h2_mplx.c
-@@ -476,6 +476,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- h2_ihash_iter(m->shold, report_stream_iter, m);
- }
- }
-+ ap_assert(m->tasks_active == 0);
- m->join_wait = NULL;
-
- /* 4. close the h2_req_enginge shed */
-@@ -765,6 +766,9 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
- apr_status_t rv = APR_EOF;
-
- *ptask = NULL;
-+ ap_assert(m);
-+ ap_assert(m->lock);
-+
- if (APR_SUCCESS != (rv = apr_thread_mutex_lock(m->lock))) {
- return rv;
- }
-diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
-index a077ce1..8389c7c 100644
---- a/modules/http2/h2_proxy_session.c
-+++ b/modules/http2/h2_proxy_session.c
-@@ -237,7 +237,7 @@ static int before_frame_send(nghttp2_session *ngh2,
-
- static int add_header(void *table, const char *n, const char *v)
- {
-- apr_table_addn(table, n, v);
-+ apr_table_add(table, n, v);
- return 1;
- }
-
-@@ -361,7 +361,7 @@ static void h2_proxy_stream_end_headers_out(h2_proxy_stream *stream)
- }
-
- /* create a "Via:" response header entry and merge it */
-- apr_table_addn(r->headers_out, "Via",
-+ apr_table_add(r->headers_out, "Via",
- (session->conf->viaopt == via_full)
- ? apr_psprintf(p, "%d.%d %s%s (%s)",
- HTTP_VERSION_MAJOR(r->proto_num),
-diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
-index 3d7ba37..9dacd8b 100644
---- a/modules/http2/h2_util.c
-+++ b/modules/http2/h2_util.c
-@@ -115,26 +115,28 @@ void h2_util_camel_case_header(char *s, size_t len)
-
- /* base64 url encoding ****************************************************************************/
-
--static const int BASE64URL_UINT6[] = {
-+#define N6 (unsigned int)-1
-+
-+static const unsigned int BASE64URL_UINT6[] = {
- /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 0 */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 1 */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 62, -1, -1, /* 2 */
-- 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1, -1, -1, -1, -1, -1, /* 3 */
-- -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
-- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, 63, /* 5 */
-- -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
-- 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -1, -1, -1, -1, -1, /* 7 */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 8 */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* 9 */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* a */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* b */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* c */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* d */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, /* e */
-- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 /* f */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 0 */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 1 */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, 62, N6, N6, /* 2 */
-+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, N6, N6, N6, N6, N6, N6, /* 3 */
-+ N6, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, /* 4 */
-+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, N6, N6, N6, N6, 63, /* 5 */
-+ N6, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, /* 6 */
-+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, N6, N6, N6, N6, N6, /* 7 */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 8 */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* 9 */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* a */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* b */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* c */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* d */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, /* e */
-+ N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6, N6 /* f */
- };
--static const char BASE64URL_CHARS[] = {
-+static const unsigned char BASE64URL_CHARS[] = {
- 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', /* 0 - 9 */
- 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', /* 10 - 19 */
- 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', /* 20 - 29 */
-@@ -144,21 +146,23 @@ static const char BASE64URL_CHARS[] = {
- '8', '9', '-', '_', ' ', ' ', ' ', ' ', ' ', ' ', /* 60 - 69 */
- };
-
-+#define BASE64URL_CHAR(x) BASE64URL_CHARS[ (unsigned int)(x) & 0x3fu ]
-+
- apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
- apr_pool_t *pool)
- {
- const unsigned char *e = (const unsigned char *)encoded;
- const unsigned char *p = e;
- unsigned char *d;
-- int n;
-- apr_size_t len, mlen, remain, i;
-+ unsigned int n;
-+ long len, mlen, remain, i;
-
-- while (*p && BASE64URL_UINT6[ *p ] != -1) {
-+ while (*p && BASE64URL_UINT6[ *p ] != N6) {
- ++p;
- }
-- len = p - e;
-+ len = (int)(p - e);
- mlen = (len/4)*4;
-- *decoded = apr_pcalloc(pool, len+1);
-+ *decoded = apr_pcalloc(pool, (apr_size_t)len + 1);
-
- i = 0;
- d = (unsigned char*)*decoded;
-@@ -167,60 +171,60 @@ apr_size_t h2_util_base64url_decode(const char **decoded, const char *encoded,
- (BASE64URL_UINT6[ e[i+1] ] << 12) +
- (BASE64URL_UINT6[ e[i+2] ] << 6) +
- (BASE64URL_UINT6[ e[i+3] ]));
-- *d++ = n >> 16;
-- *d++ = n >> 8 & 0xffu;
-- *d++ = n & 0xffu;
-+ *d++ = (unsigned char)(n >> 16);
-+ *d++ = (unsigned char)(n >> 8 & 0xffu);
-+ *d++ = (unsigned char)(n & 0xffu);
- }
- remain = len - mlen;
- switch (remain) {
- case 2:
- n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
- (BASE64URL_UINT6[ e[mlen+1] ] << 12));
-- *d++ = n >> 16;
-+ *d++ = (unsigned char)(n >> 16);
- remain = 1;
- break;
- case 3:
- n = ((BASE64URL_UINT6[ e[mlen+0] ] << 18) +
- (BASE64URL_UINT6[ e[mlen+1] ] << 12) +
- (BASE64URL_UINT6[ e[mlen+2] ] << 6));
-- *d++ = n >> 16;
-- *d++ = n >> 8 & 0xffu;
-+ *d++ = (unsigned char)(n >> 16);
-+ *d++ = (unsigned char)(n >> 8 & 0xffu);
- remain = 2;
- break;
- default: /* do nothing */
- break;
- }
-- return mlen/4*3 + remain;
-+ return (apr_size_t)(mlen/4*3 + remain);
- }
-
- const char *h2_util_base64url_encode(const char *data,
- apr_size_t dlen, apr_pool_t *pool)
- {
-- long i, len = (int)dlen;
-+ int i, len = (int)dlen;
- apr_size_t slen = ((dlen+2)/3)*4 + 1; /* 0 terminated */
- const unsigned char *udata = (const unsigned char*)data;
-- char *enc, *p = apr_pcalloc(pool, slen);
-+ unsigned char *enc, *p = apr_pcalloc(pool, slen);
-
- enc = p;
- for (i = 0; i < len-2; i+= 3) {
-- *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
-- *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ];
-- *p++ = BASE64URL_CHARS[ ((udata[i+1] << 2) + (udata[i+2] >> 6)) & 0x3fu ];
-- *p++ = BASE64URL_CHARS[ udata[i+2] & 0x3fu ];
-+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
-+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
-+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) + (udata[i+2] >> 6) );
-+ *p++ = BASE64URL_CHAR( (udata[i+2]) );
- }
-
- if (i < len) {
-- *p++ = BASE64URL_CHARS[ (udata[i] >> 2) & 0x3fu ];
-+ *p++ = BASE64URL_CHAR( (udata[i] >> 2) );
- if (i == (len - 1)) {
-- *p++ = BASE64URL_CHARS[ (udata[i] << 4) & 0x3fu ];
-+ *p++ = BASE64URL_CHARS[ ((unsigned int)udata[i] << 4) & 0x3fu ];
- }
- else {
-- *p++ = BASE64URL_CHARS[ ((udata[i] << 4) + (udata[i+1] >> 4)) & 0x3fu ];
-- *p++ = BASE64URL_CHARS[ (udata[i+1] << 2) & 0x3fu ];
-+ *p++ = BASE64URL_CHAR( (udata[i] << 4) + (udata[i+1] >> 4) );
-+ *p++ = BASE64URL_CHAR( (udata[i+1] << 2) );
- }
- }
- *p++ = '\0';
-- return enc;
-+ return (char *)enc;
- }
-
- /*******************************************************************************
---
-1.8.3.1
-
diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch
deleted file mode 100644
index e0a47e800829489d835bccd00e3d8a03ae80afea..0000000000000000000000000000000000000000
--- a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-4.patch
+++ /dev/null
@@ -1,4406 +0,0 @@
-From a6c8c61510eea98f31961e8220bce4f07f928fd2 Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Wed, 13 Mar 2019 15:00:57 +0000
-Subject: [PATCH 4/5] Merge of
- 1849296,1852038,1852101,1852339,1853171,1853967,1854365,1854963,1854964,1855295,1855411
- from trunk:
-
- *) mod_http2: when SSL renegotiation is inhibited and a 403 ErrorDocument is
- in play, the proper HTTP/2 stream reset did not trigger with H2_ERR_HTTP_1_1_REQUIRED.
- Fixed. [Michael Kaufmann]
-
- *) mod_http2: new configuration directive: `H2Padding numbits` to control
- padding of HTTP/2 payload frames. 'numbits' is a number from 0-8,
- controlling the range of padding bytes added to a frame. The actual number
- added is chosen randomly per frame. This applies to HEADERS, DATA and PUSH_PROMISE
- frames equally. The default continues to be 0, e.g. no padding. [Stefan Eissing]
-
- *) mod_http2: ripping out all the h2_req_engine internal features now that mod_proxy_http2
- has no more need for it. Optional functions are still declared but no longer implemented.
- While previous mod_proxy_http2 will work with this, it is recommeneded to run the matching
- versions of both modules. [Stefan Eissing]
-
- *) mod_proxy_http2: changed mod_proxy_http2 implementation and fixed several bugs which
- resolve PR63170. The proxy module does now a single h2 request on the (reused)
- connection and returns. [Stefan Eissing]
-
- *) mod_http2/mod_proxy_http2: proxy_http2 checks correct master connection aborted status
- to trigger immediate shutdown of backend connections. This is now always signalled
- by mod_http2 when the the session is being released.
- proxy_http2 now only sends a PING frame to the backend when there is not already one
- in flight. [Stefan Eissing]
-
- *) mod_proxy_http2: fixed an issue where a proxy_http2 handler entered an infinite
- loop when encountering certain errors on the backend connection.
- See . [Stefan Eissing]
-
- *) mod_http2: Configuration directives H2Push and H2Upgrade can now be specified per
- Location/Directory, e.g. disabling PUSH for a specific set of resources. [Stefan Eissing]
-
- *) mod_http2: HEAD requests to some module such as mod_cgid caused the stream to
- terminate improperly and cause a HTTP/2 PROTOCOL_ERROR.
- Fixes . [Michael Kaufmann]
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855431 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/config2.m4 | 1 -
- modules/http2/h2.h | 7 +-
- modules/http2/h2_alt_svc.c | 12 +-
- modules/http2/h2_config.c | 662 ++++++++++++++++++++++++++++-----------
- modules/http2/h2_config.h | 63 ++--
- modules/http2/h2_conn.c | 30 +-
- modules/http2/h2_conn.h | 8 +-
- modules/http2/h2_conn_io.c | 24 +-
- modules/http2/h2_conn_io.h | 3 +-
- modules/http2/h2_ctx.c | 33 +-
- modules/http2/h2_ctx.h | 13 +-
- modules/http2/h2_filter.c | 43 ++-
- modules/http2/h2_from_h1.c | 12 +-
- modules/http2/h2_h2.c | 71 ++---
- modules/http2/h2_h2.h | 16 +-
- modules/http2/h2_headers.c | 31 +-
- modules/http2/h2_mplx.c | 235 ++------------
- modules/http2/h2_mplx.h | 32 +-
- modules/http2/h2_ngn_shed.c | 392 -----------------------
- modules/http2/h2_ngn_shed.h | 79 -----
- modules/http2/h2_proxy_session.c | 57 +---
- modules/http2/h2_proxy_session.h | 3 -
- modules/http2/h2_request.c | 33 +-
- modules/http2/h2_session.c | 107 ++++---
- modules/http2/h2_session.h | 19 +-
- modules/http2/h2_stream.c | 9 +-
- modules/http2/h2_switch.c | 13 +-
- modules/http2/h2_task.c | 84 +----
- modules/http2/h2_task.h | 14 +-
- modules/http2/mod_http2.c | 49 +--
- modules/http2/mod_http2.h | 46 +--
- modules/http2/mod_proxy_http2.c | 384 ++++++-----------------
- 32 files changed, 904 insertions(+), 1681 deletions(-)
-
-diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
-index e8cefe3..5f49adf 100644
---- a/modules/http2/config2.m4
-+++ b/modules/http2/config2.m4
-@@ -31,7 +31,6 @@ h2_from_h1.lo dnl
- h2_h2.lo dnl
- h2_headers.lo dnl
- h2_mplx.lo dnl
--h2_ngn_shed.lo dnl
- h2_push.lo dnl
- h2_request.lo dnl
- h2_session.lo dnl
-diff --git a/modules/http2/h2.h b/modules/http2/h2.h
-index 38b4019..e057d66 100644
---- a/modules/http2/h2.h
-+++ b/modules/http2/h2.h
-@@ -48,12 +48,12 @@ extern const char *H2_MAGIC_TOKEN;
- #define H2_HEADER_PATH_LEN 5
- #define H2_CRLF "\r\n"
-
--/* Max data size to write so it fits inside a TLS record */
--#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - 9)
--
- /* Size of the frame header itself in HTTP/2 */
- #define H2_FRAME_HDR_LEN 9
-
-+/* Max data size to write so it fits inside a TLS record */
-+#define H2_DATA_CHUNK_SIZE ((16*1024) - 100 - H2_FRAME_HDR_LEN)
-+
- /* Maximum number of padding bytes in a frame, rfc7540 */
- #define H2_MAX_PADLEN 256
- /* Initial default window size, RFC 7540 ch. 6.5.2 */
-@@ -162,5 +162,6 @@ typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
- #define H2_FILTER_DEBUG_NOTE "http2-debug"
- #define H2_HDR_CONFORMANCE "http2-hdr-conformance"
- #define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
-+#define H2_PUSH_MODE_NOTE "http2-push-mode"
-
- #endif /* defined(__mod_h2__h2__) */
-diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c
-index 295a16d..2c3253c 100644
---- a/modules/http2/h2_alt_svc.c
-+++ b/modules/http2/h2_alt_svc.c
-@@ -75,7 +75,7 @@ h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool)
-
- static int h2_alt_svc_handler(request_rec *r)
- {
-- const h2_config *cfg;
-+ apr_array_header_t *alt_svcs;
- int i;
-
- if (r->connection->keepalives > 0) {
-@@ -87,8 +87,8 @@ static int h2_alt_svc_handler(request_rec *r)
- return DECLINED;
- }
-
-- cfg = h2_config_sget(r->server);
-- if (r->hostname && cfg && cfg->alt_svcs && cfg->alt_svcs->nelts > 0) {
-+ alt_svcs = h2_config_alt_svcs(r);
-+ if (r->hostname && alt_svcs && alt_svcs->nelts > 0) {
- const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
- if (!alt_svc_used) {
- /* We have alt-svcs defined and client is not already using
-@@ -99,7 +99,7 @@ static int h2_alt_svc_handler(request_rec *r)
- const char *alt_svc = "";
- const char *svc_ma = "";
- int secure = h2_h2_is_tls(r->connection);
-- int ma = h2_config_geti(cfg, H2_CONF_ALT_SVC_MAX_AGE);
-+ int ma = h2_config_rgeti(r, H2_CONF_ALT_SVC_MAX_AGE);
- if (ma >= 0) {
- svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
- }
-@@ -107,8 +107,8 @@ static int h2_alt_svc_handler(request_rec *r)
- "h2_alt_svc: announce %s for %s:%d",
- (secure? "secure" : "insecure"),
- r->hostname, (int)r->server->port);
-- for (i = 0; i < cfg->alt_svcs->nelts; ++i) {
-- h2_alt_svc *as = h2_alt_svc_IDX(cfg->alt_svcs, i);
-+ for (i = 0; i < alt_svcs->nelts; ++i) {
-+ h2_alt_svc *as = h2_alt_svc_IDX(alt_svcs, i);
- const char *ahost = as->host;
- if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
- ahost = NULL;
-diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
-index 8766355..29a0b55 100644
---- a/modules/http2/h2_config.c
-+++ b/modules/http2/h2_config.c
-@@ -42,6 +42,55 @@
- #define H2_CONFIG_GET(a, b, n) \
- (((a)->n == DEF_VAL)? (b) : (a))->n
-
-+#define H2_CONFIG_SET(a, n, v) \
-+ ((a)->n = v)
-+
-+#define CONFIG_CMD_SET(cmd,dir,var,val) \
-+ h2_config_seti(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
-+
-+#define CONFIG_CMD_SET64(cmd,dir,var,val) \
-+ h2_config_seti64(((cmd)->path? (dir) : NULL), h2_config_sget((cmd)->server), var, val)
-+
-+/* Apache httpd module configuration for h2. */
-+typedef struct h2_config {
-+ const char *name;
-+ int h2_max_streams; /* max concurrent # streams (http2) */
-+ int h2_window_size; /* stream window size (http2) */
-+ int min_workers; /* min # of worker threads/child */
-+ int max_workers; /* max # of worker threads/child */
-+ int max_worker_idle_secs; /* max # of idle seconds for worker */
-+ int stream_max_mem_size; /* max # bytes held in memory/stream */
-+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-+ int serialize_headers; /* Use serialized HTTP/1.1 headers for
-+ processing, better compatibility */
-+ int h2_direct; /* if mod_h2 is active directly */
-+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
-+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
-+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
-+ int h2_push; /* if HTTP/2 server push is enabled */
-+ struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
-+
-+ int push_diary_size; /* # of entries in push diary */
-+ int copy_files; /* if files shall be copied vs setaside on output */
-+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
-+ int early_hints; /* support status code 103 */
-+ int padding_bits;
-+ int padding_always;
-+} h2_config;
-+
-+typedef struct h2_dir_config {
-+ const char *name;
-+ apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-+ int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-+ int h2_push; /* if HTTP/2 server push is enabled */
-+ apr_array_header_t *push_list;/* list of h2_push_res configurations */
-+ int early_hints; /* support status code 103 */
-+} h2_dir_config;
-+
-+
- static h2_config defconf = {
- "default",
- 100, /* max_streams */
-@@ -64,6 +113,18 @@ static h2_config defconf = {
- 0, /* copy files across threads */
- NULL, /* push list */
- 0, /* early hints, http status 103 */
-+ 0, /* padding bits */
-+ 1, /* padding always */
-+};
-+
-+static h2_dir_config defdconf = {
-+ "default",
-+ NULL, /* no alt-svcs */
-+ -1, /* alt-svc max age */
-+ -1, /* HTTP/1 Upgrade support */
-+ -1, /* HTTP/2 server push enabled */
-+ NULL, /* push list */
-+ -1, /* early hints, http status 103 */
- };
-
- void h2_config_init(apr_pool_t *pool)
-@@ -71,12 +132,10 @@ void h2_config_init(apr_pool_t *pool)
- (void)pool;
- }
-
--static void *h2_config_create(apr_pool_t *pool,
-- const char *prefix, const char *x)
-+void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
- {
- h2_config *conf = (h2_config *)apr_pcalloc(pool, sizeof(h2_config));
-- const char *s = x? x : "unknown";
-- char *name = apr_pstrcat(pool, prefix, "[", s, "]", NULL);
-+ char *name = apr_pstrcat(pool, "srv[", s->defn_name, "]", NULL);
-
- conf->name = name;
- conf->h2_max_streams = DEF_VAL;
-@@ -98,19 +157,11 @@ static void *h2_config_create(apr_pool_t *pool,
- conf->copy_files = DEF_VAL;
- conf->push_list = NULL;
- conf->early_hints = DEF_VAL;
-+ conf->padding_bits = DEF_VAL;
-+ conf->padding_always = DEF_VAL;
- return conf;
- }
-
--void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
--{
-- return h2_config_create(pool, "srv", s->defn_name);
--}
--
--void *h2_config_create_dir(apr_pool_t *pool, char *x)
--{
-- return h2_config_create(pool, "dir", x);
--}
--
- static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
- {
- h2_config *base = (h2_config *)basev;
-@@ -149,25 +200,52 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
- n->push_list = add->push_list? add->push_list : base->push_list;
- }
- n->early_hints = H2_CONFIG_GET(add, base, early_hints);
-+ n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
-+ n->padding_always = H2_CONFIG_GET(add, base, padding_always);
- return n;
- }
-
--void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
-+void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
- {
- return h2_config_merge(pool, basev, addv);
- }
-
--void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv)
-+void *h2_config_create_dir(apr_pool_t *pool, char *x)
- {
-- return h2_config_merge(pool, basev, addv);
-+ h2_dir_config *conf = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
-+ const char *s = x? x : "unknown";
-+ char *name = apr_pstrcat(pool, "dir[", s, "]", NULL);
-+
-+ conf->name = name;
-+ conf->alt_svc_max_age = DEF_VAL;
-+ conf->h2_upgrade = DEF_VAL;
-+ conf->h2_push = DEF_VAL;
-+ conf->early_hints = DEF_VAL;
-+ return conf;
- }
-
--int h2_config_geti(const h2_config *conf, h2_config_var_t var)
-+void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
- {
-- return (int)h2_config_geti64(conf, var);
-+ h2_dir_config *base = (h2_dir_config *)basev;
-+ h2_dir_config *add = (h2_dir_config *)addv;
-+ h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
-+
-+ n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
-+ n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
-+ n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
-+ n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
-+ n->h2_push = H2_CONFIG_GET(add, base, h2_push);
-+ if (add->push_list && base->push_list) {
-+ n->push_list = apr_array_append(pool, base->push_list, add->push_list);
-+ }
-+ else {
-+ n->push_list = add->push_list? add->push_list : base->push_list;
-+ }
-+ n->early_hints = H2_CONFIG_GET(add, base, early_hints);
-+ return n;
- }
-
--apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
-+static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t var)
- {
- switch(var) {
- case H2_CONF_MAX_STREAMS:
-@@ -191,7 +269,8 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
- case H2_CONF_UPGRADE:
- return H2_CONFIG_GET(conf, &defconf, h2_upgrade);
- case H2_CONF_DIRECT:
-- return H2_CONFIG_GET(conf, &defconf, h2_direct);
-+ return 1;
-+ /*return H2_CONFIG_GET(conf, &defconf, h2_direct);*/
- case H2_CONF_TLS_WARMUP_SIZE:
- return H2_CONFIG_GET(conf, &defconf, tls_warmup_size);
- case H2_CONF_TLS_COOLDOWN_SECS:
-@@ -204,12 +283,93 @@ apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var)
- return H2_CONFIG_GET(conf, &defconf, copy_files);
- case H2_CONF_EARLY_HINTS:
- return H2_CONFIG_GET(conf, &defconf, early_hints);
-+ case H2_CONF_PADDING_BITS:
-+ return H2_CONFIG_GET(conf, &defconf, padding_bits);
-+ case H2_CONF_PADDING_ALWAYS:
-+ return H2_CONFIG_GET(conf, &defconf, padding_always);
- default:
- return DEF_VAL;
- }
- }
-
--const h2_config *h2_config_sget(server_rec *s)
-+static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
-+{
-+ switch(var) {
-+ case H2_CONF_MAX_STREAMS:
-+ H2_CONFIG_SET(conf, h2_max_streams, val);
-+ break;
-+ case H2_CONF_WIN_SIZE:
-+ H2_CONFIG_SET(conf, h2_window_size, val);
-+ break;
-+ case H2_CONF_MIN_WORKERS:
-+ H2_CONFIG_SET(conf, min_workers, val);
-+ break;
-+ case H2_CONF_MAX_WORKERS:
-+ H2_CONFIG_SET(conf, max_workers, val);
-+ break;
-+ case H2_CONF_MAX_WORKER_IDLE_SECS:
-+ H2_CONFIG_SET(conf, max_worker_idle_secs, val);
-+ break;
-+ case H2_CONF_STREAM_MAX_MEM:
-+ H2_CONFIG_SET(conf, stream_max_mem_size, val);
-+ break;
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ H2_CONFIG_SET(conf, alt_svc_max_age, val);
-+ break;
-+ case H2_CONF_SER_HEADERS:
-+ H2_CONFIG_SET(conf, serialize_headers, val);
-+ break;
-+ case H2_CONF_MODERN_TLS_ONLY:
-+ H2_CONFIG_SET(conf, modern_tls_only, val);
-+ break;
-+ case H2_CONF_UPGRADE:
-+ H2_CONFIG_SET(conf, h2_upgrade, val);
-+ break;
-+ case H2_CONF_DIRECT:
-+ H2_CONFIG_SET(conf, h2_direct, val);
-+ break;
-+ case H2_CONF_TLS_WARMUP_SIZE:
-+ H2_CONFIG_SET(conf, tls_warmup_size, val);
-+ break;
-+ case H2_CONF_TLS_COOLDOWN_SECS:
-+ H2_CONFIG_SET(conf, tls_cooldown_secs, val);
-+ break;
-+ case H2_CONF_PUSH:
-+ H2_CONFIG_SET(conf, h2_push, val);
-+ break;
-+ case H2_CONF_PUSH_DIARY_SIZE:
-+ H2_CONFIG_SET(conf, push_diary_size, val);
-+ break;
-+ case H2_CONF_COPY_FILES:
-+ H2_CONFIG_SET(conf, copy_files, val);
-+ break;
-+ case H2_CONF_EARLY_HINTS:
-+ H2_CONFIG_SET(conf, early_hints, val);
-+ break;
-+ case H2_CONF_PADDING_BITS:
-+ H2_CONFIG_SET(conf, padding_bits, val);
-+ break;
-+ case H2_CONF_PADDING_ALWAYS:
-+ H2_CONFIG_SET(conf, padding_always, val);
-+ break;
-+ default:
-+ break;
-+ }
-+}
-+
-+static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64_t val)
-+{
-+ switch(var) {
-+ case H2_CONF_TLS_WARMUP_SIZE:
-+ H2_CONFIG_SET(conf, tls_warmup_size, val);
-+ break;
-+ default:
-+ h2_srv_config_seti(conf, var, (int)val);
-+ break;
-+ }
-+}
-+
-+static h2_config *h2_config_sget(server_rec *s)
- {
- h2_config *cfg = (h2_config *)ap_get_module_config(s->module_config,
- &http2_module);
-@@ -217,9 +377,162 @@ const h2_config *h2_config_sget(server_rec *s)
- return cfg;
- }
-
--const struct h2_priority *h2_config_get_priority(const h2_config *conf,
-- const char *content_type)
-+static const h2_dir_config *h2_config_rget(request_rec *r)
-+{
-+ h2_dir_config *cfg = (h2_dir_config *)ap_get_module_config(r->per_dir_config,
-+ &http2_module);
-+ ap_assert(cfg);
-+ return cfg;
-+}
-+
-+static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var)
-+{
-+ switch(var) {
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ return H2_CONFIG_GET(conf, &defdconf, alt_svc_max_age);
-+ case H2_CONF_UPGRADE:
-+ return H2_CONFIG_GET(conf, &defdconf, h2_upgrade);
-+ case H2_CONF_PUSH:
-+ return H2_CONFIG_GET(conf, &defdconf, h2_push);
-+ case H2_CONF_EARLY_HINTS:
-+ return H2_CONFIG_GET(conf, &defdconf, early_hints);
-+
-+ default:
-+ return DEF_VAL;
-+ }
-+}
-+
-+static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, int val)
-+{
-+ int set_srv = !dconf;
-+ if (dconf) {
-+ switch(var) {
-+ case H2_CONF_ALT_SVC_MAX_AGE:
-+ H2_CONFIG_SET(dconf, alt_svc_max_age, val);
-+ break;
-+ case H2_CONF_UPGRADE:
-+ H2_CONFIG_SET(dconf, h2_upgrade, val);
-+ break;
-+ case H2_CONF_PUSH:
-+ H2_CONFIG_SET(dconf, h2_push, val);
-+ break;
-+ case H2_CONF_EARLY_HINTS:
-+ H2_CONFIG_SET(dconf, early_hints, val);
-+ break;
-+ default:
-+ /* not handled in dir_conf */
-+ set_srv = 1;
-+ break;
-+ }
-+ }
-+
-+ if (set_srv) {
-+ h2_srv_config_seti(conf, var, val);
-+ }
-+}
-+
-+static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_var_t var, apr_int64_t val)
-+{
-+ int set_srv = !dconf;
-+ if (dconf) {
-+ switch(var) {
-+ default:
-+ /* not handled in dir_conf */
-+ set_srv = 1;
-+ break;
-+ }
-+ }
-+
-+ if (set_srv) {
-+ h2_srv_config_seti64(conf, var, val);
-+ }
-+}
-+
-+static const h2_config *h2_config_get(conn_rec *c)
-+{
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
-+
-+ if (ctx) {
-+ if (ctx->config) {
-+ return ctx->config;
-+ }
-+ else if (ctx->server) {
-+ ctx->config = h2_config_sget(ctx->server);
-+ return ctx->config;
-+ }
-+ }
-+
-+ return h2_config_sget(c->base_server);
-+}
-+
-+int h2_config_cgeti(conn_rec *c, h2_config_var_t var)
-+{
-+ return (int)h2_srv_config_geti64(h2_config_get(c), var);
-+}
-+
-+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var)
-+{
-+ return h2_srv_config_geti64(h2_config_get(c), var);
-+}
-+
-+int h2_config_sgeti(server_rec *s, h2_config_var_t var)
-+{
-+ return (int)h2_srv_config_geti64(h2_config_sget(s), var);
-+}
-+
-+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var)
-+{
-+ return h2_srv_config_geti64(h2_config_sget(s), var);
-+}
-+
-+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var)
-+{
-+ return (int)h2_config_geti64(r, s, var);
-+}
-+
-+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var)
-+{
-+ apr_int64_t mode = r? (int)h2_dir_config_geti64(h2_config_rget(r), var) : DEF_VAL;
-+ return (mode != DEF_VAL)? mode : h2_config_sgeti64(s, var);
-+}
-+
-+int h2_config_rgeti(request_rec *r, h2_config_var_t var)
-+{
-+ return h2_config_geti(r, r->server, var);
-+}
-+
-+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var)
-+{
-+ return h2_config_geti64(r, r->server, var);
-+}
-+
-+apr_array_header_t *h2_config_push_list(request_rec *r)
-+{
-+ const h2_config *sconf;
-+ const h2_dir_config *conf = h2_config_rget(r);
-+
-+ if (conf && conf->push_list) {
-+ return conf->push_list;
-+ }
-+ sconf = h2_config_sget(r->server);
-+ return sconf? sconf->push_list : NULL;
-+}
-+
-+apr_array_header_t *h2_config_alt_svcs(request_rec *r)
- {
-+ const h2_config *sconf;
-+ const h2_dir_config *conf = h2_config_rget(r);
-+
-+ if (conf && conf->alt_svcs) {
-+ return conf->alt_svcs;
-+ }
-+ sconf = h2_config_sget(r->server);
-+ return sconf? sconf->alt_svcs : NULL;
-+}
-+
-+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
-+{
-+ const h2_config *conf = h2_config_get(c);
- if (content_type && conf->priorities) {
- size_t len = strcspn(content_type, "; \t");
- h2_priority *prio = apr_hash_get(conf->priorities, content_type, len);
-@@ -228,166 +541,156 @@ const struct h2_priority *h2_config_get_priority(const h2_config *conf,
- return NULL;
- }
-
--static const char *h2_conf_set_max_streams(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_streams(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->h2_max_streams = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->h2_max_streams < 1) {
-+ apr_int64_t ival = (int)apr_atoi64(value);
-+ if (ival < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_MAX_STREAMS, ival);
- return NULL;
- }
-
--static const char *h2_conf_set_window_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_window_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->h2_window_size = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->h2_window_size < 1024) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1024) {
- return "value must be >= 1024";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_WIN_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_min_workers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_min_workers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->min_workers = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->min_workers < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MIN_WORKERS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_max_workers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_workers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->max_workers = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->max_workers < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKERS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_max_worker_idle_secs(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->max_worker_idle_secs = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->max_worker_idle_secs < 1) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1) {
- return "value must be > 0";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MAX_WORKER_IDLE_SECS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_stream_max_mem_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
--
--
-- cfg->stream_max_mem_size = (int)apr_atoi64(value);
-- (void)arg;
-- if (cfg->stream_max_mem_size < 1024) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 1024) {
- return "value must be >= 1024";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_STREAM_MAX_MEM, val);
- return NULL;
- }
-
--static const char *h2_add_alt_svc(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_add_alt_svc(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
- if (value && *value) {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- h2_alt_svc *as = h2_alt_svc_parse(value, parms->pool);
-+ h2_alt_svc *as = h2_alt_svc_parse(value, cmd->pool);
- if (!as) {
- return "unable to parse alt-svc specifier";
- }
-- if (!cfg->alt_svcs) {
-- cfg->alt_svcs = apr_array_make(parms->pool, 5, sizeof(h2_alt_svc*));
-+
-+ if (cmd->path) {
-+ h2_dir_config *dcfg = (h2_dir_config *)dirconf;
-+ if (!dcfg->alt_svcs) {
-+ dcfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
-+ }
-+ APR_ARRAY_PUSH(dcfg->alt_svcs, h2_alt_svc*) = as;
-+ }
-+ else {
-+ h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
-+ if (!cfg->alt_svcs) {
-+ cfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
-+ }
-+ APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
- }
-- APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
- }
-- (void)arg;
- return NULL;
- }
-
--static const char *h2_conf_set_alt_svc_max_age(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_alt_svc_max_age(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->alt_svc_max_age = (int)apr_atoi64(value);
-- (void)arg;
-+ int val = (int)apr_atoi64(value);
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_ALT_SVC_MAX_AGE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_session_extra_files(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
- /* deprecated, ignore */
-- (void)arg;
-+ (void)dirconf;
- (void)value;
-- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, parms->pool, /* NO LOGNO */
-+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool, /* NO LOGNO */
- "H2SessionExtraFiles is obsolete and will be ignored");
- return NULL;
- }
-
--static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_serialize_headers(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->serialize_headers = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->serialize_headers = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_direct(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_direct(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_direct = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_direct = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_DIRECT, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_push(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_push(cmd_parms *cmd, void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_push = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_push = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
-@@ -447,100 +750,88 @@ static const char *h2_conf_add_push_priority(cmd_parms *cmd, void *_cfg,
- return NULL;
- }
-
--static const char *h2_conf_set_modern_tls_only(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_modern_tls_only(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->modern_tls_only = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->modern_tls_only = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_MODERN_TLS_ONLY, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_upgrade(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_upgrade(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
- if (!strcasecmp(value, "On")) {
-- cfg->h2_upgrade = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->h2_upgrade = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_UPGRADE, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static const char *h2_conf_set_tls_warmup_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_tls_warmup_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->tls_warmup_size = apr_atoi64(value);
-- (void)arg;
-+ apr_int64_t val = apr_atoi64(value);
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_WARMUP_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_tls_cooldown_secs(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- cfg->tls_cooldown_secs = (int)apr_atoi64(value);
-- (void)arg;
-+ apr_int64_t val = (int)apr_atoi64(value);
-+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_TLS_COOLDOWN_SECS, val);
- return NULL;
- }
-
--static const char *h2_conf_set_push_diary_size(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_push_diary_size(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- (void)arg;
-- cfg->push_diary_size = (int)apr_atoi64(value);
-- if (cfg->push_diary_size < 0) {
-+ int val = (int)apr_atoi64(value);
-+ if (val < 0) {
- return "value must be >= 0";
- }
-- if (cfg->push_diary_size > 0 && (cfg->push_diary_size & (cfg->push_diary_size-1))) {
-+ if (val > 0 && (val & (val-1))) {
- return "value must a power of 2";
- }
-- if (cfg->push_diary_size > (1 << 15)) {
-+ if (val > (1 << 15)) {
- return "value must <= 65536";
- }
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PUSH_DIARY_SIZE, val);
- return NULL;
- }
-
--static const char *h2_conf_set_copy_files(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_copy_files(cmd_parms *cmd,
-+ void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)arg;
- if (!strcasecmp(value, "On")) {
-- cfg->copy_files = 1;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 1);
- return NULL;
- }
- else if (!strcasecmp(value, "Off")) {
-- cfg->copy_files = 0;
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_COPY_FILES, 0);
- return NULL;
- }
--
-- (void)arg;
- return "value must be On or Off";
- }
-
--static void add_push(apr_pool_t *pool, h2_config *conf, h2_push_res *push)
-+static void add_push(apr_array_header_t **plist, apr_pool_t *pool, h2_push_res *push)
- {
- h2_push_res *new;
-- if (!conf->push_list) {
-- conf->push_list = apr_array_make(pool, 10, sizeof(*push));
-+ if (!*plist) {
-+ *plist = apr_array_make(pool, 10, sizeof(*push));
- }
-- new = apr_array_push(conf->push_list);
-+ new = apr_array_push(*plist);
- new->uri_ref = push->uri_ref;
- new->critical = push->critical;
- }
-@@ -549,8 +840,6 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
- const char *arg1, const char *arg2,
- const char *arg3)
- {
-- h2_config *dconf = (h2_config*)dirconf ;
-- h2_config *sconf = (h2_config*)h2_config_sget(cmd->server);
- h2_push_res push;
- const char *last = arg3;
-
-@@ -575,42 +864,54 @@ static const char *h2_conf_add_push_res(cmd_parms *cmd, void *dirconf,
- }
- }
-
-- /* server command? set both */
-- if (cmd->path == NULL) {
-- add_push(cmd->pool, sconf, &push);
-- add_push(cmd->pool, dconf, &push);
-+ if (cmd->path) {
-+ add_push(&(((h2_dir_config*)dirconf)->push_list), cmd->pool, &push);
- }
- else {
-- add_push(cmd->pool, dconf, &push);
-+ add_push(&(h2_config_sget(cmd->server)->push_list), cmd->pool, &push);
- }
-+ return NULL;
-+}
-+
-+static const char *h2_conf_set_early_hints(cmd_parms *cmd,
-+ void *dirconf, const char *value)
-+{
-+ int val;
-
-+ if (!strcasecmp(value, "On")) val = 1;
-+ else if (!strcasecmp(value, "Off")) val = 0;
-+ else return "value must be On or Off";
-+
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_EARLY_HINTS, val);
-+ if (cmd->path) {
-+ ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, cmd->pool,
-+ "H2EarlyHints = %d on path %s", val, cmd->path);
-+ }
- return NULL;
- }
-
--static const char *h2_conf_set_early_hints(cmd_parms *parms,
-- void *arg, const char *value)
-+static const char *h2_conf_set_padding(cmd_parms *cmd, void *dirconf, const char *value)
- {
-- h2_config *cfg = (h2_config *)h2_config_sget(parms->server);
-- if (!strcasecmp(value, "On")) {
-- cfg->early_hints = 1;
-- return NULL;
-+ int val;
-+
-+ val = (int)apr_atoi64(value);
-+ if (val < 0) {
-+ return "number of bits must be >= 0";
- }
-- else if (!strcasecmp(value, "Off")) {
-- cfg->early_hints = 0;
-- return NULL;
-+ if (val > 8) {
-+ return "number of bits must be <= 8";
- }
--
-- (void)arg;
-- return "value must be On or Off";
-+ CONFIG_CMD_SET(cmd, dirconf, H2_CONF_PADDING_BITS, val);
-+ return NULL;
- }
-
-+
- void h2_get_num_workers(server_rec *s, int *minw, int *maxw)
- {
- int threads_per_child = 0;
-- const h2_config *config = h2_config_sget(s);
-
-- *minw = h2_config_geti(config, H2_CONF_MIN_WORKERS);
-- *maxw = h2_config_geti(config, H2_CONF_MAX_WORKERS);
-+ *minw = h2_config_sgeti(s, H2_CONF_MIN_WORKERS);
-+ *maxw = h2_config_sgeti(s, H2_CONF_MAX_WORKERS);
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &threads_per_child);
-
- if (*minw <= 0) {
-@@ -652,7 +953,7 @@ const command_rec h2_cmds[] = {
- AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
- RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
- AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
-- RSRC_CONF, "on to allow HTTP/1 Upgrades to h2/h2c"),
-+ RSRC_CONF|OR_AUTHCFG, "on to allow HTTP/1 Upgrades to h2/h2c"),
- AP_INIT_TAKE1("H2Direct", h2_conf_set_direct, NULL,
- RSRC_CONF, "on to enable direct HTTP/2 mode"),
- AP_INIT_TAKE1("H2SessionExtraFiles", h2_conf_set_session_extra_files, NULL,
-@@ -662,7 +963,7 @@ const command_rec h2_cmds[] = {
- AP_INIT_TAKE1("H2TLSCoolDownSecs", h2_conf_set_tls_cooldown_secs, NULL,
- RSRC_CONF, "seconds of idle time on TLS before shrinking writes"),
- AP_INIT_TAKE1("H2Push", h2_conf_set_push, NULL,
-- RSRC_CONF, "off to disable HTTP/2 server push"),
-+ RSRC_CONF|OR_AUTHCFG, "off to disable HTTP/2 server push"),
- AP_INIT_TAKE23("H2PushPriority", h2_conf_add_push_priority, NULL,
- RSRC_CONF, "define priority of PUSHed resources per content type"),
- AP_INIT_TAKE1("H2PushDiarySize", h2_conf_set_push_diary_size, NULL,
-@@ -670,33 +971,12 @@ const command_rec h2_cmds[] = {
- AP_INIT_TAKE1("H2CopyFiles", h2_conf_set_copy_files, NULL,
- OR_FILEINFO, "on to perform copy of file data"),
- AP_INIT_TAKE123("H2PushResource", h2_conf_add_push_res, NULL,
-- OR_FILEINFO, "add a resource to be pushed in this location/on this server."),
-+ OR_FILEINFO|OR_AUTHCFG, "add a resource to be pushed in this location/on this server."),
- AP_INIT_TAKE1("H2EarlyHints", h2_conf_set_early_hints, NULL,
- RSRC_CONF, "on to enable interim status 103 responses"),
-+ AP_INIT_TAKE1("H2Padding", h2_conf_set_padding, NULL,
-+ RSRC_CONF, "set payload padding"),
- AP_END_CMD
- };
-
-
--const h2_config *h2_config_rget(request_rec *r)
--{
-- h2_config *cfg = (h2_config *)ap_get_module_config(r->per_dir_config,
-- &http2_module);
-- return cfg? cfg : h2_config_sget(r->server);
--}
--
--const h2_config *h2_config_get(conn_rec *c)
--{
-- h2_ctx *ctx = h2_ctx_get(c, 0);
--
-- if (ctx) {
-- if (ctx->config) {
-- return ctx->config;
-- }
-- else if (ctx->server) {
-- ctx->config = h2_config_sget(ctx->server);
-- return ctx->config;
-- }
-- }
--
-- return h2_config_sget(c->base_server);
--}
-diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
-index 17d75d6..e940c8a 100644
---- a/modules/http2/h2_config.h
-+++ b/modules/http2/h2_config.h
-@@ -42,6 +42,8 @@ typedef enum {
- H2_CONF_PUSH_DIARY_SIZE,
- H2_CONF_COPY_FILES,
- H2_CONF_EARLY_HINTS,
-+ H2_CONF_PADDING_BITS,
-+ H2_CONF_PADDING_ALWAYS,
- } h2_config_var_t;
-
- struct apr_hash_t;
-@@ -53,33 +55,6 @@ typedef struct h2_push_res {
- int critical;
- } h2_push_res;
-
--/* Apache httpd module configuration for h2. */
--typedef struct h2_config {
-- const char *name;
-- int h2_max_streams; /* max concurrent # streams (http2) */
-- int h2_window_size; /* stream window size (http2) */
-- int min_workers; /* min # of worker threads/child */
-- int max_workers; /* max # of worker threads/child */
-- int max_worker_idle_secs; /* max # of idle seconds for worker */
-- int stream_max_mem_size; /* max # bytes held in memory/stream */
-- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
-- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
-- int serialize_headers; /* Use serialized HTTP/1.1 headers for
-- processing, better compatibility */
-- int h2_direct; /* if mod_h2 is active directly */
-- int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
-- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
-- apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
-- int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
-- int h2_push; /* if HTTP/2 server push is enabled */
-- struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
--
-- int push_diary_size; /* # of entries in push diary */
-- int copy_files; /* if files shall be copied vs setaside on output */
-- apr_array_header_t *push_list;/* list of h2_push_res configurations */
-- int early_hints; /* support status code 103 */
--} h2_config;
--
-
- void *h2_config_create_dir(apr_pool_t *pool, char *x);
- void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv);
-@@ -88,19 +63,37 @@ void *h2_config_merge_svr(apr_pool_t *pool, void *basev, void *addv);
-
- extern const command_rec h2_cmds[];
-
--const h2_config *h2_config_get(conn_rec *c);
--const h2_config *h2_config_sget(server_rec *s);
--const h2_config *h2_config_rget(request_rec *r);
-+int h2_config_geti(request_rec *r, server_rec *s, h2_config_var_t var);
-+apr_int64_t h2_config_geti64(request_rec *r, server_rec *s, h2_config_var_t var);
-
--int h2_config_geti(const h2_config *conf, h2_config_var_t var);
--apr_int64_t h2_config_geti64(const h2_config *conf, h2_config_var_t var);
-+/**
-+ * Get the configured value for variable at the given connection.
-+ */
-+int h2_config_cgeti(conn_rec *c, h2_config_var_t var);
-+apr_int64_t h2_config_cgeti64(conn_rec *c, h2_config_var_t var);
-
--void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
-+/**
-+ * Get the configured value for variable at the given server.
-+ */
-+int h2_config_sgeti(server_rec *s, h2_config_var_t var);
-+apr_int64_t h2_config_sgeti64(server_rec *s, h2_config_var_t var);
-
-+/**
-+ * Get the configured value for variable at the given request,
-+ * if configured for the request location.
-+ * Fallback to request server config otherwise.
-+ */
-+int h2_config_rgeti(request_rec *r, h2_config_var_t var);
-+apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
-+
-+apr_array_header_t *h2_config_push_list(request_rec *r);
-+apr_array_header_t *h2_config_alt_svcs(request_rec *r);
-+
-+
-+void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
- void h2_config_init(apr_pool_t *pool);
-
--const struct h2_priority *h2_config_get_priority(const h2_config *conf,
-- const char *content_type);
-+const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type);
-
- #endif /* __mod_h2__h2_config_h__ */
-
-diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
-index dc2081e..9ef0ea0 100644
---- a/modules/http2/h2_conn.c
-+++ b/modules/http2/h2_conn.c
-@@ -18,6 +18,7 @@
- #include
-
- #include
-+#include
-
- #include
- #include
-@@ -109,7 +110,6 @@ static void check_modules(int force)
-
- apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
- {
-- const h2_config *config = h2_config_sget(s);
- apr_status_t status = APR_SUCCESS;
- int minw, maxw;
- int max_threads_per_child = 0;
-@@ -129,7 +129,7 @@ apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
-
- h2_get_num_workers(s, &minw, &maxw);
-
-- idle_secs = h2_config_geti(config, H2_CONF_MAX_WORKER_IDLE_SECS);
-+ idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS);
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
- minw, maxw, max_threads_per_child, idle_secs);
-@@ -172,9 +172,10 @@ static module *h2_conn_mpm_module(void)
- return mpm_module;
- }
-
--apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
-+apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s)
- {
- h2_session *session;
-+ h2_ctx *ctx;
- apr_status_t status;
-
- if (!workers) {
-@@ -183,24 +184,19 @@ apr_status_t h2_conn_setup(h2_ctx *ctx, conn_rec *c, request_rec *r)
- return APR_EGENERAL;
- }
-
-- if (r) {
-- status = h2_session_rcreate(&session, r, ctx, workers);
-- }
-- else {
-- status = h2_session_create(&session, c, ctx, workers);
-- }
--
-- if (status == APR_SUCCESS) {
-+ if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) {
-+ ctx = h2_ctx_get(c, 1);
- h2_ctx_session_set(ctx, session);
- }
-+
- return status;
- }
-
--apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
-+apr_status_t h2_conn_run(conn_rec *c)
- {
- apr_status_t status;
- int mpm_state = 0;
-- h2_session *session = h2_ctx_session_get(ctx);
-+ h2_session *session = h2_ctx_get_session(c);
-
- ap_assert(session);
- do {
-@@ -249,7 +245,7 @@ apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c)
-
- apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
- {
-- h2_session *session = h2_ctx_session_get(ctx);
-+ h2_session *session = h2_ctx_get_session(c);
- if (session) {
- apr_status_t status = h2_session_pre_close(session, async_mpm);
- return (status == APR_SUCCESS)? DONE : status;
-@@ -310,8 +306,10 @@ conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
- c->filter_conn_ctx = NULL;
- #endif
- c->bucket_alloc = apr_bucket_alloc_create(pool);
-- c->data_in_input_filters = 0;
-- c->data_in_output_filters = 0;
-+#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
-+ c->data_in_input_filters = 0;
-+ c->data_in_output_filters = 0;
-+#endif
- /* prevent mpm_event from making wrong assumptions about this connection,
- * like e.g. using its socket for an async read check. */
- c->clogging_input_filters = 1;
-diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_conn.h
-index e45ff31..c560405 100644
---- a/modules/http2/h2_conn.h
-+++ b/modules/http2/h2_conn.h
-@@ -23,21 +23,21 @@ struct h2_task;
- /**
- * Setup the connection and our context for HTTP/2 processing
- *
-- * @param ctx the http2 context to setup
- * @param c the connection HTTP/2 is starting on
- * @param r the upgrade request that still awaits an answer, optional
-+ * @param s the server selected for this connection (can be != c->base_server)
- */
--apr_status_t h2_conn_setup(struct h2_ctx *ctx, conn_rec *c, request_rec *r);
-+apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s);
-
- /**
- * Run the HTTP/2 connection in synchronous fashion.
- * Return when the HTTP/2 session is done
- * and the connection will close or a fatal error occurred.
- *
-- * @param ctx the http2 context to run
-+ * @param c the http2 connection to run
- * @return APR_SUCCESS when session is done.
- */
--apr_status_t h2_conn_run(struct h2_ctx *ctx, conn_rec *c);
-+apr_status_t h2_conn_run(conn_rec *c);
-
- /**
- * The connection is about to close. If we have not send a GOAWAY
-diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_conn_io.c
-index eb6ec92..5f17e85 100644
---- a/modules/http2/h2_conn_io.c
-+++ b/modules/http2/h2_conn_io.c
-@@ -40,12 +40,17 @@
- * ~= 1300 bytes */
- #define WRITE_SIZE_INITIAL 1300
-
--/* Calculated like this: max TLS record size 16*1024
-- * - 40 (IP) - 20 (TCP) - 40 (TCP options)
-- * - TLS overhead (60-100)
-- * which seems to create less TCP packets overall
-+/* The maximum we'd like to write in one chunk is
-+ * the max size of a TLS record. When pushing
-+ * many frames down the h2 connection, this might
-+ * align differently because of headers and other
-+ * frames or simply as not sufficient data is
-+ * in a response body.
-+ * However keeping frames at or below this limit
-+ * should make optimizations at the layer that writes
-+ * to TLS easier.
- */
--#define WRITE_SIZE_MAX (TLS_DATA_MAX - 100)
-+#define WRITE_SIZE_MAX (TLS_DATA_MAX)
-
-
- static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
-@@ -123,21 +128,20 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
-
- }
-
--apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
-- const h2_config *cfg)
-+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
- {
- io->c = c;
- io->output = apr_brigade_create(c->pool, c->bucket_alloc);
- io->is_tls = h2_h2_is_tls(c);
- io->buffer_output = io->is_tls;
-- io->flush_threshold = (apr_size_t)h2_config_geti64(cfg, H2_CONF_STREAM_MAX_MEM);
-+ io->flush_threshold = (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM);
-
- if (io->is_tls) {
- /* This is what we start with,
- * see https://issues.apache.org/jira/browse/TS-2503
- */
-- io->warmup_size = h2_config_geti64(cfg, H2_CONF_TLS_WARMUP_SIZE);
-- io->cooldown_usecs = (h2_config_geti(cfg, H2_CONF_TLS_COOLDOWN_SECS)
-+ io->warmup_size = h2_config_sgeti64(s, H2_CONF_TLS_WARMUP_SIZE);
-+ io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS)
- * APR_USEC_PER_SEC);
- io->write_size = (io->cooldown_usecs > 0?
- WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
-diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_conn_io.h
-index 2c3be1c..e96203c 100644
---- a/modules/http2/h2_conn_io.h
-+++ b/modules/http2/h2_conn_io.h
-@@ -48,8 +48,7 @@ typedef struct {
- apr_size_t slen;
- } h2_conn_io;
-
--apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c,
-- const struct h2_config *cfg);
-+apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s);
-
- /**
- * Append data to the buffered output.
-diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
-index d5ccc24..095f355 100644
---- a/modules/http2/h2_ctx.c
-+++ b/modules/http2/h2_ctx.c
-@@ -29,8 +29,8 @@ static h2_ctx *h2_ctx_create(const conn_rec *c)
- {
- h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
- ap_assert(ctx);
-+ h2_ctx_server_update(ctx, c->base_server);
- ap_set_module_config(c->conn_config, &http2_module, ctx);
-- h2_ctx_server_set(ctx, c->base_server);
- return ctx;
- }
-
-@@ -79,8 +79,9 @@ h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto)
- return ctx;
- }
-
--h2_session *h2_ctx_session_get(h2_ctx *ctx)
-+h2_session *h2_ctx_get_session(conn_rec *c)
- {
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->session : NULL;
- }
-
-@@ -89,33 +90,17 @@ void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session)
- ctx->session = session;
- }
-
--server_rec *h2_ctx_server_get(h2_ctx *ctx)
-+h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s)
- {
-- return ctx? ctx->server : NULL;
--}
--
--h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s)
--{
-- ctx->server = s;
-+ if (ctx->server != s) {
-+ ctx->server = s;
-+ }
- return ctx;
- }
-
--int h2_ctx_is_task(h2_ctx *ctx)
--{
-- return ctx && ctx->task;
--}
--
--h2_task *h2_ctx_get_task(h2_ctx *ctx)
-+h2_task *h2_ctx_get_task(conn_rec *c)
- {
-+ h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->task : NULL;
- }
-
--h2_task *h2_ctx_cget_task(conn_rec *c)
--{
-- return h2_ctx_get_task(h2_ctx_get(c, 0));
--}
--
--h2_task *h2_ctx_rget_task(request_rec *r)
--{
-- return h2_ctx_get_task(h2_ctx_rget(r));
--}
-diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h
-index cb111c9..417ef36 100644
---- a/modules/http2/h2_ctx.h
-+++ b/modules/http2/h2_ctx.h
-@@ -56,12 +56,11 @@ h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task);
- */
- h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
-
--/* Set the server_rec relevant for this context.
-+/* Update the server_rec relevant for this context. A server for
-+ * a connection may change during SNI handling, for example.
- */
--h2_ctx *h2_ctx_server_set(h2_ctx *ctx, server_rec *s);
--server_rec *h2_ctx_server_get(h2_ctx *ctx);
-+h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s);
-
--struct h2_session *h2_ctx_session_get(h2_ctx *ctx);
- void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
-
- /**
-@@ -69,10 +68,8 @@ void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
- */
- const char *h2_ctx_protocol_get(const conn_rec *c);
-
--int h2_ctx_is_task(h2_ctx *ctx);
-+struct h2_session *h2_ctx_get_session(conn_rec *c);
-+struct h2_task *h2_ctx_get_task(conn_rec *c);
-
--struct h2_task *h2_ctx_get_task(h2_ctx *ctx);
--struct h2_task *h2_ctx_cget_task(conn_rec *c);
--struct h2_task *h2_ctx_rget_task(request_rec *r);
-
- #endif /* defined(__mod_h2__h2_ctx__) */
-diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
-index 8b254b1..5fd237f 100644
---- a/modules/http2/h2_filter.c
-+++ b/modules/http2/h2_filter.c
-@@ -54,6 +54,7 @@ static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin,
- const char *data;
- ssize_t n;
-
-+ (void)c;
- status = apr_bucket_read(b, &data, &len, block);
-
- while (status == APR_SUCCESS && len > 0) {
-@@ -71,10 +72,10 @@ static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin,
- }
- else {
- session->io.bytes_read += n;
-- if (len <= n) {
-+ if ((apr_ssize_t)len <= n) {
- break;
- }
-- len -= n;
-+ len -= (apr_size_t)n;
- data += n;
- }
- }
-@@ -277,6 +278,7 @@ apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
- apr_bucket_brigade *dest,
- const apr_bucket *src)
- {
-+ (void)beam;
- if (H2_BUCKET_IS_OBSERVER(src)) {
- h2_bucket_observer *l = (h2_bucket_observer *)src->data;
- apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
-@@ -311,8 +313,7 @@ static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last)
- bbout(bb, " \"settings\": {\n");
- bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
- bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
-- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
-- h2_config_geti(s->config, H2_CONF_WIN_SIZE));
-+ bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", h2_config_sgeti(s->s, H2_CONF_WIN_SIZE));
- bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
- bbout(bb, " }%s\n", last? "" : ",");
- }
-@@ -431,41 +432,38 @@ static void add_stats(apr_bucket_brigade *bb, h2_session *s,
-
- static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
- {
-- conn_rec *c = task->c->master;
-- h2_ctx *h2ctx = h2_ctx_get(c, 0);
-- h2_session *session;
-- h2_stream *stream;
-+ h2_mplx *m = task->mplx;
-+ h2_stream *stream = h2_mplx_stream_get(m, task->stream_id);
-+ h2_session *s;
-+ conn_rec *c;
-+
- apr_bucket_brigade *bb;
- apr_bucket *e;
- int32_t connFlowIn, connFlowOut;
-
--
-- if (!h2ctx || (session = h2_ctx_session_get(h2ctx)) == NULL) {
-- return APR_SUCCESS;
-- }
--
-- stream = h2_session_stream_get(session, task->stream_id);
- if (!stream) {
- /* stream already done */
- return APR_SUCCESS;
- }
-+ s = stream->session;
-+ c = s->c;
-
- bb = apr_brigade_create(stream->pool, c->bucket_alloc);
-
-- connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
-- connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
-+ connFlowIn = nghttp2_session_get_effective_local_window_size(s->ngh2);
-+ connFlowOut = nghttp2_session_get_remote_window_size(s->ngh2);
-
- bbout(bb, "{\n");
- bbout(bb, " \"version\": \"draft-01\",\n");
-- add_settings(bb, session, 0);
-- add_peer_settings(bb, session, 0);
-+ add_settings(bb, s, 0);
-+ add_peer_settings(bb, s, 0);
- bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
- bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
-- bbout(bb, " \"sentGoAway\": %d,\n", session->local.shutdown);
-+ bbout(bb, " \"sentGoAway\": %d,\n", s->local.shutdown);
-
-- add_streams(bb, session, 0);
-+ add_streams(bb, s, 0);
-
-- add_stats(bb, session, stream, 1);
-+ add_stats(bb, s, stream, 1);
- bbout(bb, "}\n");
-
- while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
-@@ -497,7 +495,6 @@ static apr_status_t status_event(void *ctx, h2_bucket_event event,
-
- int h2_filter_h2_status_handler(request_rec *r)
- {
-- h2_ctx *ctx = h2_ctx_rget(r);
- conn_rec *c = r->connection;
- h2_task *task;
- apr_bucket_brigade *bb;
-@@ -511,7 +508,7 @@ int h2_filter_h2_status_handler(request_rec *r)
- return DECLINED;
- }
-
-- task = ctx? h2_ctx_get_task(ctx) : NULL;
-+ task = h2_ctx_get_task(r->connection);
- if (task) {
-
- if ((status = ap_discard_request_body(r)) != OK) {
-diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
-index dd6ad90..c3e3352 100644
---- a/modules/http2/h2_from_h1.c
-+++ b/modules/http2/h2_from_h1.c
-@@ -586,18 +586,20 @@ apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
- }
- }
-
-- if (r->header_only) {
-+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
-- "h2_task(%s): header_only, cleanup output brigade",
-+ "h2_task(%s): headers only, cleanup output brigade",
- task->id);
- b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
- while (b != APR_BRIGADE_SENTINEL(bb)) {
- next = APR_BUCKET_NEXT(b);
- if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
- break;
-- }
-- APR_BUCKET_REMOVE(b);
-- apr_bucket_destroy(b);
-+ }
-+ if (!H2_BUCKET_IS_HEADERS(b)) {
-+ APR_BUCKET_REMOVE(b);
-+ apr_bucket_destroy(b);
-+ }
- b = next;
- }
- }
-diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_h2.c
-index 5580cef..4ff1d51 100644
---- a/modules/http2/h2_h2.c
-+++ b/modules/http2/h2_h2.c
-@@ -463,19 +463,18 @@ int h2_h2_is_tls(conn_rec *c)
- return opt_ssl_is_https && opt_ssl_is_https(c);
- }
-
--int h2_is_acceptable_connection(conn_rec *c, int require_all)
-+int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all)
- {
- int is_tls = h2_h2_is_tls(c);
-- const h2_config *cfg = h2_config_get(c);
-
-- if (is_tls && h2_config_geti(cfg, H2_CONF_MODERN_TLS_ONLY) > 0) {
-+ if (is_tls && h2_config_cgeti(c, H2_CONF_MODERN_TLS_ONLY) > 0) {
- /* Check TLS connection for modern TLS parameters, as defined in
- * RFC 7540 and https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility
- */
- apr_pool_t *pool = c->pool;
- server_rec *s = c->base_server;
- char *val;
--
-+
- if (!opt_ssl_var_lookup) {
- /* unable to check */
- return 0;
-@@ -521,26 +520,22 @@ int h2_is_acceptable_connection(conn_rec *c, int require_all)
- return 1;
- }
-
--int h2_allows_h2_direct(conn_rec *c)
-+static int h2_allows_h2_direct(conn_rec *c)
- {
-- const h2_config *cfg = h2_config_get(c);
- int is_tls = h2_h2_is_tls(c);
- const char *needed_protocol = is_tls? "h2" : "h2c";
-- int h2_direct = h2_config_geti(cfg, H2_CONF_DIRECT);
-+ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
-
- if (h2_direct < 0) {
- h2_direct = is_tls? 0 : 1;
- }
-- return (h2_direct
-- && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
-+ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
- }
-
--int h2_allows_h2_upgrade(conn_rec *c)
-+int h2_allows_h2_upgrade(request_rec *r)
- {
-- const h2_config *cfg = h2_config_get(c);
-- int h2_upgrade = h2_config_geti(cfg, H2_CONF_UPGRADE);
--
-- return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(c));
-+ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
-+ return h2_upgrade > 0 || (h2_upgrade < 0 && !h2_h2_is_tls(r->connection));
- }
-
- /*******************************************************************************
-@@ -581,14 +576,17 @@ int h2_h2_process_conn(conn_rec* c)
- {
- apr_status_t status;
- h2_ctx *ctx;
-+ server_rec *s;
-
- if (c->master) {
- return DECLINED;
- }
-
- ctx = h2_ctx_get(c, 0);
-+ s = ctx? ctx->server : c->base_server;
-+
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx && ctx->task) {
- /* our stream pseudo connection */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
- return DECLINED;
-@@ -601,19 +599,19 @@ int h2_h2_process_conn(conn_rec* c)
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
- "new connection using protocol '%s', direct=%d, "
- "tls acceptable=%d", proto, h2_allows_h2_direct(c),
-- h2_is_acceptable_connection(c, 1));
-+ h2_is_acceptable_connection(c, NULL, 1));
- }
-
- if (!strcmp(AP_PROTOCOL_HTTP1, proto)
- && h2_allows_h2_direct(c)
-- && h2_is_acceptable_connection(c, 1)) {
-+ && h2_is_acceptable_connection(c, NULL, 1)) {
- /* Fresh connection still is on http/1.1 and H2Direct is enabled.
- * Otherwise connection is in a fully acceptable state.
- * -> peek at the first 24 incoming bytes
- */
- apr_bucket_brigade *temp;
-- char *s = NULL;
-- apr_size_t slen;
-+ char *peek = NULL;
-+ apr_size_t peeklen;
-
- temp = apr_brigade_create(c->pool, c->bucket_alloc);
- status = ap_get_brigade(c->input_filters, temp,
-@@ -626,8 +624,8 @@ int h2_h2_process_conn(conn_rec* c)
- return DECLINED;
- }
-
-- apr_brigade_pflatten(temp, &s, &slen, c->pool);
-- if ((slen >= 24) && !memcmp(H2_MAGIC_TOKEN, s, 24)) {
-+ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
-+ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, direct mode detected");
- if (!ctx) {
-@@ -638,7 +636,7 @@ int h2_h2_process_conn(conn_rec* c)
- else if (APLOGctrace2(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_h2, not detected in %d bytes(base64): %s",
-- (int)slen, h2_util_base64url_encode(s, slen, c->pool));
-+ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
- }
-
- apr_brigade_destroy(temp);
-@@ -647,15 +645,16 @@ int h2_h2_process_conn(conn_rec* c)
-
- if (ctx) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
-- if (!h2_ctx_session_get(ctx)) {
-- status = h2_conn_setup(ctx, c, NULL);
-+
-+ if (!h2_ctx_get_session(c)) {
-+ status = h2_conn_setup(c, NULL, s);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
- if (status != APR_SUCCESS) {
- h2_ctx_clear(c);
- return !OK;
- }
- }
-- h2_conn_run(ctx, c);
-+ h2_conn_run(c);
- return OK;
- }
-
-@@ -684,16 +683,17 @@ static int h2_h2_pre_close_conn(conn_rec *c)
-
- static void check_push(request_rec *r, const char *tag)
- {
-- const h2_config *conf = h2_config_rget(r);
-- if (!r->expecting_100
-- && conf && conf->push_list && conf->push_list->nelts > 0) {
-+ apr_array_header_t *push_list = h2_config_push_list(r);
-+
-+ if (!r->expecting_100 && push_list && push_list->nelts > 0) {
- int i, old_status;
- const char *old_line;
-+
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "%s, early announcing %d resources for push",
-- tag, conf->push_list->nelts);
-- for (i = 0; i < conf->push_list->nelts; ++i) {
-- h2_push_res *push = &APR_ARRAY_IDX(conf->push_list, i, h2_push_res);
-+ tag, push_list->nelts);
-+ for (i = 0; i < push_list->nelts; ++i) {
-+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
- apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
-@@ -712,8 +712,7 @@ static int h2_h2_post_read_req(request_rec *r)
- {
- /* slave connection? */
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-- struct h2_task *task = h2_ctx_get_task(ctx);
-+ struct h2_task *task = h2_ctx_get_task(r->connection);
- /* This hook will get called twice on internal redirects. Take care
- * that we manipulate filters only once. */
- if (task && !task->filters_set) {
-@@ -746,12 +745,10 @@ static int h2_h2_late_fixups(request_rec *r)
- {
- /* slave connection? */
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-- struct h2_task *task = h2_ctx_get_task(ctx);
-+ struct h2_task *task = h2_ctx_get_task(r->connection);
- if (task) {
- /* check if we copy vs. setaside files in this location */
-- task->output.copy_files = h2_config_geti(h2_config_rget(r),
-- H2_CONF_COPY_FILES);
-+ task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
- if (task->output.copy_files) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_slave_out(%s): copy_files on", task->id);
-diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_h2.h
-index 367823d..339e898 100644
---- a/modules/http2/h2_h2.h
-+++ b/modules/http2/h2_h2.h
-@@ -57,23 +57,15 @@ void h2_h2_register_hooks(void);
- * the handshake is still ongoing.
- * @return != 0 iff connection requirements are met
- */
--int h2_is_acceptable_connection(conn_rec *c, int require_all);
--
--/**
-- * Check if the "direct" HTTP/2 mode of protocol handling is enabled
-- * for the given connection.
-- * @param c the connection to check
-- * @return != 0 iff direct mode is enabled
-- */
--int h2_allows_h2_direct(conn_rec *c);
-+int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all);
-
- /**
- * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
-- * for the given connection.
-- * @param c the connection to check
-+ * for the given request.
-+ * @param r the request to check
- * @return != 0 iff Upgrade switching is enabled
- */
--int h2_allows_h2_upgrade(conn_rec *c);
-+int h2_allows_h2_upgrade(request_rec *r);
-
-
- #endif /* defined(__mod_h2__h2_h2__) */
-diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
-index 49d9c0a..6d43290 100644
---- a/modules/http2/h2_headers.c
-+++ b/modules/http2/h2_headers.c
-@@ -28,6 +28,7 @@
-
- #include "h2_private.h"
- #include "h2_h2.h"
-+#include "h2_config.h"
- #include "h2_util.h"
- #include "h2_request.h"
- #include "h2_headers.h"
-@@ -129,21 +130,27 @@ h2_headers *h2_headers_rcreate(request_rec *r, int status,
- {
- h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
- if (headers->status == HTTP_FORBIDDEN) {
-- const char *cause = apr_table_get(r->notes, "ssl-renegotiate-forbidden");
-- if (cause) {
-- /* This request triggered a TLS renegotiation that is now allowed
-- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-- */
-- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
-- APLOGNO(03061)
-- "h2_headers(%ld): renegotiate forbidden, cause: %s",
-- (long)r->connection->id, cause);
-- headers->status = H2_ERR_HTTP_1_1_REQUIRED;
-+ request_rec *r_prev;
-+ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
-+ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
-+ if (cause) {
-+ /* This request triggered a TLS renegotiation that is not allowed
-+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
-+ */
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
-+ APLOGNO(03061)
-+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
-+ (long)r->connection->id, cause);
-+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
-+ break;
-+ }
- }
- }
- if (is_unsafe(r->server)) {
-- apr_table_setn(headers->notes, H2_HDR_CONFORMANCE,
-- H2_HDR_CONFORMANCE_UNSAFE);
-+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
-+ }
-+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
-+ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
- }
- return headers;
- }
-diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
-index db3cb63..fae77c7 100644
---- a/modules/http2/h2_mplx.c
-+++ b/modules/http2/h2_mplx.c
-@@ -40,7 +40,6 @@
- #include "h2_ctx.h"
- #include "h2_h2.h"
- #include "h2_mplx.h"
--#include "h2_ngn_shed.h"
- #include "h2_request.h"
- #include "h2_stream.h"
- #include "h2_session.h"
-@@ -83,12 +82,6 @@ static void check_data_for(h2_mplx *m, h2_stream *stream, int lock);
- static void stream_output_consumed(void *ctx,
- h2_bucket_beam *beam, apr_off_t length)
- {
-- h2_stream *stream = ctx;
-- h2_task *task = stream->task;
--
-- if (length > 0 && task && task->assigned) {
-- h2_req_engine_out_consumed(task->assigned, task->c, length);
-- }
- }
-
- static void stream_input_ev(void *ctx, h2_bucket_beam *beam)
-@@ -136,7 +129,6 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream)
- }
- else if (stream->task) {
- stream->task->c->aborted = 1;
-- apr_thread_cond_broadcast(m->task_thawed);
- }
- }
-
-@@ -151,25 +143,19 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream)
- * their HTTP/1 cousins, the separate allocator seems to work better
- * than protecting a shared h2_session one with an own lock.
- */
--h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
-- const h2_config *conf,
-+h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
- h2_workers *workers)
- {
- apr_status_t status = APR_SUCCESS;
- apr_allocator_t *allocator;
- apr_thread_mutex_t *mutex;
- h2_mplx *m;
-- h2_ctx *ctx = h2_ctx_get(c, 0);
-- ap_assert(conf);
-
- m = apr_pcalloc(parent, sizeof(h2_mplx));
- if (m) {
- m->id = c->id;
- m->c = c;
-- m->s = (ctx? h2_ctx_server_get(ctx) : NULL);
-- if (!m->s) {
-- m->s = c->base_server;
-- }
-+ m->s = s;
-
- /* We create a pool with its own allocator to be used for
- * processing slave connections. This is the only way to have the
-@@ -204,14 +190,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
- return NULL;
- }
-
-- status = apr_thread_cond_create(&m->task_thawed, m->pool);
-- if (status != APR_SUCCESS) {
-- apr_pool_destroy(m->pool);
-- return NULL;
-- }
--
-- m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
-- m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
-+ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
-+ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
-@@ -232,10 +212,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
- m->limit_change_interval = apr_time_from_msec(100);
-
- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
--
-- m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams,
-- m->stream_max_mem);
-- h2_ngn_shed_set_ctx(m->ngn_shed , m);
- }
- return m;
- }
-@@ -394,10 +370,10 @@ static int report_stream_iter(void *ctx, void *val) {
- if (task) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
- H2_STRM_MSG(stream, "->03198: %s %s %s"
-- "[started=%d/done=%d/frozen=%d]"),
-+ "[started=%d/done=%d]"),
- task->request->method, task->request->authority,
- task->request->path, task->worker_started,
-- task->worker_done, task->frozen);
-+ task->worker_done);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
-@@ -436,7 +412,7 @@ static int stream_cancel_iter(void *ctx, void *val) {
- void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- {
- apr_status_t status;
-- int i, wait_secs = 60;
-+ int i, wait_secs = 60, old_aborted;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): start release", m->id);
-@@ -447,15 +423,19 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
-
- H2_MPLX_ENTER_ALWAYS(m);
-
-+ /* While really terminating any slave connections, treat the master
-+ * connection as aborted. It's not as if we could send any more data
-+ * at this point. */
-+ old_aborted = m->c->aborted;
-+ m->c->aborted = 1;
-+
- /* How to shut down a h2 connection:
- * 1. cancel all streams still active */
- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
- /* until empty */
- }
-
-- /* 2. terminate ngn_shed, no more streams
-- * should be scheduled or in the active set */
-- h2_ngn_shed_abort(m->ngn_shed);
-+ /* 2. no more streams should be scheduled or in the active set */
- ap_assert(h2_ihash_empty(m->streams));
- ap_assert(h2_iq_empty(m->q));
-
-@@ -479,10 +459,6 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- ap_assert(m->tasks_active == 0);
- m->join_wait = NULL;
-
-- /* 4. close the h2_req_enginge shed */
-- h2_ngn_shed_destroy(m->ngn_shed);
-- m->ngn_shed = NULL;
--
- /* 4. With all workers done, all streams should be in spurge */
- if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
-@@ -491,6 +467,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- h2_ihash_iter(m->shold, unexpected_stream_iter, m);
- }
-
-+ m->c->aborted = old_aborted;
- H2_MPLX_LEAVE(m);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-@@ -787,47 +764,14 @@ apr_status_t h2_mplx_pop_task(h2_mplx *m, h2_task **ptask)
- return rv;
- }
-
--static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
-+static void task_done(h2_mplx *m, h2_task *task)
- {
- h2_stream *stream;
-
-- if (task->frozen) {
-- /* this task was handed over to an engine for processing
-- * and the original worker has finished. That means the
-- * engine may start processing now. */
-- h2_task_thaw(task);
-- apr_thread_cond_broadcast(m->task_thawed);
-- return;
-- }
--
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): task(%s) done", m->id, task->id);
- out_close(m, task);
-
-- if (ngn) {
-- apr_off_t bytes = 0;
-- h2_beam_send(task->output.beam, NULL, APR_NONBLOCK_READ);
-- bytes += h2_beam_get_buffered(task->output.beam);
-- if (bytes > 0) {
-- /* we need to report consumed and current buffered output
-- * to the engine. The request will be streamed out or cancelled,
-- * no more data is coming from it and the engine should update
-- * its calculations before we destroy this information. */
-- h2_req_engine_out_consumed(ngn, task->c, bytes);
-- }
-- }
--
-- if (task->engine) {
-- if (!m->aborted && !task->c->aborted
-- && !h2_req_engine_is_shutdown(task->engine)) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(10022)
-- "h2_mplx(%ld): task(%s) has not-shutdown "
-- "engine(%s)", m->id, task->id,
-- h2_req_engine_get_id(task->engine));
-- }
-- h2_ngn_shed_done_ngn(m->ngn_shed, task->engine);
-- }
--
- task->worker_done = 1;
- task->done_at = apr_time_now();
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
-@@ -849,18 +793,24 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
- m->id, m->limit_active);
- }
- }
--
-+
-+ ap_assert(task->done_done == 0);
-+
- stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream) {
- /* stream not done yet. */
- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
- /* reset and schedule again */
-+ task->worker_done = 0;
- h2_task_redo(task);
- h2_ihash_remove(m->sredo, stream->id);
- h2_iq_add(m->q, stream->id, NULL, NULL);
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
-+ H2_STRM_MSG(stream, "redo, added to q"));
- }
- else {
- /* stream not cleaned up, stay around */
-+ task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- H2_STRM_MSG(stream, "task_done, stream open"));
- if (stream->input) {
-@@ -873,6 +823,7 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
- }
- else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
- /* stream is done, was just waiting for this. */
-+ task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- H2_STRM_MSG(stream, "task_done, in hold"));
- if (stream->input) {
-@@ -897,7 +848,7 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
- {
- H2_MPLX_ENTER_ALWAYS(m);
-
-- task_done(m, task, NULL);
-+ task_done(m, task);
- --m->tasks_active;
-
- if (m->join_wait) {
-@@ -1091,142 +1042,6 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
- }
-
- /*******************************************************************************
-- * HTTP/2 request engines
-- ******************************************************************************/
--
--typedef struct {
-- h2_mplx * m;
-- h2_req_engine *ngn;
-- int streams_updated;
--} ngn_update_ctx;
--
--static int ngn_update_window(void *ctx, void *val)
--{
-- ngn_update_ctx *uctx = ctx;
-- h2_stream *stream = val;
-- if (stream->task && stream->task->assigned == uctx->ngn
-- && output_consumed_signal(uctx->m, stream->task)) {
-- ++uctx->streams_updated;
-- }
-- return 1;
--}
--
--static apr_status_t ngn_out_update_windows(h2_mplx *m, h2_req_engine *ngn)
--{
-- ngn_update_ctx ctx;
--
-- ctx.m = m;
-- ctx.ngn = ngn;
-- ctx.streams_updated = 0;
-- h2_ihash_iter(m->streams, ngn_update_window, &ctx);
--
-- return ctx.streams_updated? APR_SUCCESS : APR_EAGAIN;
--}
--
--apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- http2_req_engine_init *einit)
--{
-- apr_status_t status;
-- h2_mplx *m;
-- h2_task *task;
-- h2_stream *stream;
--
-- task = h2_ctx_rget_task(r);
-- if (!task) {
-- return APR_ECONNABORTED;
-- }
-- m = task->mplx;
--
-- H2_MPLX_ENTER(m);
--
-- stream = h2_ihash_get(m->streams, task->stream_id);
-- if (stream) {
-- status = h2_ngn_shed_push_request(m->ngn_shed, ngn_type, r, einit);
-- }
-- else {
-- status = APR_ECONNABORTED;
-- }
--
-- H2_MPLX_LEAVE(m);
-- return status;
--}
--
--apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr)
--{
-- h2_ngn_shed *shed = h2_ngn_shed_get_shed(ngn);
-- h2_mplx *m = h2_ngn_shed_get_ctx(shed);
-- apr_status_t status;
-- int want_shutdown;
--
-- H2_MPLX_ENTER(m);
--
-- want_shutdown = (block == APR_BLOCK_READ);
--
-- /* Take this opportunity to update output consummation
-- * for this engine */
-- ngn_out_update_windows(m, ngn);
--
-- if (want_shutdown && !h2_iq_empty(m->q)) {
-- /* For a blocking read, check first if requests are to be
-- * had and, if not, wait a short while before doing the
-- * blocking, and if unsuccessful, terminating read.
-- */
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
-- if (APR_STATUS_IS_EAGAIN(status)) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): start block engine pull", m->id);
-- apr_thread_cond_timedwait(m->task_thawed, m->lock,
-- apr_time_from_msec(20));
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity, 1, pr);
-- }
-- }
-- else {
-- status = h2_ngn_shed_pull_request(shed, ngn, capacity,
-- want_shutdown, pr);
-- }
--
-- H2_MPLX_LEAVE(m);
-- return status;
--}
--
--void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status)
--{
-- h2_task *task = h2_ctx_cget_task(r_conn);
--
-- if (task) {
-- h2_mplx *m = task->mplx;
-- h2_stream *stream;
--
-- H2_MPLX_ENTER_ALWAYS(m);
--
-- stream = h2_ihash_get(m->streams, task->stream_id);
--
-- ngn_out_update_windows(m, ngn);
-- h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
--
-- if (status != APR_SUCCESS && stream
-- && h2_task_can_redo(task)
-- && !h2_ihash_get(m->sredo, stream->id)) {
-- h2_ihash_add(m->sredo, stream);
-- }
--
-- if (task->engine) {
-- /* cannot report that as done until engine returns */
-- }
-- else {
-- task_done(m, task, ngn);
-- }
--
-- H2_MPLX_LEAVE(m);
-- }
--}
--
--/*******************************************************************************
- * mplx master events dispatching
- ******************************************************************************/
-
-diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
-index 2890b98..575ccaf 100644
---- a/modules/http2/h2_mplx.h
-+++ b/modules/http2/h2_mplx.h
-@@ -47,8 +47,6 @@ struct h2_request;
- struct apr_thread_cond_t;
- struct h2_workers;
- struct h2_iqueue;
--struct h2_ngn_shed;
--struct h2_req_engine;
-
- #include
-
-@@ -86,7 +84,6 @@ struct h2_mplx {
-
- apr_thread_mutex_t *lock;
- struct apr_thread_cond_t *added_output;
-- struct apr_thread_cond_t *task_thawed;
- struct apr_thread_cond_t *join_wait;
-
- apr_size_t stream_max_mem;
-@@ -95,8 +92,6 @@ struct h2_mplx {
- apr_array_header_t *spare_slaves; /* spare slave connections */
-
- struct h2_workers *workers;
--
-- struct h2_ngn_shed *ngn_shed;
- };
-
-
-@@ -111,8 +106,7 @@ apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s);
- * Create the multiplexer for the given HTTP2 session.
- * Implicitly has reference count 1.
- */
--h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *master,
-- const struct h2_config *conf,
-+h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *master,
- struct h2_workers *workers);
-
- /**
-@@ -303,28 +297,4 @@ APR_RING_INSERT_TAIL((b), ap__b, h2_mplx, link); \
- */
- apr_status_t h2_mplx_idle(h2_mplx *m);
-
--/*******************************************************************************
-- * h2_req_engine handling
-- ******************************************************************************/
--
--typedef void h2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
--typedef apr_status_t h2_mplx_req_engine_init(struct h2_req_engine *engine,
-- const char *id,
-- const char *type,
-- apr_pool_t *pool,
-- apr_size_t req_buffer_size,
-- request_rec *r,
-- h2_output_consumed **pconsumed,
-- void **pbaton);
--
--apr_status_t h2_mplx_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- h2_mplx_req_engine_init *einit);
--apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr);
--void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status);
--
- #endif /* defined(__mod_h2__h2_mplx__) */
-diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
-index fb85776..e69de29 100644
---- a/modules/http2/h2_ngn_shed.c
-+++ b/modules/http2/h2_ngn_shed.c
-@@ -1,392 +0,0 @@
--/* Licensed to the Apache Software Foundation (ASF) under one or more
-- * contributor license agreements. See the NOTICE file distributed with
-- * this work for additional information regarding copyright ownership.
-- * The ASF licenses this file to You under the Apache License, Version 2.0
-- * (the "License"); you may not use this file except in compliance with
-- * the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--
--#include
--#include
--#include
--
--#include
--#include
--#include
--#include
--
--#include
--#include
--#include
--
--#include "mod_http2.h"
--
--#include "h2_private.h"
--#include "h2.h"
--#include "h2_config.h"
--#include "h2_conn.h"
--#include "h2_ctx.h"
--#include "h2_h2.h"
--#include "h2_mplx.h"
--#include "h2_request.h"
--#include "h2_task.h"
--#include "h2_util.h"
--#include "h2_ngn_shed.h"
--
--
--typedef struct h2_ngn_entry h2_ngn_entry;
--struct h2_ngn_entry {
-- APR_RING_ENTRY(h2_ngn_entry) link;
-- h2_task *task;
-- request_rec *r;
--};
--
--#define H2_NGN_ENTRY_NEXT(e) APR_RING_NEXT((e), link)
--#define H2_NGN_ENTRY_PREV(e) APR_RING_PREV((e), link)
--#define H2_NGN_ENTRY_REMOVE(e) APR_RING_REMOVE((e), link)
--
--#define H2_REQ_ENTRIES_SENTINEL(b) APR_RING_SENTINEL((b), h2_ngn_entry, link)
--#define H2_REQ_ENTRIES_EMPTY(b) APR_RING_EMPTY((b), h2_ngn_entry, link)
--#define H2_REQ_ENTRIES_FIRST(b) APR_RING_FIRST(b)
--#define H2_REQ_ENTRIES_LAST(b) APR_RING_LAST(b)
--
--#define H2_REQ_ENTRIES_INSERT_HEAD(b, e) do { \
--h2_ngn_entry *ap__b = (e); \
--APR_RING_INSERT_HEAD((b), ap__b, h2_ngn_entry, link); \
--} while (0)
--
--#define H2_REQ_ENTRIES_INSERT_TAIL(b, e) do { \
--h2_ngn_entry *ap__b = (e); \
--APR_RING_INSERT_TAIL((b), ap__b, h2_ngn_entry, link); \
--} while (0)
--
--struct h2_req_engine {
-- const char *id; /* identifier */
-- const char *type; /* name of the engine type */
-- apr_pool_t *pool; /* pool for engine specific allocations */
-- conn_rec *c; /* connection this engine is assigned to */
-- h2_task *task; /* the task this engine is based on, running in */
-- h2_ngn_shed *shed;
--
-- unsigned int shutdown : 1; /* engine is being shut down */
-- unsigned int done : 1; /* engine has finished */
--
-- APR_RING_HEAD(h2_req_entries, h2_ngn_entry) entries;
-- int capacity; /* maximum concurrent requests */
-- int no_assigned; /* # of assigned requests */
-- int no_live; /* # of live */
-- int no_finished; /* # of finished */
--
-- h2_output_consumed *out_consumed;
-- void *out_consumed_ctx;
--};
--
--const char *h2_req_engine_get_id(h2_req_engine *engine)
--{
-- return engine->id;
--}
--
--int h2_req_engine_is_shutdown(h2_req_engine *engine)
--{
-- return engine->shutdown;
--}
--
--void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
-- apr_off_t bytes)
--{
-- if (engine->out_consumed) {
-- engine->out_consumed(engine->out_consumed_ctx, c, bytes);
-- }
--}
--
--h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
-- int default_capacity,
-- apr_size_t req_buffer_size)
--{
-- h2_ngn_shed *shed;
--
-- shed = apr_pcalloc(pool, sizeof(*shed));
-- shed->c = c;
-- shed->pool = pool;
-- shed->default_capacity = default_capacity;
-- shed->req_buffer_size = req_buffer_size;
-- shed->ngns = apr_hash_make(pool);
--
-- return shed;
--}
--
--void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx)
--{
-- shed->user_ctx = user_ctx;
--}
--
--void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed)
--{
-- return shed->user_ctx;
--}
--
--h2_ngn_shed *h2_ngn_shed_get_shed(h2_req_engine *ngn)
--{
-- return ngn->shed;
--}
--
--void h2_ngn_shed_abort(h2_ngn_shed *shed)
--{
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03394)
-- "h2_ngn_shed(%ld): abort", shed->c->id);
-- shed->aborted = 1;
--}
--
--static void ngn_add_task(h2_req_engine *ngn, h2_task *task, request_rec *r)
--{
-- h2_ngn_entry *entry = apr_pcalloc(task->pool, sizeof(*entry));
-- APR_RING_ELEM_INIT(entry, link);
-- entry->task = task;
-- entry->r = r;
-- H2_REQ_ENTRIES_INSERT_TAIL(&ngn->entries, entry);
-- ngn->no_assigned++;
--}
--
--
--apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
-- request_rec *r,
-- http2_req_engine_init *einit)
--{
-- h2_req_engine *ngn;
-- h2_task *task = h2_ctx_rget_task(r);
--
-- ap_assert(task);
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): PUSHing request (task=%s)", shed->c->id,
-- task->id);
-- if (task->request->serialize) {
-- /* Max compatibility, deny processing of this */
-- return APR_EOF;
-- }
--
-- if (task->assigned) {
-- --task->assigned->no_assigned;
-- --task->assigned->no_live;
-- task->assigned = NULL;
-- }
--
-- if (task->engine) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-- "h2_ngn_shed(%ld): push task(%s) hosting engine %s "
-- "already with %d tasks",
-- shed->c->id, task->id, task->engine->id,
-- task->engine->no_assigned);
-- task->assigned = task->engine;
-- ngn_add_task(task->engine, task, r);
-- return APR_SUCCESS;
-- }
--
-- ngn = apr_hash_get(shed->ngns, ngn_type, APR_HASH_KEY_STRING);
-- if (ngn && !ngn->shutdown) {
-- /* this task will be processed in another thread,
-- * freeze any I/O for the time being. */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-- "h2_ngn_shed(%ld): pushing request %s to %s",
-- shed->c->id, task->id, ngn->id);
-- if (!h2_task_has_thawed(task)) {
-- h2_task_freeze(task);
-- }
-- ngn_add_task(ngn, task, r);
-- return APR_SUCCESS;
-- }
--
-- /* no existing engine or being shut down, start a new one */
-- if (einit) {
-- apr_status_t status;
-- apr_pool_t *pool = task->pool;
-- h2_req_engine *newngn;
--
-- newngn = apr_pcalloc(pool, sizeof(*ngn));
-- newngn->pool = pool;
-- newngn->id = apr_psprintf(pool, "ngn-%s", task->id);
-- newngn->type = apr_pstrdup(pool, ngn_type);
-- newngn->c = task->c;
-- newngn->shed = shed;
-- newngn->capacity = shed->default_capacity;
-- newngn->no_assigned = 1;
-- newngn->no_live = 1;
-- APR_RING_INIT(&newngn->entries, h2_ngn_entry, link);
--
-- status = einit(newngn, newngn->id, newngn->type, newngn->pool,
-- shed->req_buffer_size, r,
-- &newngn->out_consumed, &newngn->out_consumed_ctx);
--
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c, APLOGNO(03395)
-- "h2_ngn_shed(%ld): create engine %s (%s)",
-- shed->c->id, newngn->id, newngn->type);
-- if (status == APR_SUCCESS) {
-- newngn->task = task;
-- task->engine = newngn;
-- task->assigned = newngn;
-- apr_hash_set(shed->ngns, newngn->type, APR_HASH_KEY_STRING, newngn);
-- }
-- return status;
-- }
-- return APR_EOF;
--}
--
--static h2_ngn_entry *pop_detached(h2_req_engine *ngn)
--{
-- h2_ngn_entry *entry;
-- for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
-- entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
-- entry = H2_NGN_ENTRY_NEXT(entry)) {
-- if (h2_task_has_thawed(entry->task)
-- || (entry->task->engine == ngn)) {
-- /* The task hosting this engine can always be pulled by it.
-- * For other task, they need to become detached, e.g. no longer
-- * assigned to another worker. */
-- H2_NGN_ENTRY_REMOVE(entry);
-- return entry;
-- }
-- }
-- return NULL;
--}
--
--apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed,
-- h2_req_engine *ngn,
-- int capacity,
-- int want_shutdown,
-- request_rec **pr)
--{
-- h2_ngn_entry *entry;
--
-- ap_assert(ngn);
-- *pr = NULL;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, shed->c, APLOGNO(03396)
-- "h2_ngn_shed(%ld): pull task for engine %s, shutdown=%d",
-- shed->c->id, ngn->id, want_shutdown);
-- if (shed->aborted) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c, APLOGNO(03397)
-- "h2_ngn_shed(%ld): abort while pulling requests %s",
-- shed->c->id, ngn->id);
-- ngn->shutdown = 1;
-- return APR_ECONNABORTED;
-- }
--
-- ngn->capacity = capacity;
-- if (H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
-- if (want_shutdown) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): emtpy queue, shutdown engine %s",
-- shed->c->id, ngn->id);
-- ngn->shutdown = 1;
-- }
-- return ngn->shutdown? APR_EOF : APR_EAGAIN;
-- }
--
-- if ((entry = pop_detached(ngn))) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, entry->task->c, APLOGNO(03398)
-- "h2_ngn_shed(%ld): pulled request %s for engine %s",
-- shed->c->id, entry->task->id, ngn->id);
-- ngn->no_live++;
-- *pr = entry->r;
-- entry->task->assigned = ngn;
-- /* task will now run in ngn's own thread. Modules like lua
-- * seem to require the correct thread set in the conn_rec.
-- * See PR 59542. */
-- if (entry->task->c && ngn->c) {
-- entry->task->c->current_thread = ngn->c->current_thread;
-- }
-- if (entry->task->engine == ngn) {
-- /* If an engine pushes its own base task, and then pulls
-- * it back to itself again, it needs to be thawed.
-- */
-- h2_task_thaw(entry->task);
-- }
-- return APR_SUCCESS;
-- }
--
-- if (1) {
-- h2_ngn_entry *entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03399)
-- "h2_ngn_shed(%ld): pull task, nothing, first task %s",
-- shed->c->id, entry->task->id);
-- }
-- return APR_EAGAIN;
--}
--
--static apr_status_t ngn_done_task(h2_ngn_shed *shed, h2_req_engine *ngn,
-- h2_task *task, int waslive, int aborted)
--{
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, shed->c, APLOGNO(03400)
-- "h2_ngn_shed(%ld): task %s %s by %s",
-- shed->c->id, task->id, aborted? "aborted":"done", ngn->id);
-- ngn->no_finished++;
-- if (waslive) ngn->no_live--;
-- ngn->no_assigned--;
-- task->assigned = NULL;
--
-- return APR_SUCCESS;
--}
--
--apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
-- struct h2_req_engine *ngn, h2_task *task)
--{
-- return ngn_done_task(shed, ngn, task, 1, 0);
--}
--
--void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
--{
-- if (ngn->done) {
-- return;
-- }
--
-- if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
-- h2_ngn_entry *entry;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): exit engine %s (%s), "
-- "has still requests queued, shutdown=%d,"
-- "assigned=%ld, live=%ld, finished=%ld",
-- shed->c->id, ngn->id, ngn->type,
-- ngn->shutdown,
-- (long)ngn->no_assigned, (long)ngn->no_live,
-- (long)ngn->no_finished);
-- for (entry = H2_REQ_ENTRIES_FIRST(&ngn->entries);
-- entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
-- entry = H2_NGN_ENTRY_NEXT(entry)) {
-- h2_task *task = entry->task;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): engine %s has queued task %s, "
-- "frozen=%d, aborting",
-- shed->c->id, ngn->id, task->id, task->frozen);
-- ngn_done_task(shed, ngn, task, 0, 1);
-- task->engine = task->assigned = NULL;
-- }
-- }
-- if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): exit engine %s (%s), "
-- "assigned=%ld, live=%ld, finished=%ld",
-- shed->c->id, ngn->id, ngn->type,
-- (long)ngn->no_assigned, (long)ngn->no_live,
-- (long)ngn->no_finished);
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
-- "h2_ngn_shed(%ld): exit engine %s",
-- shed->c->id, ngn->id);
-- }
--
-- apr_hash_set(shed->ngns, ngn->type, APR_HASH_KEY_STRING, NULL);
-- ngn->done = 1;
--}
--
--void h2_ngn_shed_destroy(h2_ngn_shed *shed)
--{
-- ap_assert(apr_hash_count(shed->ngns) == 0);
--}
--
-diff --git a/modules/http2/h2_ngn_shed.h b/modules/http2/h2_ngn_shed.h
-index 7764c18..e69de29 100644
---- a/modules/http2/h2_ngn_shed.h
-+++ b/modules/http2/h2_ngn_shed.h
-@@ -1,79 +0,0 @@
--/* Licensed to the Apache Software Foundation (ASF) under one or more
-- * contributor license agreements. See the NOTICE file distributed with
-- * this work for additional information regarding copyright ownership.
-- * The ASF licenses this file to You under the Apache License, Version 2.0
-- * (the "License"); you may not use this file except in compliance with
-- * the License. You may obtain a copy of the License at
-- *
-- * http://www.apache.org/licenses/LICENSE-2.0
-- *
-- * Unless required by applicable law or agreed to in writing, software
-- * distributed under the License is distributed on an "AS IS" BASIS,
-- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- * See the License for the specific language governing permissions and
-- * limitations under the License.
-- */
--
--#ifndef h2_req_shed_h
--#define h2_req_shed_h
--
--struct h2_req_engine;
--struct h2_task;
--
--typedef struct h2_ngn_shed h2_ngn_shed;
--struct h2_ngn_shed {
-- conn_rec *c;
-- apr_pool_t *pool;
-- apr_hash_t *ngns;
-- void *user_ctx;
--
-- unsigned int aborted : 1;
--
-- int default_capacity;
-- apr_size_t req_buffer_size; /* preferred buffer size for responses */
--};
--
--const char *h2_req_engine_get_id(h2_req_engine *engine);
--int h2_req_engine_is_shutdown(h2_req_engine *engine);
--
--void h2_req_engine_out_consumed(h2_req_engine *engine, conn_rec *c,
-- apr_off_t bytes);
--
--typedef apr_status_t h2_shed_ngn_init(h2_req_engine *engine,
-- const char *id,
-- const char *type,
-- apr_pool_t *pool,
-- apr_size_t req_buffer_size,
-- request_rec *r,
-- h2_output_consumed **pconsumed,
-- void **pbaton);
--
--h2_ngn_shed *h2_ngn_shed_create(apr_pool_t *pool, conn_rec *c,
-- int default_capacity,
-- apr_size_t req_buffer_size);
--
--void h2_ngn_shed_destroy(h2_ngn_shed *shed);
--
--void h2_ngn_shed_set_ctx(h2_ngn_shed *shed, void *user_ctx);
--void *h2_ngn_shed_get_ctx(h2_ngn_shed *shed);
--
--h2_ngn_shed *h2_ngn_shed_get_shed(struct h2_req_engine *ngn);
--
--void h2_ngn_shed_abort(h2_ngn_shed *shed);
--
--apr_status_t h2_ngn_shed_push_request(h2_ngn_shed *shed, const char *ngn_type,
-- request_rec *r,
-- h2_shed_ngn_init *init_cb);
--
--apr_status_t h2_ngn_shed_pull_request(h2_ngn_shed *shed, h2_req_engine *pub_ngn,
-- int capacity,
-- int want_shutdown, request_rec **pr);
--
--apr_status_t h2_ngn_shed_done_task(h2_ngn_shed *shed,
-- struct h2_req_engine *ngn,
-- struct h2_task *task);
--
--void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn);
--
--
--#endif /* h2_req_shed_h */
-diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
-index 8389c7c..3a2718f 100644
---- a/modules/http2/h2_proxy_session.c
-+++ b/modules/http2/h2_proxy_session.c
-@@ -429,12 +429,6 @@ static int stream_response_data(nghttp2_session *ngh2, uint8_t flags,
- stream_id, NGHTTP2_STREAM_CLOSED);
- return NGHTTP2_ERR_STREAM_CLOSING;
- }
-- if (stream->standalone) {
-- nghttp2_session_consume(ngh2, stream_id, len);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, stream->r,
-- "h2_proxy_session(%s): stream %d, win_update %d bytes",
-- session->id, stream_id, (int)len);
-- }
- return 0;
- }
-
-@@ -641,7 +635,7 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
-
- nghttp2_option_new(&option);
- nghttp2_option_set_peer_max_concurrent_streams(option, 100);
-- nghttp2_option_set_no_auto_window_update(option, 1);
-+ nghttp2_option_set_no_auto_window_update(option, 0);
-
- nghttp2_session_client_new2(&session->ngh2, cbs, session, option);
-
-@@ -653,10 +647,12 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
- }
- else {
- h2_proxy_session *session = p_conn->data;
-- apr_interval_time_t age = apr_time_now() - session->last_frame_received;
-- if (age > apr_time_from_sec(1)) {
-- session->check_ping = 1;
-- nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
-+ if (!session->check_ping) {
-+ apr_interval_time_t age = apr_time_now() - session->last_frame_received;
-+ if (age > apr_time_from_sec(1)) {
-+ session->check_ping = 1;
-+ nghttp2_submit_ping(session->ngh2, 0, (const uint8_t *)"nevergonnagiveyouup");
-+ }
- }
- }
- return p_conn->data;
-@@ -1543,42 +1539,3 @@ typedef struct {
- int updated;
- } win_update_ctx;
-
--static int win_update_iter(void *udata, void *val)
--{
-- win_update_ctx *ctx = udata;
-- h2_proxy_stream *stream = val;
--
-- if (stream->r && stream->r->connection == ctx->c) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, ctx->session->c,
-- "h2_proxy_session(%s-%d): win_update %ld bytes",
-- ctx->session->id, (int)stream->id, (long)ctx->bytes);
-- nghttp2_session_consume(ctx->session->ngh2, stream->id, ctx->bytes);
-- ctx->updated = 1;
-- return 0;
-- }
-- return 1;
--}
--
--
--void h2_proxy_session_update_window(h2_proxy_session *session,
-- conn_rec *c, apr_off_t bytes)
--{
-- if (!h2_proxy_ihash_empty(session->streams)) {
-- win_update_ctx ctx;
-- ctx.session = session;
-- ctx.c = c;
-- ctx.bytes = bytes;
-- ctx.updated = 0;
-- h2_proxy_ihash_iter(session->streams, win_update_iter, &ctx);
--
-- if (!ctx.updated) {
-- /* could not find the stream any more, possibly closed, update
-- * the connection window at least */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-- "h2_proxy_session(%s): win_update conn %ld bytes",
-- session->id, (long)bytes);
-- nghttp2_session_consume_connection(session->ngh2, (size_t)bytes);
-- }
-- }
--}
--
-diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
-index ecebb61..1d0750b 100644
---- a/modules/http2/h2_proxy_session.h
-+++ b/modules/http2/h2_proxy_session.h
-@@ -120,9 +120,6 @@ void h2_proxy_session_cancel_all(h2_proxy_session *s);
-
- void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
-
--void h2_proxy_session_update_window(h2_proxy_session *s,
-- conn_rec *c, apr_off_t bytes);
--
- #define H2_PROXY_REQ_URL_NOTE "h2-proxy-req-url"
-
- #endif /* h2_proxy_session_h */
-diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
-index 5ee88e9..5893c8b 100644
---- a/modules/http2/h2_request.c
-+++ b/modules/http2/h2_request.c
-@@ -17,6 +17,7 @@
- #include
-
- #include
-+#include
-
- #include
- #include
-@@ -84,8 +85,7 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
- req->path = path;
- req->headers = apr_table_make(pool, 10);
- if (r->server) {
-- req->serialize = h2_config_geti(h2_config_sget(r->server),
-- H2_CONF_SER_HEADERS);
-+ req->serialize = h2_config_rgeti(r, H2_CONF_SER_HEADERS);
- }
-
- x.pool = pool;
-@@ -206,13 +206,11 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src)
- return dst;
- }
-
--request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-+#if !AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
-+static request_rec *my_ap_create_request(conn_rec *c)
- {
-- int access_status = HTTP_OK;
-- const char *rpath;
- apr_pool_t *p;
- request_rec *r;
-- const char *s;
-
- apr_pool_create(&p, c->pool);
- apr_pool_tag(p, "request");
-@@ -226,8 +224,8 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
- r->ap_auth_type = NULL;
-
- r->allowed_methods = ap_make_method_list(p, 2);
--
-- r->headers_in = apr_table_clone(r->pool, req->headers);
-+
-+ r->headers_in = apr_table_make(r->pool, 5);
- r->trailers_in = apr_table_make(r->pool, 5);
- r->subprocess_env = apr_table_make(r->pool, 25);
- r->headers_out = apr_table_make(r->pool, 12);
-@@ -262,6 +260,24 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
- r->useragent_addr = c->client_addr;
- r->useragent_ip = c->client_ip;
-
-+ return r;
-+}
-+#endif
-+
-+request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
-+{
-+ int access_status = HTTP_OK;
-+ const char *rpath;
-+ const char *s;
-+
-+#if AP_MODULE_MAGIC_AT_LEAST(20150222, 13)
-+ request_rec *r = ap_create_request(c);
-+#else
-+ request_rec *r = my_ap_create_request(c);
-+#endif
-+
-+ r->headers_in = apr_table_clone(r->pool, req->headers);
-+
- ap_run_pre_read_request(r, c);
-
- /* Time to populate r with the data we have. */
-@@ -337,3 +353,4 @@ traceout:
- }
-
-
-+
-diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
-index 3f0e9c9..f153422 100644
---- a/modules/http2/h2_session.c
-+++ b/modules/http2/h2_session.c
-@@ -495,9 +495,7 @@ static int on_send_data_cb(nghttp2_session *ngh2,
- return NGHTTP2_ERR_WOULDBLOCK;
- }
-
-- if (frame->data.padlen > H2_MAX_PADLEN) {
-- return NGHTTP2_ERR_PROTO;
-- }
-+ ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1));
- padlen = (unsigned char)frame->data.padlen;
-
- stream = h2_session_stream_get(session, stream_id);
-@@ -513,8 +511,9 @@ static int on_send_data_cb(nghttp2_session *ngh2,
- H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
- (long)length);
-
-- status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
-+ status = h2_conn_io_write(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
- if (padlen && status == APR_SUCCESS) {
-+ --padlen;
- status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
- }
-
-@@ -622,6 +621,39 @@ static int on_invalid_header_cb(nghttp2_session *ngh2,
- }
- #endif
-
-+static ssize_t select_padding_cb(nghttp2_session *ngh2,
-+ const nghttp2_frame *frame,
-+ size_t max_payloadlen, void *user_data)
-+{
-+ h2_session *session = user_data;
-+ ssize_t frame_len = frame->hd.length + H2_FRAME_HDR_LEN; /* the total length without padding */
-+ ssize_t padded_len = frame_len;
-+
-+ /* Determine # of padding bytes to append to frame. Unless session->padding_always
-+ * is set, the number may be capped by the io.write_size that currently applies.
-+ */
-+ if (session->padding_max) {
-+ int n = ap_random_pick(0, session->padding_max);
-+ padded_len = H2MIN(max_payloadlen + H2_FRAME_HDR_LEN, frame_len + n);
-+ }
-+
-+ if (padded_len != frame_len) {
-+ if (!session->padding_always && session->io.write_size
-+ && (padded_len > session->io.write_size)
-+ && (frame_len <= session->io.write_size)) {
-+ padded_len = session->io.write_size;
-+ }
-+ if (APLOGctrace2(session->c)) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-+ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
-+ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
-+ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
-+ }
-+ return padded_len - H2_FRAME_HDR_LEN;
-+ }
-+ return frame->hd.length;
-+}
-+
- #define NGH2_SET_CALLBACK(callbacks, name, fn)\
- nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
-
-@@ -647,6 +679,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
- #ifdef H2_NG2_INVALID_HEADER_CB
- NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
- #endif
-+ NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb);
- return APR_SUCCESS;
- }
-
-@@ -757,9 +790,8 @@ static apr_status_t session_pool_cleanup(void *data)
- {
- conn_rec *c = data;
- h2_session *session;
-- h2_ctx *ctx = h2_ctx_get(c, 0);
-
-- if (ctx && (session = h2_ctx_session_get(ctx))) {
-+ if ((session = h2_ctx_get_session(c))) {
- /* if the session is still there, now is the last chance
- * to perform cleanup. Normally, cleanup should have happened
- * earlier in the connection pre_close. Main reason is that
-@@ -775,11 +807,8 @@ static apr_status_t session_pool_cleanup(void *data)
- return APR_SUCCESS;
- }
-
--static apr_status_t h2_session_create_int(h2_session **psession,
-- conn_rec *c,
-- request_rec *r,
-- h2_ctx *ctx,
-- h2_workers *workers)
-+apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *r,
-+ server_rec *s, h2_workers *workers)
- {
- nghttp2_session_callbacks *callbacks = NULL;
- nghttp2_option *options = NULL;
-@@ -820,19 +849,16 @@ static apr_status_t h2_session_create_int(h2_session **psession,
- session->id = c->id;
- session->c = c;
- session->r = r;
-- session->s = h2_ctx_server_get(ctx);
-+ session->s = s;
- session->pool = pool;
-- session->config = h2_config_sget(session->s);
- session->workers = workers;
-
- session->state = H2_SESSION_ST_INIT;
- session->local.accepting = 1;
- session->remote.accepting = 1;
-
-- session->max_stream_count = h2_config_geti(session->config,
-- H2_CONF_MAX_STREAMS);
-- session->max_stream_mem = h2_config_geti(session->config,
-- H2_CONF_STREAM_MAX_MEM);
-+ session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
-+ session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- status = apr_thread_cond_create(&session->iowait, session->pool);
- if (status != APR_SUCCESS) {
-@@ -862,14 +888,18 @@ static apr_status_t h2_session_create_int(h2_session **psession,
- session->monitor->on_state_event = on_stream_state_event;
- session->monitor->on_event = on_stream_event;
-
-- session->mplx = h2_mplx_create(c, session->pool, session->config,
-- workers);
-+ session->mplx = h2_mplx_create(c, s, session->pool, workers);
-
- /* connection input filter that feeds the session */
- session->cin = h2_filter_cin_create(session);
- ap_add_input_filter("H2_IN", session->cin, r, c);
-
-- h2_conn_io_init(&session->io, c, session->config);
-+ h2_conn_io_init(&session->io, c, s);
-+ session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS);
-+ if (session->padding_max) {
-+ session->padding_max = (0x01 << session->padding_max) - 1;
-+ }
-+ session->padding_always = h2_config_sgeti(s, H2_CONF_PADDING_ALWAYS);
- session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
-
- status = init_callbacks(c, &callbacks);
-@@ -888,8 +918,7 @@ static apr_status_t h2_session_create_int(h2_session **psession,
- apr_pool_destroy(pool);
- return status;
- }
-- nghttp2_option_set_peer_max_concurrent_streams(
-- options, (uint32_t)session->max_stream_count);
-+ nghttp2_option_set_peer_max_concurrent_streams(options, (uint32_t)session->max_stream_count);
- /* We need to handle window updates ourself, otherwise we
- * get flooded by nghttp2. */
- nghttp2_option_set_no_auto_window_update(options, 1);
-@@ -907,7 +936,7 @@ static apr_status_t h2_session_create_int(h2_session **psession,
- return APR_ENOMEM;
- }
-
-- n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
-+ n = h2_config_sgeti(s, H2_CONF_PUSH_DIARY_SIZE);
- session->push_diary = h2_push_diary_create(session->pool, n);
-
- if (APLOGcdebug(c)) {
-@@ -924,22 +953,11 @@ static apr_status_t h2_session_create_int(h2_session **psession,
- (int)session->push_diary->N);
- }
-
-- apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
-+ apr_pool_pre_cleanup_register(pool, c, session_pool_cleanup);
-+
- return APR_SUCCESS;
- }
-
--apr_status_t h2_session_create(h2_session **psession,
-- conn_rec *c, h2_ctx *ctx, h2_workers *workers)
--{
-- return h2_session_create_int(psession, c, NULL, ctx, workers);
--}
--
--apr_status_t h2_session_rcreate(h2_session **psession,
-- request_rec *r, h2_ctx *ctx, h2_workers *workers)
--{
-- return h2_session_create_int(psession, r->connection, r, ctx, workers);
--}
--
- static apr_status_t h2_session_start(h2_session *session, int *rv)
- {
- apr_status_t status = APR_SUCCESS;
-@@ -1004,7 +1022,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
- settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
- settings[slen].value = (uint32_t)session->max_stream_count;
- ++slen;
-- win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
-+ win_size = h2_config_sgeti(session->s, H2_CONF_WIN_SIZE);
- if (win_size != H2_INITIAL_WINDOW_SIZE) {
- settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
- settings[slen].value = win_size;
-@@ -1276,7 +1294,7 @@ int h2_session_push_enabled(h2_session *session)
- {
- /* iff we can and they can and want */
- return (session->remote.accepting /* remote GOAWAY received */
-- && h2_config_geti(session->config, H2_CONF_PUSH)
-+ && h2_config_sgeti(session->s, H2_CONF_PUSH)
- && nghttp2_session_get_remote_settings(session->ngh2,
- NGHTTP2_SETTINGS_ENABLE_PUSH));
- }
-@@ -1320,6 +1338,7 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
- int eos)
- {
- apr_status_t status = APR_SUCCESS;
-+ const char *s;
- int rv = 0;
-
- ap_assert(session);
-@@ -1387,8 +1406,12 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
- && (headers->status < 400)
- && (headers->status != 304)
- && h2_session_push_enabled(session)) {
--
-- h2_stream_submit_pushes(stream, headers);
-+ /* PUSH is possible and enabled on the server; unless the request
-+ * denies it, submit resources to push */
-+ s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
-+ if (!s || strcmp(s, "0")) {
-+ h2_stream_submit_pushes(stream, headers);
-+ }
- }
-
- if (!stream->pref_priority) {
-@@ -1410,7 +1433,7 @@ static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
- }
-
- if (headers->status == 103
-- && !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
-+ && !h2_config_sgeti(session->s, H2_CONF_EARLY_HINTS)) {
- /* suppress sending this to the client, it might have triggered
- * pushes and served its purpose nevertheless */
- rv = 0;
-@@ -2086,7 +2109,7 @@ apr_status_t h2_session_process(h2_session *session, int async)
- switch (session->state) {
- case H2_SESSION_ST_INIT:
- ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
-- if (!h2_is_acceptable_connection(c, 1)) {
-+ if (!h2_is_acceptable_connection(c, session->r, 1)) {
- update_child_status(session, SERVER_BUSY_READ,
- "inadequate security");
- h2_session_shutdown(session,
-diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
-index df2a862..1bf6f05 100644
---- a/modules/http2/h2_session.h
-+++ b/modules/http2/h2_session.h
-@@ -80,12 +80,13 @@ typedef struct h2_session {
- request_rec *r; /* the request that started this in case
- * of 'h2c', NULL otherwise */
- server_rec *s; /* server/vhost we're starting on */
-- const struct h2_config *config; /* Relevant config for this session */
- apr_pool_t *pool; /* pool to use in session */
- struct h2_mplx *mplx; /* multiplexer for stream data */
- struct h2_workers *workers; /* for executing stream tasks */
- struct h2_filter_cin *cin; /* connection input filter context */
- h2_conn_io io; /* io on httpd conn filters */
-+ int padding_max; /* max number of padding bytes */
-+ int padding_always; /* padding has precedence over I/O optimizations */
- struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
-
- h2_session_state state; /* state session is in */
-@@ -142,27 +143,15 @@ const char *h2_session_state_str(h2_session_state state);
- * The session will apply the configured parameter.
- * @param psession pointer receiving the created session on success or NULL
- * @param c the connection to work on
-+ * @param r optional request when protocol was upgraded
- * @param cfg the module config to apply
- * @param workers the worker pool to use
- * @return the created session
- */
- apr_status_t h2_session_create(h2_session **psession,
-- conn_rec *c, struct h2_ctx *ctx,
-+ conn_rec *c, request_rec *r, server_rec *,
- struct h2_workers *workers);
-
--/**
-- * Create a new h2_session for the given request.
-- * The session will apply the configured parameter.
-- * @param psession pointer receiving the created session on success or NULL
-- * @param r the request that was upgraded
-- * @param cfg the module config to apply
-- * @param workers the worker pool to use
-- * @return the created session
-- */
--apr_status_t h2_session_rcreate(h2_session **psession,
-- request_rec *r, struct h2_ctx *ctx,
-- struct h2_workers *workers);
--
- void h2_session_event(h2_session *session, h2_session_event_t ev,
- int err, const char *msg);
-
-diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
-index 22c5902..b5763ac 100644
---- a/modules/http2/h2_stream.c
-+++ b/modules/http2/h2_stream.c
-@@ -365,9 +365,8 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
- static void set_policy_for(h2_stream *stream, h2_request *r)
- {
- int enabled = h2_session_push_enabled(stream->session);
-- stream->push_policy = h2_push_policy_determine(r->headers, stream->pool,
-- enabled);
-- r->serialize = h2_config_geti(stream->session->config, H2_CONF_SER_HEADERS);
-+ stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled);
-+ r->serialize = h2_config_sgeti(stream->session->s, H2_CONF_SER_HEADERS);
- }
-
- apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
-@@ -855,7 +854,7 @@ apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
- * is requested. But we can reduce the size in case the master
-- * connection operates in smaller chunks. (TLS warmup) */
- if (stream->session->io.write_size > 0) {
-- max_chunk = stream->session->io.write_size - 9; /* header bits */
-+ max_chunk = stream->session->io.write_size - H2_FRAME_HDR_LEN;
- }
- requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
-
-@@ -987,7 +986,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
- const char *ctype = apr_table_get(response->headers, "content-type");
- if (ctype) {
- /* FIXME: Not good enough, config needs to come from request->server */
-- return h2_config_get_priority(stream->session->config, ctype);
-+ return h2_cconfig_get_priority(stream->session->c, ctype);
- }
- }
- return NULL;
-diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
-index 5e73568..07a30cc 100644
---- a/modules/http2/h2_switch.c
-+++ b/modules/http2/h2_switch.c
-@@ -55,7 +55,6 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
- int is_tls = h2_h2_is_tls(c);
- const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
-
-- (void)s;
- if (!h2_mpm_supported()) {
- return DECLINED;
- }
-@@ -68,7 +67,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
- return DECLINED;
- }
-
-- if (!h2_is_acceptable_connection(c, 0)) {
-+ if (!h2_is_acceptable_connection(c, r, 0)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
- "protocol propose: connection requirements not met");
- return DECLINED;
-@@ -81,7 +80,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
- */
- const char *p;
-
-- if (!h2_allows_h2_upgrade(c)) {
-+ if (!h2_allows_h2_upgrade(r)) {
- return DECLINED;
- }
-
-@@ -150,7 +149,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "switching protocol to '%s'", protocol);
- h2_ctx_protocol_set(ctx, protocol);
-- h2_ctx_server_set(ctx, s);
-+ h2_ctx_server_update(ctx, s);
-
- if (r != NULL) {
- apr_status_t status;
-@@ -164,8 +163,8 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
-
- /* Ok, start an h2_conn on this one. */
-- h2_ctx_server_set(ctx, r->server);
-- status = h2_conn_setup(ctx, r->connection, r);
-+ status = h2_conn_setup(c, r, s);
-+
- if (status != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
- "session setup");
-@@ -173,7 +172,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
- return !OK;
- }
-
-- h2_conn_run(ctx, c);
-+ h2_conn_run(c);
- }
- return OK;
- }
-diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
-index f4c875c..a395807 100644
---- a/modules/http2/h2_task.c
-+++ b/modules/http2/h2_task.c
-@@ -97,7 +97,7 @@ static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
- apr_brigade_length(bb, 0, &written);
- H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
-- /* engines send unblocking */
-+
- status = h2_beam_send(task->output.beam, bb,
- block? APR_BLOCK_READ : APR_NONBLOCK_READ);
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
-@@ -133,26 +133,9 @@ static apr_status_t slave_out(h2_task *task, ap_filter_t* f,
- apr_status_t rv = APR_SUCCESS;
- int flush = 0, blocking;
-
-- if (task->frozen) {
-- h2_util_bb_log(task->c, task->stream_id, APLOG_TRACE2,
-- "frozen task output write, ignored", bb);
-- while (!APR_BRIGADE_EMPTY(bb)) {
-- b = APR_BRIGADE_FIRST(bb);
-- if (AP_BUCKET_IS_EOR(b)) {
-- APR_BUCKET_REMOVE(b);
-- task->eor = b;
-- }
-- else {
-- apr_bucket_delete(b);
-- }
-- }
-- return APR_SUCCESS;
-- }
--
- send:
-- /* we send block once we opened the output, so someone is there
-- * reading it *and* the task is not assigned to a h2_req_engine */
-- blocking = (!task->assigned && task->output.opened);
-+ /* we send blocking once we have opened the output, so someone is there reading it */
-+ blocking = task->output.opened;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
-@@ -236,7 +219,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f,
- apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
- (apr_size_t)readbytes : APR_SIZE_MAX);
-
-- task = h2_ctx_cget_task(f->c);
-+ task = h2_ctx_get_task(f->c);
- ap_assert(task);
-
- if (trace1) {
-@@ -379,7 +362,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f,
- static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
- apr_bucket_brigade* brigade)
- {
-- h2_task *task = h2_ctx_cget_task(filter->c);
-+ h2_task *task = h2_ctx_get_task(filter->c);
- apr_status_t status;
-
- ap_assert(task);
-@@ -392,7 +375,7 @@ static apr_status_t h2_filter_slave_output(ap_filter_t* filter,
-
- static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
- {
-- h2_task *task = h2_ctx_cget_task(f->c);
-+ h2_task *task = h2_ctx_get_task(f->c);
- apr_status_t status;
-
- ap_assert(task);
-@@ -502,7 +485,7 @@ static int h2_task_pre_conn(conn_rec* c, void *arg)
-
- ctx = h2_ctx_get(c, 0);
- (void)arg;
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx->task) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_slave(%s), pre_connection, adding filters", c->log_id);
- ap_add_input_filter("H2_SLAVE_IN", NULL, NULL, c);
-@@ -525,6 +508,7 @@ h2_task *h2_task_create(conn_rec *slave, int stream_id,
- ap_assert(req);
-
- apr_pool_create(&pool, slave->pool);
-+ apr_pool_tag(pool, "h2_task");
- task = apr_pcalloc(pool, sizeof(h2_task));
- if (task == NULL) {
- return NULL;
-@@ -633,18 +617,9 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
- task->c->current_thread = thread;
- ap_run_process_connection(c);
-
-- if (task->frozen) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_conn returned frozen task",
-- task->id);
-- /* cleanup delayed */
-- return APR_EAGAIN;
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): processing done", task->id);
-- return output_finish(task);
-- }
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-+ "h2_task(%s): processing done", task->id);
-+ return output_finish(task);
- }
-
- static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
-@@ -682,14 +657,8 @@ static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
-
- ap_process_request(r);
-
-- if (task->frozen) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_request frozen", task->id);
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_task(%s): process_request done", task->id);
-- }
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-+ "h2_task(%s): process_request done", task->id);
-
- /* After the call to ap_process_request, the
- * request pool may have been deleted. We set
-@@ -724,7 +693,7 @@ static int h2_task_process_conn(conn_rec* c)
- }
-
- ctx = h2_ctx_get(c, 0);
-- if (h2_ctx_is_task(ctx)) {
-+ if (ctx->task) {
- if (!ctx->task->request->serialize) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, processing request directly");
-@@ -741,28 +710,3 @@ static int h2_task_process_conn(conn_rec* c)
- return DECLINED;
- }
-
--apr_status_t h2_task_freeze(h2_task *task)
--{
-- if (!task->frozen) {
-- task->frozen = 1;
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03406)
-- "h2_task(%s), frozen", task->id);
-- }
-- return APR_SUCCESS;
--}
--
--apr_status_t h2_task_thaw(h2_task *task)
--{
-- if (task->frozen) {
-- task->frozen = 0;
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03407)
-- "h2_task(%s), thawed", task->id);
-- }
-- task->thawed = 1;
-- return APR_SUCCESS;
--}
--
--int h2_task_has_thawed(h2_task *task)
--{
-- return task->thawed;
--}
-diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
-index ab6a746..20be429 100644
---- a/modules/http2/h2_task.h
-+++ b/modules/http2/h2_task.h
-@@ -42,7 +42,6 @@ struct h2_bucket_beam;
- struct h2_conn;
- struct h2_mplx;
- struct h2_task;
--struct h2_req_engine;
- struct h2_request;
- struct h2_response_parser;
- struct h2_stream;
-@@ -80,17 +79,14 @@ struct h2_task {
- struct h2_mplx *mplx;
-
- unsigned int filters_set : 1;
-- unsigned int frozen : 1;
-- unsigned int thawed : 1;
- unsigned int worker_started : 1; /* h2_worker started processing */
-- unsigned int worker_done : 1; /* h2_worker finished */
-+
-+ int worker_done; /* h2_worker finished */
-+ int done_done; /* task_done has been handled */
-
- apr_time_t started_at; /* when processing started */
- apr_time_t done_at; /* when processing was done */
- apr_bucket *eor;
--
-- struct h2_req_engine *engine; /* engine hosted by this task */
-- struct h2_req_engine *assigned; /* engine that task has been assigned to */
- };
-
- h2_task *h2_task_create(conn_rec *slave, int stream_id,
-@@ -120,8 +116,4 @@ apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s);
- extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
- extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
-
--apr_status_t h2_task_freeze(h2_task *task);
--apr_status_t h2_task_thaw(h2_task *task);
--int h2_task_has_thawed(h2_task *task);
--
- #endif /* defined(__mod_h2__h2_task__) */
-diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
-index 3d278e9..5664f39 100644
---- a/modules/http2/mod_http2.c
-+++ b/modules/http2/mod_http2.c
-@@ -172,27 +172,6 @@ static char *http2_var_lookup(apr_pool_t *, server_rec *,
- conn_rec *, request_rec *, char *name);
- static int http2_is_h2(conn_rec *);
-
--static apr_status_t http2_req_engine_push(const char *ngn_type,
-- request_rec *r,
-- http2_req_engine_init *einit)
--{
-- return h2_mplx_req_engine_push(ngn_type, r, einit);
--}
--
--static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr)
--{
-- return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
--}
--
--static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
-- apr_status_t status)
--{
-- h2_mplx_req_engine_done(ngn, r_conn, status);
--}
--
- static void http2_get_num_workers(server_rec *s, int *minw, int *maxw)
- {
- h2_get_num_workers(s, minw, maxw);
-@@ -220,9 +199,6 @@ static void h2_hooks(apr_pool_t *pool)
-
- APR_REGISTER_OPTIONAL_FN(http2_is_h2);
- APR_REGISTER_OPTIONAL_FN(http2_var_lookup);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_push);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_pull);
-- APR_REGISTER_OPTIONAL_FN(http2_req_engine_done);
- APR_REGISTER_OPTIONAL_FN(http2_get_num_workers);
-
- ap_log_perror(APLOG_MARK, APLOG_TRACE1, 0, pool, "installing hooks");
-@@ -260,9 +236,8 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
- {
- if (ctx) {
- if (r) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task) {
-- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
-+ if (ctx->task) {
-+ h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
- if (stream && stream->push_policy != H2_PUSH_NONE) {
- return "on";
- }
-@@ -273,8 +248,7 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
- }
- }
- else if (s) {
-- const h2_config *cfg = h2_config_sget(s);
-- if (cfg && h2_config_geti(cfg, H2_CONF_PUSH)) {
-+ if (h2_config_geti(r, s, H2_CONF_PUSH)) {
- return "on";
- }
- }
-@@ -285,8 +259,7 @@ static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
-+ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
- return "PUSHED";
- }
- }
-@@ -297,9 +270,8 @@ static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task && !H2_STREAM_CLIENT_INITIATED(task->stream_id)) {
-- h2_stream *stream = h2_mplx_stream_get(task->mplx, task->stream_id);
-+ if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
-+ h2_stream *stream = h2_mplx_stream_get(ctx->task->mplx, ctx->task->stream_id);
- if (stream) {
- return apr_itoa(p, stream->initiated_on);
- }
-@@ -312,9 +284,8 @@ static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
- {
- if (ctx) {
-- h2_task *task = h2_ctx_get_task(ctx);
-- if (task) {
-- return task->id;
-+ if (ctx->task) {
-+ return ctx->task->id;
- }
- }
- return "";
-@@ -366,7 +337,7 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
- for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
- h2_var_def *vdef = &H2_VARS[i];
- if (!strcmp(vdef->name, name)) {
-- h2_ctx *ctx = (r? h2_ctx_rget(r) :
-+ h2_ctx *ctx = (r? h2_ctx_get(c, 0) :
- h2_ctx_get(c->master? c->master : c, 0));
- return (char *)vdef->lookup(p, s, c, r, ctx);
- }
-@@ -377,7 +348,7 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
- static int h2_h2_fixups(request_rec *r)
- {
- if (r->connection->master) {
-- h2_ctx *ctx = h2_ctx_rget(r);
-+ h2_ctx *ctx = h2_ctx_get(r->connection, 0);
- int i;
-
- for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
-diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
-index 7a1b49a..ba5e6dd 100644
---- a/modules/http2/mod_http2.h
-+++ b/modules/http2/mod_http2.h
-@@ -30,22 +30,20 @@ APR_DECLARE_OPTIONAL_FN(int,
-
-
- /*******************************************************************************
-- * HTTP/2 request engines
-+ * START HTTP/2 request engines (DEPRECATED)
- ******************************************************************************/
-+
-+/* The following functions were introduced for the experimental mod_proxy_http2
-+ * support, but have been abandoned since.
-+ * They are still declared here for backward compatibiliy, in case someone
-+ * tries to build an old mod_proxy_http2 against it, but will disappear
-+ * completely sometime in the future.
-+ */
-
- struct apr_thread_cond_t;
--
- typedef struct h2_req_engine h2_req_engine;
--
- typedef void http2_output_consumed(void *ctx, conn_rec *c, apr_off_t consumed);
-
--/**
-- * Initialize a h2_req_engine. The structure will be passed in but
-- * only the name and master are set. The function should initialize
-- * all fields.
-- * @param engine the allocated, partially filled structure
-- * @param r the first request to process, or NULL
-- */
- typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
- const char *id,
- const char *type,
-@@ -55,35 +53,11 @@ typedef apr_status_t http2_req_engine_init(h2_req_engine *engine,
- http2_output_consumed **pconsumed,
- void **pbaton);
-
--/**
-- * Push a request to an engine with the specified name for further processing.
-- * If no such engine is available, einit is not NULL, einit is called
-- * with a new engine record and the caller is responsible for running the
-- * new engine instance.
-- * @param engine_type the type of the engine to add the request to
-- * @param r the request to push to an engine for processing
-- * @param einit an optional initialization callback for a new engine
-- * of the requested type, should no instance be available.
-- * By passing a non-NULL callback, the caller is willing
-- * to init and run a new engine itself.
-- * @return APR_SUCCESS iff slave was successfully added to an engine
-- */
- APR_DECLARE_OPTIONAL_FN(apr_status_t,
- http2_req_engine_push, (const char *engine_type,
- request_rec *r,
- http2_req_engine_init *einit));
-
--/**
-- * Get a new request for processing in this engine.
-- * @param engine the engine which is done processing the slave
-- * @param block if call should block waiting for request to come
-- * @param capacity how many parallel requests are acceptable
-- * @param pr the request that needs processing or NULL
-- * @return APR_SUCCESS if new request was assigned
-- * APR_EAGAIN if no new request is available
-- * APR_EOF if engine may shut down, as no more request will be scheduled
-- * APR_ECONNABORTED if the engine needs to shut down immediately
-- */
- APR_DECLARE_OPTIONAL_FN(apr_status_t,
- http2_req_engine_pull, (h2_req_engine *engine,
- apr_read_type_e block,
-@@ -98,4 +72,8 @@ APR_DECLARE_OPTIONAL_FN(void,
- http2_get_num_workers, (server_rec *s,
- int *minw, int *max));
-
-+/*******************************************************************************
-+ * END HTTP/2 request engines (DEPRECATED)
-+ ******************************************************************************/
-+
- #endif
-diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
-index a7e0dcd..95336f7 100644
---- a/modules/http2/mod_proxy_http2.c
-+++ b/modules/http2/mod_proxy_http2.c
-@@ -16,13 +16,14 @@
-
- #include
-
-+#include
- #include
- #include
- #include "mod_http2.h"
-
-
- #include "mod_proxy_http2.h"
--#include "h2_request.h"
-+#include "h2.h"
- #include "h2_proxy_util.h"
- #include "h2_version.h"
- #include "h2_proxy_session.h"
-@@ -46,19 +47,12 @@ AP_DECLARE_MODULE(proxy_http2) = {
-
- /* Optional functions from mod_http2 */
- static int (*is_h2)(conn_rec *c);
--static apr_status_t (*req_engine_push)(const char *name, request_rec *r,
-- http2_req_engine_init *einit);
--static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
-- apr_read_type_e block,
-- int capacity,
-- request_rec **pr);
--static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
-- apr_status_t status);
--
-+
- typedef struct h2_proxy_ctx {
-+ const char *id;
-+ conn_rec *master;
- conn_rec *owner;
- apr_pool_t *pool;
-- request_rec *rbase;
- server_rec *server;
- const char *proxy_func;
- char server_portstr[32];
-@@ -66,19 +60,16 @@ typedef struct h2_proxy_ctx {
- proxy_worker *worker;
- proxy_server_conf *conf;
-
-- h2_req_engine *engine;
-- const char *engine_id;
-- const char *engine_type;
-- apr_pool_t *engine_pool;
- apr_size_t req_buffer_size;
-- h2_proxy_fifo *requests;
- int capacity;
-
-- unsigned standalone : 1;
- unsigned is_ssl : 1;
- unsigned flushall : 1;
-
-- apr_status_t r_status; /* status of our first request work */
-+ request_rec *r; /* the request processed in this ctx */
-+ apr_status_t r_status; /* status of request work */
-+ int r_done; /* request was processed, not necessarily successfully */
-+ int r_may_retry; /* request may be retried */
- h2_proxy_session *session; /* current http2 session against backend */
- } h2_proxy_ctx;
-
-@@ -104,16 +95,6 @@ static int h2_proxy_post_config(apr_pool_t *p, apr_pool_t *plog,
- MOD_HTTP2_VERSION, ngh2? ngh2->version_str : "unknown");
-
- is_h2 = APR_RETRIEVE_OPTIONAL_FN(http2_is_h2);
-- req_engine_push = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_push);
-- req_engine_pull = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_pull);
-- req_engine_done = APR_RETRIEVE_OPTIONAL_FN(http2_req_engine_done);
--
-- /* we need all of them */
-- if (!req_engine_push || !req_engine_pull || !req_engine_done) {
-- req_engine_push = NULL;
-- req_engine_pull = NULL;
-- req_engine_done = NULL;
-- }
-
- return status;
- }
-@@ -204,45 +185,6 @@ static int proxy_http2_canon(request_rec *r, char *url)
- return OK;
- }
-
--static void out_consumed(void *baton, conn_rec *c, apr_off_t bytes)
--{
-- h2_proxy_ctx *ctx = baton;
--
-- if (ctx->session) {
-- h2_proxy_session_update_window(ctx->session, c, bytes);
-- }
--}
--
--static apr_status_t proxy_engine_init(h2_req_engine *engine,
-- const char *id,
-- const char *type,
-- apr_pool_t *pool,
-- apr_size_t req_buffer_size,
-- request_rec *r,
-- http2_output_consumed **pconsumed,
-- void **pctx)
--{
-- h2_proxy_ctx *ctx = ap_get_module_config(r->connection->conn_config,
-- &proxy_http2_module);
-- if (!ctx) {
-- ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(03368)
-- "h2_proxy_session, engine init, no ctx found");
-- return APR_ENOTIMPL;
-- }
--
-- ctx->pool = pool;
-- ctx->engine = engine;
-- ctx->engine_id = id;
-- ctx->engine_type = type;
-- ctx->engine_pool = pool;
-- ctx->req_buffer_size = req_buffer_size;
-- ctx->capacity = H2MIN(100, h2_proxy_fifo_capacity(ctx->requests));
--
-- *pconsumed = out_consumed;
-- *pctx = ctx;
-- return APR_SUCCESS;
--}
--
- static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
- {
- h2_proxy_ctx *ctx = session->user_data;
-@@ -252,7 +194,7 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
- url = apr_table_get(r->notes, H2_PROXY_REQ_URL_NOTE);
- apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
- ctx->p_conn->connection->local_addr->port));
-- status = h2_proxy_session_submit(session, url, r, ctx->standalone);
-+ status = h2_proxy_session_submit(session, url, r, 1);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, r->connection, APLOGNO(03351)
- "pass request body failed to %pI (%s) from %s (%s)",
-@@ -266,43 +208,15 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
- static void request_done(h2_proxy_ctx *ctx, request_rec *r,
- apr_status_t status, int touched)
- {
-- const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
--
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
-- "h2_proxy_session(%s): request done %s, touched=%d",
-- ctx->engine_id, task_id, touched);
-- if (status != APR_SUCCESS) {
-- if (!touched) {
-- /* untouched request, need rescheduling */
-- status = h2_proxy_fifo_push(ctx->requests, r);
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03369)
-- "h2_proxy_session(%s): rescheduled request %s",
-- ctx->engine_id, task_id);
-- return;
-- }
-- else {
-- const char *uri;
-- uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
-- "not complete, cannot repeat",
-- ctx->engine_id, task_id, uri);
-- }
-- }
--
-- if (r == ctx->rbase) {
-+ if (r == ctx->r) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, r->connection,
-+ "h2_proxy_session(%s): request done, touched=%d",
-+ ctx->id, touched);
-+ ctx->r_done = 1;
-+ if (touched) ctx->r_may_retry = 0;
- ctx->r_status = ((status == APR_SUCCESS)? APR_SUCCESS
- : HTTP_SERVICE_UNAVAILABLE);
- }
--
-- if (req_engine_done && ctx->engine) {
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
-- APLOGNO(03370)
-- "h2_proxy_session(%s): finished request %s",
-- ctx->engine_id, task_id);
-- req_engine_done(ctx->engine, r->connection, status);
-- }
- }
-
- static void session_req_done(h2_proxy_session *session, request_rec *r,
-@@ -311,43 +225,15 @@ static void session_req_done(h2_proxy_session *session, request_rec *r,
- request_done(session->user_data, r, status, touched);
- }
-
--static apr_status_t next_request(h2_proxy_ctx *ctx, int before_leave)
--{
-- if (h2_proxy_fifo_count(ctx->requests) > 0) {
-- return APR_SUCCESS;
-- }
-- else if (req_engine_pull && ctx->engine) {
-- apr_status_t status;
-- request_rec *r = NULL;
--
-- status = req_engine_pull(ctx->engine, before_leave?
-- APR_BLOCK_READ: APR_NONBLOCK_READ,
-- ctx->capacity, &r);
-- if (status == APR_SUCCESS && r) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, ctx->owner,
-- "h2_proxy_engine(%s): pulled request (%s) %s",
-- ctx->engine_id,
-- before_leave? "before leave" : "regular",
-- r->the_request);
-- h2_proxy_fifo_push(ctx->requests, r);
-- }
-- return APR_STATUS_IS_EAGAIN(status)? APR_SUCCESS : status;
-- }
-- return APR_EOF;
--}
--
--static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
-+static apr_status_t ctx_run(h2_proxy_ctx *ctx) {
- apr_status_t status = OK;
- int h2_front;
-- request_rec *r;
-
- /* Step Four: Send the Request in a new HTTP/2 stream and
- * loop until we got the response or encounter errors.
- */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
-- "eng(%s): setup session", ctx->engine_id);
- h2_front = is_h2? is_h2(ctx->owner) : 0;
-- ctx->session = h2_proxy_session_setup(ctx->engine_id, ctx->p_conn, ctx->conf,
-+ ctx->session = h2_proxy_session_setup(ctx->id, ctx->p_conn, ctx->conf,
- h2_front, 30,
- h2_proxy_log2((int)ctx->req_buffer_size),
- session_req_done);
-@@ -358,105 +244,45 @@ static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03373)
-- "eng(%s): run session %s", ctx->engine_id, ctx->session->id);
-+ "eng(%s): run session %s", ctx->id, ctx->session->id);
- ctx->session->user_data = ctx;
-
-- while (!ctx->owner->aborted) {
-- if (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
-- add_request(ctx->session, r);
-- }
--
-+ ctx->r_done = 0;
-+ add_request(ctx->session, ctx->r);
-+
-+ while (!ctx->master->aborted && !ctx->r_done) {
-+
- status = h2_proxy_session_process(ctx->session);
--
-- if (status == APR_SUCCESS) {
-- apr_status_t s2;
-- /* ongoing processing, call again */
-- if (ctx->session->remote_max_concurrent > 0
-- && ctx->session->remote_max_concurrent != ctx->capacity) {
-- ctx->capacity = H2MIN((int)ctx->session->remote_max_concurrent,
-- h2_proxy_fifo_capacity(ctx->requests));
-- }
-- s2 = next_request(ctx, 0);
-- if (s2 == APR_ECONNABORTED) {
-- /* master connection gone */
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
-- APLOGNO(03374) "eng(%s): pull request",
-- ctx->engine_id);
-- /* give notice that we're leaving and cancel all ongoing
-- * streams. */
-- next_request(ctx, 1);
-- h2_proxy_session_cancel_all(ctx->session);
-- h2_proxy_session_process(ctx->session);
-- status = ctx->r_status = APR_SUCCESS;
-- break;
-- }
-- if ((h2_proxy_fifo_count(ctx->requests) == 0)
-- && h2_proxy_ihash_empty(ctx->session->streams)) {
-- break;
-- }
-- }
-- else {
-- /* end of processing, maybe error */
-+ if (status != APR_SUCCESS) {
-+ /* Encountered an error during session processing */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
- APLOGNO(03375) "eng(%s): end of session %s",
-- ctx->engine_id, ctx->session->id);
-- /*
-- * Any open stream of that session needs to
-+ ctx->id, ctx->session->id);
-+ /* Any open stream of that session needs to
- * a) be reopened on the new session iff safe to do so
- * b) reported as done (failed) otherwise
- */
- h2_proxy_session_cleanup(ctx->session, session_req_done);
-- break;
-+ goto out;
- }
- }
-
-- ctx->session->user_data = NULL;
-- ctx->session = NULL;
--
-- return status;
--}
--
--static apr_status_t push_request_somewhere(h2_proxy_ctx *ctx, request_rec *r)
--{
-- conn_rec *c = ctx->owner;
-- const char *engine_type, *hostname;
--
-- hostname = (ctx->p_conn->ssl_hostname?
-- ctx->p_conn->ssl_hostname : ctx->p_conn->hostname);
-- engine_type = apr_psprintf(ctx->pool, "proxy_http2 %s%s", hostname,
-- ctx->server_portstr);
--
-- if (c->master && req_engine_push && r && is_h2 && is_h2(c)) {
-- /* If we are have req_engine capabilities, push the handling of this
-- * request (e.g. slave connection) to a proxy_http2 engine which
-- * uses the same backend. We may be called to create an engine
-- * ourself. */
-- if (req_engine_push(engine_type, r, proxy_engine_init) == APR_SUCCESS) {
-- if (ctx->engine == NULL) {
-- /* request has been assigned to an engine in another thread */
-- return SUSPENDED;
-- }
-+out:
-+ if (ctx->master->aborted) {
-+ /* master connection gone */
-+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
-+ APLOGNO(03374) "eng(%s): master connection gone", ctx->id);
-+ /* cancel all ongoing requests */
-+ h2_proxy_session_cancel_all(ctx->session);
-+ h2_proxy_session_process(ctx->session);
-+ if (!ctx->master->aborted) {
-+ status = ctx->r_status = APR_SUCCESS;
- }
- }
-
-- if (!ctx->engine) {
-- /* No engine was available or has been initialized, handle this
-- * request just by ourself. */
-- ctx->engine_id = apr_psprintf(ctx->pool, "eng-proxy-%ld", c->id);
-- ctx->engine_type = engine_type;
-- ctx->engine_pool = ctx->pool;
-- ctx->req_buffer_size = (32*1024);
-- ctx->standalone = 1;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "h2_proxy_http2(%ld): setup standalone engine for type %s",
-- c->id, engine_type);
-- }
-- else {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
-- "H2: hosting engine %s", ctx->engine_id);
-- }
--
-- return h2_proxy_fifo_push(ctx->requests, r);
-+ ctx->session->user_data = NULL;
-+ ctx->session = NULL;
-+ return status;
- }
-
- static int proxy_http2_handler(request_rec *r,
-@@ -466,7 +292,7 @@ static int proxy_http2_handler(request_rec *r,
- const char *proxyname,
- apr_port_t proxyport)
- {
-- const char *proxy_func;
-+ const char *proxy_func, *task_id;
- char *locurl = url, *u;
- apr_size_t slen;
- int is_ssl = 0;
-@@ -498,29 +324,36 @@ static int proxy_http2_handler(request_rec *r,
- default:
- return DECLINED;
- }
-+
-+ task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
-
- ctx = apr_pcalloc(r->pool, sizeof(*ctx));
-- ctx->owner = r->connection;
-- ctx->pool = r->pool;
-- ctx->rbase = r;
-- ctx->server = r->server;
-+ ctx->master = r->connection->master? r->connection->master : r->connection;
-+ ctx->id = task_id? task_id : apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
-+ ctx->owner = r->connection;
-+ ctx->pool = r->pool;
-+ ctx->server = r->server;
- ctx->proxy_func = proxy_func;
-- ctx->is_ssl = is_ssl;
-- ctx->worker = worker;
-- ctx->conf = conf;
-- ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
-- ctx->r_status = HTTP_SERVICE_UNAVAILABLE;
--
-- h2_proxy_fifo_set_create(&ctx->requests, ctx->pool, 100);
-+ ctx->is_ssl = is_ssl;
-+ ctx->worker = worker;
-+ ctx->conf = conf;
-+ ctx->flushall = apr_table_get(r->subprocess_env, "proxy-flushall")? 1 : 0;
-+ ctx->req_buffer_size = (32*1024);
-+ ctx->r = r;
-+ ctx->r_status = status = HTTP_SERVICE_UNAVAILABLE;
-+ ctx->r_done = 0;
-+ ctx->r_may_retry = 1;
-
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, ctx);
-
- /* scheme says, this is for us. */
-- apr_table_setn(ctx->rbase->notes, H2_PROXY_REQ_URL_NOTE, url);
-- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->rbase,
-+ apr_table_setn(ctx->r->notes, H2_PROXY_REQ_URL_NOTE, url);
-+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->r,
- "H2: serving URL %s", url);
-
- run_connect:
-+ if (ctx->master->aborted) goto cleanup;
-+
- /* Get a proxy_conn_rec from the worker, might be a new one, might
- * be one still open from another request, or it might fail if the
- * worker is stopped or in error. */
-@@ -530,25 +363,11 @@ run_connect:
- }
-
- ctx->p_conn->is_ssl = ctx->is_ssl;
-- if (ctx->is_ssl && ctx->p_conn->connection) {
-- /* If there are some metadata on the connection (e.g. TLS alert),
-- * let mod_ssl detect them, and create a new connection below.
-- */
-- apr_bucket_brigade *tmp_bb;
-- tmp_bb = apr_brigade_create(ctx->rbase->pool,
-- ctx->rbase->connection->bucket_alloc);
-- status = ap_get_brigade(ctx->p_conn->connection->input_filters, tmp_bb,
-- AP_MODE_SPECULATIVE, APR_NONBLOCK_READ, 1);
-- if (status != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(status)) {
-- ctx->p_conn->close = 1;
-- }
-- apr_brigade_cleanup(tmp_bb);
-- }
-
- /* Step One: Determine the URL to connect to (might be a proxy),
- * initialize the backend accordingly and determine the server
- * port string we can expect in responses. */
-- if ((status = ap_proxy_determine_connection(ctx->pool, ctx->rbase, conf, worker,
-+ if ((status = ap_proxy_determine_connection(ctx->pool, ctx->r, conf, worker,
- ctx->p_conn, &uri, &locurl,
- proxyname, proxyport,
- ctx->server_portstr,
-@@ -556,17 +375,6 @@ run_connect:
- goto cleanup;
- }
-
-- /* If we are not already hosting an engine, try to push the request
-- * to an already existing engine or host a new engine here. */
-- if (r && !ctx->engine) {
-- ctx->r_status = push_request_somewhere(ctx, r);
-- r = NULL;
-- if (ctx->r_status == SUSPENDED) {
-- /* request was pushed to another thread, leave processing here */
-- goto cleanup;
-- }
-- }
--
- /* Step Two: Make the Connection (or check that an already existing
- * socket is still usable). On success, we have a socket connected to
- * backend->hostname. */
-@@ -575,70 +383,58 @@ run_connect:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03352)
- "H2: failed to make connection to backend: %s",
- ctx->p_conn->hostname);
-- goto reconnect;
-+ goto cleanup;
- }
-
- /* Step Three: Create conn_rec for the socket we have open now. */
- if (!ctx->p_conn->connection) {
-- status = ap_proxy_connection_create_ex(ctx->proxy_func,
-- ctx->p_conn, ctx->rbase);
-+ status = ap_proxy_connection_create_ex(ctx->proxy_func, ctx->p_conn, ctx->r);
- if (status != OK) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner, APLOGNO(03353)
- "setup new connection: is_ssl=%d %s %s %s",
- ctx->p_conn->is_ssl, ctx->p_conn->ssl_hostname,
- locurl, ctx->p_conn->hostname);
-- goto reconnect;
-+ ctx->r_status = status;
-+ goto cleanup;
- }
-
-- if (!ctx->p_conn->data) {
-- /* New connection: set a note on the connection about what CN is
-- * requested and what protocol we want */
-+ if (!ctx->p_conn->data && ctx->is_ssl) {
-+ /* New SSL connection: set a note on the connection about what
-+ * protocol we want.
-+ */
-+ apr_table_setn(ctx->p_conn->connection->notes,
-+ "proxy-request-alpn-protos", "h2");
- if (ctx->p_conn->ssl_hostname) {
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, ctx->owner,
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, ctx->owner,
- "set SNI to %s for (%s)",
- ctx->p_conn->ssl_hostname,
- ctx->p_conn->hostname);
- apr_table_setn(ctx->p_conn->connection->notes,
- "proxy-request-hostname", ctx->p_conn->ssl_hostname);
- }
-- if (ctx->is_ssl) {
-- apr_table_setn(ctx->p_conn->connection->notes,
-- "proxy-request-alpn-protos", "h2");
-- }
- }
- }
-
--run_session:
-- status = proxy_engine_run(ctx);
-- if (status == APR_SUCCESS) {
-- /* session and connection still ok */
-- if (next_request(ctx, 1) == APR_SUCCESS) {
-- /* more requests, run again */
-- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(03376)
-- "run_session, again");
-- goto run_session;
-- }
-- /* done */
-- ctx->engine = NULL;
-- }
-+ if (ctx->master->aborted) goto cleanup;
-+ status = ctx_run(ctx);
-
--reconnect:
-- if (next_request(ctx, 1) == APR_SUCCESS) {
-- /* Still more to do, tear down old conn and start over */
-+ if (ctx->r_status != APR_SUCCESS && ctx->r_may_retry && !ctx->master->aborted) {
-+ /* Not successfully processed, but may retry, tear down old conn and start over */
- if (ctx->p_conn) {
- ctx->p_conn->close = 1;
-- /*only in trunk so far */
-- /*proxy_run_detach_backend(r, ctx->p_conn);*/
-+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
-+ proxy_run_detach_backend(r, ctx->p_conn);
-+#endif
- ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
- ctx->p_conn = NULL;
- }
- ++reconnects;
-- if (reconnects < 5 && !ctx->owner->aborted) {
-+ if (reconnects < 5) {
- goto run_connect;
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, ctx->owner, APLOGNO(10023)
-- "giving up after %d reconnects, %d requests todo",
-- reconnects, h2_proxy_fifo_count(ctx->requests));
-+ "giving up after %d reconnects, request-done=%d",
-+ reconnects, ctx->r_done);
- }
-
- cleanup:
-@@ -647,17 +443,13 @@ cleanup:
- /* close socket when errors happened or session shut down (EOF) */
- ctx->p_conn->close = 1;
- }
-- /*only in trunk so far */
-- /*proxy_run_detach_backend(ctx->rbase, ctx->p_conn);*/
-+#if AP_MODULE_MAGIC_AT_LEAST(20140207, 2)
-+ proxy_run_detach_backend(ctx->r, ctx->p_conn);
-+#endif
- ap_proxy_release_connection(ctx->proxy_func, ctx->p_conn, ctx->server);
- ctx->p_conn = NULL;
- }
-
-- /* Any requests will still have need to fail */
-- while (APR_SUCCESS == h2_proxy_fifo_try_pull(ctx->requests, (void**)&r)) {
-- request_done(ctx, r, HTTP_SERVICE_UNAVAILABLE, 1);
-- }
--
- ap_set_module_config(ctx->owner->conn_config, &proxy_http2_module, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, ctx->owner,
- APLOGNO(03377) "leaving handler");
---
-1.8.3.1
-
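The hunks above hand two hints to mod_ssl through conn_rec notes before the backend TLS handshake is made: "proxy-request-alpn-protos" set to "h2" asks for HTTP/2 via ALPN, and "proxy-request-hostname" carries the SNI name. A minimal sketch of that pattern against the mod_proxy structures follows (the helper name announce_backend_tls_prefs() is invented, and this assumes an httpd module build tree):

/* Sketch only: hint ALPN and SNI to mod_ssl on a freshly created backend
 * connection, mirroring the notes used in the deleted hunk above.
 * proxy_conn_rec, conn_rec->notes and apr_table_setn() are real httpd/APR
 * APIs; announce_backend_tls_prefs() is a hypothetical helper. */
#include "httpd.h"
#include "mod_proxy.h"

static void announce_backend_tls_prefs(proxy_conn_rec *p_conn)
{
    if (!p_conn->connection || !p_conn->is_ssl) {
        return;
    }
    /* Ask the TLS layer to offer "h2" during ALPN negotiation. */
    apr_table_setn(p_conn->connection->notes,
                   "proxy-request-alpn-protos", "h2");
    if (p_conn->ssl_hostname) {
        /* Use the stored hostname as SNI for the backend handshake. */
        apr_table_setn(p_conn->connection->notes,
                       "proxy-request-hostname", p_conn->ssl_hostname);
    }
}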
diff --git a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch b/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch
deleted file mode 100644
index 6aff2aa2bd9be0128f7bb90e4b38440a5ea9dbf2..0000000000000000000000000000000000000000
--- a/CVE-2019-9517_CVE-2019-10081_CVE-2019-10082-5.patch
+++ /dev/null
@@ -1,860 +0,0 @@
-From 94de05dacf17a60a8c3b34b5ded37fc4dc04709b Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Thu, 1 Aug 2019 08:18:03 +0000
-Subject: [PATCH 5/5] Merge of r1861338,1862475,1862583,1862865,1863221,1863276
- from trunk:
-
- *) mod_http2: core setting "LimitRequestFieldSize" is now checked on
- merged header fields, just as HTTP/1.1 does. [Stefan Eissing, Michael Kaufmann]
-
- *) mod_http2: fixed a bug that prevented proper stream cleanup when connection
- throttling was in place. Stream resets by clients on streams initiated by them
- are counted as a possible trigger for throttling. [Stefan Eissing]
-
- *) mod_http2/mpm_event: Fixes the behaviour when an HTTP/2 connection has nothing
- more to write with streams ongoing (flow control block). The timeout waiting
- for the client to send WINDOW_UPDATE was incorrectly KeepAliveTimeout and not
- Timeout as it should be. Fixes PR 63534. [Yann Ylavic, Stefan Eissing]
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1864126 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/http2/h2_conn.c | 7 ++
- modules/http2/h2_filter.c | 52 +++++++++-
- modules/http2/h2_mplx.c | 249 +++++++++++++++++++++++++++------------------
- modules/http2/h2_mplx.h | 11 +-
- modules/http2/h2_session.c | 19 +++-
- modules/http2/h2_stream.c | 80 +++++++++++----
- modules/http2/h2_stream.h | 4 +
- modules/http2/h2_task.c | 8 +-
- modules/http2/h2_task.h | 2 +
- server/mpm/event/event.c | 7 +-
- 10 files changed, 304 insertions(+), 135 deletions(-)
-
-diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
-index 9ef0ea0..0b78a84 100644
---- a/modules/http2/h2_conn.c
-+++ b/modules/http2/h2_conn.c
-@@ -231,6 +231,13 @@ apr_status_t h2_conn_run(conn_rec *c)
- case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_WAIT:
- c->cs->state = CONN_STATE_WRITE_COMPLETION;
-+ if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
-+ /* let the MPM know that we are not done and want
-+ * the Timeout behaviour instead of a KeepAliveTimeout
-+ * See PR 63534.
-+ */
-+ c->cs->sense = CONN_SENSE_WANT_READ;
-+ }
- break;
- case H2_SESSION_ST_CLEANUP:
- case H2_SESSION_ST_DONE:
-diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
-index 5fd237f..2fc5e12 100644
---- a/modules/http2/h2_filter.c
-+++ b/modules/http2/h2_filter.c
-@@ -493,6 +493,52 @@ static apr_status_t status_event(void *ctx, h2_bucket_event event,
- return APR_SUCCESS;
- }
-
-+static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
-+{
-+ apr_bucket_brigade *bb;
-+ int seen_eos;
-+ apr_status_t rv;
-+
-+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
-+ seen_eos = 0;
-+ do {
-+ apr_bucket *bucket;
-+
-+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
-+ APR_BLOCK_READ, HUGE_STRING_LEN);
-+
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_destroy(bb);
-+ return rv;
-+ }
-+
-+ for (bucket = APR_BRIGADE_FIRST(bb);
-+ bucket != APR_BRIGADE_SENTINEL(bb);
-+ bucket = APR_BUCKET_NEXT(bucket))
-+ {
-+ const char *data;
-+ apr_size_t len;
-+
-+ if (APR_BUCKET_IS_EOS(bucket)) {
-+ seen_eos = 1;
-+ break;
-+ }
-+ if (bucket->length == 0) {
-+ continue;
-+ }
-+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
-+ if (rv != APR_SUCCESS) {
-+ apr_brigade_destroy(bb);
-+ return rv;
-+ }
-+ maxlen -= bucket->length;
-+ }
-+ apr_brigade_cleanup(bb);
-+ } while (!seen_eos && maxlen >= 0);
-+
-+ return APR_SUCCESS;
-+}
-+
- int h2_filter_h2_status_handler(request_rec *r)
- {
- conn_rec *c = r->connection;
-@@ -510,8 +556,10 @@ int h2_filter_h2_status_handler(request_rec *r)
-
- task = h2_ctx_get_task(r->connection);
- if (task) {
--
-- if ((status = ap_discard_request_body(r)) != OK) {
-+ /* In this handler, we do some special sauce to send footers back,
-+ * IFF we received footers in the request. This is used in our test
-+ * cases, since CGI has no way of handling those. */
-+ if ((status = discard_body(r, 1024)) != OK) {
- return status;
- }
-
-diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
-index fae77c7..9b504a5 100644
---- a/modules/http2/h2_mplx.c
-+++ b/modules/http2/h2_mplx.c
-@@ -53,8 +53,12 @@ typedef struct {
- h2_mplx *m;
- h2_stream *stream;
- apr_time_t now;
-+ apr_size_t count;
- } stream_iter_ctx;
-
-+static apr_status_t mplx_be_happy(h2_mplx *m);
-+static apr_status_t mplx_be_annoyed(h2_mplx *m);
-+
- apr_status_t h2_mplx_child_init(apr_pool_t *pool, server_rec *s)
- {
- return APR_SUCCESS;
-@@ -98,7 +102,7 @@ static void stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t len
-
- static void stream_joined(h2_mplx *m, h2_stream *stream)
- {
-- ap_assert(!stream->task || stream->task->worker_done);
-+ ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
-
- h2_ihash_remove(m->shold, stream->id);
- h2_ihash_add(m->spurge, stream);
-@@ -124,7 +128,7 @@ static void stream_cleanup(h2_mplx *m, h2_stream *stream)
- h2_ififo_remove(m->readyq, stream->id);
- h2_ihash_add(m->shold, stream);
-
-- if (!stream->task || stream->task->worker_done) {
-+ if (!h2_task_has_started(stream->task) || stream->task->done_done) {
- stream_joined(m, stream);
- }
- else if (stream->task) {
-@@ -194,7 +198,6 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
- m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
-- m->sredo = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->q = h2_iq_create(m->pool, m->max_streams);
-@@ -208,8 +211,8 @@ h2_mplx *h2_mplx_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
- m->workers = workers;
- m->max_active = workers->max_workers;
- m->limit_active = 6; /* the original h1 max parallel connections */
-- m->last_limit_change = m->last_idle_block = apr_time_now();
-- m->limit_change_interval = apr_time_from_msec(100);
-+ m->last_mood_change = apr_time_now();
-+ m->mood_update_interval = apr_time_from_msec(100);
-
- m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
- }
-@@ -431,6 +434,10 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
-
- /* How to shut down a h2 connection:
- * 1. cancel all streams still active */
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
-+ m->id, (int)h2_ihash_count(m->streams),
-+ (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
- while (!h2_ihash_iter(m->streams, stream_cancel_iter, m)) {
- /* until empty */
- }
-@@ -456,10 +463,10 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- h2_ihash_iter(m->shold, report_stream_iter, m);
- }
- }
-- ap_assert(m->tasks_active == 0);
- m->join_wait = NULL;
--
-+
- /* 4. With all workers done, all streams should be in spurge */
-+ ap_assert(m->tasks_active == 0);
- if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
- "h2_mplx(%ld): unexpected %d streams in hold",
-@@ -470,8 +477,7 @@ void h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
- m->c->aborted = old_aborted;
- H2_MPLX_LEAVE(m);
-
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): released", m->id);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
- }
-
- apr_status_t h2_mplx_stream_cleanup(h2_mplx *m, h2_stream *stream)
-@@ -709,7 +715,6 @@ static h2_task *next_stream_task(h2_mplx *m)
- }
-
- if (!stream->task) {
--
- if (sid > m->max_stream_started) {
- m->max_stream_started = sid;
- }
-@@ -728,9 +733,9 @@ static h2_task *next_stream_task(h2_mplx *m)
- "create task"));
- return NULL;
- }
--
- }
-
-+ stream->task->started_at = apr_time_now();
- ++m->tasks_active;
- return stream->task;
- }
-@@ -778,32 +783,18 @@ static void task_done(h2_mplx *m, h2_task *task)
- "h2_mplx(%s): request done, %f ms elapsed", task->id,
- (task->done_at - task->started_at) / 1000.0);
-
-- if (task->started_at > m->last_idle_block) {
-- /* this task finished without causing an 'idle block', e.g.
-- * a block by flow control.
-- */
-- if (task->done_at- m->last_limit_change >= m->limit_change_interval
-- && m->limit_active < m->max_active) {
-- /* Well behaving stream, allow it more workers */
-- m->limit_active = H2MIN(m->limit_active * 2,
-- m->max_active);
-- m->last_limit_change = task->done_at;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): increase worker limit to %d",
-- m->id, m->limit_active);
-- }
-+ if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
-+ mplx_be_happy(m);
- }
--
-+
- ap_assert(task->done_done == 0);
-
- stream = h2_ihash_get(m->streams, task->stream_id);
- if (stream) {
- /* stream not done yet. */
-- if (!m->aborted && h2_ihash_get(m->sredo, stream->id)) {
-+ if (!m->aborted && task->redo) {
- /* reset and schedule again */
-- task->worker_done = 0;
- h2_task_redo(task);
-- h2_ihash_remove(m->sredo, stream->id);
- h2_iq_add(m->q, stream->id, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c,
- H2_STRM_MSG(stream, "redo, added to q"));
-@@ -848,8 +839,8 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
- {
- H2_MPLX_ENTER_ALWAYS(m);
-
-- task_done(m, task);
- --m->tasks_active;
-+ task_done(m, task);
-
- if (m->join_wait) {
- apr_thread_cond_signal(m->join_wait);
-@@ -867,94 +858,161 @@ void h2_mplx_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
- * h2_mplx DoS protection
- ******************************************************************************/
-
--static int latest_repeatable_unsubmitted_iter(void *data, void *val)
-+static int timed_out_busy_iter(void *data, void *val)
- {
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
--
-- if (stream->task && !stream->task->worker_done
-- && h2_task_can_redo(stream->task)
-- && !h2_ihash_get(ctx->m->sredo, stream->id)) {
-- if (!h2_stream_is_ready(stream)) {
-- /* this task occupies a worker, the response has not been submitted
-- * yet, not been cancelled and it is a repeatable request
-- * -> it can be re-scheduled later */
-- if (!ctx->stream
-- || (ctx->stream->task->started_at < stream->task->started_at)) {
-- /* we did not have one or this one was started later */
-- ctx->stream = stream;
-- }
-- }
-+ if (h2_task_has_started(stream->task) && !stream->task->worker_done
-+ && (ctx->now - stream->task->started_at) > stream->task->timeout) {
-+ /* timed out stream occupying a worker, found */
-+ ctx->stream = stream;
-+ return 0;
- }
- return 1;
- }
-
--static h2_stream *get_latest_repeatable_unsubmitted_stream(h2_mplx *m)
-+static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
- {
- stream_iter_ctx ctx;
- ctx.m = m;
- ctx.stream = NULL;
-- h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
-+ ctx.now = apr_time_now();
-+ h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
- return ctx.stream;
- }
-
--static int timed_out_busy_iter(void *data, void *val)
-+static int latest_repeatable_unsubmitted_iter(void *data, void *val)
- {
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
-- if (stream->task && !stream->task->worker_done
-- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
-- /* timed out stream occupying a worker, found */
-- ctx->stream = stream;
-- return 0;
-+
-+ if (!stream->task) goto leave;
-+ if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
-+ if (h2_stream_is_ready(stream)) goto leave;
-+ if (stream->task->redo) {
-+ ++ctx->count;
-+ goto leave;
-+ }
-+ if (h2_task_can_redo(stream->task)) {
-+ /* this task occupies a worker, the response has not been submitted
-+ * yet, not been cancelled and it is a repeatable request
-+ * -> we could redo it later */
-+ if (!ctx->stream
-+ || (ctx->stream->task->started_at < stream->task->started_at)) {
-+ /* we did not have one or this one was started later */
-+ ctx->stream = stream;
-+ }
- }
-+leave:
- return 1;
- }
-
--static h2_stream *get_timed_out_busy_stream(h2_mplx *m)
-+static apr_status_t assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
- {
- stream_iter_ctx ctx;
-+
-+ /* count the running tasks already marked for redo and get one that could
-+ * be throttled */
-+ *ptask = NULL;
- ctx.m = m;
- ctx.stream = NULL;
-- ctx.now = apr_time_now();
-- h2_ihash_iter(m->streams, timed_out_busy_iter, &ctx);
-- return ctx.stream;
-+ ctx.count = 0;
-+ h2_ihash_iter(m->streams, latest_repeatable_unsubmitted_iter, &ctx);
-+ if (m->tasks_active - ctx.count > m->limit_active) {
-+ /* we are above the limit of running tasks, accounting for the ones
-+ * already throttled. */
-+ if (ctx.stream && ctx.stream->task) {
-+ *ptask = ctx.stream->task;
-+ return APR_EAGAIN;
-+ }
-+ /* above limit, but seeing no candidate for easy throttling */
-+ if (get_timed_out_busy_stream(m)) {
-+ /* Too many busy workers, unable to cancel enough streams
-+ * and with a busy, timed out stream, we tell the client
-+ * to go away... */
-+ return APR_TIMEUP;
-+ }
-+ }
-+ return APR_SUCCESS;
- }
-
- static apr_status_t unschedule_slow_tasks(h2_mplx *m)
- {
-- h2_stream *stream;
-- int n;
-+ h2_task *task;
-+ apr_status_t rv;
-
- /* Try to get rid of streams that occupy workers. Look for safe requests
- * that are repeatable. If none found, fail the connection.
- */
-- n = (m->tasks_active - m->limit_active - (int)h2_ihash_count(m->sredo));
-- while (n > 0 && (stream = get_latest_repeatable_unsubmitted_stream(m))) {
-+ while (APR_EAGAIN == (rv = assess_task_to_throttle(&task, m))) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): unschedule, resetting task for redo later",
-- stream->task->id);
-- h2_task_rst(stream->task, H2_ERR_CANCEL);
-- h2_ihash_add(m->sredo, stream);
-- --n;
-+ task->id);
-+ task->redo = 1;
-+ h2_task_rst(task, H2_ERR_CANCEL);
- }
-
-- if ((m->tasks_active - h2_ihash_count(m->sredo)) > m->limit_active) {
-- h2_stream *stream = get_timed_out_busy_stream(m);
-- if (stream) {
-- /* Too many busy workers, unable to cancel enough streams
-- * and with a busy, timed out stream, we tell the client
-- * to go away... */
-- return APR_TIMEUP;
-- }
-+ return rv;
-+}
-+
-+static apr_status_t mplx_be_happy(h2_mplx *m)
-+{
-+ apr_time_t now;
-+
-+ --m->irritations_since;
-+ now = apr_time_now();
-+ if (m->limit_active < m->max_active
-+ && (now - m->last_mood_change >= m->mood_update_interval
-+ || m->irritations_since < -m->limit_active)) {
-+ m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
-+ m->last_mood_change = now;
-+ m->irritations_since = 0;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ "h2_mplx(%ld): mood update, increasing worker limit to %d",
-+ m->id, m->limit_active);
- }
- return APR_SUCCESS;
- }
-
--apr_status_t h2_mplx_idle(h2_mplx *m)
-+static apr_status_t mplx_be_annoyed(h2_mplx *m)
- {
- apr_status_t status = APR_SUCCESS;
- apr_time_t now;
-+
-+ ++m->irritations_since;
-+ now = apr_time_now();
-+ if (m->limit_active > 2 &&
-+ ((now - m->last_mood_change >= m->mood_update_interval)
-+ || (m->irritations_since >= m->limit_active))) {
-+
-+ if (m->limit_active > 16) {
-+ m->limit_active = 16;
-+ }
-+ else if (m->limit_active > 8) {
-+ m->limit_active = 8;
-+ }
-+ else if (m->limit_active > 4) {
-+ m->limit_active = 4;
-+ }
-+ else if (m->limit_active > 2) {
-+ m->limit_active = 2;
-+ }
-+ m->last_mood_change = now;
-+ m->irritations_since = 0;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-+ "h2_mplx(%ld): mood update, decreasing worker limit to %d",
-+ m->id, m->limit_active);
-+ }
-+
-+ if (m->tasks_active > m->limit_active) {
-+ status = unschedule_slow_tasks(m);
-+ }
-+ return status;
-+}
-+
-+apr_status_t h2_mplx_idle(h2_mplx *m)
-+{
-+ apr_status_t status = APR_SUCCESS;
- apr_size_t scount;
-
- H2_MPLX_ENTER(m);
-@@ -974,31 +1032,7 @@ apr_status_t h2_mplx_idle(h2_mplx *m)
- * of busy workers we allow for this connection until it
- * well behaves.
- */
-- now = apr_time_now();
-- m->last_idle_block = now;
-- if (m->limit_active > 2
-- && now - m->last_limit_change >= m->limit_change_interval) {
-- if (m->limit_active > 16) {
-- m->limit_active = 16;
-- }
-- else if (m->limit_active > 8) {
-- m->limit_active = 8;
-- }
-- else if (m->limit_active > 4) {
-- m->limit_active = 4;
-- }
-- else if (m->limit_active > 2) {
-- m->limit_active = 2;
-- }
-- m->last_limit_change = now;
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-- "h2_mplx(%ld): decrease worker limit to %d",
-- m->id, m->limit_active);
-- }
--
-- if (m->tasks_active > m->limit_active) {
-- status = unschedule_slow_tasks(m);
-- }
-+ status = mplx_be_annoyed(m);
- }
- else if (!h2_iq_empty(m->q)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
-@@ -1093,11 +1127,24 @@ int h2_mplx_awaits_data(h2_mplx *m)
- if (h2_ihash_empty(m->streams)) {
- waiting = 0;
- }
-- else if (!m->tasks_active && !h2_ififo_count(m->readyq)
-- && h2_iq_empty(m->q)) {
-+ else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
- waiting = 0;
- }
-
- H2_MPLX_LEAVE(m);
- return waiting;
- }
-+
-+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id)
-+{
-+ h2_stream *stream;
-+ apr_status_t status = APR_SUCCESS;
-+
-+ H2_MPLX_ENTER_ALWAYS(m);
-+ stream = h2_ihash_get(m->streams, stream_id);
-+ if (stream && stream->task) {
-+ status = mplx_be_annoyed(m);
-+ }
-+ H2_MPLX_LEAVE(m);
-+ return status;
-+}
-diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
-index 575ccaf..8a4f63f 100644
---- a/modules/http2/h2_mplx.h
-+++ b/modules/http2/h2_mplx.h
-@@ -63,7 +63,6 @@ struct h2_mplx {
- unsigned int is_registered; /* is registered at h2_workers */
-
- struct h2_ihash_t *streams; /* all streams currently processing */
-- struct h2_ihash_t *sredo; /* all streams that need to be re-started */
- struct h2_ihash_t *shold; /* all streams done with task ongoing */
- struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
-
-@@ -77,10 +76,10 @@ struct h2_mplx {
- int tasks_active; /* # of tasks being processed from this mplx */
- int limit_active; /* current limit on active tasks, dynamic */
- int max_active; /* max, hard limit # of active tasks in a process */
-- apr_time_t last_idle_block; /* last time, this mplx entered IDLE while
-- * streams were ready */
-- apr_time_t last_limit_change; /* last time, worker limit changed */
-- apr_interval_time_t limit_change_interval;
-+
-+ apr_time_t last_mood_change; /* last time the worker limit changed */
-+ apr_interval_time_t mood_update_interval; /* how frequently we update, at most */
-+ int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
-
- apr_thread_mutex_t *lock;
- struct apr_thread_cond_t *added_output;
-@@ -205,6 +204,8 @@ typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
-
- apr_status_t h2_mplx_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
-
-+apr_status_t h2_mplx_client_rst(h2_mplx *m, int stream_id);
-+
- /*******************************************************************************
- * Output handling of streams.
- ******************************************************************************/
-diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
-index f153422..43d26d3 100644
---- a/modules/http2/h2_session.c
-+++ b/modules/http2/h2_session.c
-@@ -390,9 +390,14 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
- (int)frame->rst_stream.error_code);
- stream = h2_session_stream_get(session, frame->hd.stream_id);
- if (stream && stream->initiated_on) {
-+ /* A stream reset on a request we sent it. Normal, when the
-+ * client does not want it. */
- ++session->pushes_reset;
- }
- else {
-+ /* A stream reset on a request it sent us. Could happen in a browser
-+ * when the user navigates away or cancels loading - maybe. */
-+ h2_mplx_client_rst(session->mplx, frame->hd.stream_id);
- ++session->streams_reset;
- }
- break;
-@@ -1699,7 +1704,7 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
- * that already served requests - not fair. */
- session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
- s = "timeout";
-- timeout = H2MAX(session->s->timeout, session->s->keep_alive_timeout);
-+ timeout = session->s->timeout;
- update_child_status(session, SERVER_BUSY_READ, "idle");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
-@@ -1707,8 +1712,8 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
- }
- else if (session->open_streams) {
- s = "timeout";
-- timeout = session->s->keep_alive_timeout;
-- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
-+ timeout = session->s->timeout;
-+ update_child_status(session, SERVER_BUSY_READ, "idle");
- }
- else {
- /* normal keepalive setup */
-@@ -2166,6 +2171,14 @@ apr_status_t h2_session_process(h2_session *session, int async)
- session->have_read = 1;
- }
- else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
-+ status = h2_mplx_idle(session->mplx);
-+ if (status == APR_EAGAIN) {
-+ break;
-+ }
-+ else if (status != APR_SUCCESS) {
-+ dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
-+ H2_ERR_ENHANCE_YOUR_CALM, "less is more");
-+ }
- status = APR_EAGAIN;
- goto out;
- }
-diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
-index b5763ac..8c3d305 100644
---- a/modules/http2/h2_stream.c
-+++ b/modules/http2/h2_stream.c
-@@ -397,13 +397,8 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
- /* start pushed stream */
- ap_assert(stream->request == NULL);
- ap_assert(stream->rtmp != NULL);
-- status = h2_request_end_headers(stream->rtmp, stream->pool, 1, 0);
-- if (status != APR_SUCCESS) {
-- return status;
-- }
-- set_policy_for(stream, stream->rtmp);
-- stream->request = stream->rtmp;
-- stream->rtmp = NULL;
-+ status = h2_stream_end_headers(stream, 1, 0);
-+ if (status != APR_SUCCESS) goto leave;
- break;
-
- default:
-@@ -415,6 +410,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
- if (status == APR_SUCCESS && eos) {
- status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
- }
-+leave:
- return status;
- }
-
-@@ -455,13 +451,8 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
- * to abort the connection here, since this is clearly a protocol error */
- return APR_EINVAL;
- }
-- status = h2_request_end_headers(stream->rtmp, stream->pool, eos, frame_len);
-- if (status != APR_SUCCESS) {
-- return status;
-- }
-- set_policy_for(stream, stream->rtmp);
-- stream->request = stream->rtmp;
-- stream->rtmp = NULL;
-+ status = h2_stream_end_headers(stream, eos, frame_len);
-+ if (status != APR_SUCCESS) goto leave;
- }
- break;
-
-@@ -472,6 +463,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
- if (status == APR_SUCCESS && eos) {
- status = transit(stream, on_event(stream, H2_SEV_CLOSED_R));
- }
-+leave:
- return status;
- }
-
-@@ -683,6 +675,8 @@ static apr_status_t add_trailer(h2_stream *stream,
- hvalue = apr_pstrndup(stream->pool, value, vlen);
- h2_util_camel_case_header(hname, nlen);
- apr_table_mergen(stream->trailers, hname, hvalue);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
-+ H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
-
- return APR_SUCCESS;
- }
-@@ -702,15 +696,19 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
- if (name[0] == ':') {
- if ((vlen) > session->s->limit_req_line) {
- /* pseudo header: approximation of request line size check */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "pseudo %s too long"), name);
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10178), stream,
-+ "Request pseudo header exceeds "
-+ "LimitRequestFieldSize: %s"), name);
- error = HTTP_REQUEST_URI_TOO_LARGE;
- }
- }
- else if ((nlen + 2 + vlen) > session->s->limit_req_fieldsize) {
- /* header too long */
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "header %s too long"), name);
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
-+ "LimitRequestFieldSize: %.*s"),
-+ (int)H2MIN(nlen, 80), name);
- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
- }
-
-@@ -722,8 +720,9 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
- h2_stream_rst(stream, H2_ERR_ENHANCE_YOUR_CALM);
- return APR_ECONNRESET;
- }
-- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
-- H2_STRM_MSG(stream, "too many header lines"));
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
-+ H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
-+ "exceeds LimitRequestFields"));
- error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
- }
-
-@@ -754,6 +753,47 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
- return status;
- }
-
-+typedef struct {
-+ apr_size_t maxlen;
-+ const char *failed_key;
-+} val_len_check_ctx;
-+
-+static int table_check_val_len(void *baton, const char *key, const char *value)
-+{
-+ val_len_check_ctx *ctx = baton;
-+
-+ if (strlen(value) <= ctx->maxlen) return 1;
-+ ctx->failed_key = key;
-+ return 0;
-+}
-+
-+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
-+{
-+ apr_status_t status;
-+ val_len_check_ctx ctx;
-+
-+ status = h2_request_end_headers(stream->rtmp, stream->pool, eos, raw_bytes);
-+ if (APR_SUCCESS == status) {
-+ set_policy_for(stream, stream->rtmp);
-+ stream->request = stream->rtmp;
-+ stream->rtmp = NULL;
-+
-+ ctx.maxlen = stream->session->s->limit_req_fieldsize;
-+ ctx.failed_key = NULL;
-+ apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
-+ if (ctx.failed_key) {
-+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
-+ H2_STRM_LOG(APLOGNO(), stream,"Request header exceeds "
-+ "LimitRequestFieldSize: %.*s"),
-+ (int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
-+ set_error_response(stream, HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE);
-+ /* keep on returning APR_SUCCESS, so that we send a HTTP response and
-+ * do not RST the stream. */
-+ }
-+ }
-+ return status;
-+}
-+
- static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
- {
- if (bb) {
-diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
-index 7ecc0ad..79cb39d 100644
---- a/modules/http2/h2_stream.h
-+++ b/modules/http2/h2_stream.h
-@@ -198,6 +198,10 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
- apr_status_t h2_stream_add_header(h2_stream *stream,
- const char *name, size_t nlen,
- const char *value, size_t vlen);
-+
-+/* End the construction of request headers */
-+apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
-+
-
- apr_status_t h2_stream_send_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
- apr_status_t h2_stream_recv_frame(h2_stream *stream, int frame_type, int flags, size_t frame_len);
-diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
-index a395807..c312459 100644
---- a/modules/http2/h2_task.c
-+++ b/modules/http2/h2_task.c
-@@ -408,8 +408,15 @@ int h2_task_can_redo(h2_task *task) {
- || !strcmp("OPTIONS", task->request->method));
- }
-
-+int h2_task_has_started(h2_task *task)
-+{
-+ return task && task->started_at != 0;
-+}
-+
- void h2_task_redo(h2_task *task)
- {
-+ task->started_at = 0;
-+ task->worker_done = 0;
- task->rst_error = 0;
- }
-
-@@ -548,7 +555,6 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
- ap_assert(task);
- c = task->c;
- task->worker_started = 1;
-- task->started_at = apr_time_now();
-
- if (c->master) {
- /* Each conn_rec->id is supposed to be unique at a point in time. Since
-diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
-index 20be429..9a7ad68 100644
---- a/modules/http2/h2_task.h
-+++ b/modules/http2/h2_task.h
-@@ -80,6 +80,7 @@ struct h2_task {
-
- unsigned int filters_set : 1;
- unsigned int worker_started : 1; /* h2_worker started processing */
-+ unsigned int redo : 1; /* was throttled, should be restarted later */
-
- int worker_done; /* h2_worker finished */
- int done_done; /* task_done has been handled */
-@@ -101,6 +102,7 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);
-
- void h2_task_redo(h2_task *task);
- int h2_task_can_redo(h2_task *task);
-+int h2_task_has_started(h2_task *task);
-
- /**
- * Reset the task with the given error code, resets all input/output.
-diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
-index 048ae61..7a8a197 100644
---- a/server/mpm/event/event.c
-+++ b/server/mpm/event/event.c
-@@ -1112,10 +1112,11 @@ read_request:
- "network write failure in core output filter");
- cs->pub.state = CONN_STATE_LINGER;
- }
-- else if (c->data_in_output_filters) {
-+ else if (c->data_in_output_filters ||
-+ cs->pub.sense == CONN_SENSE_WANT_READ) {
- /* Still in WRITE_COMPLETION_STATE:
-- * Set a write timeout for this connection, and let the
-- * event thread poll for writeability.
-+ * Set a read/write timeout for this connection, and let the
-+ * event thread poll for read/writeability.
- */
- cs->queue_timestamp = apr_time_now();
- notify_suspend(cs);
---
-1.8.3.1
-
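The throttling rework above replaces the old idle-block bookkeeping with a small mood model: problematic events (an idle block, a client RST on a stream it opened) raise an irritation counter, and once the update interval elapses or enough irritations accumulate the active-task limit steps down through 16/8/4/2; clean completions count the other way and double the limit back up to the worker maximum. A standalone sketch of just those steps follows (mood_t, step_down() and step_up() are invented names; only the 16/8/4/2 ladder and the capped doubling come from the deleted code):

/* Standalone illustration of the step-down/step-up limit policy described
 * above. The types and function names are invented for the sketch. */
#include <stdio.h>

typedef struct {
    int limit_active;   /* current cap on concurrently running tasks */
    int max_active;     /* hard upper bound (worker pool size) */
} mood_t;

static void step_down(mood_t *m)          /* called when "annoyed" */
{
    if (m->limit_active > 16)      m->limit_active = 16;
    else if (m->limit_active > 8)  m->limit_active = 8;
    else if (m->limit_active > 4)  m->limit_active = 4;
    else if (m->limit_active > 2)  m->limit_active = 2;
}

static void step_up(mood_t *m)            /* called when "happy" */
{
    m->limit_active *= 2;
    if (m->limit_active > m->max_active)
        m->limit_active = m->max_active;
}

int main(void)
{
    mood_t m = { 25, 25 };
    step_down(&m); printf("annoyed once:  %d\n", m.limit_active);  /* 16 */
    step_down(&m); printf("annoyed twice: %d\n", m.limit_active);  /* 8 */
    step_up(&m);   printf("happy again:   %d\n", m.limit_active);  /* 16 */
    return 0;
}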
diff --git a/CVE-2020-1927-1.patch b/CVE-2020-1927-1.patch
deleted file mode 100644
index 35fe491bcd4ffb902b8d68efaf7a59f6f2a0514d..0000000000000000000000000000000000000000
--- a/CVE-2020-1927-1.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From f11d5830759eb50ed366fc0690f9f4f491064ea3 Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Tue, 11 Feb 2020 13:16:38 +0000
-Subject: [PATCH 1/2] Merge r1873747 from trunk:
-
-factor out default regex flags
-
-Submitted by: covener
-Reviewed by: covener, minfrin, jorton
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1873905 13f79535-47bb-0310-9956-ffa450edef68
----
- include/ap_mmn.h | 1 +
- include/ap_regex.h | 2 ++
- server/core.c | 2 +-
- server/util_pcre.c | 3 +--
- 4 files changed, 5 insertions(+), 3 deletions(-)
-
-diff --git a/include/ap_mmn.h b/include/ap_mmn.h
-index 839228e..f5043ef 100644
---- a/include/ap_mmn.h
-+++ b/include/ap_mmn.h
-@@ -515,6 +515,7 @@
- * 20120211.77 (2.4.34-dev) Add ap_exists_directive()
- * 20120211.78 (2.4.34-dev) Add response_field_size to proxy_worker_shared
- * 20120211.79 (2.4.34-dev) Add AP_GETLINE_NOSPC_EOL flag to http_protocol.h
-+ * 20120211.90 (2.4.42-dev) AP_REG_DEFAULT macro in ap_regex.h
- */
-
- #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
-diff --git a/include/ap_regex.h b/include/ap_regex.h
-index 7d8df79..e651eea 100644
---- a/include/ap_regex.h
-+++ b/include/ap_regex.h
-@@ -86,6 +86,8 @@ extern "C" {
-
- #define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */
-
-+#define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY)
-+
- /* Error values: */
- enum {
- AP_REG_ASSERT = 1, /** internal error ? */
-diff --git a/server/core.c b/server/core.c
-index e892c87..a8772a3 100644
---- a/server/core.c
-+++ b/server/core.c
-@@ -4938,7 +4938,7 @@ static int core_pre_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptem
- apr_pool_cleanup_register(pconf, NULL, reset_config_defines,
- apr_pool_cleanup_null);
-
-- ap_regcomp_set_default_cflags(AP_REG_DOLLAR_ENDONLY);
-+ ap_regcomp_set_default_cflags(AP_REG_DEFAULT);
-
- mpm_common_pre_config(pconf);
-
-diff --git a/server/util_pcre.c b/server/util_pcre.c
-index 35831f5..74722b4 100644
---- a/server/util_pcre.c
-+++ b/server/util_pcre.c
-@@ -120,8 +120,7 @@ AP_DECLARE(void) ap_regfree(ap_regex_t *preg)
- * Compile a regular expression *
- *************************************************/
-
--static int default_cflags = AP_REG_DOTALL |
-- AP_REG_DOLLAR_ENDONLY;
-+static int default_cflags = AP_REG_DEFAULT;
-
- AP_DECLARE(int) ap_regcomp_get_default_cflags(void)
- {
---
-1.8.3.1
-
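The first half of the regex change only factors the implicit compile options into a single AP_REG_DEFAULT macro (DOTALL plus DOLLAR_ENDONLY) so core.c and util_pcre.c share one definition that gets ORed into every caller's flags. A tiny standalone sketch of that merge, using illustrative flag values (only AP_REG_DOLLAR_ENDONLY's 0x200 appears in the hunk above):

/* Standalone sketch of the "factor out default regex flags" idea: one
 * AP_REG_DEFAULT definition, ORed into every compilation in one place.
 * Flag values below are illustrative, not the real ap_regex.h constants. */
#include <stdio.h>

#define AP_REG_ICASE          0x01   /* illustrative value */
#define AP_REG_DOTALL         0x40   /* illustrative value */
#define AP_REG_DOLLAR_ENDONLY 0x200
#define AP_REG_DEFAULT        (AP_REG_DOTALL | AP_REG_DOLLAR_ENDONLY)

static int default_cflags = AP_REG_DEFAULT;

static int effective_cflags(int caller_cflags)
{
    return caller_cflags | default_cflags;   /* defaults always added (pre opt-out) */
}

int main(void)
{
    printf("0x%x\n", effective_cflags(AP_REG_ICASE));  /* 0x241 */
    return 0;
}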
diff --git a/CVE-2020-1927-2.patch b/CVE-2020-1927-2.patch
deleted file mode 100644
index 33b072ef587c73f7237dbe237bf60bf51b8870c7..0000000000000000000000000000000000000000
--- a/CVE-2020-1927-2.patch
+++ /dev/null
@@ -1,99 +0,0 @@
-From ff36010963d1c2f2e6b331aa6d7d7d879e3975f6 Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Wed, 19 Feb 2020 12:26:31 +0000
-Subject: [PATCH 2/2] add AP_REG_NO_DEFAULT to allow opt-out of pcre defaults
-
-... and use it in mod_substitute to avoid DOTALL
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1874191 13f79535-47bb-0310-9956-ffa450edef68
----
- include/ap_mmn.h | 1 +
- include/ap_regex.h | 4 +++-
- modules/filters/mod_substitute.c | 6 ++++--
- server/util_pcre.c | 4 +++-
- server/util_regex.c | 3 ++-
- 5 files changed, 13 insertions(+), 5 deletions(-)
-
-diff --git a/include/ap_mmn.h b/include/ap_mmn.h
-index f5043ef..4c74e56 100644
---- a/include/ap_mmn.h
-+++ b/include/ap_mmn.h
-@@ -516,6 +516,7 @@
- * 20120211.78 (2.4.34-dev) Add response_field_size to proxy_worker_shared
- * 20120211.79 (2.4.34-dev) Add AP_GETLINE_NOSPC_EOL flag to http_protocol.h
- * 20120211.90 (2.4.42-dev) AP_REG_DEFAULT macro in ap_regex.h
-+ * 20120211.92 (2.4.42-dev) AP_REG_NO_DEFAULT macro in ap_regex.h
- */
-
- #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
-diff --git a/include/ap_regex.h b/include/ap_regex.h
-index e651eea..7af2f99 100644
---- a/include/ap_regex.h
-+++ b/include/ap_regex.h
-@@ -84,7 +84,9 @@ extern "C" {
-
- #define AP_REG_DOLLAR_ENDONLY 0x200 /* '$' matches at end of subject string only */
-
--#define AP_REG_MATCH "MATCH_" /** suggested prefix for ap_regname */
-+#define AP_REG_NO_DEFAULT 0x400 /**< Don't implicitly add AP_REG_DEFAULT options */
-+
-+#define AP_REG_MATCH "MATCH_" /**< suggested prefix for ap_regname */
-
- #define AP_REG_DEFAULT (AP_REG_DOTALL|AP_REG_DOLLAR_ENDONLY)
-
-diff --git a/modules/filters/mod_substitute.c b/modules/filters/mod_substitute.c
-index b7d5296..e976c51 100644
---- a/modules/filters/mod_substitute.c
-+++ b/modules/filters/mod_substitute.c
-@@ -667,8 +667,10 @@ static const char *set_pattern(cmd_parms *cmd, void *cfg, const char *line)
-
- /* first see if we can compile the regex */
- if (!is_pattern) {
-- r = ap_pregcomp(cmd->pool, from, AP_REG_EXTENDED |
-- (ignore_case ? AP_REG_ICASE : 0));
-+ int flags = AP_REG_NO_DEFAULT
-+ | (ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY)
-+ | (ignore_case ? AP_REG_ICASE : 0);
-+ r = ap_pregcomp(cmd->pool, from, flags);
- if (!r)
- return "Substitute could not compile regex";
- }
-diff --git a/server/util_pcre.c b/server/util_pcre.c
-index 74722b4..8819871 100644
---- a/server/util_pcre.c
-+++ b/server/util_pcre.c
-@@ -168,7 +168,9 @@ AP_DECLARE(int) ap_regcomp(ap_regex_t * preg, const char *pattern, int cflags)
- int errcode = 0;
- int options = PCRE_DUPNAMES;
-
-- cflags |= default_cflags;
-+ if ((cflags & AP_REG_NO_DEFAULT) == 0)
-+ cflags |= default_cflags;
-+
- if ((cflags & AP_REG_ICASE) != 0)
- options |= PCRE_CASELESS;
- if ((cflags & AP_REG_NEWLINE) != 0)
-diff --git a/server/util_regex.c b/server/util_regex.c
-index 2a30d68..5405f8d 100644
---- a/server/util_regex.c
-+++ b/server/util_regex.c
-@@ -94,6 +94,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool,
- }
-
- /* anything after the current delimiter is flags */
-+ ret->flags = ap_regcomp_get_default_cflags() & AP_REG_DOLLAR_ENDONLY;
- while (*++endp) {
- switch (*endp) {
- case 'i': ret->flags |= AP_REG_ICASE; break;
-@@ -106,7 +107,7 @@ AP_DECLARE(ap_rxplus_t*) ap_rxplus_compile(apr_pool_t *pool,
- default: break; /* we should probably be stricter here */
- }
- }
-- if (ap_regcomp(&ret->rx, rxstr, ret->flags) == 0) {
-+ if (ap_regcomp(&ret->rx, rxstr, AP_REG_NO_DEFAULT | ret->flags) == 0) {
- apr_pool_cleanup_register(pool, &ret->rx, rxplus_cleanup,
- apr_pool_cleanup_null);
- }
---
-1.8.3.1
-
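The second half lets individual callers opt out: with AP_REG_NO_DEFAULT set, util_pcre skips the implicit defaults, and mod_substitute rebuilds only the bits it still wants, no DOTALL, a configured DOLLAR_ENDONLY, and ICASE when requested. Continuing the previous sketch with the same illustrative flag values (substitute_cflags() is an invented helper, not an httpd API):

/* Sketch of the AP_REG_NO_DEFAULT opt-out added above. Flag values are
 * illustrative; substitute_cflags() mirrors what the Substitute directive
 * computes but is not a real httpd function. */
#include <stdio.h>

#define AP_REG_ICASE          0x01   /* illustrative value */
#define AP_REG_DOTALL         0x40   /* illustrative value */
#define AP_REG_DOLLAR_ENDONLY 0x200
#define AP_REG_NO_DEFAULT     0x400
#define AP_REG_DEFAULT        (AP_REG_DOTALL | AP_REG_DOLLAR_ENDONLY)

static int default_cflags = AP_REG_DEFAULT;

/* Defaults are merged only when the caller did not opt out. */
static int effective_cflags(int caller_cflags)
{
    if ((caller_cflags & AP_REG_NO_DEFAULT) == 0)
        caller_cflags |= default_cflags;
    return caller_cflags;
}

/* Opt out of defaults, keep a configured DOLLAR_ENDONLY, never pick up
 * the implicit DOTALL. */
static int substitute_cflags(int ignore_case)
{
    return AP_REG_NO_DEFAULT
         | (default_cflags & AP_REG_DOLLAR_ENDONLY)
         | (ignore_case ? AP_REG_ICASE : 0);
}

int main(void)
{
    int flags = substitute_cflags(1);
    printf("0x%x -> 0x%x\n", flags, effective_cflags(flags));  /* 0x601 -> 0x601 */
    return 0;
}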
diff --git a/CVE-2020-1934.patch b/CVE-2020-1934.patch
deleted file mode 100644
index 5c00ecadd41ecaddcf8dac75d96708c60000ee3e..0000000000000000000000000000000000000000
--- a/CVE-2020-1934.patch
+++ /dev/null
@@ -1,88 +0,0 @@
-From 0b59e8ce2d978dfd6b74473df4e1309a5c226498 Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Tue, 11 Feb 2020 13:14:42 +0000
-Subject: [PATCH] Merge r1873745 from trunk:
-
-trap bad FTP responses
-
-Submitted by: covener
-Reviewed by: covener, minfrin, jorton
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1873904 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/proxy/mod_proxy_ftp.c | 20 +++++++++++++++-----
- 1 file changed, 15 insertions(+), 5 deletions(-)
-
-diff --git a/modules/proxy/mod_proxy_ftp.c b/modules/proxy/mod_proxy_ftp.c
-index 1557301..6318102 100644
---- a/modules/proxy/mod_proxy_ftp.c
-+++ b/modules/proxy/mod_proxy_ftp.c
-@@ -218,7 +218,7 @@ static int ftp_check_string(const char *x)
- * (EBCDIC) machines either.
- */
- static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
-- char *buff, apr_size_t bufflen, int *eos)
-+ char *buff, apr_size_t bufflen, int *eos, apr_size_t *outlen)
- {
- apr_bucket *e;
- apr_status_t rv;
-@@ -230,6 +230,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
- /* start with an empty string */
- buff[0] = 0;
- *eos = 0;
-+ *outlen = 0;
-
- /* loop through each brigade */
- while (!found) {
-@@ -273,6 +274,7 @@ static apr_status_t ftp_string_read(conn_rec *c, apr_bucket_brigade *bb,
- if (len > 0) {
- memcpy(pos, response, len);
- pos += len;
-+ *outlen += len;
- }
- }
- apr_bucket_delete(e);
-@@ -385,28 +387,36 @@ static int ftp_getrc_msg(conn_rec *ftp_ctrl, apr_bucket_brigade *bb, char *msgbu
- char buff[5];
- char *mb = msgbuf, *me = &msgbuf[msglen];
- apr_status_t rv;
-+ apr_size_t nread;
-+
- int eos;
-
-- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
-+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
- return -1;
- }
- /*
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, APLOGNO(03233)
- "<%s", response);
- */
-+ if (nread < 4) {
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(10229) "Malformed FTP response '%s'", response);
-+ *mb = '\0';
-+ return -1;
-+ }
-+
- if (!apr_isdigit(response[0]) || !apr_isdigit(response[1]) ||
-- !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
-+ !apr_isdigit(response[2]) || (response[3] != ' ' && response[3] != '-'))
- status = 0;
- else
- status = 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
-
- mb = apr_cpystrn(mb, response + 4, me - mb);
-
-- if (response[3] == '-') {
-+ if (response[3] == '-') { /* multi-line reply "123-foo\nbar\n123 baz" */
- memcpy(buff, response, 3);
- buff[3] = ' ';
- do {
-- if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos))) {
-+ if (APR_SUCCESS != (rv = ftp_string_read(ftp_ctrl, bb, response, sizeof(response), &eos, &nread))) {
- return -1;
- }
- mb = apr_cpystrn(mb, response + (' ' == response[0] ? 1 : 4), me - mb);
---
-1.8.3.1
-
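The FTP fix above refuses to interpret a control-channel reply of fewer than four bytes and then checks for the "ddd " or "ddd-" shape before decoding the status digits. A standalone sketch of that validation and of the 100*d0 + 10*d1 + d2 - 111*'0' conversion used in the deleted hunk (parse_ftp_status() is an invented name):

/* Standalone sketch: validate and decode an FTP control-channel reply the
 * way the deleted hunk does. Returns -1 for a line too short to carry
 * "ddd ", 0 when the first four bytes are not "ddd " or "ddd-", and the
 * 3-digit status otherwise. parse_ftp_status() is not an httpd API. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int parse_ftp_status(const char *response)
{
    if (strlen(response) < 4) {
        return -1;                      /* too short to carry "ddd " */
    }
    if (!isdigit((unsigned char)response[0]) ||
        !isdigit((unsigned char)response[1]) ||
        !isdigit((unsigned char)response[2]) ||
        (response[3] != ' ' && response[3] != '-')) {
        return 0;                       /* not a status line */
    }
    /* Same arithmetic as the patch: 100*d0 + 10*d1 + d2, minus the ASCII
     * offset of three digits (111 * '0' == 100*'0' + 10*'0' + '0'). */
    return 100 * response[0] + 10 * response[1] + response[2] - 111 * '0';
}

int main(void)
{
    printf("%d\n", parse_ftp_status("230 Login successful"));   /* 230 */
    printf("%d\n", parse_ftp_status("226-Directory listing"));  /* 226 */
    printf("%d\n", parse_ftp_status("ok"));                     /* -1  */
    return 0;
}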
diff --git a/MPMs-Initialize-all-runtime-asynchronous-objects-on-.patch b/MPMs-Initialize-all-runtime-asynchronous-objects-on-.patch
deleted file mode 100644
index b41aa08314d87dcf4998dbff393e3ed7a3351632..0000000000000000000000000000000000000000
--- a/MPMs-Initialize-all-runtime-asynchronous-objects-on-.patch
+++ /dev/null
@@ -1,699 +0,0 @@
-From 98928d02d2473ceb9f81b1c3bc527f8b0a0039e6 Mon Sep 17 00:00:00 2001
-From: Graham Leggett
-Date: Fri, 21 Sep 2018 13:30:15 +0000
-Subject: [PATCH 188/504] MPMs: Initialize all runtime/asynchronous objects on
- a dedicated pool and before signals handling to avoid lifetime issues on
- restart or shutdown. PR 62658. trunk patch: http://svn.apache.org/r1835845
- http://svn.apache.org/r1835846
- http://svn.apache.org/r1837354 http://svn.apache.org/r1837356
- http://svn.apache.org/r1839571
- http://svn.apache.org/r1839583 2.4.x patch:
- http://home.apache.org/~ylavic/patches/2.4.x-mpms_async_objects_lifetime.patch
- +1: ylavic, jim (but not for 2.4.35), minfrin
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1841586 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 +
- STATUS | 11 --
- server/mpm/event/event.c | 203 +++++++++++++++------------
- server/mpm/mpmt_os2/mpmt_os2_child.c | 1 +
- server/mpm/netware/mpm_netware.c | 1 +
- server/mpm/prefork/prefork.c | 6 +-
- server/mpm/winnt/child.c | 3 +
- server/mpm/winnt/mpm_winnt.c | 1 +
- server/mpm/worker/worker.c | 148 +++++++++++--------
- 9 files changed, 215 insertions(+), 163 deletions(-)
-
-diff --git a/server/mpm/event/event.c b/server/mpm/event/event.c
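The MPM change below moves the listener/worker threads and their shared objects (queues, pollset, mutexes, timer skiplist) off pchild and onto dedicated subpools of pconf, so they keep at least the lifetime of the connections they serve across restart and shutdown. A minimal sketch of creating and tagging such a runtime pool with APR (the pool names mirror the patch; setup_runtime_pool() is an invented wrapper):

/* Sketch: give long-lived MPM runtime objects their own subpool of pconf,
 * as the patch below does, so they survive pchild destruction. The APR
 * calls are real; setup_runtime_pool() is an invented wrapper. */
#include "apr_pools.h"

static apr_pool_t *pruntime;  /* lives as long as pconf, longer than pchild */

static apr_status_t setup_runtime_pool(apr_pool_t *pconf)
{
    apr_status_t rv = apr_pool_create(&pruntime, pconf);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    apr_pool_tag(pruntime, "mpm_runtime");   /* visible in pool debugging */
    return APR_SUCCESS;
}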
-index f07b757ab9..ffe8a23cbd 100644
---- a/server/mpm/event/event.c
-+++ b/server/mpm/event/event.c
-@@ -436,6 +436,7 @@ int raise_sigstop_flags;
-
- static apr_pool_t *pconf; /* Pool for config stuff */
- static apr_pool_t *pchild; /* Pool for httpd child stuff */
-+static apr_pool_t *pruntime; /* Pool for MPM threads stuff */
-
- static pid_t ap_my_pid; /* Linux getpid() doesn't work except in main
- thread. Use this instead */
-@@ -709,10 +710,10 @@ static void event_note_child_killed(int childnum, pid_t pid, ap_generation_t gen
-
- static void event_note_child_started(int slot, pid_t pid)
- {
-+ ap_generation_t gen = retained->mpm->my_generation;
- ap_scoreboard_image->parent[slot].pid = pid;
-- ap_run_child_status(ap_server_conf,
-- ap_scoreboard_image->parent[slot].pid,
-- retained->mpm->my_generation, slot, MPM_CHILD_STARTED);
-+ ap_scoreboard_image->parent[slot].generation = gen;
-+ ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_STARTED);
- }
-
- static const char *event_get_name(void)
-@@ -1270,36 +1271,6 @@ static void dummy_signal_handler(int sig)
- }
-
-
--static apr_status_t init_pollset(apr_pool_t *p)
--{
-- ap_listen_rec *lr;
-- listener_poll_type *pt;
-- int i = 0;
--
-- listener_pollfd = apr_palloc(p, sizeof(apr_pollfd_t) * num_listensocks);
-- for (lr = my_bucket->listeners; lr != NULL; lr = lr->next, i++) {
-- apr_pollfd_t *pfd;
-- AP_DEBUG_ASSERT(i < num_listensocks);
-- pfd = &listener_pollfd[i];
-- pt = apr_pcalloc(p, sizeof(*pt));
-- pfd->desc_type = APR_POLL_SOCKET;
-- pfd->desc.s = lr->sd;
-- pfd->reqevents = APR_POLLIN;
--
-- pt->type = PT_ACCEPT;
-- pt->baton = lr;
--
-- pfd->client_data = pt;
--
-- apr_socket_opt_set(pfd->desc.s, APR_SO_NONBLOCK, 1);
-- apr_pollset_add(event_pollset, pfd);
--
-- lr->accept_func = ap_unixd_accept;
-- }
--
-- return APR_SUCCESS;
--}
--
- static apr_status_t push_timer2worker(timer_event_t* te)
- {
- return ap_queue_push_timer(worker_queue, te);
-@@ -1611,7 +1582,6 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
- proc_info *ti = dummy;
- int process_slot = ti->pslot;
- struct process_score *ps = ap_get_scoreboard_process(process_slot);
-- apr_pool_t *tpool = apr_thread_pool_get(thd);
- int closed = 0;
- int have_idle_worker = 0;
- apr_time_t last_log;
-@@ -1619,16 +1589,6 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
- last_log = apr_time_now();
- free(ti);
-
-- rc = init_pollset(tpool);
-- if (rc != APR_SUCCESS) {
-- ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
-- "failed to initialize pollset, "
-- "shutdown process now");
-- resource_shortage = 1;
-- signal_threads(ST_UNGRACEFUL);
-- return NULL;
-- }
--
- /* Unblock the signal used to wake this thread up, and set a handler for
- * it.
- */
-@@ -2168,8 +2128,6 @@ static int check_signal(int signum)
- return 0;
- }
-
--
--
- static void create_listener_thread(thread_starter * ts)
- {
- int my_child_num = ts->child_num_arg;
-@@ -2181,7 +2139,7 @@ static void create_listener_thread(thread_starter * ts)
- my_info->pslot = my_child_num;
- my_info->tslot = -1; /* listener thread doesn't have a thread slot */
- rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
-- my_info, pchild);
-+ my_info, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00474)
- "apr_thread_create: unable to create listener thread");
-@@ -2191,25 +2149,12 @@ static void create_listener_thread(thread_starter * ts)
- apr_os_thread_get(&listener_os_thread, ts->listener);
- }
-
--/* XXX under some circumstances not understood, children can get stuck
-- * in start_threads forever trying to take over slots which will
-- * never be cleaned up; for now there is an APLOG_DEBUG message issued
-- * every so often when this condition occurs
-- */
--static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
-+static void setup_threads_runtime(void)
- {
-- thread_starter *ts = dummy;
-- apr_thread_t **threads = ts->threads;
-- apr_threadattr_t *thread_attr = ts->threadattr;
-- int my_child_num = ts->child_num_arg;
-- proc_info *my_info;
- apr_status_t rv;
-- int i;
-- int threads_created = 0;
-- int listener_started = 0;
-- int loops;
-- int prev_threads_created;
-- int max_recycled_pools = -1;
-+ ap_listen_rec *lr;
-+ apr_pool_t *pskip = NULL;
-+ int max_recycled_pools = -1, i;
- const int good_methods[] = { APR_POLLSET_KQUEUE,
- APR_POLLSET_PORT,
- APR_POLLSET_EPOLL };
-@@ -2218,10 +2163,39 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- const apr_uint32_t pollset_size = (apr_uint32_t)num_listensocks +
- (apr_uint32_t)threads_per_child *
- (async_factor > 2 ? async_factor : 2);
-+ int pollset_flags;
-+
-+ /* Event's skiplist operations will happen concurrently with other modules'
-+ * runtime so they need their own pool for allocations, and its lifetime
-+ * should be at least the one of the connections (ptrans). Thus pskip is
-+ * created as a subpool of pconf like/before ptrans (before so that it's
-+ * destroyed after). In forked mode pconf is never destroyed so we are good
-+ * anyway, but in ONE_PROCESS mode this ensures that the skiplist works
-+ * from connection/ptrans cleanups (even after pchild is destroyed).
-+ */
-+ apr_pool_create(&pskip, pconf);
-+ apr_pool_tag(pskip, "mpm_skiplist");
-+ apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pskip);
-+ APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-+ apr_skiplist_init(&timer_skiplist, pskip);
-+ apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp);
-+
-+ /* All threads (listener, workers) and synchronization objects (queues,
-+ * pollset, mutexes...) created here should have at least the lifetime of
-+ * the connections they handle (i.e. ptrans). We can't use this thread's
-+ * self pool because all these objects survive it, nor use pchild or pconf
-+ * directly because this starter thread races with other modules' runtime,
-+ * nor finally pchild (or subpool thereof) because it is killed explicitly
-+ * before pconf (thus connections/ptrans can live longer, which matters in
-+ * ONE_PROCESS mode). So this leaves us with a subpool of pconf, created
-+ * before any ptrans hence destroyed after.
-+ */
-+ apr_pool_create(&pruntime, pconf);
-+ apr_pool_tag(pruntime, "mpm_runtime");
-
- /* We must create the fd queues before we start up the listener
- * and worker threads. */
-- rv = ap_queue_create(&worker_queue, threads_per_child, pchild);
-+ rv = ap_queue_create(&worker_queue, threads_per_child, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03100)
- "ap_queue_create() failed");
-@@ -2235,7 +2209,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- */
- max_recycled_pools = threads_per_child * 3 / 4 ;
- }
-- rv = ap_queue_info_create(&worker_queue_info, pchild,
-+ rv = ap_queue_info_create(&worker_queue_info, pruntime,
- threads_per_child, max_recycled_pools);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03101)
-@@ -2247,7 +2221,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- * thread starts.
- */
- rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT,
-- pchild);
-+ pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03102)
- "creation of the timeout mutex failed.");
-@@ -2255,25 +2229,30 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- }
-
- /* Create the main pollset */
-+ pollset_flags = APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY |
-+ APR_POLLSET_NODEFAULT | APR_POLLSET_WAKEABLE;
- for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) {
-- apr_uint32_t flags = APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY |
-- APR_POLLSET_NODEFAULT | APR_POLLSET_WAKEABLE;
-- rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
-- good_methods[i]);
-+ rv = apr_pollset_create_ex(&event_pollset, pollset_size, pruntime,
-+ pollset_flags, good_methods[i]);
- if (rv == APR_SUCCESS) {
- listener_is_wakeable = 1;
- break;
- }
-- flags &= ~APR_POLLSET_WAKEABLE;
-- rv = apr_pollset_create_ex(&event_pollset, pollset_size, pchild, flags,
-- good_methods[i]);
-- if (rv == APR_SUCCESS) {
-- break;
-+ }
-+ if (rv != APR_SUCCESS) {
-+ pollset_flags &= ~APR_POLLSET_WAKEABLE;
-+ for (i = 0; i < sizeof(good_methods) / sizeof(good_methods[0]); i++) {
-+ rv = apr_pollset_create_ex(&event_pollset, pollset_size, pruntime,
-+ pollset_flags, good_methods[i]);
-+ if (rv == APR_SUCCESS) {
-+ break;
-+ }
- }
- }
- if (rv != APR_SUCCESS) {
-- rv = apr_pollset_create(&event_pollset, pollset_size, pchild,
-- APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
-+ pollset_flags &= ~APR_POLLSET_NODEFAULT;
-+ rv = apr_pollset_create(&event_pollset, pollset_size, pruntime,
-+ pollset_flags);
- }
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(03103)
-@@ -2281,12 +2260,57 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- clean_child_exit(APEXIT_CHILDFATAL);
- }
-
-+ /* Add listeners to the main pollset */
-+ listener_pollfd = apr_pcalloc(pruntime, num_listensocks *
-+ sizeof(apr_pollfd_t));
-+ for (i = 0, lr = my_bucket->listeners; lr; lr = lr->next, i++) {
-+ apr_pollfd_t *pfd;
-+ listener_poll_type *pt;
-+
-+ AP_DEBUG_ASSERT(i < num_listensocks);
-+ pfd = &listener_pollfd[i];
-+
-+ pfd->reqevents = APR_POLLIN;
-+ pfd->desc_type = APR_POLL_SOCKET;
-+ pfd->desc.s = lr->sd;
-+
-+ pt = apr_pcalloc(pruntime, sizeof(*pt));
-+ pfd->client_data = pt;
-+ pt->type = PT_ACCEPT;
-+ pt->baton = lr;
-+
-+ apr_socket_opt_set(pfd->desc.s, APR_SO_NONBLOCK, 1);
-+ apr_pollset_add(event_pollset, pfd);
-+
-+ lr->accept_func = ap_unixd_accept;
-+ }
-+
-+ worker_sockets = apr_pcalloc(pruntime, threads_per_child *
-+ sizeof(apr_socket_t *));
-+}
-+
-+/* XXX under some circumstances not understood, children can get stuck
-+ * in start_threads forever trying to take over slots which will
-+ * never be cleaned up; for now there is an APLOG_DEBUG message issued
-+ * every so often when this condition occurs
-+ */
-+static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
-+{
-+ thread_starter *ts = dummy;
-+ apr_thread_t **threads = ts->threads;
-+ apr_threadattr_t *thread_attr = ts->threadattr;
-+ int my_child_num = ts->child_num_arg;
-+ proc_info *my_info;
-+ apr_status_t rv;
-+ int threads_created = 0;
-+ int listener_started = 0;
-+ int prev_threads_created;
-+ int loops, i;
-+
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02471)
- "start_threads: Using %s (%swakeable)",
- apr_pollset_method_name(event_pollset),
- listener_is_wakeable ? "" : "not ");
-- worker_sockets = apr_pcalloc(pchild, threads_per_child
-- * sizeof(apr_socket_t *));
-
- loops = prev_threads_created = 0;
- while (1) {
-@@ -2310,7 +2334,7 @@ static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
- * done because it lets us deal with tid better.
- */
- rv = apr_thread_create(&threads[i], thread_attr,
-- worker_thread, my_info, pchild);
-+ worker_thread, my_info, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
- APLOGNO(03104)
-@@ -2431,7 +2455,6 @@ static void child_main(int child_num_arg, int child_bucket)
- apr_threadattr_t *thread_attr;
- apr_thread_t *start_thread_id;
- int i;
-- apr_pool_t *pskip;
-
- /* for benefit of any hooks that run as this child initializes */
- retained->mpm->mpm_state = AP_MPMQ_STARTING;
-@@ -2439,7 +2462,12 @@ static void child_main(int child_num_arg, int child_bucket)
- ap_my_pid = getpid();
- ap_child_slot = child_num_arg;
- ap_fatal_signal_child_setup(ap_server_conf);
-+
-+ /* Get a sub context for global allocations in this child, so that
-+ * we can have cleanups occur when the child exits.
-+ */
- apr_pool_create(&pchild, pconf);
-+ apr_pool_tag(pchild, "pchild");
-
- /* close unused listeners and pods */
- for (i = 0; i < retained->mpm->num_buckets; i++) {
-@@ -2457,12 +2485,6 @@ static void child_main(int child_num_arg, int child_bucket)
- clean_child_exit(APEXIT_CHILDFATAL);
- }
-
-- apr_thread_mutex_create(&g_timer_skiplist_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
-- APR_RING_INIT(&timer_free_ring, timer_event_t, link);
-- apr_pool_create(&pskip, pchild);
-- apr_skiplist_init(&timer_skiplist, pskip);
-- apr_skiplist_set_compare(timer_skiplist, timer_comp, timer_comp);
--
- /* Just use the standard apr_setup_signal_thread to block all signals
- * from being received. The child processes no longer use signals for
- * any communication with the parent process. Let's also do this before
-@@ -2486,7 +2508,10 @@ static void child_main(int child_num_arg, int child_bucket)
- conns_this_child = APR_INT32_MAX;
- }
-
-- /* Setup worker threads */
-+ /* Setup threads */
-+
-+ /* Globals used by signal_threads() so to be initialized before */
-+ setup_threads_runtime();
-
- /* clear the storage; we may not create all our threads immediately,
- * and we want a 0 entry to indicate a thread which was not created
-diff --git a/server/mpm/mpmt_os2/mpmt_os2_child.c b/server/mpm/mpmt_os2/mpmt_os2_child.c
-index ca9f594754..bb7e1369ea 100644
---- a/server/mpm/mpmt_os2/mpmt_os2_child.c
-+++ b/server/mpm/mpmt_os2/mpmt_os2_child.c
-@@ -110,6 +110,7 @@ void ap_mpm_child_main(apr_pool_t *pconf)
-
- /* Create pool for child */
- apr_pool_create(&pchild, pconf);
-+ apr_pool_tag(pchild, "pchild");
-
- ap_run_child_init(pchild, ap_server_conf);
-
-diff --git a/server/mpm/netware/mpm_netware.c b/server/mpm/netware/mpm_netware.c
-index 2fab52f598..82480334b8 100644
---- a/server/mpm/netware/mpm_netware.c
-+++ b/server/mpm/netware/mpm_netware.c
-@@ -886,6 +886,7 @@ static int netware_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
-
- /* Only set slot 0 since that is all NetWare will ever have. */
- ap_scoreboard_image->parent[0].pid = getpid();
-+ ap_scoreboard_image->parent[0].generation = ap_my_generation;
- ap_run_child_status(ap_server_conf,
- ap_scoreboard_image->parent[0].pid,
- ap_my_generation,
-diff --git a/server/mpm/prefork/prefork.c b/server/mpm/prefork/prefork.c
-index 3fb328467d..8efda72ee1 100644
---- a/server/mpm/prefork/prefork.c
-+++ b/server/mpm/prefork/prefork.c
-@@ -208,10 +208,10 @@ static void prefork_note_child_killed(int childnum, pid_t pid,
-
- static void prefork_note_child_started(int slot, pid_t pid)
- {
-+ ap_generation_t gen = retained->mpm->my_generation;
- ap_scoreboard_image->parent[slot].pid = pid;
-- ap_run_child_status(ap_server_conf,
-- ap_scoreboard_image->parent[slot].pid,
-- retained->mpm->my_generation, slot, MPM_CHILD_STARTED);
-+ ap_scoreboard_image->parent[slot].generation = gen;
-+ ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_STARTED);
- }
-
- /* a clean exit from a child with proper cleanup */
-diff --git a/server/mpm/winnt/child.c b/server/mpm/winnt/child.c
-index 9ddba18f80..21755f398c 100644
---- a/server/mpm/winnt/child.c
-+++ b/server/mpm/winnt/child.c
-@@ -917,6 +917,9 @@ void child_main(apr_pool_t *pconf, DWORD parent_pid)
- int i;
- int num_events;
-
-+ /* Get a sub context for global allocations in this child, so that
-+ * we can have cleanups occur when the child exits.
-+ */
- apr_pool_create(&pchild, pconf);
- apr_pool_tag(pchild, "pchild");
-
-diff --git a/server/mpm/winnt/mpm_winnt.c b/server/mpm/winnt/mpm_winnt.c
-index 2487e45be2..3d71f80e72 100644
---- a/server/mpm/winnt/mpm_winnt.c
-+++ b/server/mpm/winnt/mpm_winnt.c
-@@ -139,6 +139,7 @@ AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
- static void winnt_note_child_started(int slot, pid_t pid)
- {
- ap_scoreboard_image->parent[slot].pid = pid;
-+ ap_scoreboard_image->parent[slot].generation = my_generation;
- ap_run_child_status(ap_server_conf,
- ap_scoreboard_image->parent[slot].pid,
- my_generation, slot, MPM_CHILD_STARTED);
-diff --git a/server/mpm/worker/worker.c b/server/mpm/worker/worker.c
-index 7804efc457..8012fe29d8 100644
---- a/server/mpm/worker/worker.c
-+++ b/server/mpm/worker/worker.c
-@@ -134,6 +134,8 @@ static int num_listensocks = 0;
- static int resource_shortage = 0;
- static fd_queue_t *worker_queue;
- static fd_queue_info_t *worker_queue_info;
-+static apr_pollset_t *worker_pollset;
-+
-
- /* data retained by worker across load/unload of the module
- * allocated on first call to pre-config hook; located on
-@@ -218,6 +220,7 @@ int raise_sigstop_flags;
-
- static apr_pool_t *pconf; /* Pool for config stuff */
- static apr_pool_t *pchild; /* Pool for httpd child stuff */
-+static apr_pool_t *pruntime; /* Pool for MPM threads stuff */
-
- static pid_t ap_my_pid; /* Linux getpid() doesn't work except in main
- thread. Use this instead */
-@@ -392,10 +395,10 @@ static void worker_note_child_killed(int childnum, pid_t pid, ap_generation_t ge
-
- static void worker_note_child_started(int slot, pid_t pid)
- {
-+ ap_generation_t gen = retained->mpm->my_generation;
- ap_scoreboard_image->parent[slot].pid = pid;
-- ap_run_child_status(ap_server_conf,
-- ap_scoreboard_image->parent[slot].pid,
-- retained->mpm->my_generation, slot, MPM_CHILD_STARTED);
-+ ap_scoreboard_image->parent[slot].generation = gen;
-+ ap_run_child_status(ap_server_conf, pid, gen, slot, MPM_CHILD_STARTED);
- }
-
- static void worker_note_child_lost_slot(int slot, pid_t newpid)
-@@ -538,47 +541,15 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t *thd, void * dummy)
- {
- proc_info * ti = dummy;
- int process_slot = ti->pid;
-- apr_pool_t *tpool = apr_thread_pool_get(thd);
- void *csd = NULL;
- apr_pool_t *ptrans = NULL; /* Pool for per-transaction stuff */
-- apr_pollset_t *pollset;
- apr_status_t rv;
-- ap_listen_rec *lr;
-+ ap_listen_rec *lr = NULL;
- int have_idle_worker = 0;
- int last_poll_idx = 0;
-
- free(ti);
-
-- rv = apr_pollset_create(&pollset, num_listensocks, tpool,
-- APR_POLLSET_NOCOPY);
-- if (rv != APR_SUCCESS) {
-- ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
-- "Couldn't create pollset in thread;"
-- " check system or user limits");
-- /* let the parent decide how bad this really is */
-- clean_child_exit(APEXIT_CHILDSICK);
-- }
--
-- for (lr = my_bucket->listeners; lr != NULL; lr = lr->next) {
-- apr_pollfd_t *pfd = apr_pcalloc(tpool, sizeof *pfd);
--
-- pfd->desc_type = APR_POLL_SOCKET;
-- pfd->desc.s = lr->sd;
-- pfd->reqevents = APR_POLLIN;
-- pfd->client_data = lr;
--
-- rv = apr_pollset_add(pollset, pfd);
-- if (rv != APR_SUCCESS) {
-- ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
-- "Couldn't create add listener to pollset;"
-- " check system or user limits");
-- /* let the parent decide how bad this really is */
-- clean_child_exit(APEXIT_CHILDSICK);
-- }
--
-- lr->accept_func = ap_unixd_accept;
-- }
--
- /* Unblock the signal used to wake this thread up, and set a handler for
- * it.
- */
-@@ -630,7 +601,7 @@ static void * APR_THREAD_FUNC listener_thread(apr_thread_t *thd, void * dummy)
- apr_int32_t numdesc;
- const apr_pollfd_t *pdesc;
-
-- rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
-+ rv = apr_pollset_poll(worker_pollset, -1, &numdesc, &pdesc);
- if (rv != APR_SUCCESS) {
- if (APR_STATUS_IS_EINTR(rv)) {
- continue;
-@@ -871,7 +842,7 @@ static void create_listener_thread(thread_starter *ts)
- my_info->tid = -1; /* listener thread doesn't have a thread slot */
- my_info->sd = 0;
- rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
-- my_info, pchild);
-+ my_info, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00275)
- "apr_thread_create: unable to create listener thread");
-@@ -881,35 +852,34 @@ static void create_listener_thread(thread_starter *ts)
- apr_os_thread_get(&listener_os_thread, ts->listener);
- }
-
--/* XXX under some circumstances not understood, children can get stuck
-- * in start_threads forever trying to take over slots which will
-- * never be cleaned up; for now there is an APLOG_DEBUG message issued
-- * every so often when this condition occurs
-- */
--static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
-+static void setup_threads_runtime(void)
- {
-- thread_starter *ts = dummy;
-- apr_thread_t **threads = ts->threads;
-- apr_threadattr_t *thread_attr = ts->threadattr;
-- int my_child_num = ts->child_num_arg;
-- proc_info *my_info;
-+ ap_listen_rec *lr;
- apr_status_t rv;
-- int i;
-- int threads_created = 0;
-- int listener_started = 0;
-- int loops;
-- int prev_threads_created;
-+
-+ /* All threads (listener, workers) and synchronization objects (queues,
-+ * pollset, mutexes...) created here should have at least the lifetime of
-+ * the connections they handle (i.e. ptrans). We can't use this thread's
-+ * self pool because all these objects survive it, nor use pchild or pconf
-+ * directly because this starter thread races with other modules' runtime,
-+ * nor finally pchild (or subpool thereof) because it is killed explicitely
-+ * before pconf (thus connections/ptrans can live longer, which matters in
-+ * ONE_PROCESS mode). So this leaves us with a subpool of pconf, created
-+ * before any ptrans hence destroyed after.
-+ */
-+ apr_pool_create(&pruntime, pconf);
-+ apr_pool_tag(pruntime, "mpm_runtime");
-
- /* We must create the fd queues before we start up the listener
- * and worker threads. */
-- rv = ap_queue_create(&worker_queue, threads_per_child, pchild);
-+ rv = ap_queue_create(&worker_queue, threads_per_child, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03140)
- "ap_queue_create() failed");
- clean_child_exit(APEXIT_CHILDFATAL);
- }
-
-- rv = ap_queue_info_create(&worker_queue_info, pchild,
-+ rv = ap_queue_info_create(&worker_queue_info, pruntime,
- threads_per_child, -1);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03141)
-@@ -917,8 +887,58 @@ static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
- clean_child_exit(APEXIT_CHILDFATAL);
- }
-
-- worker_sockets = apr_pcalloc(pchild, threads_per_child
-- * sizeof(apr_socket_t *));
-+ /* Create the main pollset */
-+ rv = apr_pollset_create(&worker_pollset, num_listensocks, pruntime,
-+ APR_POLLSET_NOCOPY);
-+ if (rv != APR_SUCCESS) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(03285)
-+ "Couldn't create pollset in thread;"
-+ " check system or user limits");
-+ /* let the parent decide how bad this really is */
-+ clean_child_exit(APEXIT_CHILDSICK);
-+ }
-+
-+ for (lr = my_bucket->listeners; lr != NULL; lr = lr->next) {
-+ apr_pollfd_t *pfd = apr_pcalloc(pruntime, sizeof *pfd);
-+
-+ pfd->desc_type = APR_POLL_SOCKET;
-+ pfd->desc.s = lr->sd;
-+ pfd->reqevents = APR_POLLIN;
-+ pfd->client_data = lr;
-+
-+ rv = apr_pollset_add(worker_pollset, pfd);
-+ if (rv != APR_SUCCESS) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(03286)
-+ "Couldn't create add listener to pollset;"
-+ " check system or user limits");
-+ /* let the parent decide how bad this really is */
-+ clean_child_exit(APEXIT_CHILDSICK);
-+ }
-+
-+ lr->accept_func = ap_unixd_accept;
-+ }
-+
-+ worker_sockets = apr_pcalloc(pruntime, threads_per_child *
-+ sizeof(apr_socket_t *));
-+}
-+
-+/* XXX under some circumstances not understood, children can get stuck
-+ * in start_threads forever trying to take over slots which will
-+ * never be cleaned up; for now there is an APLOG_DEBUG message issued
-+ * every so often when this condition occurs
-+ */
-+static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
-+{
-+ thread_starter *ts = dummy;
-+ apr_thread_t **threads = ts->threads;
-+ apr_threadattr_t *thread_attr = ts->threadattr;
-+ int my_child_num = ts->child_num_arg;
-+ proc_info *my_info;
-+ apr_status_t rv;
-+ int threads_created = 0;
-+ int listener_started = 0;
-+ int prev_threads_created;
-+ int loops, i;
-
- loops = prev_threads_created = 0;
- while (1) {
-@@ -942,7 +962,7 @@ static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
- * done because it lets us deal with tid better.
- */
- rv = apr_thread_create(&threads[i], thread_attr,
-- worker_thread, my_info, pchild);
-+ worker_thread, my_info, pruntime);
- if (rv != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(03142)
- "apr_thread_create: unable to create worker thread");
-@@ -1082,7 +1102,12 @@ static void child_main(int child_num_arg, int child_bucket)
-
- ap_my_pid = getpid();
- ap_fatal_signal_child_setup(ap_server_conf);
-+
-+ /* Get a sub context for global allocations in this child, so that
-+ * we can have cleanups occur when the child exits.
-+ */
- apr_pool_create(&pchild, pconf);
-+ apr_pool_tag(pchild, "pchild");
-
- /* close unused listeners and pods */
- for (i = 0; i < retained->mpm->num_buckets; i++) {
-@@ -1132,7 +1157,10 @@ static void child_main(int child_num_arg, int child_bucket)
- requests_this_child = INT_MAX;
- }
-
-- /* Setup worker threads */
-+ /* Setup threads */
-+
-+ /* Globals used by signal_threads() so to be initialized before */
-+ setup_threads_runtime();
-
- /* clear the storage; we may not create all our threads immediately,
- * and we want a 0 entry to indicate a thread which was not created
---
-2.19.1
-
diff --git a/Merge-of-r1853133-r1853166-from-trunk.patch b/Merge-of-r1853133-r1853166-from-trunk.patch
deleted file mode 100644
index d33dcd38c7c51dc95fc228b5363a0bce5ce5810d..0000000000000000000000000000000000000000
--- a/Merge-of-r1853133-r1853166-from-trunk.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From d9f2c7df12a2e51ed78056e2bdc5714abf32390c Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Fri, 8 Feb 2019 09:01:42 +0000
-Subject: [PATCH 370/504] Merge of r1853133,r1853166 from trunk:
-
-mod_ssl: Don't unset FIPS mode on restart unless it's forced by
- configuration (SSLFIPS on) and not active by default in OpenSSL. PR 63136.
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1853197 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 +++-
- modules/ssl/mod_ssl.c | 3 ---
- modules/ssl/ssl_engine_init.c | 12 +++++++++++-
- 3 files changed, 14 insertions(+), 5 deletions(-)
-
-diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
-index 9fdf9e042e..4797c78bb9 100644
---- a/modules/ssl/mod_ssl.c
-+++ b/modules/ssl/mod_ssl.c
-@@ -331,9 +331,6 @@ static apr_status_t ssl_cleanup_pre_config(void *data)
- /*
- * Try to kill the internals of the SSL library.
- */
--#ifdef HAVE_FIPS
-- FIPS_mode_set(0);
--#endif
- /* Corresponds to OBJ_create()s */
- OBJ_cleanup();
- /* Corresponds to OPENSSL_load_builtin_modules() */
-diff --git a/modules/ssl/ssl_engine_init.c b/modules/ssl/ssl_engine_init.c
-index 18d18c691f..48d7b96cd8 100644
---- a/modules/ssl/ssl_engine_init.c
-+++ b/modules/ssl/ssl_engine_init.c
-@@ -183,6 +183,14 @@ int ssl_is_challenge(conn_rec *c, const char *servername,
- return 0;
- }
-
-+#ifdef HAVE_FIPS
-+static apr_status_t modssl_fips_cleanup(void *data)
-+{
-+ FIPS_mode_set(0);
-+ return APR_SUCCESS;
-+}
-+#endif
-+
- /*
- * Per-module initialization
- */
-@@ -311,11 +319,13 @@ apr_status_t ssl_init_Module(apr_pool_t *p, apr_pool_t *plog,
- ssl_rand_seed(base_server, ptemp, SSL_RSCTX_STARTUP, "Init: ");
-
- #ifdef HAVE_FIPS
-- if(sc->fips) {
-+ if (sc->fips) {
- if (!FIPS_mode()) {
- if (FIPS_mode_set(1)) {
- ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, s, APLOGNO(01884)
- "Operating in SSL FIPS mode");
-+ apr_pool_cleanup_register(p, NULL, modssl_fips_cleanup,
-+ apr_pool_cleanup_null);
- }
- else {
- ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(01885) "FIPS mode failed");
---
-2.19.1
-
diff --git a/Merge-r1418761-r1418765-r1510295-r1757147-r1805163-r.patch b/Merge-r1418761-r1418765-r1510295-r1757147-r1805163-r.patch
deleted file mode 100644
index 4e2ae487c6e700ae5f96e9d08a330606e4153ef2..0000000000000000000000000000000000000000
--- a/Merge-r1418761-r1418765-r1510295-r1757147-r1805163-r.patch
+++ /dev/null
@@ -1,397 +0,0 @@
-From fd0648f226ecbc38917ccb076a2147e206d73c8b Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Wed, 15 Aug 2018 15:01:08 +0000
-Subject: [PATCH 075/504] Merge r1418761, r1418765, r1510295, r1757147,
- r1805163, r1818924, r1827374, r1831772, r1832351, r1832951, r1815004 from
- trunk:
-
-Don't claim "BIO dump follows" if it is not logged due to log level config.
-
-
-make ssl_io_data_dump respect per-conn loglevel
-
-
-add high trace level log messages for debugging buffering and write completion
-
-
-* modules/ssl/ssl_engine_kernel.c (ssl_callback_SessionTicket): Fail
- if RAND_bytes() fails; possible per API, although not in practice
- with the OpenSSL implementation.
-
-
-Fix typo in log message.
-
-
-ap_add_common_vars(): use apr_pstrmemdup().
-
-This avoids a transient replacement/restore of '?' by '\0' in r->filename.
-
-
-Use 'ap_request_has_body()' instead of duplicating its implemenation.
-
-The logic in 'ap_request_has_body()' is:
- has_body = (!r->header_only
- && (r->kept_body
- || apr_table_get(r->headers_in, "Transfer-Encoding")
- || ( (cls = apr_table_get(r->headers_in, "Content-Length"))
- && (apr_strtoff(&cl, cls, &estr, 10) == APR_SUCCESS)
- && (!*estr)
- && (cl > 0) )
- )
- );
-So the test is slighly different from the original code. (but this looks fine to me)
-
-This also has the advantage to avoid a redundant call to 'apr_table_get()' and to improve readability.
-
-While at it, move the test '!r->expecting_100' a few lines above because it is cheap.
-
-PR62368: Print the unparsed URI in AH03454
-
-... to include r->args and get otherwise get as close to possible to
-what came in over the wire.
-
-Submitted By: Hank Ibell
-Committed By: covener
-
-
-
-
-All error handling paths of this function call 'apr_brigade_destroy()' , except this one.
-So add it here too.
-
-Probably spotted with the help of the Coccinelle software (Thx Julia for the patch and for Coccinelle)
-
-See PR 53016
-
-* modules/proxy/proxy_util.c (ap_proxy_share_worker): Skip creating subpool
- for debugging unless debug-level logging is enabled. No functional change.
-
-
-mod_watchdog: Correct some log messages and fix
-compiler warning
-"'rv' may be used uninitialized in this function".
-
-Follow up to r1722154.
-
-Submitted by: sf, jorton, jorton, ylavic, jailletc36, covener, jailletc36, jorton, rjung
-Reviewed by: jailletc36, jim, jorton
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1838103 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 2 ++
- STATUS | 26 ---------------------
- modules/core/mod_watchdog.c | 10 +++++----
- modules/proxy/mod_proxy_ajp.c | 1 +
- modules/proxy/proxy_util.c | 3 ++-
- modules/ssl/ssl_engine_io.c | 31 ++++++++++++++++---------
- modules/ssl/ssl_engine_kernel.c | 12 +++++-----
- server/core_filters.c | 40 ++++++++++++++++++++++++++++++---
- server/protocol.c | 2 +-
- server/util_script.c | 5 ++---
- 10 files changed, 77 insertions(+), 55 deletions(-)
-
-diff --git a/modules/core/mod_watchdog.c b/modules/core/mod_watchdog.c
-index b6deaba306..61f4675252 100644
---- a/modules/core/mod_watchdog.c
-+++ b/modules/core/mod_watchdog.c
-@@ -534,11 +534,13 @@ static int wd_post_config_hook(apr_pool_t *pconf, apr_pool_t *plog,
- w->name, s,
- wd_server_conf->pool, 0);
- if (rv != APR_SUCCESS) {
-+ ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(10095)
-+ "Watchdog: Failed to create singleton mutex.");
- return rv;
- }
-+ ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(02979)
-+ "Watchdog: Created singleton mutex (%s).", w->name);
- }
-- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(02979)
-- "Watchdog: Created child worker thread (%s).", w->name);
- wd_server_conf->child_workers++;
- }
- }
-@@ -580,12 +582,12 @@ static void wd_child_init_hook(apr_pool_t *p, server_rec *s)
- */
- if ((rv = wd_startup(w, wd_server_conf->pool)) != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_CRIT, rv, s, APLOGNO(01573)
-- "Watchdog: Failed to create worker thread.");
-+ "Watchdog: Failed to create child worker thread.");
- /* No point to continue */
- return;
- }
- ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, s, APLOGNO(02981)
-- "Watchdog: Created worker thread (%s).", wn[i].provider_name);
-+ "Watchdog: Created child worker thread (%s).", wn[i].provider_name);
- }
- }
- }
-diff --git a/modules/proxy/mod_proxy_ajp.c b/modules/proxy/mod_proxy_ajp.c
-index 8669db6308..73716aff51 100644
---- a/modules/proxy/mod_proxy_ajp.c
-+++ b/modules/proxy/mod_proxy_ajp.c
-@@ -322,6 +322,7 @@ static int ap_proxy_ajp_request(apr_pool_t *p, request_rec *r,
- * Close it to clean things up.
- */
- conn->close = 1;
-+ apr_brigade_destroy(input_brigade);
- return HTTP_BAD_REQUEST;
- }
- }
-diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
-index 7b76144ba7..2bd0edbfbc 100644
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -1879,7 +1879,8 @@ PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_wo
- }
- worker->s = shm;
- worker->s->index = i;
-- {
-+
-+ if (APLOGdebug(ap_server_conf)) {
- apr_pool_t *pool;
- apr_pool_create(&pool, ap_server_conf->process->pool);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(02338)
-diff --git a/modules/ssl/ssl_engine_io.c b/modules/ssl/ssl_engine_io.c
-index 1a47b0e982..d52d5e30ca 100644
---- a/modules/ssl/ssl_engine_io.c
-+++ b/modules/ssl/ssl_engine_io.c
-@@ -877,6 +877,8 @@ static apr_status_t ssl_filter_write(ap_filter_t *f,
- */
- outctx->c->cs->sense = CONN_SENSE_WANT_READ;
- outctx->rc = APR_EAGAIN;
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, outctx->c,
-+ "Want read during nonblocking write");
- }
- else if (ssl_err == SSL_ERROR_SYSCALL) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, outctx->rc, c, APLOGNO(01993)
-@@ -2071,6 +2073,8 @@ void ssl_io_filter_init(conn_rec *c, request_rec *r, SSL *ssl)
- /* write is non blocking for the benefit of async mpm */
- if (c->cs) {
- BIO_set_nbio(filter_ctx->pbioWrite, 1);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, c,
-+ "Enabling non-blocking writes");
- }
-
- ssl_io_input_add_filter(filter_ctx, c, r, ssl);
-@@ -2114,9 +2118,8 @@ void ssl_io_filter_register(apr_pool_t *p)
-
- #define DUMP_WIDTH 16
-
--static void ssl_io_data_dump(server_rec *s,
-- const char *b,
-- long len)
-+static void ssl_io_data_dump(conn_rec *c, server_rec *s,
-+ const char *b, long len)
- {
- char buf[256];
- char tmp[64];
-@@ -2129,7 +2132,7 @@ static void ssl_io_data_dump(server_rec *s,
- rows = (len / DUMP_WIDTH);
- if ((rows * DUMP_WIDTH) < len)
- rows++;
-- ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, s,
-+ ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s,
- "+-------------------------------------------------------------------------+");
- for(i = 0 ; i< rows; i++) {
- #if APR_CHARSET_EBCDIC
-@@ -2168,12 +2171,12 @@ static void ssl_io_data_dump(server_rec *s,
- }
- }
- apr_cpystrn(buf+strlen(buf), " |", sizeof(buf)-strlen(buf));
-- ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, s, "%s", buf);
-+ ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s, "%s", buf);
- }
- if (trunc > 0)
-- ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, s,
-+ ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s,
- "| %04ld - ", len + trunc);
-- ap_log_error(APLOG_MARK, APLOG_TRACE7, 0, s,
-+ ap_log_cserror(APLOG_MARK, APLOG_TRACE7, 0, c, s,
- "+-------------------------------------------------------------------------+");
- return;
- }
-@@ -2195,15 +2198,21 @@ long ssl_io_data_cb(BIO *bio, int cmd,
- if ( cmd == (BIO_CB_WRITE|BIO_CB_RETURN)
- || cmd == (BIO_CB_READ |BIO_CB_RETURN) ) {
- if (rc >= 0) {
-+ const char *dump = "";
-+ if (APLOG_CS_IS_LEVEL(c, s, APLOG_TRACE7)) {
-+ if (argp != NULL)
-+ dump = "(BIO dump follows)";
-+ else
-+ dump = "(Oops, no memory buffer?)";
-+ }
- ap_log_cserror(APLOG_MARK, APLOG_TRACE4, 0, c, s,
- "%s: %s %ld/%d bytes %s BIO#%pp [mem: %pp] %s",
- MODSSL_LIBRARY_NAME,
- (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "write" : "read"),
- rc, argi, (cmd == (BIO_CB_WRITE|BIO_CB_RETURN) ? "to" : "from"),
-- bio, argp,
-- (argp != NULL ? "(BIO dump follows)" : "(Oops, no memory buffer?)"));
-- if ((argp != NULL) && APLOG_CS_IS_LEVEL(c, s, APLOG_TRACE7))
-- ssl_io_data_dump(s, argp, rc);
-+ bio, argp, dump);
-+ if (*dump != '\0' && argp != NULL)
-+ ssl_io_data_dump(c, s, argp, rc);
- }
- else {
- ap_log_cserror(APLOG_MARK, APLOG_TRACE4, 0, c, s,
-diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c
-index e34fc55fa2..9a89ff9da8 100644
---- a/modules/ssl/ssl_engine_kernel.c
-+++ b/modules/ssl/ssl_engine_kernel.c
-@@ -839,10 +839,8 @@ int ssl_hook_Access(request_rec *r)
- * request body, and then to reinject that request body later.
- */
- if (renegotiate && !renegotiate_quick
-- && (apr_table_get(r->headers_in, "transfer-encoding")
-- || (apr_table_get(r->headers_in, "content-length")
-- && strcmp(apr_table_get(r->headers_in, "content-length"), "0")))
-- && !r->expecting_100) {
-+ && !r->expecting_100
-+ && ap_request_has_body(r)) {
- int rv;
- apr_size_t rsize;
-
-@@ -2162,7 +2160,7 @@ static apr_status_t init_vhost(conn_rec *c, SSL *ssl)
-
- if (SSL_check_private_key(ssl) < 1) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c, APLOGNO(10088)
-- "Challenbge certificate and private key %s "
-+ "Challenge certificate and private key %s "
- "do not match", servername);
- return APR_EGENERAL;
- }
-@@ -2334,7 +2332,9 @@ int ssl_callback_SessionTicket(SSL *ssl,
- }
-
- memcpy(keyname, ticket_key->key_name, 16);
-- RAND_bytes(iv, EVP_MAX_IV_LENGTH);
-+ if (RAND_bytes(iv, EVP_MAX_IV_LENGTH) != 1) {
-+ return -1;
-+ }
- EVP_EncryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL,
- ticket_key->aes_key, iv);
- HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL);
-diff --git a/server/core_filters.c b/server/core_filters.c
-index ddc2ff7f0f..a6c2bd666b 100644
---- a/server/core_filters.c
-+++ b/server/core_filters.c
-@@ -378,6 +378,7 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- apr_size_t bytes_in_brigade, non_file_bytes_in_brigade;
- int eor_buckets_in_brigade, morphing_bucket_in_brigade;
- apr_status_t rv;
-+ int loglevel = ap_get_conn_module_loglevel(c, APLOG_MODULE_INDEX);
-
- /* Fail quickly if the connection has already been aborted. */
- if (c->aborted) {
-@@ -513,7 +514,7 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- || eor_buckets_in_brigade > MAX_REQUESTS_IN_PIPELINE) {
- /* this segment of the brigade MUST be sent before returning. */
-
-- if (APLOGctrace6(c)) {
-+ if (loglevel >= APLOG_TRACE6) {
- char *reason = APR_BUCKET_IS_FLUSH(bucket) ?
- "FLUSH bucket" :
- (non_file_bytes_in_brigade >= THRESHOLD_MAX_BUFFER) ?
-@@ -521,8 +522,17 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- morphing_bucket_in_brigade ? "morphing bucket" :
- "MAX_REQUESTS_IN_PIPELINE";
- ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, c,
-- "core_output_filter: flushing because of %s",
-- reason);
-+ "will flush because of %s", reason);
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
-+ "seen in brigade%s: bytes: %" APR_SIZE_T_FMT
-+ ", non-file bytes: %" APR_SIZE_T_FMT ", eor "
-+ "buckets: %d, morphing buckets: %d",
-+ flush_upto == NULL ? " so far"
-+ : " since last flush point",
-+ bytes_in_brigade,
-+ non_file_bytes_in_brigade,
-+ eor_buckets_in_brigade,
-+ morphing_bucket_in_brigade);
- }
- /*
- * Defer the actual blocking write to avoid doing many writes.
-@@ -539,6 +549,10 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- if (flush_upto != NULL) {
- ctx->tmp_flush_bb = apr_brigade_split_ex(bb, flush_upto,
- ctx->tmp_flush_bb);
-+ if (loglevel >= APLOG_TRACE8) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
-+ "flushing now");
-+ }
- rv = send_brigade_blocking(net->client_socket, bb,
- &(ctx->bytes_written), c);
- if (rv != APR_SUCCESS) {
-@@ -549,9 +563,23 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- c->aborted = 1;
- return rv;
- }
-+ if (loglevel >= APLOG_TRACE8) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
-+ "total bytes written: %" APR_SIZE_T_FMT,
-+ ctx->bytes_written);
-+ }
- APR_BRIGADE_CONCAT(bb, ctx->tmp_flush_bb);
- }
-
-+ if (loglevel >= APLOG_TRACE8) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
-+ "brigade contains: bytes: %" APR_SIZE_T_FMT
-+ ", non-file bytes: %" APR_SIZE_T_FMT
-+ ", eor buckets: %d, morphing buckets: %d",
-+ bytes_in_brigade, non_file_bytes_in_brigade,
-+ eor_buckets_in_brigade, morphing_bucket_in_brigade);
-+ }
-+
- if (bytes_in_brigade >= THRESHOLD_MIN_WRITE) {
- rv = send_brigade_nonblocking(net->client_socket, bb,
- &(ctx->bytes_written), c);
-@@ -563,6 +591,12 @@ apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *new_bb)
- c->aborted = 1;
- return rv;
- }
-+ if (loglevel >= APLOG_TRACE8) {
-+ ap_log_cerror(APLOG_MARK, APLOG_TRACE8, 0, c,
-+ "tried nonblocking write, total bytes "
-+ "written: %" APR_SIZE_T_FMT,
-+ ctx->bytes_written);
-+ }
- }
-
- setaside_remaining_output(f, ctx, bb, c);
-diff --git a/server/protocol.c b/server/protocol.c
-index 708160f30b..2ca6b124a8 100644
---- a/server/protocol.c
-+++ b/server/protocol.c
-@@ -894,7 +894,7 @@ rrl_done:
- else if (deferred_error == rrl_baduri)
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03454)
- "HTTP Request Line; URI incorrectly encoded: '%.*s'",
-- field_name_len(r->uri), r->uri);
-+ field_name_len(r->unparsed_uri), r->unparsed_uri);
- else if (deferred_error == rrl_badwhitespace)
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03447)
- "HTTP Request Line; Invalid whitespace");
-diff --git a/server/util_script.c b/server/util_script.c
-index 4121ae0aec..599ba58e71 100644
---- a/server/util_script.c
-+++ b/server/util_script.c
-@@ -260,9 +260,8 @@ AP_DECLARE(void) ap_add_common_vars(request_rec *r)
- apr_table_addn(e, "CONTEXT_DOCUMENT_ROOT", ap_context_document_root(r));
- apr_table_addn(e, "SERVER_ADMIN", s->server_admin); /* Apache */
- if (apr_table_get(r->notes, "proxy-noquery") && (q = ap_strchr(r->filename, '?'))) {
-- *q = '\0';
-- apr_table_addn(e, "SCRIPT_FILENAME", apr_pstrdup(r->pool, r->filename));
-- *q = '?';
-+ char *script_filename = apr_pstrmemdup(r->pool, r->filename, q - r->filename);
-+ apr_table_addn(e, "SCRIPT_FILENAME", script_filename);
- }
- else {
- apr_table_addn(e, "SCRIPT_FILENAME", r->filename); /* Apache */
---
-2.19.1
-
diff --git a/Merge-r1831773-from-trunk.patch b/Merge-r1831773-from-trunk.patch
deleted file mode 100644
index 2d4608c9044327c57e1331e07a338ece36caeb33..0000000000000000000000000000000000000000
--- a/Merge-r1831773-from-trunk.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 5e6fa5d12a569b7b780139c30542a49c78993bce Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Tue, 11 Dec 2018 14:09:11 +0000
-Subject: [PATCH 293/504] Merge r1831773 from trunk:
-
-PR62311: only create the rewritelock when needed
-
-Submitted By: Hank Ibell
-Committed By: covener
-
-
-
-Submitted by: covener
-Reviewed by: jailletc36, icing (by inspection), covener
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1848681 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- STATUS | 5 -----
- modules/mappers/mod_rewrite.c | 21 ++++++++++++---------
- 3 files changed, 16 insertions(+), 14 deletions(-)
-
-diff --git a/modules/mappers/mod_rewrite.c b/modules/mappers/mod_rewrite.c
-index fb897a9760..68a33b68a5 100644
---- a/modules/mappers/mod_rewrite.c
-+++ b/modules/mappers/mod_rewrite.c
-@@ -416,6 +416,7 @@ static cache *cachep;
- static int proxy_available;
-
- /* Locks/Mutexes */
-+static int rewrite_lock_needed = 0;
- static apr_global_mutex_t *rewrite_mapr_lock_acquire = NULL;
- static const char *rewritemap_mutex_type = "rewrite-map";
-
-@@ -2687,9 +2688,6 @@ static apr_status_t rewritelock_create(server_rec *s, apr_pool_t *p)
- apr_status_t rc;
-
- /* create the lockfile */
-- /* XXX See if there are any rewrite map programs before creating
-- * the mutex.
-- */
- rc = ap_global_mutex_create(&rewrite_mapr_lock_acquire, NULL,
- rewritemap_mutex_type, NULL, s, p, 0);
- if (rc != APR_SUCCESS) {
-@@ -3163,6 +3161,8 @@ static const char *cmd_rewritemap(cmd_parms *cmd, void *dconf, const char *a1,
-
- newmap->type = MAPTYPE_PRG;
- newmap->checkfile = newmap->argv[0];
-+ rewrite_lock_needed = 1;
-+
- if (a3) {
- char *tok_cntx;
- newmap->user = apr_strtok(apr_pstrdup(cmd->pool, a3), ":", &tok_cntx);
-@@ -4469,6 +4469,7 @@ static int pre_config(apr_pool_t *pconf,
- {
- APR_OPTIONAL_FN_TYPE(ap_register_rewrite_mapfunc) *map_pfn_register;
-
-+ rewrite_lock_needed = 0;
- ap_mutex_register(pconf, rewritemap_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
-
- /* register int: rewritemap handlers */
-@@ -4494,13 +4495,15 @@ static int post_config(apr_pool_t *p,
- /* check if proxy module is available */
- proxy_available = (ap_find_linked_module("mod_proxy.c") != NULL);
-
-- rv = rewritelock_create(s, p);
-- if (rv != APR_SUCCESS) {
-- return HTTP_INTERNAL_SERVER_ERROR;
-- }
-+ if (rewrite_lock_needed) {
-+ rv = rewritelock_create(s, p);
-+ if (rv != APR_SUCCESS) {
-+ return HTTP_INTERNAL_SERVER_ERROR;
-+ }
-
-- apr_pool_cleanup_register(p, (void *)s, rewritelock_remove,
-- apr_pool_cleanup_null);
-+ apr_pool_cleanup_register(p, (void *)s, rewritelock_remove,
-+ apr_pool_cleanup_null);
-+ }
-
- /* if we are not doing the initial config, step through the servers and
- * open the RewriteMap prg:xxx programs,
---
-2.19.1
-
diff --git a/Merge-r1837130-from-trunk.patch b/Merge-r1837130-from-trunk.patch
deleted file mode 100644
index aa9bf0b0c137d47f90080817802e0f08e7e0ae38..0000000000000000000000000000000000000000
--- a/Merge-r1837130-from-trunk.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From ae583b572b6ce34e5fdeb92c88fde44d18db2db7 Mon Sep 17 00:00:00 2001
-From: Yann Ylavic
-Date: Tue, 28 Aug 2018 20:07:07 +0000
-Subject: [PATCH 102/504] Merge r1837130 from trunk:
-
-mod_ratelimit: Don't interfere with "chunked" encoding.
-
-By the time ap_http_header_filter() sends the header brigade and adds the
-"CHUNK" filter, we need to garantee that the header went through all the
-filters' stack, and more specifically above ap_http_chunk_filter() which
-assumes that all it receives is content data.
-Since rate_limit_filter() may retain the header brigade, make it run after
-ap_http_chunk_filter(), just before AP_FTYPE_CONNECTION filters.
-
-Also, ap_http_header_filter() shouldn't eat the EOS for HEAD/no-body responses.
-For instance mod_ratelimit depends on it since r1835168, but any next request
-filter may as well to flush and/or bail out approprietely.
-
-This fixes the regression introduced in 2.4.34 (r1835168).
-PR 62568.
-
-Submitted by: ylavic
-Reviewed by: covener, ylavic, jim
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1839497 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 3 +++
- STATUS | 7 -------
- modules/filters/mod_ratelimit.c | 3 +--
- modules/http/chunk_filter.c | 3 ++-
- modules/http/http_filters.c | 13 ++++++++++++-
- 5 files changed, 18 insertions(+), 11 deletions(-)
-
-diff --git a/modules/filters/mod_ratelimit.c b/modules/filters/mod_ratelimit.c
-index cf79973120..d16eb39059 100644
---- a/modules/filters/mod_ratelimit.c
-+++ b/modules/filters/mod_ratelimit.c
-@@ -65,7 +65,6 @@ rate_limit_filter(ap_filter_t *f, apr_bucket_brigade *bb)
-
- /* Set up our rl_ctx_t on first use */
- if (ctx == NULL) {
--
- const char *rl = NULL;
- int ratelimit;
- int burst = 0;
-@@ -327,7 +326,7 @@ static void register_hooks(apr_pool_t *p)
- {
- /* run after mod_deflate etc etc, but not at connection level, ie, mod_ssl. */
- ap_register_output_filter(RATE_LIMIT_FILTER_NAME, rate_limit_filter,
-- NULL, AP_FTYPE_PROTOCOL + 3);
-+ NULL, AP_FTYPE_CONNECTION - 1);
- }
-
- AP_DECLARE_MODULE(ratelimit) = {
-diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
-index 17fbabdb0a..cb1501aebf 100644
---- a/modules/http/chunk_filter.c
-+++ b/modules/http/chunk_filter.c
-@@ -69,6 +69,7 @@ apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
- {
- if (APR_BUCKET_IS_EOS(e)) {
- /* there shouldn't be anything after the eos */
-+ ap_remove_output_filter(f);
- eos = e;
- break;
- }
-@@ -186,11 +187,11 @@ apr_status_t ap_http_chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
-
- /* pass the brigade to the next filter. */
- rv = ap_pass_brigade(f->next, b);
-+ apr_brigade_cleanup(b);
- if (rv != APR_SUCCESS || eos != NULL) {
- return rv;
- }
- tmp = b;
-- apr_brigade_cleanup(tmp);
- }
- return APR_SUCCESS;
- }
-diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
-index 5fa40635d8..37c0113e5b 100644
---- a/modules/http/http_filters.c
-+++ b/modules/http/http_filters.c
-@@ -1308,8 +1308,19 @@ AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
- else if (ctx->headers_sent) {
- /* Eat body if response must not have one. */
- if (r->header_only || r->status == HTTP_NO_CONTENT) {
-+ /* Still next filters may be waiting for EOS, so pass it (alone)
-+ * when encountered and be done with this filter.
-+ */
-+ e = APR_BRIGADE_LAST(b);
-+ if (e != APR_BRIGADE_SENTINEL(b) && APR_BUCKET_IS_EOS(e)) {
-+ APR_BUCKET_REMOVE(e);
-+ apr_brigade_cleanup(b);
-+ APR_BRIGADE_INSERT_HEAD(b, e);
-+ ap_remove_output_filter(f);
-+ rv = ap_pass_brigade(f->next, b);
-+ }
- apr_brigade_cleanup(b);
-- return APR_SUCCESS;
-+ return rv;
- }
- }
-
---
-2.19.1
-
diff --git a/Merge-r1837250-from-trunk.patch b/Merge-r1837250-from-trunk.patch
deleted file mode 100644
index 9ef0da01b77186dbc2cdaa807aee5bdec86192b5..0000000000000000000000000000000000000000
--- a/Merge-r1837250-from-trunk.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 657d20fe0dbe55a793ca48fb6f58ee497d241934 Mon Sep 17 00:00:00 2001
-From: Jim Jagielski
-Date: Wed, 7 Nov 2018 15:18:42 +0000
-Subject: [PATCH 261/504] Merge r1837250 from trunk:
-
-If ProxyPassReverse is used for reverse mapping of relative redirects, subsequent ProxyPassReverse statements, whether they are relative or absolute, may fail.
-
-PR 60408 [Peter Haworth ]
-Submitted by: jailletc36
-Reviewed by: jailletc36, rpluem, jim
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1846044 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- STATUS | 6 ------
- modules/proxy/proxy_util.c | 8 ++++++--
- 3 files changed, 10 insertions(+), 8 deletions(-)
-
-diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
-index 6501c68064..cbf8826777 100644
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -837,7 +837,7 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
- {
- proxy_req_conf *rconf;
- struct proxy_alias *ent;
-- int i, l1, l2;
-+ int i, l1, l1_orig, l2;
- char *u;
-
- /*
-@@ -849,7 +849,7 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
- return url;
- }
-
-- l1 = strlen(url);
-+ l1_orig = strlen(url);
- if (conf->interpolate_env == 1) {
- rconf = ap_get_module_config(r->request_config, &proxy_module);
- ent = (struct proxy_alias *)rconf->raliases->elts;
-@@ -862,6 +862,10 @@ PROXY_DECLARE(const char *) ap_proxy_location_reverse_map(request_rec *r,
- ap_get_module_config(r->server->module_config, &proxy_module);
- proxy_balancer *balancer;
- const char *real = ent[i].real;
-+
-+ /* Restore the url length, if it had been changed by the code below */
-+ l1 = l1_orig;
-+
- /*
- * First check if mapping against a balancer and see
- * if we have such a entity. If so, then we need to
---
-2.19.1
-
diff --git a/Merge-r1842540-from-trunk.patch b/Merge-r1842540-from-trunk.patch
deleted file mode 100644
index b66d9f5af5194c38c26b050d93b6ae8b436dca46..0000000000000000000000000000000000000000
--- a/Merge-r1842540-from-trunk.patch
+++ /dev/null
@@ -1,75 +0,0 @@
-From 5c1995151ab80cd71bc845bd288b90fd55665e2e Mon Sep 17 00:00:00 2001
-From: Eric Covener
-Date: Tue, 9 Oct 2018 23:26:35 +0000
-Subject: [PATCH 209/504] Merge r1842540 from trunk:
-
-* Pickup the proxy related configuration for verify mode and verify depth and
- not the configuration settings for frontend connections in case of
- connections by the proxy to the backend.
-
-PR: 62769
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1843370 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- STATUS | 9 ---------
- modules/ssl/ssl_engine_kernel.c | 25 ++++++++++++++++++-------
- 3 files changed, 22 insertions(+), 16 deletions(-)
-
-diff --git a/modules/ssl/ssl_engine_kernel.c b/modules/ssl/ssl_engine_kernel.c
-index d576a298ec..6cd0da527f 100644
---- a/modules/ssl/ssl_engine_kernel.c
-+++ b/modules/ssl/ssl_engine_kernel.c
-@@ -1740,7 +1740,8 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
- /* Get verify ingredients */
- int errnum = X509_STORE_CTX_get_error(ctx);
- int errdepth = X509_STORE_CTX_get_error_depth(ctx);
-- int depth, verify;
-+ int depth = UNSET;
-+ int verify = SSL_CVERIFY_UNSET;
-
- /*
- * Log verification information
-@@ -1756,10 +1757,15 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
- /*
- * Check for optionally acceptable non-verifiable issuer situation
- */
-- if (dc && (dc->nVerifyClient != SSL_CVERIFY_UNSET)) {
-- verify = dc->nVerifyClient;
-+ if (dc) {
-+ if (sslconn->is_proxy) {
-+ verify = dc->proxy->auth.verify_mode;
-+ }
-+ else {
-+ verify = dc->nVerifyClient;
-+ }
- }
-- else {
-+ if (!dc || (verify == SSL_CVERIFY_UNSET)) {
- verify = mctx->auth.verify_mode;
- }
-
-@@ -1863,10 +1869,15 @@ int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
- /*
- * Finally check the depth of the certificate verification
- */
-- if (dc && (dc->nVerifyDepth != UNSET)) {
-- depth = dc->nVerifyDepth;
-+ if (dc) {
-+ if (sslconn->is_proxy) {
-+ depth = dc->proxy->auth.verify_depth;
-+ }
-+ else {
-+ depth = dc->nVerifyDepth;
-+ }
- }
-- else {
-+ if (!dc || (depth == UNSET)) {
- depth = mctx->auth.verify_depth;
- }
-
---
-2.19.1
-
diff --git a/Merge-r1851093-from-trunk1.patch b/Merge-r1851093-from-trunk1.patch
deleted file mode 100644
index 701603a25b82f97d7c528cea34e6747ea0cde52a..0000000000000000000000000000000000000000
--- a/Merge-r1851093-from-trunk1.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From b34ce8ef855714a8a90e4e97cd7f2b8f8027c7fe Mon Sep 17 00:00:00 2001
-From: Christophe Jaillet
-Date: Fri, 15 Feb 2019 15:57:51 +0000
-Subject: [PATCH 390/504] Merge r1851093 from trunk
-
- * mod_proxy_wstunnel: Fix websocket proxy over UDS.
-
-PR: 62932
-Submitted by:
-Reviewed by: jailletc36 (by inspection), jim, ylavic
-Backported by: jailletc36
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1853653 13f79535-47bb-0310-9956-ffa450edef68
----
-diff --git a/modules/proxy/mod_proxy_wstunnel.c b/modules/proxy/mod_proxy_wstunnel.c
-index 9dda010dbc..1056d5cb72 100644
---- a/modules/proxy/mod_proxy_wstunnel.c
-+++ b/modules/proxy/mod_proxy_wstunnel.c
-@@ -77,7 +77,10 @@ static int proxy_wstunnel_canon(request_rec *r, char *url)
- if (path == NULL)
- return HTTP_BAD_REQUEST;
-
-- apr_snprintf(sport, sizeof(sport), ":%d", port);
-+ if (port != def_port)
-+ apr_snprintf(sport, sizeof(sport), ":%d", port);
-+ else
-+ sport[0] = '\0';
-
- if (ap_strchr_c(host, ':')) {
- /* if literal IPv6 address */
---
-2.19.1
-
diff --git a/Merge-r1851093-from-trunk2.patch b/Merge-r1851093-from-trunk2.patch
deleted file mode 100644
index c3b18a77b83971cabc12a223fe3b7fd04b4d324e..0000000000000000000000000000000000000000
--- a/Merge-r1851093-from-trunk2.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From 7e6e9c0a7b9653af14ddce21c9cebf9765c70823 Mon Sep 17 00:00:00 2001
-From: Christophe Jaillet
-Date: Fri, 15 Feb 2019 16:06:24 +0000
-Subject: [PATCH 391/504] Merge r1851093 from trunk
-
- * mod_proxy_wstunnel: Fix websocket proxy over UDS.
-
-PR: 62932
-Submitted by:
-Reviewed by: jailletc36 (by inspection), jim, ylavic
-Backported by: jailletc36
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1853654 13f79535-47bb-0310-9956-ffa450edef68
----
- modules/aaa/mod_authn_dbm.c | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/modules/aaa/mod_authn_dbm.c b/modules/aaa/mod_authn_dbm.c
-index f4fb73672e..6eae9e052a 100644
---- a/modules/aaa/mod_authn_dbm.c
-+++ b/modules/aaa/mod_authn_dbm.c
-@@ -102,7 +102,11 @@ static apr_status_t fetch_dbm_value(const char *dbmtype, const char *dbmfile,
-
- apr_dbm_close(f);
-
-- return rv;
-+ /* NOT FOUND is not an error case; this is indicated by a NULL result.
-+ * Treat all NULL lookup/error results as success for the simple case
-+ * of auth credential lookup, these are DECLINED in both cases.
-+ */
-+ return APR_SUCCESS;
- }
-
- static authn_status check_dbm_pw(request_rec *r, const char *user,
---
-2.19.1
-
diff --git a/Merge-r1853190-from-trunk.patch b/Merge-r1853190-from-trunk.patch
deleted file mode 100644
index 086073e5408e18b1818530044bd5df404faaece4..0000000000000000000000000000000000000000
--- a/Merge-r1853190-from-trunk.patch
+++ /dev/null
@@ -1,135 +0,0 @@
-From 44b3ddc560c490c60600998fa2bf59b142d08e05 Mon Sep 17 00:00:00 2001
-From: Joe Orton
-Date: Tue, 12 Mar 2019 09:24:26 +0000
-Subject: [PATCH 408/504] Merge r1853190 from trunk:
-
-Fix a race condition. Authentication with valid credentials could be
-refused in case of concurrent accesses from different users.
-
-PR: 63124
-Submitted by: Simon Kappel
-Reviewed by: jailletc36, icing, jorton
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855298 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- modules/aaa/mod_auth_digest.c | 26 ++++++++++++--------------
- 2 files changed, 16 insertions(+), 14 deletions(-)
-
-diff --git a/modules/aaa/mod_auth_digest.c b/modules/aaa/mod_auth_digest.c
-index a67f06986f..b76094114d 100644
---- a/modules/aaa/mod_auth_digest.c
-+++ b/modules/aaa/mod_auth_digest.c
-@@ -92,7 +92,6 @@ typedef struct digest_config_struct {
- int check_nc;
- const char *algorithm;
- char *uri_list;
-- const char *ha1;
- } digest_config_rec;
-
-
-@@ -153,6 +152,7 @@ typedef struct digest_header_struct {
- apr_time_t nonce_time;
- enum hdr_sts auth_hdr_sts;
- int needed_auth;
-+ const char *ha1;
- client_entry *client;
- } digest_header_rec;
-
-@@ -1304,7 +1304,7 @@ static int hook_note_digest_auth_failure(request_rec *r, const char *auth_type)
- */
-
- static authn_status get_hash(request_rec *r, const char *user,
-- digest_config_rec *conf)
-+ digest_config_rec *conf, const char **rethash)
- {
- authn_status auth_result;
- char *password;
-@@ -1356,7 +1356,7 @@ static authn_status get_hash(request_rec *r, const char *user,
- } while (current_provider);
-
- if (auth_result == AUTH_USER_FOUND) {
-- conf->ha1 = password;
-+ *rethash = password;
- }
-
- return auth_result;
-@@ -1483,25 +1483,24 @@ static int check_nonce(request_rec *r, digest_header_rec *resp,
-
- /* RFC-2069 */
- static const char *old_digest(const request_rec *r,
-- const digest_header_rec *resp, const char *ha1)
-+ const digest_header_rec *resp)
- {
- const char *ha2;
-
- ha2 = ap_md5(r->pool, (unsigned char *)apr_pstrcat(r->pool, resp->method, ":",
- resp->uri, NULL));
- return ap_md5(r->pool,
-- (unsigned char *)apr_pstrcat(r->pool, ha1, ":", resp->nonce,
-- ":", ha2, NULL));
-+ (unsigned char *)apr_pstrcat(r->pool, resp->ha1, ":",
-+ resp->nonce, ":", ha2, NULL));
- }
-
- /* RFC-2617 */
- static const char *new_digest(const request_rec *r,
-- digest_header_rec *resp,
-- const digest_config_rec *conf)
-+ digest_header_rec *resp)
- {
- const char *ha1, *ha2, *a2;
-
-- ha1 = conf->ha1;
-+ ha1 = resp->ha1;
-
- a2 = apr_pstrcat(r->pool, resp->method, ":", resp->uri, NULL);
- ha2 = ap_md5(r->pool, (const unsigned char *)a2);
-@@ -1514,7 +1513,6 @@ static const char *new_digest(const request_rec *r,
- NULL));
- }
-
--
- static void copy_uri_components(apr_uri_t *dst,
- apr_uri_t *src, request_rec *r) {
- if (src->scheme && src->scheme[0] != '\0') {
-@@ -1759,7 +1757,7 @@ static int authenticate_digest_user(request_rec *r)
- return HTTP_UNAUTHORIZED;
- }
-
-- return_code = get_hash(r, r->user, conf);
-+ return_code = get_hash(r, r->user, conf, &resp->ha1);
-
- if (return_code == AUTH_USER_NOT_FOUND) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01790)
-@@ -1789,7 +1787,7 @@ static int authenticate_digest_user(request_rec *r)
-
- if (resp->message_qop == NULL) {
- /* old (rfc-2069) style digest */
-- if (strcmp(resp->digest, old_digest(r, resp, conf->ha1))) {
-+ if (strcmp(resp->digest, old_digest(r, resp))) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01792)
- "user %s: password mismatch: %s", r->user,
- r->uri);
-@@ -1819,7 +1817,7 @@ static int authenticate_digest_user(request_rec *r)
- return HTTP_UNAUTHORIZED;
- }
-
-- exp_digest = new_digest(r, resp, conf);
-+ exp_digest = new_digest(r, resp);
- if (!exp_digest) {
- /* we failed to allocate a client struct */
- return HTTP_INTERNAL_SERVER_ERROR;
-@@ -1903,7 +1901,7 @@ static int add_auth_info(request_rec *r)
-
- /* calculate rspauth attribute
- */
-- ha1 = conf->ha1;
-+ ha1 = resp->ha1;
-
- a2 = apr_pstrcat(r->pool, ":", resp->uri, NULL);
- ha2 = ap_md5(r->pool, (const unsigned char *)a2);
---
-2.19.1
-
diff --git a/Merge-r1855646-r1855748-from-trunk.patch b/Merge-r1855646-r1855748-from-trunk.patch
deleted file mode 100644
index 554b1d91c1300d98c71f83898f4485a350ca41a9..0000000000000000000000000000000000000000
--- a/Merge-r1855646-r1855748-from-trunk.patch
+++ /dev/null
@@ -1,143 +0,0 @@
-From 0dcd178c561f3293775a3d5953c764d64a5af233 Mon Sep 17 00:00:00 2001
-From: Joe Orton
-Date: Wed, 20 Mar 2019 15:50:44 +0000
-Subject: [PATCH 475/504] Merge r1855646, r1855748 from trunk:
-
-mod_proxy/ssl: cleanup per-request SSL configuration for recycled proxy conns.
-
-The SSL dir config of proxy/backend connections is stored in r->per_dir_config
-but those connections have a lifetime independent of the requests they handle.
-
-So we need to allow the external ssl_engine_set() function to reset mod_ssl's
-dir config in between proxy requests, or the first sslconn->dc could be used
-after free for the next requests.
-
-mod_proxy can then reset/reinit the request config when recycling its backend
-connections.
-
-* Solve a chicken and egg problem here:
- We need to have sslconn->dc set correctly when we want to
- init sslconn, but we need to allocate memory for it first.
-
-PR 63256.
-Submitted by: ylavic, rpluem
-Reviewed by: ylavic, jorton, jim
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1855918 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 4 ++++
- modules/proxy/proxy_util.c | 13 +++++++++++++
- modules/ssl/mod_ssl.c | 38 ++++++++++++++++++++++++--------------
- 3 files changed, 41 insertions(+), 14 deletions(-)
-
-diff --git a/modules/proxy/proxy_util.c b/modules/proxy/proxy_util.c
-index cbf8826777..b131ec07f6 100644
---- a/modules/proxy/proxy_util.c
-+++ b/modules/proxy/proxy_util.c
-@@ -1532,6 +1532,13 @@ static apr_status_t connection_cleanup(void *theconn)
- socket_cleanup(conn);
- conn->close = 0;
- }
-+ else if (conn->is_ssl) {
-+ /* Unbind/reset the SSL connection dir config (sslconn->dc) from
-+ * r->per_dir_config, r will likely get destroyed before this proxy
-+ * conn is reused.
-+ */
-+ ap_proxy_ssl_engine(conn->connection, worker->section_config, 1);
-+ }
-
- if (worker->s->hmax && worker->cp->res) {
- conn->inreslist = 1;
-@@ -3172,6 +3179,12 @@ static int proxy_connection_create(const char *proxy_function,
- apr_bucket_alloc_t *bucket_alloc;
-
- if (conn->connection) {
-+ if (conn->is_ssl) {
-+ /* on reuse, reinit the SSL connection dir config with the current
-+ * r->per_dir_config, the previous one was reset on release.
-+ */
-+ ap_proxy_ssl_engine(conn->connection, per_dir_config, 1);
-+ }
- return OK;
- }
-
-diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
-index 4797c78bb9..e857f50647 100644
---- a/modules/ssl/mod_ssl.c
-+++ b/modules/ssl/mod_ssl.c
-@@ -442,17 +442,20 @@ static int ssl_hook_pre_config(apr_pool_t *pconf,
- }
-
- static SSLConnRec *ssl_init_connection_ctx(conn_rec *c,
-- ap_conf_vector_t *per_dir_config)
-+ ap_conf_vector_t *per_dir_config,
-+ int new_proxy)
- {
- SSLConnRec *sslconn = myConnConfig(c);
-- SSLSrvConfigRec *sc;
-+ int need_setup = 0;
-
-- if (sslconn) {
-- return sslconn;
-+ if (!sslconn) {
-+ sslconn = apr_pcalloc(c->pool, sizeof(*sslconn));
-+ need_setup = 1;
- }
-
-- sslconn = apr_pcalloc(c->pool, sizeof(*sslconn));
--
-+ /* Reinit dc in any case because it may be r->per_dir_config scoped
-+ * and thus a caller like mod_proxy needs to update it per request.
-+ */
- if (per_dir_config) {
- sslconn->dc = ap_get_module_config(per_dir_config, &ssl_module);
- }
-@@ -461,12 +464,20 @@ static SSLConnRec *ssl_init_connection_ctx(conn_rec *c,
- &ssl_module);
- }
-
-- sslconn->server = c->base_server;
-- sslconn->verify_depth = UNSET;
-- sc = mySrvConfig(c->base_server);
-- sslconn->cipher_suite = sc->server->auth.cipher_suite;
-+ if (need_setup) {
-+ sslconn->server = c->base_server;
-+ sslconn->verify_depth = UNSET;
-+ if (new_proxy) {
-+ sslconn->is_proxy = 1;
-+ sslconn->cipher_suite = sslconn->dc->proxy->auth.cipher_suite;
-+ }
-+ else {
-+ SSLSrvConfigRec *sc = mySrvConfig(c->base_server);
-+ sslconn->cipher_suite = sc->server->auth.cipher_suite;
-+ }
-
-- myConnConfigSet(c, sslconn);
-+ myConnConfigSet(c, sslconn);
-+ }
-
- return sslconn;
- }
-@@ -507,8 +518,7 @@ static int ssl_engine_set(conn_rec *c,
- int status;
-
- if (proxy) {
-- sslconn = ssl_init_connection_ctx(c, per_dir_config);
-- sslconn->is_proxy = 1;
-+ sslconn = ssl_init_connection_ctx(c, per_dir_config, 1);
- }
- else {
- sslconn = myConnConfig(c);
-@@ -555,7 +565,7 @@ int ssl_init_ssl_connection(conn_rec *c, request_rec *r)
- /*
- * Create or retrieve SSL context
- */
-- sslconn = ssl_init_connection_ctx(c, r ? r->per_dir_config : NULL);
-+ sslconn = ssl_init_connection_ctx(c, r ? r->per_dir_config : NULL, 0);
- server = sslconn->server;
- sc = mySrvConfig(server);
-
---
-2.19.1
-
diff --git a/On-the-2.4.x-branch.patch b/On-the-2.4.x-branch.patch
deleted file mode 100644
index 53064c3b28d4c660c4a7b04319a6a312f4a747ab..0000000000000000000000000000000000000000
--- a/On-the-2.4.x-branch.patch
+++ /dev/null
@@ -1,178 +0,0 @@
-From c0457a9d97bc7b50cbc54f587fcca419d1a2ca2e Mon Sep 17 00:00:00 2001
-From: Stefan Eissing
-Date: Fri, 3 Aug 2018 10:54:47 +0000
-Subject: [PATCH 056/504] On the 2.4.x branch:
-
-backport of r1837357 from trunk.
- *) mod_md: When the last domain name from an MD is moved to another one,
- that now empty MD gets moved to the store archive. PR 62572.
-
-
-
-git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/branches/2.4.x@1837358 13f79535-47bb-0310-9956-ffa450edef68
----
- CHANGES | 3 +++
- modules/md/md_reg.c | 60 +++++++++++++++++++++++------------------
- modules/md/md_reg.h | 2 ++
- modules/md/md_version.h | 4 +--
- 4 files changed, 41 insertions(+), 28 deletions(-)
-
-diff --git a/modules/md/md_reg.c b/modules/md/md_reg.c
-index 459c15f157..233fea79d7 100644
---- a/modules/md/md_reg.c
-+++ b/modules/md/md_reg.c
-@@ -635,11 +635,10 @@ apr_status_t md_reg_creds_get(const md_creds_t **pcreds, md_reg_t *reg,
-
- typedef struct {
- apr_pool_t *p;
-- apr_array_header_t *conf_mds;
- apr_array_header_t *store_mds;
- } sync_ctx;
-
--static int find_changes(void *baton, md_store_t *store, md_t *md, apr_pool_t *ptemp)
-+static int do_add_md(void *baton, md_store_t *store, md_t *md, apr_pool_t *ptemp)
- {
- sync_ctx *ctx = baton;
-
-@@ -649,6 +648,18 @@ static int find_changes(void *baton, md_store_t *store, md_t *md, apr_pool_t *pt
- return 1;
- }
-
-+static apr_status_t read_store_mds(md_reg_t *reg, sync_ctx *ctx)
-+{
-+ int rv;
-+
-+ apr_array_clear(ctx->store_mds);
-+ rv = md_store_md_iter(do_add_md, ctx, reg->store, ctx->p, MD_SG_DOMAINS, "*");
-+ if (APR_STATUS_IS_ENOENT(rv)) {
-+ rv = APR_SUCCESS;
-+ }
-+ return rv;
-+}
-+
- apr_status_t md_reg_set_props(md_reg_t *reg, apr_pool_t *p, int can_http, int can_https)
- {
- if (reg->can_http != can_http || reg->can_https != can_https) {
-@@ -686,17 +697,11 @@ apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- apr_array_header_t *master_mds)
- {
- sync_ctx ctx;
-- md_store_t *store = reg->store;
- apr_status_t rv;
-
- ctx.p = ptemp;
-- ctx.conf_mds = master_mds;
-- ctx.store_mds = apr_array_make(ptemp, 100, sizeof(md_t *));
--
-- rv = md_store_md_iter(find_changes, &ctx, store, ptemp, MD_SG_DOMAINS, "*");
-- if (APR_STATUS_IS_ENOENT(rv)) {
-- rv = APR_SUCCESS;
-- }
-+ ctx.store_mds = apr_array_make(ptemp,100, sizeof(md_t *));
-+ rv = read_store_mds(reg, &ctx);
-
- md_log_perror(MD_LOG_MARK, MD_LOG_DEBUG, rv, p,
- "sync: found %d mds in store", ctx.store_mds->nelts);
-@@ -705,8 +710,8 @@ apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- md_t *md, *config_md, *smd, *omd;
- const char *common;
-
-- for (i = 0; i < ctx.conf_mds->nelts; ++i) {
-- md = APR_ARRAY_IDX(ctx.conf_mds, i, md_t *);
-+ for (i = 0; i < master_mds->nelts; ++i) {
-+ md = APR_ARRAY_IDX(master_mds, i, md_t *);
-
- /* find the store md that is closest match for the configured md */
- smd = md_find_closest_match(ctx.store_mds, md);
-@@ -734,7 +739,7 @@ apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- assert(common);
-
- /* Is this md still configured or has it been abandoned in the config? */
-- config_md = md_get_by_name(ctx.conf_mds, omd->name);
-+ config_md = md_get_by_name(master_mds, omd->name);
- if (config_md && md_contains(config_md, common, 0)) {
- /* domain used in two configured mds, not allowed */
- rv = APR_EINVAL;
-@@ -742,21 +747,19 @@ apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- "domain %s used in md %s and %s",
- common, md->name, omd->name);
- }
-- else if (config_md) {
-- /* domain stored in omd, but no longer has the offending domain,
-- remove it from the store md. */
-- omd->domains = md_array_str_remove(ptemp, omd->domains, common, 0);
-- rv = md_reg_update(reg, ptemp, omd->name, omd, MD_UPD_DOMAINS);
-- }
- else {
-- /* domain in a store md that is no longer configured, warn about it.
-- * Remove the domain here, so we can progress, but never save it. */
-+ /* remove it from the other md and update store, or, if it
-+ * is now empty, move it into the archive */
- omd->domains = md_array_str_remove(ptemp, omd->domains, common, 0);
-- md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p,
-- "domain %s, configured in md %s, is part of the stored md %s."
-- " That md however is no longer mentioned in the config. "
-- "If you longer want it, remove the md from the store.",
-- common, md->name, omd->name);
-+ if (apr_is_empty_array(omd->domains)) {
-+ md_log_perror(MD_LOG_MARK, MD_LOG_WARNING, rv, p,
-+ "All domains of the MD %s have moved elsewhere, "
-+ " moving it to the archive. ", omd->name);
-+ md_reg_remove(reg, ptemp, omd->name, 1); /* best effort */
-+ }
-+ else {
-+ rv = md_reg_update(reg, ptemp, omd->name, omd, MD_UPD_DOMAINS);
-+ }
- }
- }
-
-@@ -841,6 +844,11 @@ apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- return rv;
- }
-
-+apr_status_t md_reg_remove(md_reg_t *reg, apr_pool_t *p, const char *name, int archive)
-+{
-+ return md_store_move(reg->store, p, MD_SG_DOMAINS, MD_SG_ARCHIVE, name, archive);
-+}
-+
-
- /**************************************************************************************************/
- /* driving */
-diff --git a/modules/md/md_reg.h b/modules/md/md_reg.h
-index 2bf738583c..d976b7fe80 100644
---- a/modules/md/md_reg.h
-+++ b/modules/md/md_reg.h
-@@ -124,6 +124,8 @@ apr_status_t md_reg_get_cred_files(md_reg_t *reg, const md_t *md, apr_pool_t *p,
- apr_status_t md_reg_sync(md_reg_t *reg, apr_pool_t *p, apr_pool_t *ptemp,
- apr_array_header_t *master_mds);
-
-+apr_status_t md_reg_remove(md_reg_t *reg, apr_pool_t *p, const char *name, int archive);
-+
- /**************************************************************************************************/
- /* protocol drivers */
-
-diff --git a/modules/md/md_version.h b/modules/md/md_version.h
-index b87f19c1db..34ab4eb61e 100644
---- a/modules/md/md_version.h
-+++ b/modules/md/md_version.h
-@@ -27,7 +27,7 @@
- * @macro
- * Version number of the md module as c string
- */
--#define MOD_MD_VERSION "1.1.15"
-+#define MOD_MD_VERSION "1.1.16"
-
- /**
- * @macro
-@@ -35,7 +35,7 @@
- * release. This is a 24 bit number with 8 bits for major number, 8 bits
- * for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
- */
--#define MOD_MD_VERSION_NUM 0x01010f
-+#define MOD_MD_VERSION_NUM 0x010110
-
- #define MD_ACME_DEF_URL "https://acme-v01.api.letsencrypt.org/directory"
-
---
-2.19.1
-
diff --git a/httpd-2.4.25-detect-systemd.patch b/httpd-2.4.25-detect-systemd.patch
index f8e302be2db79cab7ee639c1c3cfb65cac896c36..718beae46fbc221412625eb1ffc07ec7e78a6aa9 100644
--- a/httpd-2.4.25-detect-systemd.patch
+++ b/httpd-2.4.25-detect-systemd.patch
@@ -1,39 +1,3 @@
-diff -uap httpd-2.4.25/acinclude.m4.detectsystemd httpd-2.4.25/acinclude.m4
-diff -uap httpd-2.4.25/acinclude.m4.detectsystemd httpd-2.4.25/acinclude.m4
-diff -uap httpd-2.4.25/acinclude.m4.detectsystemd httpd-2.4.25/acinclude.m4
---- httpd-2.4.25/acinclude.m4.detectsystemd
-+++ httpd-2.4.25/acinclude.m4
-@@ -604,6 +604,30 @@
- fi
- ])
-
-+AC_DEFUN(APACHE_CHECK_SYSTEMD, [
-+dnl Check for systemd support for listen.c's socket activation.
-+case $host in
-+*-linux-*)
-+ if test -n "$PKGCONFIG" && $PKGCONFIG --exists libsystemd; then
-+ SYSTEMD_LIBS=`$PKGCONFIG --libs libsystemd`
-+ elif test -n "$PKGCONFIG" && $PKGCONFIG --exists libsystemd-daemon; then
-+ SYSTEMD_LIBS=`$PKGCONFIG --libs libsystemd-daemon`
-+ else
-+ AC_CHECK_LIB(systemd-daemon, sd_notify, SYSTEMD_LIBS="-lsystemd-daemon")
-+ fi
-+ if test -n "$SYSTEMD_LIBS"; then
-+ AC_CHECK_HEADERS(systemd/sd-daemon.h)
-+ if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then
-+ AC_MSG_WARN([Your system does not support systemd.])
-+ else
-+ APR_ADDTO(HTTPD_LIBS, [$SYSTEMD_LIBS])
-+ AC_DEFINE(HAVE_SYSTEMD, 1, [Define if systemd is supported])
-+ fi
-+ fi
-+ ;;
-+esac
-+])
-+
- dnl
- dnl APACHE_EXPORT_ARGUMENTS
- dnl Export (via APACHE_SUBST) the various path-related variables that
diff -uap httpd-2.4.25/configure.in.detectsystemd httpd-2.4.25/configure.in
--- httpd-2.4.25/configure.in.detectsystemd
+++ httpd-2.4.25/configure.in
@@ -45,15 +9,6 @@ diff -uap httpd-2.4.25/configure.in.detectsystemd httpd-2.4.25/configure.in
else
AC_MSG_ERROR([pcre-config for libpcre not found. PCRE is required and available from http://pcre.org/])
fi
-@@ -504,6 +510,8 @@
- AC_DEFINE(HAVE_GMTOFF, 1, [Define if struct tm has a tm_gmtoff field])
- fi
-
-+APACHE_CHECK_SYSTEMD
-+
- dnl ## Set up any appropriate OS-specific environment variables for apachectl
-
- case $host in
@@ -668,6 +676,7 @@
APACHE_SUBST(BUILTIN_LIBS)
APACHE_SUBST(SHLIBPATH_VAR)
diff --git a/httpd-2.4.33-export.patch b/httpd-2.4.33-export.patch
deleted file mode 100644
index 9adf398798aeaba774f7056a9a4cf66b6cc8d30b..0000000000000000000000000000000000000000
--- a/httpd-2.4.33-export.patch
+++ /dev/null
@@ -1,20 +0,0 @@
-
-There is no need to "suck in" the apr/apr-util symbols when using
-a shared libapr{,util}, it just bloats the symbol table; so don't.
-
-Upstream-HEAD: needed
-Upstream-2.0: omit
-Upstream-Status: EXPORT_DIRS change is conditional on using shared apr
-
---- httpd-2.4.33/server/Makefile.in.export
-+++ httpd-2.4.33/server/Makefile.in
-@@ -60,9 +60,6 @@
- ls $$dir/*.h ; \
- done; \
- echo "$(top_srcdir)/server/mpm_fdqueue.h"; \
-- for dir in $(EXPORT_DIRS_APR); do \
-- ls $$dir/ap[ru].h $$dir/ap[ru]_*.h 2>/dev/null; \
-- done; \
- ) | sed -e s,//,/,g | sort -u > $@
-
- exports.c: export_files
diff --git a/httpd-2.4.33-mddefault.patch b/httpd-2.4.33-mddefault.patch
deleted file mode 100644
index 9e82fb8f46d1e66a510d97a9b96c40d629f897e2..0000000000000000000000000000000000000000
--- a/httpd-2.4.33-mddefault.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-
-Override default.
-
---- httpd-2.4.33/modules/md/mod_md_config.c.mddefault
-+++ httpd-2.4.33/modules/md/mod_md_config.c
-@@ -54,10 +54,14 @@
-
- #define DEF_VAL (-1)
-
-+#ifndef MD_DEFAULT_STORE_DIR
-+#define MD_DEFAULT_STORE_DIR "state/md"
-+#endif
-+
- /* Default settings for the global conf */
- static md_mod_conf_t defmc = {
- NULL,
-- "md",
-+ MD_DEFAULT_STORE_DIR,
- NULL,
- NULL,
- 80,
diff --git a/httpd-2.4.33-r1830819+.patch b/httpd-2.4.33-r1830819+.patch
deleted file mode 100644
index 0b2d90d87eb14b0e113cfb4485c0388196804cfe..0000000000000000000000000000000000000000
--- a/httpd-2.4.33-r1830819+.patch
+++ /dev/null
@@ -1,690 +0,0 @@
-# ./pullrev.sh 1830819 1830836 1830912 1830913 1830927 1831168 1831173
-
-http://svn.apache.org/viewvc?view=revision&revision=1830819
-http://svn.apache.org/viewvc?view=revision&revision=1830912
-http://svn.apache.org/viewvc?view=revision&revision=1830913
-http://svn.apache.org/viewvc?view=revision&revision=1830927
-http://svn.apache.org/viewvc?view=revision&revision=1831168
-http://svn.apache.org/viewvc?view=revision&revision=1831173
-http://svn.apache.org/viewvc?view=revision&revision=1835240
-http://svn.apache.org/viewvc?view=revision&revision=1835242
-
---- httpd-2.4.33/modules/ssl/ssl_engine_config.c.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_engine_config.c
-@@ -891,7 +891,9 @@
- SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
- const char *err;
-
-- if ((err = ssl_cmd_check_file(cmd, &arg))) {
-+ /* Only check for non-ENGINE based certs. */
-+ if (!modssl_is_engine_id(arg)
-+ && (err = ssl_cmd_check_file(cmd, &arg))) {
- return err;
- }
-
-@@ -907,7 +909,9 @@
- SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
- const char *err;
-
-- if ((err = ssl_cmd_check_file(cmd, &arg))) {
-+ /* Check keyfile exists for non-ENGINE keys. */
-+ if (!modssl_is_engine_id(arg)
-+ && (err = ssl_cmd_check_file(cmd, &arg))) {
- return err;
- }
-
---- httpd-2.4.33/modules/ssl/ssl_engine_init.c.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_engine_init.c
-@@ -1181,12 +1182,18 @@
- (certfile = APR_ARRAY_IDX(mctx->pks->cert_files, i,
- const char *));
- i++) {
-+ EVP_PKEY *pkey;
-+ const char *engine_certfile = NULL;
-+
- key_id = apr_psprintf(ptemp, "%s:%d", vhost_id, i);
-
- ERR_clear_error();
-
- /* first the certificate (public key) */
-- if (mctx->cert_chain) {
-+ if (modssl_is_engine_id(certfile)) {
-+ engine_certfile = certfile;
-+ }
-+ else if (mctx->cert_chain) {
- if ((SSL_CTX_use_certificate_file(mctx->ssl_ctx, certfile,
- SSL_FILETYPE_PEM) < 1)) {
- ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(02561)
-@@ -1215,12 +1222,46 @@
-
- ERR_clear_error();
-
-- if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile,
-- SSL_FILETYPE_PEM) < 1) &&
-- (ERR_GET_FUNC(ERR_peek_last_error())
-- != X509_F_X509_CHECK_PRIVATE_KEY)) {
-+ if (modssl_is_engine_id(keyfile)) {
-+ apr_status_t rv;
-+
-+ cert = NULL;
-+
-+ if ((rv = modssl_load_engine_keypair(s, ptemp, vhost_id,
-+ engine_certfile, keyfile,
-+ &cert, &pkey))) {
-+ return rv;
-+ }
-+
-+ if (cert) {
-+ if (SSL_CTX_use_certificate(mctx->ssl_ctx, cert) < 1) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10137)
-+ "Failed to configure engine certificate %s, check %s",
-+ key_id, certfile);
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return APR_EGENERAL;
-+ }
-+
-+ /* SSL_CTX now owns the cert. */
-+ X509_free(cert);
-+ }
-+
-+ if (SSL_CTX_use_PrivateKey(mctx->ssl_ctx, pkey) < 1) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10130)
-+ "Failed to configure private key %s from engine",
-+ keyfile);
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return APR_EGENERAL;
-+ }
-+
-+ /* SSL_CTX now owns the key */
-+ EVP_PKEY_free(pkey);
-+ }
-+ else if ((SSL_CTX_use_PrivateKey_file(mctx->ssl_ctx, keyfile,
-+ SSL_FILETYPE_PEM) < 1)
-+ && (ERR_GET_FUNC(ERR_peek_last_error())
-+ != X509_F_X509_CHECK_PRIVATE_KEY)) {
- ssl_asn1_t *asn1;
-- EVP_PKEY *pkey;
- const unsigned char *ptr;
-
- ERR_clear_error();
-@@ -1307,8 +1348,9 @@
- /*
- * Try to read DH parameters from the (first) SSLCertificateFile
- */
-- if ((certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *)) &&
-- (dhparams = ssl_dh_GetParamFromFile(certfile))) {
-+ certfile = APR_ARRAY_IDX(mctx->pks->cert_files, 0, const char *);
-+ if (certfile && !modssl_is_engine_id(certfile)
-+ && (dhparams = ssl_dh_GetParamFromFile(certfile))) {
- SSL_CTX_set_tmp_dh(mctx->ssl_ctx, dhparams);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02540)
- "Custom DH parameters (%d bits) for %s loaded from %s",
-@@ -1320,10 +1362,10 @@
- /*
- * Similarly, try to read the ECDH curve name from SSLCertificateFile...
- */
-- if ((certfile != NULL) &&
-- (ecparams = ssl_ec_GetParamFromFile(certfile)) &&
-- (nid = EC_GROUP_get_curve_name(ecparams)) &&
-- (eckey = EC_KEY_new_by_curve_name(nid))) {
-+ if (certfile && !modssl_is_engine_id(certfile)
-+ && (ecparams = ssl_ec_GetParamFromFile(certfile))
-+ && (nid = EC_GROUP_get_curve_name(ecparams))
-+ && (eckey = EC_KEY_new_by_curve_name(nid))) {
- SSL_CTX_set_tmp_ecdh(mctx->ssl_ctx, eckey);
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02541)
- "ECDH curve %s for %s specified in %s",
---- httpd-2.4.33/modules/ssl/ssl_engine_pphrase.c.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_engine_pphrase.c
-@@ -143,9 +143,6 @@
- const char *key_id = asn1_table_vhost_key(mc, p, sc->vhost_id, idx);
- EVP_PKEY *pPrivateKey = NULL;
- ssl_asn1_t *asn1;
-- unsigned char *ucp;
-- long int length;
-- BOOL bReadable;
- int nPassPhrase = (*pphrases)->nelts;
- int nPassPhraseRetry = 0;
- apr_time_t pkey_mtime = 0;
-@@ -222,16 +219,12 @@
- * is not empty. */
- ERR_clear_error();
-
-- bReadable = ((pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file,
-- NULL, ssl_pphrase_Handle_CB, &ppcb_arg)) != NULL ?
-- TRUE : FALSE);
--
-- /*
-- * when the private key file now was readable,
-- * it's fine and we go out of the loop
-- */
-- if (bReadable)
-- break;
-+ pPrivateKey = modssl_read_privatekey(ppcb_arg.pkey_file,
-+ ssl_pphrase_Handle_CB, &ppcb_arg);
-+ /* If the private key was successfully read, nothing more to
-+ do here. */
-+ if (pPrivateKey != NULL)
-+ break;
-
- /*
- * when we have more remembered pass phrases
-@@ -356,19 +349,12 @@
- nPassPhrase++;
- }
-
-- /*
-- * Insert private key into the global module configuration
-- * (we convert it to a stand-alone DER byte sequence
-- * because the SSL library uses static variables inside a
-- * RSA structure which do not survive DSO reloads!)
-- */
-- length = i2d_PrivateKey(pPrivateKey, NULL);
-- ucp = ssl_asn1_table_set(mc->tPrivateKey, key_id, length);
-- (void)i2d_PrivateKey(pPrivateKey, &ucp); /* 2nd arg increments */
-+ /* Cache the private key in the global module configuration so it
-+ * can be used after subsequent reloads. */
-+ asn1 = ssl_asn1_table_set(mc->tPrivateKey, key_id, pPrivateKey);
-
- if (ppcb_arg.nPassPhraseDialogCur != 0) {
- /* remember mtime of encrypted keys */
-- asn1 = ssl_asn1_table_get(mc->tPrivateKey, key_id);
- asn1->source_mtime = pkey_mtime;
- }
-
-@@ -619,3 +605,288 @@
- */
- return (len);
- }
-+
-+
-+#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
-+
-+/* OpenSSL UI implementation for passphrase entry; largely duplicated
-+ * from ssl_pphrase_Handle_CB but adjusted for UI API. TODO: Might be
-+ * worth trying to shift pphrase handling over to the UI API
-+ * completely. */
-+static int passphrase_ui_open(UI *ui)
-+{
-+ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui);
-+ SSLSrvConfigRec *sc = mySrvConfig(ppcb->s);
-+
-+ ppcb->nPassPhraseDialog++;
-+ ppcb->nPassPhraseDialogCur++;
-+
-+ /*
-+ * Builtin or Pipe dialog
-+ */
-+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN
-+ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
-+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
-+ if (!readtty) {
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s,
-+ APLOGNO(10143)
-+ "Init: Creating pass phrase dialog pipe child "
-+ "'%s'", sc->server->pphrase_dialog_path);
-+ if (ssl_pipe_child_create(ppcb->p,
-+ sc->server->pphrase_dialog_path)
-+ != APR_SUCCESS) {
-+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s,
-+ APLOGNO(10144)
-+ "Init: Failed to create pass phrase pipe '%s'",
-+ sc->server->pphrase_dialog_path);
-+ return 0;
-+ }
-+ }
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10145)
-+ "Init: Requesting pass phrase via piped dialog");
-+ }
-+ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */
-+#ifdef WIN32
-+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, ppcb->s, APLOGNO(10146)
-+ "Init: Failed to create pass phrase pipe '%s'",
-+ sc->server->pphrase_dialog_path);
-+ return 0;
-+#else
-+ /*
-+ * stderr has already been redirected to the error_log.
-+ * rather than attempting to temporarily rehook it to the terminal,
-+ * we print the prompt to stdout before EVP_read_pw_string turns
-+ * off tty echo
-+ */
-+ apr_file_open_stdout(&writetty, ppcb->p);
-+
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10147)
-+ "Init: Requesting pass phrase via builtin terminal "
-+ "dialog");
-+#endif
-+ }
-+
-+ /*
-+ * The first time display a header to inform the user about what
-+ * program he actually speaks to, which module is responsible for
-+ * this terminal dialog and why to the hell he has to enter
-+ * something...
-+ */
-+ if (ppcb->nPassPhraseDialog == 1) {
-+ apr_file_printf(writetty, "%s mod_ssl (Pass Phrase Dialog)\n",
-+ AP_SERVER_BASEVERSION);
-+ apr_file_printf(writetty,
-+ "A pass phrase is required to access the private key.\n");
-+ }
-+ if (ppcb->bPassPhraseDialogOnce) {
-+ ppcb->bPassPhraseDialogOnce = FALSE;
-+ apr_file_printf(writetty, "\n");
-+ apr_file_printf(writetty, "Private key %s (%s)\n",
-+ ppcb->key_id, ppcb->pkey_file);
-+ }
-+ }
-+
-+ return 1;
-+}
-+
-+static int passphrase_ui_read(UI *ui, UI_STRING *uis)
-+{
-+ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui);
-+ SSLSrvConfigRec *sc = mySrvConfig(ppcb->s);
-+ const char *prompt;
-+ int i;
-+ int bufsize;
-+ int len;
-+ char *buf;
-+
-+ prompt = UI_get0_output_string(uis);
-+ if (prompt == NULL) {
-+ prompt = "Enter pass phrase:";
-+ }
-+
-+ /*
-+ * Get the maximum expected size and allocate the buffer
-+ */
-+ bufsize = UI_get_result_maxsize(uis);
-+ buf = apr_pcalloc(ppcb->p, bufsize);
-+
-+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN
-+ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
-+ /*
-+ * Get the pass phrase through a callback.
-+ * Empty input is not accepted.
-+ */
-+ for (;;) {
-+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
-+ i = pipe_get_passwd_cb(buf, bufsize, "", FALSE);
-+ }
-+ else { /* sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN */
-+ i = EVP_read_pw_string(buf, bufsize, "", FALSE);
-+ }
-+ if (i != 0) {
-+ OPENSSL_cleanse(buf, bufsize);
-+ return 0;
-+ }
-+ len = strlen(buf);
-+ if (len < 1){
-+ apr_file_printf(writetty, "Apache:mod_ssl:Error: Pass phrase"
-+ "empty (needs to be at least 1 character).\n");
-+ apr_file_puts(prompt, writetty);
-+ }
-+ else {
-+ break;
-+ }
-+ }
-+ }
-+ /*
-+ * Filter program
-+ */
-+ else if (sc->server->pphrase_dialog_type == SSL_PPTYPE_FILTER) {
-+ const char *cmd = sc->server->pphrase_dialog_path;
-+ const char **argv = apr_palloc(ppcb->p, sizeof(char *) * 3);
-+ char *result;
-+
-+ ap_log_error(APLOG_MARK, APLOG_INFO, 0, ppcb->s, APLOGNO(10148)
-+ "Init: Requesting pass phrase from dialog filter "
-+ "program (%s)", cmd);
-+
-+ argv[0] = cmd;
-+ argv[1] = ppcb->key_id;
-+ argv[2] = NULL;
-+
-+ result = ssl_util_readfilter(ppcb->s, ppcb->p, cmd, argv);
-+ apr_cpystrn(buf, result, bufsize);
-+ len = strlen(buf);
-+ }
-+
-+ /*
-+ * Ok, we now have the pass phrase, so give it back
-+ */
-+ ppcb->cpPassPhraseCur = apr_pstrdup(ppcb->p, buf);
-+ UI_set_result(ui, uis, buf);
-+
-+ /* Clear sensitive data. */
-+ OPENSSL_cleanse(buf, bufsize);
-+ return 1;
-+}
-+
-+static int passphrase_ui_write(UI *ui, UI_STRING *uis)
-+{
-+ pphrase_cb_arg_t *ppcb = UI_get0_user_data(ui);
-+ SSLSrvConfigRec *sc;
-+ const char *prompt;
-+
-+ sc = mySrvConfig(ppcb->s);
-+
-+ if (sc->server->pphrase_dialog_type == SSL_PPTYPE_BUILTIN
-+ || sc->server->pphrase_dialog_type == SSL_PPTYPE_PIPE) {
-+ prompt = UI_get0_output_string(uis);
-+ apr_file_puts(prompt, writetty);
-+ }
-+
-+ return 1;
-+}
-+
-+static int passphrase_ui_close(UI *ui)
-+{
-+ /*
-+ * Close the pipes if they were opened
-+ */
-+ if (readtty) {
-+ apr_file_close(readtty);
-+ apr_file_close(writetty);
-+ readtty = writetty = NULL;
-+ }
-+ return 1;
-+}
-+
-+static apr_status_t pp_ui_method_cleanup(void *uip)
-+{
-+ UI_METHOD *uim = uip;
-+
-+ UI_destroy_method(uim);
-+
-+ return APR_SUCCESS;
-+}
-+
-+static UI_METHOD *get_passphrase_ui(apr_pool_t *p)
-+{
-+ UI_METHOD *ui_method = UI_create_method("Passphrase UI");
-+
-+ UI_method_set_opener(ui_method, passphrase_ui_open);
-+ UI_method_set_reader(ui_method, passphrase_ui_read);
-+ UI_method_set_writer(ui_method, passphrase_ui_write);
-+ UI_method_set_closer(ui_method, passphrase_ui_close);
-+
-+ apr_pool_cleanup_register(p, ui_method, pp_ui_method_cleanup,
-+ pp_ui_method_cleanup);
-+
-+ return ui_method;
-+}
-+
-+
-+apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p,
-+ const char *vhostid,
-+ const char *certid, const char *keyid,
-+ X509 **pubkey, EVP_PKEY **privkey)
-+{
-+ SSLModConfigRec *mc = myModConfig(s);
-+ ENGINE *e;
-+ UI_METHOD *ui_method = get_passphrase_ui(p);
-+ pphrase_cb_arg_t ppcb;
-+
-+ memset(&ppcb, 0, sizeof ppcb);
-+ ppcb.s = s;
-+ ppcb.p = p;
-+ ppcb.bPassPhraseDialogOnce = TRUE;
-+ ppcb.key_id = vhostid;
-+ ppcb.pkey_file = keyid;
-+
-+ if (!mc->szCryptoDevice) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10131)
-+ "Init: Cannot load private key `%s' without engine",
-+ keyid);
-+ return ssl_die(s);
-+ }
-+
-+ if (!(e = ENGINE_by_id(mc->szCryptoDevice))) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10132)
-+ "Init: Failed to load Crypto Device API `%s'",
-+ mc->szCryptoDevice);
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return ssl_die(s);
-+ }
-+
-+ if (APLOGdebug(s)) {
-+ ENGINE_ctrl_cmd_string(e, "VERBOSE", NULL, 0);
-+ }
-+
-+ if (certid) {
-+ struct {
-+ const char *cert_id;
-+ X509 *cert;
-+ } params = { certid, NULL };
-+
-+ if (!ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &params, NULL, 1)) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10136)
-+ "Init: Unable to get the certificate");
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return ssl_die(s);
-+ }
-+
-+ *pubkey = params.cert;
-+ }
-+
-+ *privkey = ENGINE_load_private_key(e, keyid, ui_method, &ppcb);
-+ if (*privkey == NULL) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO(10133)
-+ "Init: Unable to get the private key");
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return ssl_die(s);
-+ }
-+
-+ ENGINE_free(e);
-+
-+ return APR_SUCCESS;
-+}
-+#endif
---- httpd-2.4.33/modules/ssl/ssl_private.h.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_private.h
-@@ -976,21 +976,28 @@
- apr_status_t ssl_load_encrypted_pkey(server_rec *, apr_pool_t *, int,
- const char *, apr_array_header_t **);
-
-+/* Load public and/or private key from the configured ENGINE. Private
-+ * key returned as *pkey. certid can be NULL, in which case *pubkey
-+ * is not altered. Errors logged on failure. */
-+apr_status_t modssl_load_engine_keypair(server_rec *s, apr_pool_t *p,
-+ const char *vhostid,
-+ const char *certid, const char *keyid,
-+ X509 **pubkey, EVP_PKEY **privkey);
-+
- /** Diffie-Hellman Parameter Support */
- DH *ssl_dh_GetParamFromFile(const char *);
- #ifdef HAVE_ECC
- EC_GROUP *ssl_ec_GetParamFromFile(const char *);
- #endif
-
--unsigned char *ssl_asn1_table_set(apr_hash_t *table,
-- const char *key,
-- long int length);
--
--ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table,
-- const char *key);
--
--void ssl_asn1_table_unset(apr_hash_t *table,
-- const char *key);
-+/* Store the EVP_PKEY key (serialized into DER) in the hash table with
-+ * key, returning the ssl_asn1_t structure pointer. */
-+ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key,
-+ EVP_PKEY *pkey);
-+/* Retrieve the ssl_asn1_t structure with given key from the hash. */
-+ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table, const char *key);
-+/* Remove and free the ssl_asn1_t structure with given key. */
-+void ssl_asn1_table_unset(apr_hash_t *table, const char *key);
-
- /** Mutex Support */
- int ssl_mutex_init(server_rec *, apr_pool_t *);
-@@ -1078,6 +1085,10 @@
- int ssl_is_challenge(conn_rec *c, const char *servername,
- X509 **pcert, EVP_PKEY **pkey);
-
-+/* Returns non-zero if the cert/key filename should be handled through
-+ * the configured ENGINE. */
-+int modssl_is_engine_id(const char *name);
-+
- #endif /* SSL_PRIVATE_H */
- /** @} */
-
---- httpd-2.4.33/modules/ssl/ssl_util.c.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_util.c
-@@ -181,45 +181,37 @@
- return TRUE;
- }
-
--/*
-- * certain key data needs to survive restarts,
-- * which are stored in the user data table of s->process->pool.
-- * to prevent "leaking" of this data, we use malloc/free
-- * rather than apr_palloc and these wrappers to help make sure
-- * we do not leak the malloc-ed data.
-- */
--unsigned char *ssl_asn1_table_set(apr_hash_t *table,
-- const char *key,
-- long int length)
-+/* Decrypted private keys are cached to survive restarts. The cached
-+ * data must have lifetime of the process (hence malloc/free rather
-+ * than pools), and uses raw DER since the EVP_PKEY structure
-+ * internals may not survive across a module reload. */
-+ssl_asn1_t *ssl_asn1_table_set(apr_hash_t *table, const char *key,
-+ EVP_PKEY *pkey)
- {
- apr_ssize_t klen = strlen(key);
- ssl_asn1_t *asn1 = apr_hash_get(table, key, klen);
-+ apr_size_t length = i2d_PrivateKey(pkey, NULL);
-+ unsigned char *p;
-
-- /*
-- * if a value for this key already exists,
-- * reuse as much of the already malloc-ed data
-- * as possible.
-- */
-+ /* Re-use structure if cached previously. */
- if (asn1) {
- if (asn1->nData != length) {
-- free(asn1->cpData); /* XXX: realloc? */
-- asn1->cpData = NULL;
-+ asn1->cpData = ap_realloc(asn1->cpData, length);
- }
- }
- else {
- asn1 = ap_malloc(sizeof(*asn1));
- asn1->source_mtime = 0; /* used as a note for encrypted private keys */
-- asn1->cpData = NULL;
-- }
--
-- asn1->nData = length;
-- if (!asn1->cpData) {
- asn1->cpData = ap_malloc(length);
-+
-+ apr_hash_set(table, key, klen, asn1);
- }
-
-- apr_hash_set(table, key, klen, asn1);
-+ asn1->nData = length;
-+ p = asn1->cpData;
-+ i2d_PrivateKey(pkey, &p); /* increases p by length */
-
-- return asn1->cpData; /* caller will assign a value to this */
-+ return asn1;
- }
-
- ssl_asn1_t *ssl_asn1_table_get(apr_hash_t *table,
-@@ -469,3 +461,13 @@
- }
-
- #endif /* #if APR_HAS_THREADS && MODSSL_USE_OPENSSL_PRE_1_1_API */
-+
-+int modssl_is_engine_id(const char *name)
-+{
-+#if defined(HAVE_OPENSSL_ENGINE_H) && defined(HAVE_ENGINE_INIT)
-+ /* ### Can handle any other special ENGINE key names here? */
-+ return strncmp(name, "pkcs11:", 7) == 0;
-+#else
-+ return 0;
-+#endif
-+}
---- httpd-2.4.33/modules/ssl/ssl_util_ssl.c.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_util_ssl.c
-@@ -74,7 +74,7 @@
- ** _________________________________________________________________
- */
-
--EVP_PKEY *modssl_read_privatekey(const char* filename, EVP_PKEY **key, pem_password_cb *cb, void *s)
-+EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *s)
- {
- EVP_PKEY *rc;
- BIO *bioS;
-@@ -83,7 +83,7 @@
- /* 1. try PEM (= DER+Base64+headers) */
- if ((bioS=BIO_new_file(filename, "r")) == NULL)
- return NULL;
-- rc = PEM_read_bio_PrivateKey(bioS, key, cb, s);
-+ rc = PEM_read_bio_PrivateKey(bioS, NULL, cb, s);
- BIO_free(bioS);
-
- if (rc == NULL) {
-@@ -107,41 +107,9 @@
- BIO_free(bioS);
- }
- }
-- if (rc != NULL && key != NULL) {
-- if (*key != NULL)
-- EVP_PKEY_free(*key);
-- *key = rc;
-- }
- return rc;
- }
-
--typedef struct {
-- const char *pass;
-- int pass_len;
--} pass_ctx;
--
--static int provide_pass(char *buf, int size, int rwflag, void *baton)
--{
-- pass_ctx *ctx = baton;
-- if (ctx->pass_len > 0) {
-- if (ctx->pass_len < size) {
-- size = (int)ctx->pass_len;
-- }
-- memcpy(buf, ctx->pass, size);
-- }
-- return ctx->pass_len;
--}
--
--EVP_PKEY *modssl_read_encrypted_pkey(const char *filename, EVP_PKEY **key,
-- const char *pass, apr_size_t pass_len)
--{
-- pass_ctx ctx;
--
-- ctx.pass = pass;
-- ctx.pass_len = pass_len;
-- return modssl_read_privatekey(filename, key, provide_pass, &ctx);
--}
--
- /* _________________________________________________________________
- **
- ** Smart shutdown
---- httpd-2.4.33/modules/ssl/ssl_util_ssl.h.r1830819+
-+++ httpd-2.4.33/modules/ssl/ssl_util_ssl.h
-@@ -64,8 +64,11 @@
- void modssl_init_app_data2_idx(void);
- void *modssl_get_app_data2(SSL *);
- void modssl_set_app_data2(SSL *, void *);
--EVP_PKEY *modssl_read_privatekey(const char *, EVP_PKEY **, pem_password_cb *, void *);
--EVP_PKEY *modssl_read_encrypted_pkey(const char *, EVP_PKEY **, const char *, apr_size_t);
-+
-+/* Read private key from filename in either PEM or raw base64(DER)
-+ * format, using password entry callback cb and userdata. */
-+EVP_PKEY *modssl_read_privatekey(const char *filename, pem_password_cb *cb, void *ud);
-+
- int modssl_smart_shutdown(SSL *ssl);
- BOOL modssl_X509_getBC(X509 *, int *, int *);
- char *modssl_X509_NAME_ENTRY_to_string(apr_pool_t *p, X509_NAME_ENTRY *xsne,
diff --git a/httpd-2.4.33-sslmultiproxy.patch b/httpd-2.4.33-sslmultiproxy.patch
deleted file mode 100644
index 679f229e22241ab7711b90ada9fd80c752b56ee8..0000000000000000000000000000000000000000
--- a/httpd-2.4.33-sslmultiproxy.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From ce2d1d7d4b2bebe34cf37fdeb30d35050092c5b5 Mon Sep 17 00:00:00 2001
-From: Rob Crittenden
-Date: Thu, 12 Apr 2018 14:36:28 -0400
-Subject: [PATCH] httpd-2.4.18-sslmultiproxy.patch
-
----
- modules/ssl/mod_ssl.c | 24 ++++++++++++++++++++++--
- modules/ssl/ssl_engine_vars.c | 18 +++++++++++++++++-
- 2 files changed, 39 insertions(+), 3 deletions(-)
-
-diff --git a/modules/ssl/mod_ssl.c b/modules/ssl/mod_ssl.c
-index 48d64cb..42e85a3 100644
-diff -uap httpd-2.4.33/modules/ssl/mod_ssl.c.sslmultiproxy httpd-2.4.33/modules/ssl/mod_ssl.c
---- httpd-2.4.33/modules/ssl/mod_ssl.c.sslmultiproxy
-+++ httpd-2.4.33/modules/ssl/mod_ssl.c
-@@ -444,12 +444,19 @@
- return OK;
- }
-
-+static APR_OPTIONAL_FN_TYPE(ssl_engine_disable) *othermod_engine_disable;
-+static APR_OPTIONAL_FN_TYPE(ssl_engine_set) *othermod_engine_set;
-+
- static SSLConnRec *ssl_init_connection_ctx(conn_rec *c,
- ap_conf_vector_t *per_dir_config)
- {
- SSLConnRec *sslconn = myConnConfig(c);
- SSLSrvConfigRec *sc;
-
-+ if (othermod_engine_disable) {
-+ othermod_engine_disable(c);
-+ }
-+
- if (sslconn) {
- return sslconn;
- }
-@@ -508,6 +515,10 @@
- {
- SSLConnRec *sslconn;
- int status;
-+
-+ if (othermod_engine_set) {
-+ return othermod_engine_set(c, per_dir_config, proxy, enable);
-+ }
-
- if (proxy) {
- sslconn = ssl_init_connection_ctx(c, per_dir_config);
-@@ -537,12 +548,18 @@
-
- static int ssl_proxy_enable(conn_rec *c)
- {
-- return ssl_engine_set(c, NULL, 1, 1);
-+ if (othermod_engine_set)
-+ return othermod_engine_set(c, NULL, 1, 1);
-+ else
-+ return ssl_engine_set(c, NULL, 1, 1);
- }
-
- static int ssl_engine_disable(conn_rec *c)
- {
-- return ssl_engine_set(c, NULL, 0, 0);
-+ if (othermod_engine_set)
-+ return othermod_engine_set(c, NULL, 0, 0);
-+ else
-+ return ssl_engine_set(c, NULL, 0, 0);
- }
-
- int ssl_init_ssl_connection(conn_rec *c, request_rec *r)
-@@ -730,6 +747,9 @@
- APR_HOOK_MIDDLE);
-
- ssl_var_register(p);
-+
-+ othermod_engine_disable = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_disable);
-+ othermod_engine_set = APR_RETRIEVE_OPTIONAL_FN(ssl_engine_set);
-
- APR_REGISTER_OPTIONAL_FN(ssl_proxy_enable);
- APR_REGISTER_OPTIONAL_FN(ssl_engine_disable);
-diff -uap httpd-2.4.33/modules/ssl/ssl_engine_vars.c.sslmultiproxy httpd-2.4.33/modules/ssl/ssl_engine_vars.c
---- httpd-2.4.33/modules/ssl/ssl_engine_vars.c.sslmultiproxy
-+++ httpd-2.4.33/modules/ssl/ssl_engine_vars.c
-@@ -54,6 +54,8 @@
- static void ssl_var_lookup_ssl_cipher_bits(SSL *ssl, int *usekeysize, int *algkeysize);
- static char *ssl_var_lookup_ssl_version(apr_pool_t *p, char *var);
- static char *ssl_var_lookup_ssl_compress_meth(SSL *ssl);
-+static APR_OPTIONAL_FN_TYPE(ssl_is_https) *othermod_is_https;
-+static APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *othermod_var_lookup;
-
- static SSLConnRec *ssl_get_effective_config(conn_rec *c)
- {
-@@ -68,7 +70,9 @@
- static int ssl_is_https(conn_rec *c)
- {
- SSLConnRec *sslconn = ssl_get_effective_config(c);
-- return sslconn && sslconn->ssl;
-+
-+ return (sslconn && sslconn->ssl)
-+ || (othermod_is_https && othermod_is_https(c));
- }
-
- static const char var_interface[] = "mod_ssl/" AP_SERVER_BASEREVISION;
-@@ -137,6 +141,9 @@
- {
- char *cp, *cp2;
-
-+ othermod_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https);
-+ othermod_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup);
-+
- APR_REGISTER_OPTIONAL_FN(ssl_is_https);
- APR_REGISTER_OPTIONAL_FN(ssl_var_lookup);
- APR_REGISTER_OPTIONAL_FN(ssl_ext_list);
-@@ -271,6 +278,15 @@
- */
- if (result == NULL && c != NULL) {
- SSLConnRec *sslconn = ssl_get_effective_config(c);
-+
-+ if (strlen(var) > 4 && strcEQn(var, "SSL_", 4)
-+ && (!sslconn || !sslconn->ssl) && othermod_var_lookup) {
-+ /* For an SSL_* variable, if mod_ssl is not enabled for
-+ * this connection and another SSL module is present, pass
-+ * through to that module. */
-+ return othermod_var_lookup(p, s, c, r, var);
-+ }
-+
- if (strlen(var) > 4 && strcEQn(var, "SSL_", 4)
- && sslconn && sslconn->ssl)
- result = ssl_var_lookup_ssl(p, sslconn, r, var+4);
diff --git a/httpd-2.4.33-systemd.patch b/httpd-2.4.33-systemd.patch
deleted file mode 100644
index 7f5ee3b369026e8e6818e2aa3df23e274f8bb8cd..0000000000000000000000000000000000000000
--- a/httpd-2.4.33-systemd.patch
+++ /dev/null
@@ -1,245 +0,0 @@
---- httpd-2.4.33/modules/arch/unix/config5.m4.systemd
-+++ httpd-2.4.33/modules/arch/unix/config5.m4
-@@ -18,6 +18,16 @@
- fi
- ])
-
-+APACHE_MODULE(systemd, Systemd support, , , all, [
-+ if test "${ac_cv_header_systemd_sd_daemon_h}" = "no" || test -z "${SYSTEMD_LIBS}"; then
-+ AC_MSG_WARN([Your system does not support systemd.])
-+ enable_systemd="no"
-+ else
-+ APR_ADDTO(MOD_SYSTEMD_LDADD, [$SYSTEMD_LIBS])
-+ enable_systemd="yes"
-+ fi
-+])
-+
- APR_ADDTO(INCLUDES, [-I\$(top_srcdir)/$modpath_current])
-
- APACHE_MODPATH_FINISH
---- httpd-2.4.33/modules/arch/unix/mod_systemd.c.systemd
-+++ httpd-2.4.33/modules/arch/unix/mod_systemd.c
-@@ -0,0 +1,223 @@
-+/* Licensed to the Apache Software Foundation (ASF) under one or more
-+ * contributor license agreements. See the NOTICE file distributed with
-+ * this work for additional information regarding copyright ownership.
-+ * The ASF licenses this file to You under the Apache License, Version 2.0
-+ * (the "License"); you may not use this file except in compliance with
-+ * the License. You may obtain a copy of the License at
-+ *
-+ * http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ *
-+ */
-+
-+#include
-+#include
-+#include "ap_mpm.h"
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include
-+#include "unixd.h"
-+#include "scoreboard.h"
-+#include "mpm_common.h"
-+
-+#include "systemd/sd-daemon.h"
-+#include "systemd/sd-journal.h"
-+
-+#if APR_HAVE_UNISTD_H
-+#include <unistd.h>
-+#endif
-+
-+static int shutdown_timer = 0;
-+static int shutdown_counter = 0;
-+static unsigned long bytes_served;
-+static pid_t mainpid;
-+static char describe_listeners[50];
-+
-+static int systemd_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
-+ apr_pool_t *ptemp)
-+{
-+ sd_notify(0,
-+ "RELOADING=1\n"
-+ "STATUS=Reading configuration...\n");
-+ ap_extended_status = 1;
-+ return OK;
-+}
-+
-+static char *dump_listener(ap_listen_rec *lr, apr_pool_t *p)
-+{
-+ apr_sockaddr_t *sa = lr->bind_addr;
-+ char addr[128];
-+
-+ if (apr_sockaddr_is_wildcard(sa)) {
-+ return apr_pstrcat(p, "port ", apr_itoa(p, sa->port), NULL);
-+ }
-+
-+ apr_sockaddr_ip_getbuf(addr, sizeof addr, sa);
-+
-+ return apr_psprintf(p, "%s port %u", addr, sa->port);
-+}
-+
-+static int systemd_post_config(apr_pool_t *pconf, apr_pool_t *plog,
-+ apr_pool_t *ptemp, server_rec *s)
-+{
-+ ap_listen_rec *lr;
-+ apr_size_t plen = sizeof describe_listeners;
-+ char *p = describe_listeners;
-+
-+ if (ap_state_query(AP_SQ_MAIN_STATE) == AP_SQ_MS_CREATE_PRE_CONFIG)
-+ return OK;
-+
-+ for (lr = ap_listeners; lr; lr = lr->next) {
-+ char *s = dump_listener(lr, ptemp);
-+
-+ if (strlen(s) + 3 < plen) {
-+ char *newp = apr_cpystrn(p, s, plen);
-+ if (lr->next)
-+ newp = apr_cpystrn(newp, ", ", 3);
-+ plen -= newp - p;
-+ p = newp;
-+ }
-+ else {
-+ if (plen < 4) {
-+ p = describe_listeners + sizeof describe_listeners - 4;
-+ plen = 4;
-+ }
-+ apr_cpystrn(p, "...", plen);
-+ break;
-+ }
-+ }
-+
-+ sd_journal_print(LOG_INFO, "Server configured, listening on: %s", describe_listeners);
-+
-+ return OK;
-+}
-+
-+static int systemd_pre_mpm(apr_pool_t *p, ap_scoreboard_e sb_type)
-+{
-+ int rv;
-+
-+ mainpid = getpid();
-+
-+ rv = sd_notifyf(0, "READY=1\n"
-+ "STATUS=Started, listening on: %s\n"
-+ "MAINPID=%" APR_PID_T_FMT,
-+ describe_listeners, mainpid);
-+ if (rv < 0) {
-+ ap_log_perror(APLOG_MARK, APLOG_ERR, 0, p, APLOGNO(02395)
-+ "sd_notifyf returned an error %d", rv);
-+ }
-+
-+ return OK;
-+}
-+
-+static int systemd_monitor(apr_pool_t *p, server_rec *s)
-+{
-+ ap_sload_t sload;
-+ apr_interval_time_t up_time;
-+ char bps[5];
-+ int rv;
-+
-+ if (!ap_extended_status) {
-+ /* Nothing useful to report if ExtendedStatus disabled. */
-+ return DECLINED;
-+ }
-+
-+ ap_get_sload(&sload);
-+
-+ if (sload.access_count == 0) {
-+ rv = sd_notifyf(0, "READY=1\n"
-+ "STATUS=Running, listening on: %s\n",
-+ describe_listeners);
-+ }
-+ else {
-+ /* up_time in seconds */
-+ up_time = (apr_uint32_t) apr_time_sec(apr_time_now() -
-+ ap_scoreboard_image->global->restart_time);
-+
-+ apr_strfsize((unsigned long)((float) (sload.bytes_served)
-+ / (float) up_time), bps);
-+
-+ rv = sd_notifyf(0, "READY=1\n"
-+ "STATUS=Total requests: %lu; Idle/Busy workers %d/%d;"
-+ "Requests/sec: %.3g; Bytes served/sec: %sB/sec\n",
-+ sload.access_count, sload.idle, sload.busy,
-+ ((float) sload.access_count) / (float) up_time, bps);
-+ }
-+
-+ if (rv < 0) {
-+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02396)
-+ "sd_notifyf returned an error %d", rv);
-+ }
-+
-+ /* Shutdown httpd when nothing is sent for shutdown_timer seconds. */
-+ if (sload.bytes_served == bytes_served) {
-+ /* mpm_common.c: INTERVAL_OF_WRITABLE_PROBES is 10 */
-+ shutdown_counter += 10;
-+ if (shutdown_timer > 0 && shutdown_counter >= shutdown_timer) {
-+ rv = sd_notifyf(0, "READY=1\n"
-+ "STATUS=Stopped as result of IdleShutdown "
-+ "timeout.");
-+ if (rv < 0) {
-+ ap_log_error(APLOG_MARK, APLOG_ERR, 0, s, APLOGNO(02804)
-+ "sd_notifyf returned an error %d", rv);
-+ }
-+ kill(mainpid, AP_SIG_GRACEFUL);
-+ }
-+ }
-+ else {
-+ shutdown_counter = 0;
-+ }
-+
-+ bytes_served = sload.bytes_served;
-+
-+ return DECLINED;
-+}
-+
-+static void systemd_register_hooks(apr_pool_t *p)
-+{
-+ /* Enable ap_extended_status. */
-+ ap_hook_pre_config(systemd_pre_config, NULL, NULL, APR_HOOK_LAST);
-+ /* Grab the listener config. */
-+ ap_hook_post_config(systemd_post_config, NULL, NULL, APR_HOOK_LAST);
-+ /* We know the PID in this hook ... */
-+ ap_hook_pre_mpm(systemd_pre_mpm, NULL, NULL, APR_HOOK_LAST);
-+ /* Used to update httpd's status line using sd_notifyf */
-+ ap_hook_monitor(systemd_monitor, NULL, NULL, APR_HOOK_MIDDLE);
-+}
-+
-+static const char *set_shutdown_timer(cmd_parms *cmd, void *dummy,
-+ const char *arg)
-+{
-+ const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
-+ if (err != NULL) {
-+ return err;
-+ }
-+
-+ shutdown_timer = atoi(arg);
-+ return NULL;
-+}
-+
-+static const command_rec systemd_cmds[] =
-+{
-+AP_INIT_TAKE1("IdleShutdown", set_shutdown_timer, NULL, RSRC_CONF,
-+ "Number of seconds in idle-state after which httpd is shutdown"),
-+ {NULL}
-+};
-+
-+AP_DECLARE_MODULE(systemd) = {
-+ STANDARD20_MODULE_STUFF,
-+ NULL,
-+ NULL,
-+ NULL,
-+ NULL,
-+ systemd_cmds,
-+ systemd_register_hooks,
-+};
diff --git a/httpd-2.4.34-r1555631.patch b/httpd-2.4.34-r1555631.patch
deleted file mode 100644
index 7ca9478ed8b2b521e1d38cda7c15a15fa4cc8344..0000000000000000000000000000000000000000
--- a/httpd-2.4.34-r1555631.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-# ./pullrev.sh 1555631
-http://svn.apache.org/viewvc?view=revision&revision=1555631
-
---- httpd-2.4.34/modules/ssl/ssl_engine_ocsp.c
-+++ httpd-2.4.34/modules/ssl/ssl_engine_ocsp.c
-@@ -61,7 +61,7 @@
- /* Use default responder URL if forced by configuration, else use
- * certificate-specified responder, falling back to default if
- * necessary and possible. */
-- if (sc->server->ocsp_force_default) {
-+ if (sc->server->ocsp_force_default == TRUE) {
- s = sc->server->ocsp_responder;
- }
- else {
diff --git a/httpd-2.4.34-r1738878.patch b/httpd-2.4.34-r1738878.patch
deleted file mode 100644
index 5af48f502691039d34812a2cc6c6f705c2d7b90f..0000000000000000000000000000000000000000
--- a/httpd-2.4.34-r1738878.patch
+++ /dev/null
@@ -1,130 +0,0 @@
---- httpd-2.4.34/modules/proxy/ajp_header.c.r1738878
-+++ httpd-2.4.34/modules/proxy/ajp_header.c
-@@ -213,7 +213,8 @@
-
- static apr_status_t ajp_marshal_into_msgb(ajp_msg_t *msg,
- request_rec *r,
-- apr_uri_t *uri)
-+ apr_uri_t *uri,
-+ const char *secret)
- {
- int method;
- apr_uint32_t i, num_headers = 0;
-@@ -293,17 +294,15 @@
- i, elts[i].key, elts[i].val);
- }
-
--/* XXXX need to figure out how to do this
-- if (s->secret) {
-+ if (secret) {
- if (ajp_msg_append_uint8(msg, SC_A_SECRET) ||
-- ajp_msg_append_string(msg, s->secret)) {
-+ ajp_msg_append_string(msg, secret)) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(03228)
-- "Error ajp_marshal_into_msgb - "
-+ "ajp_marshal_into_msgb: "
- "Error appending secret");
- return APR_EGENERAL;
- }
- }
-- */
-
- if (r->user) {
- if (ajp_msg_append_uint8(msg, SC_A_REMOTE_USER) ||
-@@ -671,7 +670,8 @@
- apr_status_t ajp_send_header(apr_socket_t *sock,
- request_rec *r,
- apr_size_t buffsize,
-- apr_uri_t *uri)
-+ apr_uri_t *uri,
-+ const char *secret)
- {
- ajp_msg_t *msg;
- apr_status_t rc;
-@@ -683,7 +683,7 @@
- return rc;
- }
-
-- rc = ajp_marshal_into_msgb(msg, r, uri);
-+ rc = ajp_marshal_into_msgb(msg, r, uri, secret);
- if (rc != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(00988)
- "ajp_send_header: ajp_marshal_into_msgb failed");
---- httpd-2.4.34/modules/proxy/ajp.h.r1738878
-+++ httpd-2.4.34/modules/proxy/ajp.h
-@@ -413,12 +413,14 @@
- * @param sock backend socket
- * @param r current request
- * @param buffsize max size of the AJP packet.
-+ * @param secret authentication secret
- * @param uri requested uri
- * @return APR_SUCCESS or error
- */
- apr_status_t ajp_send_header(apr_socket_t *sock, request_rec *r,
- apr_size_t buffsize,
-- apr_uri_t *uri);
-+ apr_uri_t *uri,
-+ const char *secret);
-
- /**
- * Read the ajp message and return the type of the message.
---- httpd-2.4.34/modules/proxy/mod_proxy_ajp.c.r1738878
-+++ httpd-2.4.34/modules/proxy/mod_proxy_ajp.c
-@@ -193,6 +193,7 @@
- apr_off_t content_length = 0;
- int original_status = r->status;
- const char *original_status_line = r->status_line;
-+ const char *secret = NULL;
-
- if (psf->io_buffer_size_set)
- maxsize = psf->io_buffer_size;
-@@ -202,12 +203,15 @@
- maxsize = AJP_MSG_BUFFER_SZ;
- maxsize = APR_ALIGN(maxsize, 1024);
-
-+ if (*conn->worker->s->secret)
-+ secret = conn->worker->s->secret;
-+
- /*
- * Send the AJP request to the remote server
- */
-
- /* send request headers */
-- status = ajp_send_header(conn->sock, r, maxsize, uri);
-+ status = ajp_send_header(conn->sock, r, maxsize, uri, secret);
- if (status != APR_SUCCESS) {
- conn->close = 1;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, status, r, APLOGNO(00868)
---- httpd-2.4.34/modules/proxy/mod_proxy.c.r1738878
-+++ httpd-2.4.34/modules/proxy/mod_proxy.c
-@@ -319,6 +319,12 @@
- (int)sizeof(worker->s->upgrade));
- }
- }
-+ else if (!strcasecmp(key, "secret")) {
-+ if (PROXY_STRNCPY(worker->s->secret, val) != APR_SUCCESS) {
-+ return apr_psprintf(p, "Secret length must be < %d characters",
-+ (int)sizeof(worker->s->secret));
-+ }
-+ }
- else if (!strcasecmp(key, "responsefieldsize")) {
- long s = atol(val);
- if (s < 0) {
---- httpd-2.4.34/modules/proxy/mod_proxy.h.r1738878
-+++ httpd-2.4.34/modules/proxy/mod_proxy.h
-@@ -357,6 +357,7 @@
- #define PROXY_WORKER_MAX_HOSTNAME_SIZE 64
- #define PROXY_BALANCER_MAX_HOSTNAME_SIZE PROXY_WORKER_MAX_HOSTNAME_SIZE
- #define PROXY_BALANCER_MAX_STICKY_SIZE 64
-+#define PROXY_WORKER_MAX_SECRET_SIZE 64
-
- #define PROXY_RFC1035_HOSTNAME_SIZE 256
-
-@@ -453,6 +454,7 @@
- char hostname_ex[PROXY_RFC1035_HOSTNAME_SIZE]; /* RFC1035 compliant version of the remote backend address */
- apr_size_t response_field_size; /* Size of proxy response buffer in bytes. */
- unsigned int response_field_size_set:1;
-+ char secret[PROXY_WORKER_MAX_SECRET_SIZE]; /* authentication secret (e.g. AJP13) */
- } proxy_worker_shared;
-
- #define ALIGNED_PROXY_WORKER_SHARED_SIZE (APR_ALIGN_DEFAULT(sizeof(proxy_worker_shared)))
diff --git a/httpd-2.4.34-r1827912+.patch b/httpd-2.4.34-r1827912+.patch
deleted file mode 100644
index 98c7ac8c4fb9d50423d898c258a776c9b17cc96d..0000000000000000000000000000000000000000
--- a/httpd-2.4.34-r1827912+.patch
+++ /dev/null
@@ -1,858 +0,0 @@
-
-Pull all changes from upstream integration branch:
-
-svn diff -r1840105:1841219 https://svn.apache.org/repos/asf/httpd/httpd/branches/tlsv1.3-for-2.4.x
-
---- httpd-2.4.34/modules/ssl/mod_ssl.c.r1827912+
-+++ httpd-2.4.34/modules/ssl/mod_ssl.c
-@@ -93,9 +93,9 @@
- SSL_CMD_SRV(FIPS, FLAG,
- "Enable FIPS-140 mode "
- "(`on', `off')")
-- SSL_CMD_ALL(CipherSuite, TAKE1,
-- "Colon-delimited list of permitted SSL Ciphers "
-- "('XXX:...:XXX' - see manual)")
-+ SSL_CMD_ALL(CipherSuite, TAKE12,
-+ "Colon-delimited list of permitted SSL Ciphers, optional preceeded "
-+ "by protocol identifier ('XXX:...:XXX' - see manual)")
- SSL_CMD_SRV(CertificateFile, TAKE1,
- "SSL Server Certificate file "
- "('/path/to/file' - PEM or DER encoded)")
-@@ -185,9 +185,9 @@
- SSL_CMD_PXY(ProxyProtocol, RAW_ARGS,
- "SSL Proxy: enable or disable SSL protocol flavors "
- "('[+-][" SSL_PROTOCOLS "] ...' - see manual)")
-- SSL_CMD_PXY(ProxyCipherSuite, TAKE1,
-+ SSL_CMD_PXY(ProxyCipherSuite, TAKE12,
- "SSL Proxy: colon-delimited list of permitted SSL ciphers "
-- "('XXX:...:XXX' - see manual)")
-+ ", optionally preceeded by protocol specifier ('XXX:...:XXX' - see manual)")
- SSL_CMD_PXY(ProxyVerify, TAKE1,
- "SSL Proxy: whether to verify the remote certificate "
- "('on' or 'off')")
-@@ -398,7 +398,7 @@
- /* We must register the library in full, to ensure our configuration
- * code can successfully test the SSL environment.
- */
--#if MODSSL_USE_OPENSSL_PRE_1_1_API
-+#if MODSSL_USE_OPENSSL_PRE_1_1_API || defined(LIBRESSL_VERSION_NUMBER)
- (void)CRYPTO_malloc_init();
- #else
- OPENSSL_malloc_init();
---- httpd-2.4.34/modules/ssl/ssl_engine_config.c.r1827912+
-+++ httpd-2.4.34/modules/ssl/ssl_engine_config.c
-@@ -136,6 +136,7 @@
- mctx->auth.cipher_suite = NULL;
- mctx->auth.verify_depth = UNSET;
- mctx->auth.verify_mode = SSL_CVERIFY_UNSET;
-+ mctx->auth.tls13_ciphers = NULL;
-
- mctx->ocsp_mask = UNSET;
- mctx->ocsp_force_default = UNSET;
-@@ -280,6 +281,7 @@
- cfgMergeString(auth.cipher_suite);
- cfgMergeInt(auth.verify_depth);
- cfgMerge(auth.verify_mode, SSL_CVERIFY_UNSET);
-+ cfgMergeString(auth.tls13_ciphers);
-
- cfgMergeInt(ocsp_mask);
- cfgMergeBool(ocsp_force_default);
-@@ -761,22 +763,37 @@
-
- const char *ssl_cmd_SSLCipherSuite(cmd_parms *cmd,
- void *dcfg,
-- const char *arg)
-+ const char *arg1, const char *arg2)
- {
- SSLSrvConfigRec *sc = mySrvConfig(cmd->server);
- SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
-
-- /* always disable null and export ciphers */
-- arg = apr_pstrcat(cmd->pool, arg, ":!aNULL:!eNULL:!EXP", NULL);
--
-- if (cmd->path) {
-- dc->szCipherSuite = arg;
-+ if (arg2 == NULL) {
-+ arg2 = arg1;
-+ arg1 = "SSL";
- }
-- else {
-- sc->server->auth.cipher_suite = arg;
-+
-+ if (!strcmp("SSL", arg1)) {
-+ /* always disable null and export ciphers */
-+ arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL);
-+ if (cmd->path) {
-+ dc->szCipherSuite = arg2;
-+ }
-+ else {
-+ sc->server->auth.cipher_suite = arg2;
-+ }
-+ return NULL;
- }
--
-- return NULL;
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ else if (!strcmp("TLSv1.3", arg1)) {
-+ if (cmd->path) {
-+ return "TLSv1.3 ciphers cannot be set inside a directory context";
-+ }
-+ sc->server->auth.tls13_ciphers = arg2;
-+ return NULL;
-+ }
-+#endif
-+ return apr_pstrcat(cmd->pool, "procotol '", arg1, "' not supported", NULL);
- }
-
- #define SSL_FLAGS_CHECK_FILE \
-@@ -1449,6 +1466,9 @@
- else if (strcEQ(w, "TLSv1.2")) {
- thisopt = SSL_PROTOCOL_TLSV1_2;
- }
-+ else if (SSL_HAVE_PROTOCOL_TLSV1_3 && strcEQ(w, "TLSv1.3")) {
-+ thisopt = SSL_PROTOCOL_TLSV1_3;
-+ }
- #endif
- else if (strcEQ(w, "all")) {
- thisopt = SSL_PROTOCOL_ALL;
-@@ -1510,16 +1530,28 @@
-
- const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *cmd,
- void *dcfg,
-- const char *arg)
-+ const char *arg1, const char *arg2)
- {
- SSLDirConfigRec *dc = (SSLDirConfigRec *)dcfg;
--
-- /* always disable null and export ciphers */
-- arg = apr_pstrcat(cmd->pool, arg, ":!aNULL:!eNULL:!EXP", NULL);
--
-- dc->proxy->auth.cipher_suite = arg;
--
-- return NULL;
-+
-+ if (arg2 == NULL) {
-+ arg2 = arg1;
-+ arg1 = "SSL";
-+ }
-+
-+ if (!strcmp("SSL", arg1)) {
-+ /* always disable null and export ciphers */
-+ arg2 = apr_pstrcat(cmd->pool, arg2, ":!aNULL:!eNULL:!EXP", NULL);
-+ dc->proxy->auth.cipher_suite = arg2;
-+ return NULL;
-+ }
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ else if (!strcmp("TLSv1.3", arg1)) {
-+ dc->proxy->auth.tls13_ciphers = arg2;
-+ return NULL;
-+ }
-+#endif
-+ return apr_pstrcat(cmd->pool, "procotol '", arg1, "' not supported", NULL);
- }
-
- const char *ssl_cmd_SSLProxyVerify(cmd_parms *cmd,
---- httpd-2.4.34/modules/ssl/ssl_engine_init.c.r1827912+
-+++ httpd-2.4.34/modules/ssl/ssl_engine_init.c
-@@ -568,6 +568,9 @@
- #ifdef HAVE_TLSV1_X
- (protocol & SSL_PROTOCOL_TLSV1_1 ? "TLSv1.1, " : ""),
- (protocol & SSL_PROTOCOL_TLSV1_2 ? "TLSv1.2, " : ""),
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ (protocol & SSL_PROTOCOL_TLSV1_3 ? "TLSv1.3, " : ""),
-+#endif
- #endif
- NULL);
- cp[strlen(cp)-2] = NUL;
-@@ -600,6 +603,13 @@
- TLSv1_2_client_method() : /* proxy */
- TLSv1_2_server_method(); /* server */
- }
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ else if (protocol == SSL_PROTOCOL_TLSV1_3) {
-+ method = mctx->pkp ?
-+ TLSv1_3_client_method() : /* proxy */
-+ TLSv1_3_server_method(); /* server */
-+ }
-+#endif
- #endif
- else { /* For multiple protocols, we need a flexible method */
- method = mctx->pkp ?
-@@ -617,7 +627,8 @@
-
- SSL_CTX_set_options(ctx, SSL_OP_ALL);
-
--#if OPENSSL_VERSION_NUMBER < 0x10100000L
-+#if OPENSSL_VERSION_NUMBER < 0x10100000L || \
-+ (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20800000L)
- /* always disable SSLv2, as per RFC 6176 */
- SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2);
-
-@@ -639,10 +650,19 @@
- if (!(protocol & SSL_PROTOCOL_TLSV1_2)) {
- SSL_CTX_set_options(ctx, SSL_OP_NO_TLSv1_2);
- }
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ ssl_set_ctx_protocol_option(s, ctx, SSL_OP_NO_TLSv1_3,
-+ protocol & SSL_PROTOCOL_TLSV1_3, "TLSv1.3");
-+#endif
- #endif
-
- #else /* #if OPENSSL_VERSION_NUMBER < 0x10100000L */
- /* We first determine the maximum protocol version we should provide */
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ if (SSL_HAVE_PROTOCOL_TLSV1_3 && (protocol & SSL_PROTOCOL_TLSV1_3)) {
-+ prot = TLS1_3_VERSION;
-+ } else
-+#endif
- if (protocol & SSL_PROTOCOL_TLSV1_2) {
- prot = TLS1_2_VERSION;
- } else if (protocol & SSL_PROTOCOL_TLSV1_1) {
-@@ -664,6 +684,11 @@
-
- /* Next we scan for the minimal protocol version we should provide,
- * but we do not allow holes between max and min */
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ if (prot == TLS1_3_VERSION && protocol & SSL_PROTOCOL_TLSV1_2) {
-+ prot = TLS1_2_VERSION;
-+ }
-+#endif
- if (prot == TLS1_2_VERSION && protocol & SSL_PROTOCOL_TLSV1_1) {
- prot = TLS1_1_VERSION;
- }
-@@ -736,6 +761,13 @@
- SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
- #endif
-
-+#if OPENSSL_VERSION_NUMBER >= 0x1010100fL
-+ /* For OpenSSL >=1.1.1, disable auto-retry mode so it's possible
-+ * to consume handshake records without blocking for app-data.
-+ * https://github.com/openssl/openssl/issues/7178 */
-+ SSL_CTX_clear_mode(ctx, SSL_MODE_AUTO_RETRY);
-+#endif
-+
- return APR_SUCCESS;
- }
-
-@@ -888,7 +920,15 @@
- ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
- return ssl_die(s);
- }
--
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ if (mctx->auth.tls13_ciphers
-+ && !SSL_CTX_set_ciphersuites(ctx, mctx->auth.tls13_ciphers)) {
-+ ap_log_error(APLOG_MARK, APLOG_EMERG, 0, s, APLOGNO()
-+ "Unable to configure permitted TLSv1.3 ciphers");
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_EMERG, s);
-+ return ssl_die(s);
-+ }
-+#endif
- return APR_SUCCESS;
- }
-
-@@ -1493,6 +1533,13 @@
- X509_STORE_CTX *sctx;
- X509_STORE *store = SSL_CTX_get_cert_store(mctx->ssl_ctx);
-
-+#if OPENSSL_VERSION_NUMBER >= 0x1010100fL
-+ /* For OpenSSL >=1.1.1, turn on client cert support which is
-+ * otherwise turned off by default (by design).
-+ * https://github.com/openssl/openssl/issues/6933 */
-+ SSL_CTX_set_post_handshake_auth(mctx->ssl_ctx, 1);
-+#endif
-+
- SSL_CTX_set_client_cert_cb(mctx->ssl_ctx,
- ssl_callback_proxy_cert);
-
---- httpd-2.4.34/modules/ssl/ssl_engine_kernel.c.r1827912+
-+++ httpd-2.4.34/modules/ssl/ssl_engine_kernel.c
-@@ -188,6 +188,12 @@
- || strcmp(a1->cipher_suite, a2->cipher_suite))) {
- return 0;
- }
-+ /* both have the same ca cipher suite string */
-+ if ((a1->tls13_ciphers != a2->tls13_ciphers)
-+ && (!a1->tls13_ciphers || !a2->tls13_ciphers
-+ || strcmp(a1->tls13_ciphers, a2->tls13_ciphers))) {
-+ return 0;
-+ }
- return 1;
- }
-
-@@ -424,87 +430,70 @@
- }
- }
-
--/*
-- * Access Handler
-- */
--int ssl_hook_Access(request_rec *r)
-+static int ssl_check_post_client_verify(request_rec *r, SSLSrvConfigRec *sc,
-+ SSLDirConfigRec *dc, SSLConnRec *sslconn,
-+ SSL *ssl)
- {
-- SSLDirConfigRec *dc = myDirConfig(r);
-- SSLSrvConfigRec *sc = mySrvConfig(r->server);
-- SSLConnRec *sslconn = myConnConfig(r->connection);
-- SSL *ssl = sslconn ? sslconn->ssl : NULL;
-- server_rec *handshakeserver = sslconn ? sslconn->server : NULL;
-- SSLSrvConfigRec *hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL;
-- SSL_CTX *ctx = NULL;
-- apr_array_header_t *requires;
-- ssl_require_t *ssl_requires;
-- int ok, i;
-- BOOL renegotiate = FALSE, renegotiate_quick = FALSE;
- X509 *cert;
-- X509 *peercert;
-- X509_STORE *cert_store = NULL;
-- X509_STORE_CTX *cert_store_ctx;
-- STACK_OF(SSL_CIPHER) *cipher_list_old = NULL, *cipher_list = NULL;
-- const SSL_CIPHER *cipher = NULL;
-- int depth, verify_old, verify, n, is_slave = 0;
-- const char *ncipher_suite;
--
-- /* On a slave connection, we do not expect to have an SSLConnRec, but
-- * our master connection might have one. */
-- if (!(sslconn && ssl) && r->connection->master) {
-- sslconn = myConnConfig(r->connection->master);
-- ssl = sslconn ? sslconn->ssl : NULL;
-- handshakeserver = sslconn ? sslconn->server : NULL;
-- hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL;
-- is_slave = 1;
-- }
-
-- if (ssl) {
-- /*
-- * We should have handshaken here (on handshakeserver),
-- * otherwise we are being redirected (ErrorDocument) from
-- * a renegotiation failure below. The access is still
-- * forbidden in the latter case, let ap_die() handle
-- * this recursive (same) error.
-- */
-- if (!SSL_is_init_finished(ssl)) {
-- return HTTP_FORBIDDEN;
-+ /*
-+ * Remember the peer certificate's DN
-+ */
-+ if ((cert = SSL_get_peer_certificate(ssl))) {
-+ if (sslconn->client_cert) {
-+ X509_free(sslconn->client_cert);
- }
-- ctx = SSL_get_SSL_CTX(ssl);
-+ sslconn->client_cert = cert;
-+ sslconn->client_dn = NULL;
- }
--
-+
- /*
-- * Support for SSLRequireSSL directive
-+ * Finally check for acceptable renegotiation results
- */
-- if (dc->bSSLRequired && !ssl) {
-- if ((sc->enabled == SSL_ENABLED_OPTIONAL) && !is_slave) {
-- /* This vhost was configured for optional SSL, just tell the
-- * client that we need to upgrade.
-- */
-- apr_table_setn(r->err_headers_out, "Upgrade", "TLS/1.0, HTTP/1.1");
-- apr_table_setn(r->err_headers_out, "Connection", "Upgrade");
-+ if ((dc->nVerifyClient != SSL_CVERIFY_NONE) ||
-+ (sc->server->auth.verify_mode != SSL_CVERIFY_NONE)) {
-+ BOOL do_verify = ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) ||
-+ (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE));
-+
-+ if (do_verify && (SSL_get_verify_result(ssl) != X509_V_OK)) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02262)
-+ "Re-negotiation handshake failed: "
-+ "Client verification failed");
-
-- return HTTP_UPGRADE_REQUIRED;
-+ return HTTP_FORBIDDEN;
- }
-
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02219)
-- "access to %s failed, reason: %s",
-- r->filename, "SSL connection required");
--
-- /* remember forbidden access for strict require option */
-- apr_table_setn(r->notes, "ssl-access-forbidden", "1");
-+ if (do_verify) {
-+ if (cert == NULL) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02263)
-+ "Re-negotiation handshake failed: "
-+ "Client certificate missing");
-
-- return HTTP_FORBIDDEN;
-+ return HTTP_FORBIDDEN;
-+ }
-+ }
- }
-+ return OK;
-+}
-
-- /*
-- * Check to see whether SSL is in use; if it's not, then no
-- * further access control checks are relevant. (the test for
-- * sc->enabled is probably strictly unnecessary)
-- */
-- if (sc->enabled == SSL_ENABLED_FALSE || !ssl) {
-- return DECLINED;
-- }
-+/*
-+ * Access Handler, classic flavour, for SSL/TLS up to v1.2
-+ * where everything can be renegotiated and no one is happy.
-+ */
-+static int ssl_hook_Access_classic(request_rec *r, SSLSrvConfigRec *sc, SSLDirConfigRec *dc,
-+ SSLConnRec *sslconn, SSL *ssl)
-+{
-+ server_rec *handshakeserver = sslconn ? sslconn->server : NULL;
-+ SSLSrvConfigRec *hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL;
-+ SSL_CTX *ctx = NULL;
-+ BOOL renegotiate = FALSE, renegotiate_quick = FALSE;
-+ X509 *peercert;
-+ X509_STORE *cert_store = NULL;
-+ X509_STORE_CTX *cert_store_ctx;
-+ STACK_OF(SSL_CIPHER) *cipher_list_old = NULL, *cipher_list = NULL;
-+ const SSL_CIPHER *cipher = NULL;
-+ int depth, verify_old, verify, n, rc;
-+ const char *ncipher_suite;
-
- #ifdef HAVE_SRP
- /*
-@@ -581,7 +570,7 @@
- }
-
- /* configure new state */
-- if (is_slave) {
-+ if (r->connection->master) {
- /* TODO: this categorically fails changed cipher suite settings
- * on slave connections. We could do better by
- * - create a new SSL* from our SSL_CTX and set cipher suite there,
-@@ -659,7 +648,7 @@
- }
-
- if (renegotiate) {
-- if (is_slave) {
-+ if (r->connection->master) {
- /* The request causes renegotiation on a slave connection.
- * This is not allowed since we might have concurrent requests
- * on this connection.
-@@ -732,7 +721,7 @@
- (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)))
- {
- renegotiate = TRUE;
-- if (is_slave) {
-+ if (r->connection->master) {
- /* The request causes renegotiation on a slave connection.
- * This is not allowed since we might have concurrent requests
- * on this connection.
-@@ -885,6 +874,7 @@
-
- if (renegotiate_quick) {
- STACK_OF(X509) *cert_stack;
-+ X509 *cert;
-
- /* perform just a manual re-verification of the peer */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02258)
-@@ -1037,43 +1027,10 @@
- }
-
- /*
-- * Remember the peer certificate's DN
-- */
-- if ((cert = SSL_get_peer_certificate(ssl))) {
-- if (sslconn->client_cert) {
-- X509_free(sslconn->client_cert);
-- }
-- sslconn->client_cert = cert;
-- sslconn->client_dn = NULL;
-- }
--
-- /*
- * Finally check for acceptable renegotiation results
- */
-- if ((dc->nVerifyClient != SSL_CVERIFY_NONE) ||
-- (sc->server->auth.verify_mode != SSL_CVERIFY_NONE)) {
-- BOOL do_verify = ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) ||
-- (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE));
--
-- if (do_verify && (SSL_get_verify_result(ssl) != X509_V_OK)) {
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02262)
-- "Re-negotiation handshake failed: "
-- "Client verification failed");
--
-- return HTTP_FORBIDDEN;
-- }
--
-- if (do_verify) {
-- if ((peercert = SSL_get_peer_certificate(ssl)) == NULL) {
-- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02263)
-- "Re-negotiation handshake failed: "
-- "Client certificate missing");
--
-- return HTTP_FORBIDDEN;
-- }
--
-- X509_free(peercert);
-- }
-+ if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) {
-+ return rc;
- }
-
- /*
-@@ -1096,6 +1053,215 @@
- }
- }
-
-+ return DECLINED;
-+}
-+
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+/*
-+ * Access Handler, modern flavour, for SSL/TLS v1.3 and onward.
-+ * Only client certificates can be requested, everything else stays.
-+ */
-+static int ssl_hook_Access_modern(request_rec *r, SSLSrvConfigRec *sc, SSLDirConfigRec *dc,
-+ SSLConnRec *sslconn, SSL *ssl)
-+{
-+ if ((dc->nVerifyClient != SSL_CVERIFY_UNSET) ||
-+ (sc->server->auth.verify_mode != SSL_CVERIFY_UNSET)) {
-+ int vmode_inplace, vmode_needed;
-+ int change_vmode = FALSE;
-+ int old_state, n, rc;
-+
-+ vmode_inplace = SSL_get_verify_mode(ssl);
-+ vmode_needed = SSL_VERIFY_NONE;
-+
-+ if ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) ||
-+ (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE)) {
-+ vmode_needed |= SSL_VERIFY_PEER_STRICT;
-+ }
-+
-+ if ((dc->nVerifyClient == SSL_CVERIFY_OPTIONAL) ||
-+ (dc->nVerifyClient == SSL_CVERIFY_OPTIONAL_NO_CA) ||
-+ (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL) ||
-+ (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA))
-+ {
-+ vmode_needed |= SSL_VERIFY_PEER;
-+ }
-+
-+ if (vmode_needed == SSL_VERIFY_NONE) {
-+ return DECLINED;
-+ }
-+
-+ vmode_needed |= SSL_VERIFY_CLIENT_ONCE;
-+ if (vmode_inplace != vmode_needed) {
-+ /* Need to change, if new setting is more restrictive than existing one */
-+
-+ if ((vmode_inplace == SSL_VERIFY_NONE)
-+ || (!(vmode_inplace & SSL_VERIFY_PEER)
-+ && (vmode_needed & SSL_VERIFY_PEER))
-+ || (!(vmode_inplace & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)
-+ && (vmode_needed & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))) {
-+ /* need to change the effective verify mode */
-+ change_vmode = TRUE;
-+ }
-+ else {
-+ /* FIXME: does this work with TLSv1.3? Is this more than re-inspecting
-+ * the certificate we should already have? */
-+ /*
-+ * override of SSLVerifyDepth
-+ *
-+ * The depth checks are handled by us manually inside the
-+ * verify callback function and not by OpenSSL internally
-+ * (and our function is aware of both the per-server and
-+ * per-directory contexts). So we cannot ask OpenSSL about
-+ * the currently verify depth. Instead we remember it in our
-+ * SSLConnRec attached to the SSL* of OpenSSL. We've to force
-+ * the renegotiation if the reconfigured/new verify depth is
-+ * less than the currently active/remembered verify depth
-+ * (because this means more restriction on the certificate
-+ * chain).
-+ */
-+ n = (sslconn->verify_depth != UNSET)?
-+ sslconn->verify_depth : sc->server->auth.verify_depth;
-+ /* determine the new depth */
-+ sslconn->verify_depth = (dc->nVerifyDepth != UNSET)
-+ ? dc->nVerifyDepth
-+ : sc->server->auth.verify_depth;
-+ if (sslconn->verify_depth < n) {
-+ change_vmode = TRUE;
-+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO()
-+ "Reduced client verification depth will "
-+ "force renegotiation");
-+ }
-+ }
-+ }
-+
-+ if (change_vmode) {
-+ char peekbuf[1];
-+
-+ if (r->connection->master) {
-+ /* FIXME: modifying the SSL on a slave connection is no good.
-+ * We would need to push this back to the master connection
-+ * somehow.
-+ */
-+ apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "verify-client");
-+ return HTTP_FORBIDDEN;
-+ }
-+
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO() "verify client post handshake");
-+
-+ SSL_set_verify(ssl, vmode_needed, ssl_callback_SSLVerify);
-+
-+ if (SSL_verify_client_post_handshake(ssl) != 1) {
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(10158)
-+ "cannot perform post-handshake authentication");
-+ ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);
-+ apr_table_setn(r->notes, "error-notes",
-+ "Reason: Cannot perform Post-Handshake Authentication.
");
-+ return HTTP_FORBIDDEN;
-+ }
-+
-+ old_state = sslconn->reneg_state;
-+ sslconn->reneg_state = RENEG_ALLOW;
-+ modssl_set_app_data2(ssl, r);
-+
-+ SSL_do_handshake(ssl);
-+ /* Need to trigger renegotiation handshake by reading.
-+ * Peeking 0 bytes actually works.
-+ * See: http://marc.info/?t=145493359200002&r=1&w=2
-+ */
-+ SSL_peek(ssl, peekbuf, 0);
-+
-+ sslconn->reneg_state = old_state;
-+ modssl_set_app_data2(ssl, NULL);
-+
-+ /*
-+ * Finally check for acceptable renegotiation results
-+ */
-+ if (OK != (rc = ssl_check_post_client_verify(r, sc, dc, sslconn, ssl))) {
-+ return rc;
-+ }
-+ }
-+ }
-+
-+ return DECLINED;
-+}
-+#endif
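-
- For reference, and assuming OpenSSL 1.1.1 or later: the post-handshake client-auth
- sequence that ssl_hook_Access_modern() relies on boils down to a handful of library
- calls. The sketch below shows them in isolation; it illustrates the OpenSSL API rather
- than mod_ssl code. `ssl` is assumed to be an established server-side TLS 1.3 connection
- whose client advertised the post_handshake_auth extension, and `verify_cb` is a verify
- callback of your own (both are assumptions, not taken from the patch).
-
-     #include <openssl/ssl.h>
-
-     /* Sketch only: request a client certificate after the TLS 1.3 handshake.
-      * Returns 0 if a certificate was received, 1 if none, -1 on error. */
-     static int request_client_cert_pha(SSL *ssl,
-                                        int (*verify_cb)(int, X509_STORE_CTX *))
-     {
-         char peekbuf[1];
-         X509 *peer;
-
-         SSL_set_verify(ssl, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE, verify_cb);
-
-         /* Queues a CertificateRequest; fails unless the client offered
-          * the post_handshake_auth extension. */
-         if (SSL_verify_client_post_handshake(ssl) != 1)
-             return -1;
-
-         /* Drive the state machine so the request goes out and the client's
-          * Certificate/Finished messages are read back in. */
-         SSL_do_handshake(ssl);
-         SSL_peek(ssl, peekbuf, 0);
-
-         if ((peer = SSL_get_peer_certificate(ssl)) != NULL) {
-             X509_free(peer);
-             return 0;
-         }
-         return 1;
-     }
-
- The zero-byte SSL_peek() matches the trick used in the patch: it triggers the read side
- of the TLS state machine without consuming any application data.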
-+
-+int ssl_hook_Access(request_rec *r)
-+{
-+ SSLDirConfigRec *dc = myDirConfig(r);
-+ SSLSrvConfigRec *sc = mySrvConfig(r->server);
-+ SSLConnRec *sslconn = myConnConfig(r->connection);
-+ SSL *ssl = sslconn ? sslconn->ssl : NULL;
-+ apr_array_header_t *requires;
-+ ssl_require_t *ssl_requires;
-+ int ok, i, ret;
-+
-+ /* On a slave connection, we do not expect to have an SSLConnRec, but
-+ * our master connection might have one. */
-+ if (!(sslconn && ssl) && r->connection->master) {
-+ sslconn = myConnConfig(r->connection->master);
-+ ssl = sslconn ? sslconn->ssl : NULL;
-+ }
-+
-+ /*
-+ * We should have handshaken here, otherwise we are being
-+ * redirected (ErrorDocument) from a renegotiation failure below.
-+ * The access is still forbidden in the latter case, let ap_die() handle
-+ * this recursive (same) error.
-+ */
-+ if (ssl && !SSL_is_init_finished(ssl)) {
-+ return HTTP_FORBIDDEN;
-+ }
-+
-+ /*
-+ * Support for SSLRequireSSL directive
-+ */
-+ if (dc->bSSLRequired && !ssl) {
-+ if ((sc->enabled == SSL_ENABLED_OPTIONAL) && !r->connection->master) {
-+ /* This vhost was configured for optional SSL, just tell the
-+ * client that we need to upgrade.
-+ */
-+ apr_table_setn(r->err_headers_out, "Upgrade", "TLS/1.0, HTTP/1.1");
-+ apr_table_setn(r->err_headers_out, "Connection", "Upgrade");
-+
-+ return HTTP_UPGRADE_REQUIRED;
-+ }
-+
-+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02219)
-+ "access to %s failed, reason: %s",
-+ r->filename, "SSL connection required");
-+
-+ /* remember forbidden access for strict require option */
-+ apr_table_setn(r->notes, "ssl-access-forbidden", "1");
-+
-+ return HTTP_FORBIDDEN;
-+ }
-+
-+ /*
-+ * Check to see whether SSL is in use; if it's not, then no
-+ * further access control checks are relevant. (the test for
-+ * sc->enabled is probably strictly unnecessary)
-+ */
-+ if (sc->enabled == SSL_ENABLED_FALSE || !ssl) {
-+ return DECLINED;
-+ }
-+
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ /* TLSv1.3+ is less complicated here. Branch off into a new codeline
-+ * and avoid messing with the past. */
-+ if (SSL_version(ssl) >= TLS1_3_VERSION) {
-+ ret = ssl_hook_Access_modern(r, sc, dc, sslconn, ssl);
-+ }
-+ else
-+#endif
-+ {
-+ ret = ssl_hook_Access_classic(r, sc, dc, sslconn, ssl);
-+ }
-+
-+ if (ret != DECLINED) {
-+ return ret;
-+ }
-+
- /* If we're trying to have the user name set from a client
- * certificate then we need to set it here. This should be safe as
- * the user name probably isn't important from an auth checking point
-@@ -2080,31 +2246,43 @@
- {
- conn_rec *c;
- server_rec *s;
-- SSLConnRec *scr;
-
- /* Retrieve the conn_rec and the associated SSLConnRec. */
- if ((c = (conn_rec *)SSL_get_app_data((SSL *)ssl)) == NULL) {
- return;
- }
-
-- if ((scr = myConnConfig(c)) == NULL) {
-- return;
-- }
-+ /* With TLS 1.3 this callback may be called multiple times on the first
-+ * negotiation, so the below logic to detect renegotiations can't work.
-+ * Fortunately renegotiations are forbidden starting with TLS 1.3, and
-+ * this is enforced by OpenSSL so there's nothing to be done here.
-+ */
-+#if SSL_HAVE_PROTOCOL_TLSV1_3
-+ if (SSL_version(ssl) < TLS1_3_VERSION)
-+#endif
-+ {
-+ SSLConnRec *sslconn;
-+
-+ if ((sslconn = myConnConfig(c)) == NULL) {
-+ return;
-+ }
-
-- /* If the reneg state is to reject renegotiations, check the SSL
-- * state machine and move to ABORT if a Client Hello is being
-- * read. */
-- if (!scr->is_proxy &&
-- (where & SSL_CB_HANDSHAKE_START) &&
-- scr->reneg_state == RENEG_REJECT) {
-- scr->reneg_state = RENEG_ABORT;
-+ /* If the reneg state is to reject renegotiations, check the SSL
-+ * state machine and move to ABORT if a Client Hello is being
-+ * read. */
-+ if (!sslconn->is_proxy &&
-+ (where & SSL_CB_HANDSHAKE_START) &&
-+ sslconn->reneg_state == RENEG_REJECT) {
-+ sslconn->reneg_state = RENEG_ABORT;
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02042)
- "rejecting client initiated renegotiation");
-- }
-- /* If the first handshake is complete, change state to reject any
-- * subsequent client-initiated renegotiation. */
-- else if ((where & SSL_CB_HANDSHAKE_DONE) && scr->reneg_state == RENEG_INIT) {
-- scr->reneg_state = RENEG_REJECT;
-+ }
-+ /* If the first handshake is complete, change state to reject any
-+ * subsequent client-initiated renegotiation. */
-+ else if ((where & SSL_CB_HANDSHAKE_DONE)
-+ && sslconn->reneg_state == RENEG_INIT) {
-+ sslconn->reneg_state = RENEG_REJECT;
-+ }
- }
-
- s = mySrvFromConn(c);
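-
- The reworked callback above is easier to follow next to a bare-bones version of the same
- idea. A rough, self-contained sketch (not the patch's code): `struct conn_state` and its
- flag are invented for illustration, and the callback assumes that pointer was stored on
- the connection with SSL_set_app_data().
-
-     #include <openssl/ssl.h>
-
-     struct conn_state {
-         int reneg_locked;          /* set once the first handshake has finished */
-     };
-
-     /* Sketch of an info callback that flags client-initiated renegotiation,
-      * skipping the check entirely on TLS 1.3 where renegotiation no longer
-      * exists and OpenSSL refuses it on its own. */
-     static void reject_reneg_info_cb(const SSL *ssl, int where, int ret)
-     {
-         struct conn_state *cs = SSL_get_app_data(ssl);
-
-         (void)ret;
-         if (cs == NULL)
-             return;
-
-     #if defined(TLS1_3_VERSION)
-         if (SSL_version(ssl) >= TLS1_3_VERSION)
-             return;
-     #endif
-
-         if (where & SSL_CB_HANDSHAKE_DONE) {
-             cs->reneg_locked = 1;
-         }
-         else if ((where & SSL_CB_HANDSHAKE_START) && cs->reneg_locked) {
-             /* a real server would now arrange to abort the connection,
-              * e.g. by switching to a "discard all further input" state */
-             cs->reneg_locked = 2;
-         }
-     }
-
- Installed once per context with SSL_CTX_set_info_callback(ctx, reject_reneg_info_cb).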
---- httpd-2.4.34/modules/ssl/ssl_private.h.r1827912+
-+++ httpd-2.4.34/modules/ssl/ssl_private.h
-@@ -132,13 +132,14 @@
- SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MIN_PROTO_VERSION, version, NULL)
- #define SSL_CTX_set_max_proto_version(ctx, version) \
- SSL_CTX_ctrl(ctx, SSL_CTRL_SET_MAX_PROTO_VERSION, version, NULL)
--#endif
--/* LibreSSL declares OPENSSL_VERSION_NUMBER == 2.0 but does not include most
-- * changes from OpenSSL >= 1.1 (new functions, macros, deprecations, ...), so
-- * we have to work around this...
-+#elif LIBRESSL_VERSION_NUMBER < 0x2070000f
-+/* LibreSSL before 2.7 declares OPENSSL_VERSION_NUMBER == 2.0 but does not
-+ * include most changes from OpenSSL >= 1.1 (new functions, macros,
-+ * deprecations, ...), so we have to work around this...
- */
- #define MODSSL_USE_OPENSSL_PRE_1_1_API (1)
--#else
-+#endif /* LIBRESSL_VERSION_NUMBER < 0x2060000f */
-+#else /* defined(LIBRESSL_VERSION_NUMBER) */
- #define MODSSL_USE_OPENSSL_PRE_1_1_API (OPENSSL_VERSION_NUMBER < 0x10100000L)
- #endif
-
-@@ -238,7 +239,8 @@
- void free_bio_methods(void);
- #endif
-
--#if OPENSSL_VERSION_NUMBER < 0x10002000L || defined(LIBRESSL_VERSION_NUMBER)
-+#if OPENSSL_VERSION_NUMBER < 0x10002000L || \
-+ (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000f)
- #define X509_STORE_CTX_get0_store(x) (x->ctx)
- #endif
-
-@@ -372,8 +374,17 @@
- #ifdef HAVE_TLSV1_X
- #define SSL_PROTOCOL_TLSV1_1 (1<<3)
- #define SSL_PROTOCOL_TLSV1_2 (1<<4)
-+#define SSL_PROTOCOL_TLSV1_3 (1<<5)
-+
-+#ifdef SSL_OP_NO_TLSv1_3
-+#define SSL_HAVE_PROTOCOL_TLSV1_3 (1)
-+#define SSL_PROTOCOL_ALL (SSL_PROTOCOL_BASIC| \
-+ SSL_PROTOCOL_TLSV1_1|SSL_PROTOCOL_TLSV1_2|SSL_PROTOCOL_TLSV1_3)
-+#else
-+#define SSL_HAVE_PROTOCOL_TLSV1_3 (0)
- #define SSL_PROTOCOL_ALL (SSL_PROTOCOL_BASIC| \
- SSL_PROTOCOL_TLSV1_1|SSL_PROTOCOL_TLSV1_2)
-+#endif
- #else
- #define SSL_PROTOCOL_ALL (SSL_PROTOCOL_BASIC)
- #endif
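-
- If it helps when reading these flag definitions: once such a protocol set is resolved,
- applying it usually comes down to the min/max protocol calls that the compatibility
- macros near the top of this header emulate for toolkits lacking them. A hedged sketch,
- restricting a context to TLS 1.2 and, where available, TLS 1.3 (the helper name is made
- up for illustration):
-
-     #include <openssl/ssl.h>
-
-     /* Sketch: clamp a context to TLS 1.2 and newer. */
-     static int restrict_to_tls12_plus(SSL_CTX *ctx)
-     {
-         if (SSL_CTX_set_min_proto_version(ctx, TLS1_2_VERSION) != 1)
-             return -1;
-     #if defined(TLS1_3_VERSION)
-         return SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION) == 1 ? 0 : -1;
-     #else
-         return SSL_CTX_set_max_proto_version(ctx, TLS1_2_VERSION) == 1 ? 0 : -1;
-     #endif
-     }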
-@@ -646,6 +657,11 @@
- /** for client or downstream server authentication */
- int verify_depth;
- ssl_verify_t verify_mode;
-+
-+ /** TLSv1.3 has its separate cipher list, separate from the
-+ settings for older TLS protocol versions. Since which one takes
-+ effect is a matter of negotiation, we need separate settings */
-+ const char *tls13_ciphers;
- } modssl_auth_ctx_t;
-
- #ifdef HAVE_TLS_SESSION_TICKETS
-@@ -801,7 +817,7 @@
- const char *ssl_cmd_SSLCryptoDevice(cmd_parms *, void *, const char *);
- const char *ssl_cmd_SSLRandomSeed(cmd_parms *, void *, const char *, const char *, const char *);
- const char *ssl_cmd_SSLEngine(cmd_parms *, void *, const char *);
--const char *ssl_cmd_SSLCipherSuite(cmd_parms *, void *, const char *);
-+const char *ssl_cmd_SSLCipherSuite(cmd_parms *, void *, const char *, const char *);
- const char *ssl_cmd_SSLCertificateFile(cmd_parms *, void *, const char *);
- const char *ssl_cmd_SSLCertificateKeyFile(cmd_parms *, void *, const char *);
- const char *ssl_cmd_SSLCertificateChainFile(cmd_parms *, void *, const char *);
-@@ -830,7 +846,7 @@
-
- const char *ssl_cmd_SSLProxyEngine(cmd_parms *cmd, void *dcfg, int flag);
- const char *ssl_cmd_SSLProxyProtocol(cmd_parms *, void *, const char *);
--const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *, void *, const char *);
-+const char *ssl_cmd_SSLProxyCipherSuite(cmd_parms *, void *, const char *, const char *);
- const char *ssl_cmd_SSLProxyVerify(cmd_parms *, void *, const char *);
- const char *ssl_cmd_SSLProxyVerifyDepth(cmd_parms *, void *, const char *);
- const char *ssl_cmd_SSLProxyCACertificatePath(cmd_parms *, void *, const char *);
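-
- One point worth spelling out about the tls13_ciphers field and the extra argument grown
- by ssl_cmd_SSLCipherSuite() and ssl_cmd_SSLProxyCipherSuite() above: OpenSSL keeps the
- TLS 1.3 suite list separate from the classic cipher string, so a configuration layer
- really does need two settings. A minimal sketch of the idea, using a hypothetical helper
- (not mod_ssl code) with `older` and `tls13` standing for the two configured strings:
-
-     #include <openssl/ssl.h>
-
-     /* Sketch: apply the two cipher settings a TLS 1.3 aware config needs.
-      * SSL_CTX_set_cipher_list() covers TLS 1.2 and below; the TLS 1.3 suites
-      * go through SSL_CTX_set_ciphersuites() (OpenSSL 1.1.1+). */
-     static int apply_cipher_config(SSL_CTX *ctx, const char *older,
-                                    const char *tls13)
-     {
-         if (older && SSL_CTX_set_cipher_list(ctx, older) != 1)
-             return -1;
-     #if OPENSSL_VERSION_NUMBER >= 0x10101000L
-         /* e.g. tls13 = "TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256" */
-         if (tls13 && SSL_CTX_set_ciphersuites(ctx, tls13) != 1)
-             return -1;
-     #endif
-         return 0;
-     }
-
- Passing NULL for either string simply leaves OpenSSL's default for that protocol family
- in place.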
diff --git a/httpd-2.4.4-r1337344+.patch b/httpd-2.4.4-r1337344+.patch
deleted file mode 100644
index 6e5c3e78329a1e0a9d8baeae4d58d7c1618e987c..0000000000000000000000000000000000000000
--- a/httpd-2.4.4-r1337344+.patch
+++ /dev/null
@@ -1,250 +0,0 @@
-# ./pullrev.sh 1337344 1341905 1342065 1341930
-
-suexec enhancements:
-
-1) use syslog for logging
-2) use capabilities not setuid/setgid root binary
-
-http://svn.apache.org/viewvc?view=revision&revision=1337344
-http://svn.apache.org/viewvc?view=revision&revision=1341905
-http://svn.apache.org/viewvc?view=revision&revision=1342065
-http://svn.apache.org/viewvc?view=revision&revision=1341930
-
---- httpd-2.4.4/configure.in.r1337344+
-+++ httpd-2.4.4/configure.in
-@@ -734,7 +734,24 @@ APACHE_HELP_STRING(--with-suexec-gidmin,
-
- AC_ARG_WITH(suexec-logfile,
- APACHE_HELP_STRING(--with-suexec-logfile,Set the logfile),[
-- AC_DEFINE_UNQUOTED(AP_LOG_EXEC, "$withval", [SuExec log file] ) ] )
-+ if test "x$withval" = "xyes"; then
-+ AC_DEFINE_UNQUOTED(AP_LOG_EXEC, "$withval", [SuExec log file])
-+ fi
-+])
-+
-+AC_ARG_WITH(suexec-syslog,
-+APACHE_HELP_STRING(--with-suexec-syslog,Set the logfile),[
-+ if test $withval = "yes"; then
-+ if test "x${with_suexec_logfile}" != "xno"; then
-+ AC_MSG_NOTICE([hint: use "--without-suexec-logfile --with-suexec-syslog"])
-+ AC_MSG_ERROR([suexec does not support both logging to file and syslog])
-+ fi
-+ AC_CHECK_FUNCS([vsyslog], [], [
-+ AC_MSG_ERROR([cannot support syslog from suexec without vsyslog()])])
-+ AC_DEFINE(AP_LOG_SYSLOG, 1, [SuExec log to syslog])
-+ fi
-+])
-+
-
- AC_ARG_WITH(suexec-safepath,
- APACHE_HELP_STRING(--with-suexec-safepath,Set the safepath),[
-@@ -744,6 +761,15 @@ AC_ARG_WITH(suexec-umask,
- APACHE_HELP_STRING(--with-suexec-umask,umask for suexec'd process),[
- AC_DEFINE_UNQUOTED(AP_SUEXEC_UMASK, 0$withval, [umask for suexec'd process] ) ] )
-
-+INSTALL_SUEXEC=setuid
-+AC_ARG_ENABLE([suexec-capabilities],
-+APACHE_HELP_STRING(--enable-suexec-capabilities,Use Linux capability bits not setuid root suexec), [
-+INSTALL_SUEXEC=caps
-+AC_DEFINE(AP_SUEXEC_CAPABILITIES, 1,
-+ [Enable if suexec is installed with Linux capabilities, not setuid])
-+])
-+APACHE_SUBST(INSTALL_SUEXEC)
-+
- dnl APR should go after the other libs, so the right symbols can be picked up
- if test x${apu_found} != xobsolete; then
- AP_LIBS="$AP_LIBS `$apu_config --avoid-ldap --link-libtool`"
---- httpd-2.4.4/docs/manual/suexec.html.en.r1337344+
-+++ httpd-2.4.4/docs/manual/suexec.html.en
-@@ -372,6 +372,21 @@
- together with the --enable-suexec option to let
- APACI accept your request for using the suEXEC feature.
-
-+ --enable-suexec-capabilities
-+
-+ Linux specific: Normally,
-+ the suexec binary is installed "setuid/setgid
-+ root", which allows it to run with the full privileges of the
-+ root user. If this option is used, the suexec
-+ binary will instead be installed with only the setuid/setgid
-+ "capability" bits set, which is the subset of full root
-+ privileges required for suexec operation. Note that
-+ the suexec binary may not be able to write to a log
-+ file in this mode; it is recommended that the
-+ --with-suexec-syslog --without-suexec-logfile
-+ options are used in conjunction with this mode, so that syslog
-+ logging is used instead.
-+
- --with-suexec-bin=PATH
-
- The path to the suexec binary must be hard-coded
-@@ -433,6 +448,12 @@
- "suexec_log
" and located in your standard logfile
- directory (--logfiledir
).
-
-+ --with-suexec-syslog
-+
-+ If defined, suexec will log notices and errors to syslog
-+ instead of a logfile. This option must be combined
- with --without-suexec-logfile.
-+
- --with-suexec-safepath=PATH
-
- Define a safe PATH environment to pass to CGI
-@@ -550,9 +571,12 @@ Group webgroup
-
- The suEXEC wrapper will write log information
- to the file defined with the --with-suexec-logfile
-- option as indicated above. If you feel you have configured and
-- installed the wrapper properly, have a look at this log and the
-- error_log for the server to see where you may have gone astray.
-+ option as indicated above, or to syslog if --with-suexec-syslog
-+ is used. If you feel you have configured and
-+ installed the wrapper properly, have a look at the log and the
-+ error_log for the server to see where you may have gone astray.
-+ The output of "suexec -V" will show the options
-+ used to compile suexec, if using a binary distribution.
-
-
-
-@@ -640,4 +664,4 @@ if (typeof(prettyPrint) !== 'undefined')
- prettyPrint();
- }
- //-->
--