diff --git a/accel-tcg-fix-race-in-cpu_exec_step_atomic-bug-18630.patch b/accel-tcg-fix-race-in-cpu_exec_step_atomic-bug-18630.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c2bb3ec7358b9e7cbcaea8d7afc3a9956df6b8e1
--- /dev/null
+++ b/accel-tcg-fix-race-in-cpu_exec_step_atomic-bug-18630.patch
@@ -0,0 +1,93 @@
+From ede1e22fea5ec59c1cbf6d67cf2d64c587341964 Mon Sep 17 00:00:00 2001
+From: liuxiangdong
+Date: Sun, 10 Sep 2023 21:27:42 +0800
+Subject: [PATCH] accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The bug describes a race whereby cpu_exec_step_atomic can acquire a TB
+which is invalidated by a tb_flush before we execute it. This doesn't
+affect the other cpu_exec modes as a tb_flush by its nature can only
+occur on a quiescent system. The race was described as:
+
+  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+  B3. tcg_tb_alloc obtains a new TB
+
+  C3. TB obtained with tb_lookup__cpu_state or tb_gen_code
+      (same TB as B2)
+
+  A3. start_exclusive critical section entered
+  A4. do_tb_flush is called, TB memory freed/re-allocated
+  A5. end_exclusive exits critical section
+
+  B2. tcg_cpu_exec => cpu_exec => tb_find => tb_gen_code
+  B3. tcg_tb_alloc reallocates TB from B2
+
+  C4. start_exclusive critical section entered
+  C5. cpu_tb_exec executes the TB code that was free in A4
+
+The simplest fix is to widen the exclusive period to include the TB
+lookup. As a result we can drop the complication of checking we are in
+the exclusive region before we end it.
+
+Cc: Yifan
+Buglink: https://bugs.launchpad.net/qemu/+bug/1863025
+Reviewed-by: Paolo Bonzini
+Reviewed-by: Richard Henderson
+Signed-off-by: Alex Bennée
+Message-Id: <20200214144952.15502-1-alex.bennee@linaro.org>
+Signed-off-by: Richard Henderson
+Signed-off-by: liuxiangdong
+---
+ accel/tcg/cpu-exec.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
+index 6c85c3ee1e..97803f0f7f 100644
+--- a/accel/tcg/cpu-exec.c
++++ b/accel/tcg/cpu-exec.c
+@@ -243,6 +243,8 @@ void cpu_exec_step_atomic(CPUState *cpu)
+     volatile bool in_exclusive_region = false;
+ 
+     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
++        start_exclusive();
++
+         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
+         if (tb == NULL) {
+             mmap_lock();
+@@ -250,8 +252,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
+             mmap_unlock();
+         }
+ 
+-        start_exclusive();
+-
+         /* Since we got here, we know that parallel_cpus must be true. */
+         parallel_cpus = false;
+         in_exclusive_region = true;
+@@ -274,14 +274,14 @@ void cpu_exec_step_atomic(CPUState *cpu)
+         assert_no_pages_locked();
+     }
+ 
+-    if (in_exclusive_region) {
+-        /* We might longjump out of either the codegen or the
+-         * execution, so must make sure we only end the exclusive
+-         * region if we started it.
+-         */
+-        parallel_cpus = true;
+-        end_exclusive();
+-    }
++    /*
++     * As we start the exclusive region before codegen we must still
++     * be in the region if we longjump out of either the codegen or
++     * the execution.
++     */
++    g_assert(in_exclusive_region);
++    parallel_cpus = true;
++    end_exclusive();
+ }
+ 
+ struct tb_desc {
+-- 
+2.41.0.windows.1
+
diff --git a/io-remove-io-watch-if-TLS-channel-is-closed-during-h.patch b/io-remove-io-watch-if-TLS-channel-is-closed-during-h.patch
new file mode 100644
index 0000000000000000000000000000000000000000..80ca049a62b4757a58708c928305ffe6a8471a6d
--- /dev/null
+++ b/io-remove-io-watch-if-TLS-channel-is-closed-during-h.patch
@@ -0,0 +1,87 @@
+From 4b395b33a27c2ab3031be92f700a9b48673c9fe2 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Daniel=20P=2E=20Berrang=C3=A9?=
+Date: Tue, 20 Jun 2023 09:45:34 +0100
+Subject: [PATCH] io: remove io watch if TLS channel is closed during handshake
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The TLS handshake may take some time to complete, during which time an
+I/O watch might be registered with the main loop. If the owner of the
+I/O channel invokes qio_channel_close() while the handshake is waiting
+to continue, the I/O watch must be removed. Failing to remove it will
+later trigger the completion callback which the owner is not expecting
+to receive. In the case of the VNC server, this results in a SEGV as
+vnc_disconnect_start() tries to shut down a client connection that is
+already gone / NULL.
+
+---------------------------------------
+Note: g_clear_handle_id() is not available before GLib 2.56, so it is open-coded here.
+
+CVE-2023-3354
+Reported-by: jiangyegen
+Signed-off-by: Daniel P. Berrangé
+---
+ include/io/channel-tls.h |  1 +
+ io/channel-tls.c         | 19 +++++++++++++------
+ 2 files changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h
+index fdbdf12feb..e49e2831a2 100644
+--- a/include/io/channel-tls.h
++++ b/include/io/channel-tls.h
+@@ -49,6 +49,7 @@ struct QIOChannelTLS {
+     QIOChannel *master;
+     QCryptoTLSSession *session;
+     QIOChannelShutdown shutdown;
++    guint hs_ioc_tag;
+ };
+ 
+ /**
+diff --git a/io/channel-tls.c b/io/channel-tls.c
+index 7ec8ceff2f..b0bdbd428f 100644
+--- a/io/channel-tls.c
++++ b/io/channel-tls.c
+@@ -194,12 +194,13 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc,
+         }
+ 
+         trace_qio_channel_tls_handshake_pending(ioc, status);
+-        qio_channel_add_watch_full(ioc->master,
+-                                   condition,
+-                                   qio_channel_tls_handshake_io,
+-                                   data,
+-                                   NULL,
+-                                   context);
++        ioc->hs_ioc_tag =
++            qio_channel_add_watch_full(ioc->master,
++                                       condition,
++                                       qio_channel_tls_handshake_io,
++                                       data,
++                                       NULL,
++                                       context);
+     }
+ }
+ 
+@@ -214,6 +215,7 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc,
+     QIOChannelTLS *tioc = QIO_CHANNEL_TLS(
+         qio_task_get_source(task));
+ 
++    tioc->hs_ioc_tag = 0;
+     g_free(data);
+     qio_channel_tls_handshake_task(tioc, task, context);
+ 
+@@ -371,6 +373,11 @@ static int qio_channel_tls_close(QIOChannel *ioc,
+ {
+     QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc);
+ 
++    if (tioc->hs_ioc_tag) {
++        g_source_remove(tioc->hs_ioc_tag);
++        tioc->hs_ioc_tag = 0;
++    }
++
+     return qio_channel_close(tioc->master, errp);
+ }
+ 
+-- 
+2.41.0.windows.1
+
diff --git a/pci-assert-configuration-access-is-within-bounds.patch b/pci-assert-configuration-access-is-within-bounds.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e7ddcd714a01216b29230e9737b7ec108556b740
--- /dev/null
+++ b/pci-assert-configuration-access-is-within-bounds.patch
@@ -0,0 +1,49 @@
+From 36da21f2d5ce6863770bb635540946947fa06516 Mon Sep 17 00:00:00 2001
+From: Prasad J Pandit
+Date: Thu, 4 Jun 2020 17:05:25 +0530
+Subject: [PATCH] pci: assert configuration access is within bounds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+While accessing PCI configuration bytes, assert that
+'address + len' is within PCI configuration space.
+
+Generally it is within bounds. This is more of a defensive
+assert, in case a buggy device were to send 'address' which
+may go out of bounds.
+
+Suggested-by: Philippe Mathieu-Daudé
+Signed-off-by: Prasad J Pandit
+Message-Id: <20200604113525.58898-1-ppandit@redhat.com>
+Reviewed-by: Michael S. Tsirkin
+Signed-off-by: Michael S. Tsirkin
+---
+ hw/pci/pci.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/hw/pci/pci.c b/hw/pci/pci.c
+index 602fc566cc..9f6632ae7d 100644
+--- a/hw/pci/pci.c
++++ b/hw/pci/pci.c
+@@ -1379,6 +1379,8 @@ uint32_t pci_default_read_config(PCIDevice *d,
+ {
+     uint32_t val = 0;
+ 
++    assert(address + len <= pci_config_size(d));
++
+     if (pci_is_express_downstream_port(d) &&
+         ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) {
+         pcie_sync_bridge_lnk(d);
+@@ -1392,6 +1394,8 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int
+     int i, was_irq_disabled = pci_irq_disabled(d);
+     uint32_t val = val_in;
+ 
++    assert(addr + l <= pci_config_size(d));
++
+     for (i = 0; i < l; val >>= 8, ++i) {
+         uint8_t wmask = d->wmask[addr + i];
+         uint8_t w1cmask = d->w1cmask[addr + i];
+-- 
+2.41.0.windows.1
+
diff --git a/qemu.spec b/qemu.spec
index 912bed0196b12b89ee56165d97016a686b35eb57..a68acd934f4d9dc7e6da49d572ec32ef4c845c59 100644
--- a/qemu.spec
+++ b/qemu.spec
@@ -1,6 +1,6 @@
 Name: qemu
 Version: 4.1.0
-Release: 75
+Release: 76
 Epoch: 10
 Summary: QEMU is a generic and open source machine emulator and virtualizer
 License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -352,6 +352,9 @@ Patch0339: 9pfs-prevent-opening-special-files-CVE-2023-2861.patch
 Patch0340: qga-win32-Remove-change-action-from-MSI-installer.patch
 Patch0341: qga-win32-Use-rundll-for-VSS-installation.patch
 Patch0342: virtio-crypto-verify-src-dst-buffer-length-for-sym-r.patch
+Patch0343: io-remove-io-watch-if-TLS-channel-is-closed-during-h.patch
+Patch0344: pci-assert-configuration-access-is-within-bounds.patch
+Patch0345: accel-tcg-fix-race-in-cpu_exec_step_atomic-bug-18630.patch
 
 BuildRequires: flex
 BuildRequires: bison
@@ -753,6 +756,11 @@ getent passwd qemu >/dev/null || \
 %endif
 
 %changelog
+* Mon Sep 11 2023 Jiabo Feng
+- accel/tcg: fix race in cpu_exec_step_atomic (bug 1863025)
+- pci: assert configuration access is within bounds
+- io: remove io watch if TLS channel is closed during handshake
+
 * Wed Aug 16 2023 Jiabo Feng
 - virtio-crypto: verify src&dst buffer length for sym request