From ad9ee9e229abade6e7f0dd96a68301e79d0a3fc2 Mon Sep 17 00:00:00 2001
From: LIU Yulong
Date: Fri, 1 Apr 2022 18:02:32 +0800
Subject: [PATCH] Enable vhost features

Build QEMU with the following features:
--enable-vhost-net \
--enable-vhost-kernel \
--enable-vhost-user-blk-server \
--enable-vhost-vdpa \
--enable-vhost-vsock \

Signed-off-by: LIU Yulong
---
 qemu.spec                                     |  16 +-
 vhost-user-fix-VirtQ-notifier-cleanup.patch   | 149 ++++++++++++++++++
 ...t-user-remove-VirtQ-notifier-restore.patch |  90 +++++++++++
 3 files changed, 254 insertions(+), 1 deletion(-)
 create mode 100644 vhost-user-fix-VirtQ-notifier-cleanup.patch
 create mode 100644 vhost-user-remove-VirtQ-notifier-restore.patch

diff --git a/qemu.spec b/qemu.spec
index b70927d0..efbc15e4 100644
--- a/qemu.spec
+++ b/qemu.spec
@@ -1,6 +1,8 @@
+%undefine _missing_build_ids_terminate_build
+
 Name: qemu
 Version: 6.2.0
-Release: 31
+Release: 32
 Epoch: 2
 Summary: QEMU is a generic and open source machine emulator and virtualizer
 License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -241,6 +243,8 @@ Patch0227: scsi-bus-fix-incorrect-call-for-blk_error_retry_rese.patch
 Patch0228: Revert-monitor-limit-io-error-qmp-event-to-at-most-o.patch
 Patch0229: vhost-vsock-detach-the-virqueue-element-in-case-of-e.patch
 Patch0230: virtio-net-fix-map-leaking-on-error-during-receive.patch
+Patch0231: vhost-user-remove-VirtQ-notifier-restore.patch
+Patch0232: vhost-user-fix-VirtQ-notifier-cleanup.patch
 
 BuildRequires: flex
 BuildRequires: gcc
@@ -423,6 +427,11 @@ cd ../
     --enable-linux-aio \
     --enable-cap-ng \
     --enable-vhost-user \
+    --enable-vhost-net \
+    --enable-vhost-kernel \
+    --enable-vhost-user-blk-server \
+    --enable-vhost-vdpa \
+    --enable-vhost-vsock \
     --enable-tpm \
     --enable-modules \
     --enable-libssh \
@@ -701,6 +710,11 @@ getent passwd qemu >/dev/null || \
 %endif
 
 %changelog
+* Wed Apr 20 2022 LIU Yulong
+- Enable vhost-net, vhost-kernel, vhost-user-blk-server, vhost-vdpa and vhost-vsock build options
+- vhost-user: fix VirtQ notifier cleanup
+- vhost-user: remove VirtQ notifier restore
+
 * Fri Apr 15 2022 yezengruan
 - vhost-vsock: detach the virqueue element in case of error (CVE-2022-26354)
 - virtio-net: fix map leaking on error during receive (CVE-2022-26353)
diff --git a/vhost-user-fix-VirtQ-notifier-cleanup.patch b/vhost-user-fix-VirtQ-notifier-cleanup.patch
new file mode 100644
index 00000000..55179cae
--- /dev/null
+++ b/vhost-user-fix-VirtQ-notifier-cleanup.patch
@@ -0,0 +1,149 @@
+From 3cc0c1da368afb9d69776103abbcbecf2ce81dc7 Mon Sep 17 00:00:00 2001
+From: Xueming Li
+Date: Tue, 7 Sep 2021 15:42:25 +0800
+Subject: [PATCH v6 2/2] vhost-user: fix VirtQ notifier cleanup
+To: qemu-devel@nongnu.org
+Cc: xuemingl@nvidia.com,
+ qemu-stable@nongnu.org
+
+When vhost-user device cleanup runs and un-mmaps the notifier address,
+a VM CPU thread that is still writing to the notifier fails with an
+invalid-address access error.
+
+To avoid this race, defer the cleanup through RCU: wait for a memory
+flatview update, then un-mmap the notifiers in the RCU callback.
+
+Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
+Cc: qemu-stable@nongnu.org
+Cc: Yuwei Zhang
+Signed-off-by: Xueming Li
+---
+ hw/virtio/vhost-user.c         | 50 +++++++++++++++++++++-------------
+ include/hw/virtio/vhost-user.h |  2 ++
+ 2 files changed, 33 insertions(+), 19 deletions(-)
+
+diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
+index c671719e9b..5adad4d029 100644
+--- a/hw/virtio/vhost-user.c
++++ b/hw/virtio/vhost-user.c
+@@ -25,6 +25,7 @@
+ #include "migration/postcopy-ram.h"
+ #include "migration/register.h"
+ #include "trace.h"
++#include "exec/ramblock.h"
+ 
+ #include 
+ #include 
+@@ -1143,15 +1144,27 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
+     return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
+ }
+ 
+-static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
+-                                            int queue_idx)
++static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
+ {
+-    struct vhost_user *u = dev->opaque;
+-    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
+-    VirtIODevice *vdev = dev->vdev;
++    assert(n && n->old_addr);
++    munmap(n->old_addr, qemu_real_host_page_size);
++    n->old_addr = NULL;
++}
++
++static void vhost_user_host_notifier_remove(VhostUserState *user,
++                                            VirtIODevice *vdev, int queue_idx)
++{
++    VhostUserHostNotifier *n = &user->notifier[queue_idx];
+ 
+     if (n->addr) {
+-        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
++        if (vdev) {
++            virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
++        }
++        assert(n->addr);
++        assert(!n->old_addr);
++        n->old_addr = n->addr;
++        n->addr = NULL;
++        call_rcu(n, vhost_user_host_notifier_free, rcu);
+     }
+ }
+ 
+@@ -1190,8 +1203,9 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
+         .payload.state = *ring,
+         .hdr.size = sizeof(msg.payload.state),
+     };
++    struct vhost_user *u = dev->opaque;
+ 
+-    vhost_user_host_notifier_remove(dev, ring->index);
++    vhost_user_host_notifier_remove(u->user, dev->vdev, ring->index);
+ 
+     if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
+         return -1;
+@@ -1486,12 +1500,7 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
+ 
+     n = &user->notifier[queue_idx];
+ 
+-    if (n->addr) {
+-        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+-        object_unparent(OBJECT(&n->mr));
+-        munmap(n->addr, page_size);
+-        n->addr = NULL;
+-    }
++    vhost_user_host_notifier_remove(user, vdev, queue_idx);
+ 
+     if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
+         return 0;
+@@ -1510,9 +1519,12 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
+ 
+     name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
+                            user, queue_idx);
+-    if (!n->mr.ram) /* Don't init again after suspend. */
++    if (!n->mr.ram) { /* Don't init again after suspend. */
+         memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
+                                           page_size, addr);
++    } else {
++        n->mr.ram_block->host = addr;
++    }
+     g_free(name);
+ 
+     if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
+@@ -2460,17 +2472,17 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
+ void vhost_user_cleanup(VhostUserState *user)
+ {
+     int i;
++    VhostUserHostNotifier *n;
+ 
+     if (!user->chr) {
+         return;
+     }
+     memory_region_transaction_begin();
+     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+-        if (user->notifier[i].addr) {
+-            object_unparent(OBJECT(&user->notifier[i].mr));
+-            munmap(user->notifier[i].addr, qemu_real_host_page_size);
+-            user->notifier[i].addr = NULL;
+-        }
++        n = &user->notifier[i];
++        assert(!n->addr);
++        vhost_user_host_notifier_remove(user, NULL, i);
++        object_unparent(OBJECT(&n->mr));
+     }
+     memory_region_transaction_commit();
+     user->chr = NULL;
+diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
+index f6012b2078..03aa22d450 100644
+--- a/include/hw/virtio/vhost-user.h
++++ b/include/hw/virtio/vhost-user.h
+@@ -12,8 +12,10 @@
+ #include "hw/virtio/virtio.h"
+ 
+ typedef struct VhostUserHostNotifier {
++    struct rcu_head rcu;
+     MemoryRegion mr;
+     void *addr;
++    void *old_addr;
+ } VhostUserHostNotifier;
+ 
+ typedef struct VhostUserState {
+-- 
+2.33.0
diff --git a/vhost-user-remove-VirtQ-notifier-restore.patch b/vhost-user-remove-VirtQ-notifier-restore.patch
new file mode 100644
index 00000000..24c19f3d
--- /dev/null
+++ b/vhost-user-remove-VirtQ-notifier-restore.patch
@@ -0,0 +1,90 @@
+From f4cc1d28209c6773e643808a9d98bb961791eb51 Mon Sep 17 00:00:00 2001
+From: Xueming Li
+Date: Tue, 7 Sep 2021 15:42:25 +0800
+Subject: [PATCH v6 1/2] vhost-user: remove VirtQ notifier restore
+To: qemu-devel@nongnu.org
+Cc: xuemingl@nvidia.com,
+ qemu-stable@nongnu.org
+
+When a vhost-user/vDPA client suspends, the backend may close all of its
+resources, so the mmap'ed VQ notifier address becomes invalid; restoring
+an MR that still holds that invalid address is wrong. The client sets
+the VQ notifier again after it reconnects.
+
+This patch removes the VQ notifier restore path and the related flags so
+that the invalid address is never reused.
+
+Fixes: 44866521bd6e ("vhost-user: support registering external host notifiers")
+Cc: qemu-stable@nongnu.org
+Cc: Yuwei Zhang
+Signed-off-by: Xueming Li
+---
+ hw/virtio/vhost-user.c         | 19 +------------------
+ include/hw/virtio/vhost-user.h |  1 -
+ 2 files changed, 1 insertion(+), 19 deletions(-)
+
+diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
+index bf6e50223c..c671719e9b 100644
+--- a/hw/virtio/vhost-user.c
++++ b/hw/virtio/vhost-user.c
+@@ -1143,19 +1143,6 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev,
+     return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
+ }
+ 
+-static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
+-                                             int queue_idx)
+-{
+-    struct vhost_user *u = dev->opaque;
+-    VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
+-    VirtIODevice *vdev = dev->vdev;
+-
+-    if (n->addr && !n->set) {
+-        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
+-        n->set = true;
+-    }
+-}
+-
+ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
+                                             int queue_idx)
+ {
+@@ -1163,17 +1150,14 @@ static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
+     VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
+     VirtIODevice *vdev = dev->vdev;
+ 
+-    if (n->addr && n->set) {
++    if (n->addr) {
+         virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+-        n->set = false;
+     }
+ }
+ 
+ static int vhost_user_set_vring_base(struct vhost_dev *dev,
+                                      struct vhost_vring_state *ring)
+ {
+-    vhost_user_host_notifier_restore(dev, ring->index);
+-
+     return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
+ }
+ 
+@@ -1538,7 +1522,6 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
+     }
+ 
+     n->addr = addr;
+-    n->set = true;
+ 
+     return 0;
+ }
+diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h
+index a9abca3288..f6012b2078 100644
+--- a/include/hw/virtio/vhost-user.h
++++ b/include/hw/virtio/vhost-user.h
+@@ -14,7 +14,6 @@
+ typedef struct VhostUserHostNotifier {
+     MemoryRegion mr;
+     void *addr;
+-    bool set;
+ } VhostUserHostNotifier;
+ 
+ typedef struct VhostUserState {
+-- 
+2.33.0
-- 
Gitee
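In short, the cleanup backport above replaces an immediate munmap() of the host notifier page with a deferred one: the mapping is parked in old_addr and released from an RCU callback, so a vCPU thread that is still writing the notifier can never hit an unmapped address. The sketch below restates that pattern with explanatory comments for readers of this package update; it is condensed from the patch itself and reuses QEMU-internal types and helpers (call_rcu() from qemu/rcu.h, qemu_real_host_page_size, virtio_queue_set_host_notifier_mr()), so it builds only inside the QEMU source tree and is a reading aid rather than a drop-in implementation.

/*
 * Condensed sketch of the deferred-unmap pattern used by
 * vhost-user-fix-VirtQ-notifier-cleanup.patch above.
 * QEMU-internal headers are abbreviated; not a standalone program.
 */
#include "qemu/osdep.h"
#include "qemu/rcu.h"                /* struct rcu_head, call_rcu()            */
#include "hw/virtio/virtio.h"        /* VirtIODevice, virtio_queue_set_host_notifier_mr() */
#include "hw/virtio/vhost-user.h"    /* VhostUserState, VhostUserHostNotifier  */
#include <sys/mman.h>                /* munmap()                               */

/*
 * RCU callback: by the time this runs, every reader (vCPU thread) that could
 * still have been dereferencing the old mapping has left its RCU read-side
 * critical section, so the page can be unmapped safely.
 */
static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->old_addr);
    munmap(n->old_addr, qemu_real_host_page_size);
    n->old_addr = NULL;
}

/*
 * Removal path: detach the memory region from the virtqueue when a
 * VirtIODevice is available (vhost_user_cleanup() passes NULL), park the live
 * mapping in old_addr, and let the RCU callback perform the munmap() later
 * instead of unmapping while a vCPU may still be writing the notifier.
 */
static void vhost_user_host_notifier_remove(VhostUserState *user,
                                            VirtIODevice *vdev, int queue_idx)
{
    VhostUserHostNotifier *n = &user->notifier[queue_idx];

    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        }
        n->old_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}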