From dd1452d29dd71f581d63245648257d207a9e445f Mon Sep 17 00:00:00 2001
From: sundongxu
Date: Mon, 18 Dec 2023 23:28:19 +0800
Subject: [PATCH] shadow_dev: introduce shadow dev for virtio-net device

For virtio-net devices, create the shadow device so that
vLPI bypass injection is supported.

Signed-off-by: sundongxu
---
 qemu.spec                                     |   6 +-
 ...duce-shadow-dev-for-virtio-net-devic.patch | 195 ++++++++++++++++++
 2 files changed, 200 insertions(+), 1 deletion(-)
 create mode 100644 shadow_dev-introduce-shadow-dev-for-virtio-net-devic.patch

diff --git a/qemu.spec b/qemu.spec
index bc9d629c..3e12aa94 100644
--- a/qemu.spec
+++ b/qemu.spec
@@ -3,7 +3,7 @@
 Name: qemu
 Version: 6.2.0
-Release: 86
+Release: 87
 Epoch: 10
 Summary: QEMU is a generic and open source machine emulator and virtualizer
 License: GPLv2 and BSD and MIT and CC-BY-SA-4.0
@@ -832,6 +832,7 @@ Patch0817: vhost-implement-migration-state-notifier-for-vdpa-de.patch
 Patch0818: vhost-implement-post-resume-bh.patch
 Patch0819: vdpa-implement-vdpa-device-migration.patch
 Patch0820: vdpa-move-memory-listener-to-the-realize-stage.patch
+Patch0821: shadow_dev-introduce-shadow-dev-for-virtio-net-devic.patch
 
 BuildRequires: flex
@@ -1431,6 +1432,9 @@ getent passwd qemu >/dev/null || \
 %endif
 
 %changelog
+* Mon Dec 18 2023 - 10:6.2.0-87
+- shadow_dev: introduce shadow dev for virtio-net device
+
 * Tue Dec 5 2023 - 10:6.2.0-86
 - vdpa: move memory listener to the realize stage
 - vdpa: implement vdpa device migration

diff --git a/shadow_dev-introduce-shadow-dev-for-virtio-net-devic.patch b/shadow_dev-introduce-shadow-dev-for-virtio-net-devic.patch
new file mode 100644
index 00000000..b2dda638
--- /dev/null
+++ b/shadow_dev-introduce-shadow-dev-for-virtio-net-devic.patch
@@ -0,0 +1,195 @@
+From 0a6c08bd3a16543b8021c8b65a45f7ebb701a9aa Mon Sep 17 00:00:00 2001
+From: Dongxu Sun
+Date: Fri, 15 Dec 2023 17:44:54 +0800
+Subject: [PATCH] shadow_dev: introduce shadow dev for virtio-net device
+
+For virtio-net devices, create the shadow device so that
+vLPI bypass injection is supported.
+
+Signed-off-by: Wang Haibin
+Signed-off-by: Yu Zenghui
+Signed-off-by: Chen Qun
+Signed-off-by: KunKun Jiang
+Signed-off-by: Dongxu Sun
+---
+ hw/virtio/virtio-pci.c    | 32 ++++++++++++++++++++++++++
+ include/sysemu/kvm.h      |  5 +++++
+ linux-headers/linux/kvm.h | 13 +++++++++++
+ target/arm/kvm.c          | 47 +++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 97 insertions(+)
+
+diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
+index 82706b8b32..6b45683280 100644
+--- a/hw/virtio/virtio-pci.c
++++ b/hw/virtio/virtio-pci.c
+@@ -873,18 +873,44 @@ undo:
+     }
+     return ret;
+ }
++
++#ifdef __aarch64__
++int __attribute__((weak)) kvm_create_shadow_device(PCIDevice *dev)
++{
++    return 0;
++}
++
++int __attribute__((weak)) kvm_delete_shadow_device(PCIDevice *dev)
++{
++    return 0;
++}
++#endif
++
+ static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs)
+ {
+     int queue_no;
+     int ret = 0;
+     VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ 
++#ifdef __aarch64__
++    if (!strcmp(vdev->name, "virtio-net")) {
++        kvm_create_shadow_device(&proxy->pci_dev);
++    }
++#endif
++
+     for (queue_no = 0; queue_no < nvqs; queue_no++) {
+         if (!virtio_queue_get_num(vdev, queue_no)) {
+             return -1;
+         }
+         ret = kvm_virtio_pci_vector_use_one(proxy, queue_no);
+     }
++
++#ifdef __aarch64__
++    if (!strcmp(vdev->name, "virtio-net") && ret != 0) {
++        kvm_delete_shadow_device(&proxy->pci_dev);
++    }
++#endif
++
+     return ret;
+ }
+ 
+@@ -927,6 +953,12 @@ static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs)
+         }
+         kvm_virtio_pci_vector_release_one(proxy, queue_no);
+     }
++
++#ifdef __aarch64__
++    if (!strcmp(vdev->name, "virtio-net")) {
++        kvm_delete_shadow_device(&proxy->pci_dev);
++    }
++#endif
+ }
+ 
+ static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy)
+diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
+index 1ec9432493..9f52d08ce0 100644
+--- a/include/sysemu/kvm.h
++++ b/include/sysemu/kvm.h
+@@ -553,4 +553,9 @@ bool kvm_arch_cpu_check_are_resettable(void);
+ bool kvm_dirty_ring_enabled(void);
+ 
+ uint32_t kvm_dirty_ring_size(void);
++
++#ifdef __aarch64__
++int kvm_create_shadow_device(PCIDevice *dev);
++int kvm_delete_shadow_device(PCIDevice *dev);
++#endif
+ #endif
+diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
+index 2008fbc173..cd0885f523 100644
+--- a/linux-headers/linux/kvm.h
++++ b/linux-headers/linux/kvm.h
+@@ -1127,6 +1127,8 @@ struct kvm_ppc_resize_hpt {
+ 
+ #define KVM_CAP_ARM_CPU_FEATURE 555
+ 
++#define KVM_CAP_ARM_VIRT_MSI_BYPASS 799
++
+ #ifdef KVM_CAP_IRQ_ROUTING
+ 
+ struct kvm_irq_routing_irqchip {
+@@ -1431,6 +1433,17 @@ struct kvm_s390_ucas_mapping {
+ #define KVM_XEN_HVM_CONFIG        _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
+ #define KVM_SET_CLOCK             _IOW(KVMIO, 0x7b, struct kvm_clock_data)
+ #define KVM_GET_CLOCK             _IOR(KVMIO, 0x7c, struct kvm_clock_data)
++
++#ifdef __aarch64__
++struct kvm_master_dev_info
++{
++    __u32 nvectors; /* number of msi vectors */
++    struct kvm_msi msi[0];
++};
++#define KVM_CREATE_SHADOW_DEV     _IOW(KVMIO, 0xf0, struct kvm_master_dev_info)
++#define KVM_DEL_SHADOW_DEV        _IOW(KVMIO, 0xf1, __u32)
++#endif
++
+ /* Available with KVM_CAP_PIT_STATE2 */
+ #define KVM_GET_PIT2   _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
+ #define KVM_SET_PIT2   _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
+diff --git a/target/arm/kvm.c b/target/arm/kvm.c
+index 22ac5bcb97..38d80adfb7 100644
+--- a/target/arm/kvm.c
++++ b/target/arm/kvm.c
+@@ -27,6 +27,8 @@
+ #include "trace.h"
+ #include "internals.h"
+ #include "hw/pci/pci.h"
++#include "hw/pci/msi.h"
++#include "hw/pci/msix.h"
+ #include "exec/memattrs.h"
+ #include "exec/address-spaces.h"
+ #include "hw/boards.h"
+@@ -1075,6 +1077,51 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+     return 0;
+ }
+ 
++int kvm_create_shadow_device(PCIDevice *dev)
++{
++    KVMState *s = kvm_state;
++    struct kvm_master_dev_info *mdi;
++    MSIMessage msg;
++    uint32_t vector, nvectors = msix_nr_vectors_allocated(dev);
++    uint32_t request_id;
++    int ret;
++
++    if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) {
++        return 0;
++    }
++
++    mdi = g_malloc0(sizeof(uint32_t) + sizeof(struct kvm_msi) * nvectors);
++    mdi->nvectors = nvectors;
++    request_id = pci_requester_id(dev);
++
++    for (vector = 0; vector < nvectors; vector++) {
++        msg = msix_get_message(dev, vector);
++        mdi->msi[vector].address_lo = extract64(msg.address, 0, 32);
++        mdi->msi[vector].address_hi = extract64(msg.address, 32, 32);
++        mdi->msi[vector].data = le32_to_cpu(msg.data);
++        mdi->msi[vector].flags = KVM_MSI_VALID_DEVID;
++        mdi->msi[vector].devid = request_id;
++        memset(mdi->msi[vector].pad, 0, sizeof(mdi->msi[vector].pad));
++    }
++
++    ret = kvm_vm_ioctl(s, KVM_CREATE_SHADOW_DEV, mdi);
++    g_free(mdi);
++    return ret;
++}
++
++int kvm_delete_shadow_device(PCIDevice *dev)
++{
++    KVMState *s = kvm_state;
++    uint32_t request_id, nvectors = msix_nr_vectors_allocated(dev);
++
++    if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) {
++        return 0;
++    }
++
++    request_id = pci_requester_id(dev);
++    return kvm_vm_ioctl(s, KVM_DEL_SHADOW_DEV, &request_id);
++}
++
+ int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+                                 int vector, PCIDevice *dev)
+ {
+-- 
+2.17.1
+
-- 
Gitee