diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 825ecb99a89c934ac64efc89ba8eeb3f1537c7f0..f96afb1230ed5e6deb31c792e3aa70b27bad895e 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -1737,6 +1737,36 @@ static void kvm_io_ioeventfd_add(MemoryListener *listener, } } +static int kvm_ioeventfd_batch(bool start) +{ + int ret; + struct kvm_ioeventfd iofd = { + .flags = start ? + KVM_IOEVENTFD_FLAG_BATCH_BEGIN : KVM_IOEVENTFD_FLAG_BATCH_END, + }; + + if (!kvm_enabled()) { + return -ENOSYS; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd); + if (ret < 0) { + return -errno; + } + + return 0; +} + +static void kvm_ioeventfd_begin(MemoryListener *listener) +{ + kvm_ioeventfd_batch(true); +} + +static void kvm_ioeventfd_end(MemoryListener *listener) +{ + kvm_ioeventfd_batch(false); +} + static void kvm_io_ioeventfd_del(MemoryListener *listener, MemoryRegionSection *section, bool match_data, uint64_t data, @@ -1902,10 +1932,11 @@ static void kvm_add_routing_entry(KVMState *s, set_gsi(s, entry->gsi); } -static int kvm_update_routing_entry(KVMState *s, +static int kvm_update_routing_entry(KVMRouteChange *c, struct kvm_irq_routing_entry *new_entry) { struct kvm_irq_routing_entry *entry; + KVMState *s = c->s; int n; for (n = 0; n < s->irq_routes->nr; n++) { @@ -1919,7 +1950,7 @@ static int kvm_update_routing_entry(KVMState *s, } *entry = *new_entry; - + c->changes++; return 0; } @@ -2051,7 +2082,7 @@ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) return virq; } -int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, +int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg, PCIDevice *dev) { struct kvm_irq_routing_entry kroute = {}; @@ -2081,7 +2112,7 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, trace_kvm_irqchip_update_msi_route(virq); - return kvm_update_routing_entry(s, &kroute); + return kvm_update_routing_entry(c, &kroute); } static int 
kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, @@ -2223,7 +2254,7 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event, abort(); } -int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) +int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg) { return -ENOSYS; } @@ -2630,6 +2661,8 @@ static int kvm_init(MachineState *ms) s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del; s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region; s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; + s->memory_listener.listener.eventfd_begin = kvm_ioeventfd_begin; + s->memory_listener.listener.eventfd_end = kvm_ioeventfd_end; kvm_memory_listener_register(s, &s->memory_listener, &address_space_memory, 0, "kvm-memory"); diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index b071afee4566b90b67ac0a11c86eae227a886a86..1fffdc0ea2dba64d1c2ad4115520b9fcab5d4d7c 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -65,7 +65,7 @@ void kvm_irqchip_release_virq(KVMState *s, int virq) { } -int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, +int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg, PCIDevice *dev) { return -ENOSYS; diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 716ffc8bbbda1db4cfba8ecbdbce04ffc8455564..0b43aec8fa97504f7c8dc0b9450ffa1a0b7388d8 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -195,6 +195,7 @@ static void ioapic_update_kvm_routes(IOAPICCommonState *s) int i; if (kvm_irqchip_is_split()) { + KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); for (i = 0; i < IOAPIC_NUM_PINS; i++) { MSIMessage msg; struct ioapic_entry_info info; @@ -202,10 +203,10 @@ static void ioapic_update_kvm_routes(IOAPICCommonState *s) if (!info.masked) { msg.address = info.addr; msg.data = info.data; - kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL); + 
kvm_irqchip_update_msi_route(&c, i, msg, NULL); } } - kvm_irqchip_commit_routes(kvm_state); + kvm_irqchip_commit_route_changes(&c); } #endif } diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index ad9a3c546eb72bf8ebfee651d152f4d2f45f0f7b..f66491a7a7a68fac494fc7707e0e5ab22d160f17 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -278,6 +278,7 @@ static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector, IVShmemState *s = IVSHMEM_COMMON(dev); EventNotifier *n = &s->peers[s->vm_id].eventfds[vector]; MSIVector *v = &s->msi_vectors[vector]; + KVMRouteChange c; int ret; IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector); @@ -287,11 +288,12 @@ static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector, } assert(!v->unmasked); - ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev); + c = kvm_irqchip_begin_route_changes(kvm_state); + ret = kvm_irqchip_update_msi_route(&c, v->virq, msg, dev); if (ret < 0) { return ret; } - kvm_irqchip_commit_routes(kvm_state); + kvm_irqchip_commit_route_changes(&c); ret = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq); if (ret < 0) { diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 293deb873763f8ee91267a9ee7387b814ea524f0..ce958848b61d0d871a08ce28dc9bc5009870d1a2 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -507,8 +507,9 @@ static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector) static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg, PCIDevice *pdev) { - kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev); - kvm_irqchip_commit_routes(kvm_state); + KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); + kvm_irqchip_update_msi_route(&c, vector->virq, msg, pdev); + kvm_irqchip_commit_route_changes(&c); } static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 06b125ec62534d121af58b0088285cb574ec2d9b..558471307a61cd3679c7c9b427e9a4516b2e5283 100644 --- 
a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -49,6 +49,90 @@ * configuration space */ #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) +static KVMRouteChange virtio_pci_route_change; + +static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, + EventNotifier *n, + unsigned int vector); + +static inline void virtio_pci_begin_route_changes(VirtIODevice *vdev) +{ + if (!vdev->defer_kvm_irq_routing) { + virtio_pci_route_change = kvm_irqchip_begin_route_changes(kvm_state); + } +} + +static inline void virtio_pci_commit_route_changes(VirtIODevice *vdev) +{ + if (!vdev->defer_kvm_irq_routing) { + kvm_irqchip_commit_route_changes(&virtio_pci_route_change); + } +} + +static void virtio_pci_prepare_kvm_msi_virq_batch(VirtIOPCIProxy *proxy) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (vdev->defer_kvm_irq_routing) { + qemu_log("invalid defer kvm irq routing state: %d\n", vdev->defer_kvm_irq_routing); + return; + } + virtio_pci_route_change = kvm_irqchip_begin_route_changes(kvm_state); + vdev->defer_kvm_irq_routing = true; +} + +static void virtio_pci_commit_kvm_msi_virq_batch(VirtIOPCIProxy *proxy) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + EventNotifier *n; + VirtQueue *vq; + int vector, index, ret; + + if (!vdev->defer_kvm_irq_routing) { + qemu_log("invalid defer kvm irq routing state: %d\n", vdev->defer_kvm_irq_routing); + return; + } + vdev->defer_kvm_irq_routing = false; + kvm_irqchip_commit_route_changes(&virtio_pci_route_change); + + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + return; + } + + for (vector = 0; vector < proxy->pci_dev.msix_entries_nr; vector++) { + if (msix_is_masked(&proxy->pci_dev, vector)) { + continue; + } + + if (vector == vdev->config_vector) { + n = virtio_config_get_guest_notifier(vdev); + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); + if (ret) { + qemu_log("config irqfd use 
failed: %d\n", ret); + } + continue; + } + + vq = virtio_vector_first_queue(vdev, vector); + + while (vq) { + index = virtio_get_queue_index(vq); + if (!virtio_queue_get_num(vdev, index)) { + break; + } + if (index < proxy->nvqs_with_notifiers) { + n = virtio_queue_get_guest_notifier(vq); + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); + if (ret < 0) { + qemu_log("Error: irqfd use failed: %d\n", ret); + } + } + vq = virtio_vector_next_queue(vq); + } + } +} + static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, VirtIOPCIProxy *dev); static void virtio_pci_reset(DeviceState *qdev); @@ -815,12 +899,10 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, int ret; if (irqfd->users == 0) { - KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); - ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); + ret = kvm_irqchip_add_msi_route(&virtio_pci_route_change, vector, &proxy->pci_dev); if (ret < 0) { return ret; } - kvm_irqchip_commit_route_changes(&c); irqfd->virq = ret; } irqfd->users++; @@ -949,13 +1031,17 @@ static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs) kvm_create_shadow_device(&proxy->pci_dev); } #endif - for (queue_no = 0; queue_no < nvqs; queue_no++) { if (!virtio_queue_get_num(vdev, queue_no)) { return -1; } + } + + virtio_pci_begin_route_changes(vdev); + for (queue_no = 0; queue_no < nvqs; queue_no++) { ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); } + virtio_pci_commit_route_changes(vdev); #ifdef __aarch64__ if (!strcmp(vdev->name, "virtio-net") && ret != 0) { @@ -1032,12 +1118,13 @@ static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, if (proxy->vector_irqfd) { irqfd = &proxy->vector_irqfd[vector]; if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { - ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, + virtio_pci_begin_route_changes(vdev); + ret = kvm_irqchip_update_msi_route(&virtio_pci_route_change, irqfd->virq, msg, 
&proxy->pci_dev); if (ret < 0) { return ret; } - kvm_irqchip_commit_routes(kvm_state); + virtio_pci_commit_route_changes(vdev); } } @@ -1052,7 +1139,9 @@ static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, event_notifier_set(n); } } else { - ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); + if (!vdev->defer_kvm_irq_routing) { + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); + } } return ret; } @@ -1309,6 +1398,8 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) if ((with_irqfd || (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) && assign) { + + virtio_pci_prepare_kvm_msi_virq_batch(proxy); if (with_irqfd) { proxy->vector_irqfd = g_malloc0(sizeof(*proxy->vector_irqfd) * @@ -1326,6 +1417,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask, virtio_pci_vector_mask, virtio_pci_vector_poll); + virtio_pci_commit_kvm_msi_virq_batch(proxy); if (r < 0) { goto notifiers_error; } diff --git a/include/exec/memory.h b/include/exec/memory.h index c14dc69d277bde140bd13d310ac1be5e2672d668..fe27f323b20ea9930a256e17a6328a2b831ccd6d 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -1079,6 +1079,27 @@ struct MemoryListener { void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, bool match_data, uint64_t data, EventNotifier *e); + /** + * @eventfd_begin: + * + * Called when an address space begins updating ioeventfds, + * to notify kvm that ioeventfds will be updated in batches. + * + * @listener: The #MemoryListener. + */ + void (*eventfd_begin)(MemoryListener *listener); + + /** + * @eventfd_end: + * + * Called when an address space finishes updating ioeventfds, + * to notify kvm that all ioeventfd modifications have been submitted + * and batch processing can be started. + * + * @listener: The #MemoryListener. 
+ */ + void (*eventfd_end)(MemoryListener *listener); + /** * @coalesced_io_add: * @@ -1180,6 +1201,8 @@ struct FlatView { unsigned nr_allocated; struct AddressSpaceDispatch *dispatch; MemoryRegion *root; + #define FLATVIEW_FLAG_LAST_PROCESSED (1 << 0) + unsigned flags; }; static inline FlatView *address_space_to_flatview(AddressSpace *as) @@ -2567,6 +2590,11 @@ void memory_region_transaction_begin(void); */ void memory_region_transaction_commit(void); +/** + * memory_region_commit: Force commit memory region immediately. + */ +void memory_region_commit(void); + /** * memory_listener_register: register callbacks to be called when memory * sections are mapped or unmapped into an address diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 78db2bde98a97223ab19f23a33b528dd9847551c..672f7445dd0e489d805c49d0b16ef94cca83519c 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -147,6 +147,7 @@ struct VirtIODevice bool use_started; bool started; bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */ + bool defer_kvm_irq_routing; bool disable_legacy_check; bool vhost_started; VMChangeStateEntry *vmstate; diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 176aa53cbe3ebcf6850ff02e9d8df9335f18bda5..16cccc881e8e16d28b61f016f73692511c01823d 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -501,7 +501,7 @@ void kvm_init_cpu_signals(CPUState *cpu); * @return: virq (>=0) when success, errno (<0) when failed. 
*/ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); -int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg, +int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg, PCIDevice *dev); void kvm_irqchip_commit_routes(KVMState *s); diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index a19683f1e953dffa829bf96602708de7051baa63..07146514402d5055906619c3f57c4dd49fe33dd8 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -819,6 +819,8 @@ enum { kvm_ioeventfd_flag_nr_deassign, kvm_ioeventfd_flag_nr_virtio_ccw_notify, kvm_ioeventfd_flag_nr_fast_mmio, + kvm_ioeventfd_flag_nr_batch_begin, + kvm_ioeventfd_flag_nr_batch_end, kvm_ioeventfd_flag_nr_max, }; @@ -827,6 +829,10 @@ enum { #define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign) #define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \ (1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify) +#define KVM_IOEVENTFD_FLAG_BATCH_BEGIN \ + (1 << kvm_ioeventfd_flag_nr_batch_begin) +#define KVM_IOEVENTFD_FLAG_BATCH_END \ + (1 << kvm_ioeventfd_flag_nr_batch_end) #define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1) diff --git a/migration/migration.c b/migration/migration.c index dce22c2da53f0506195d76c2d64c43e0cc9eb89b..9a433e615b866cfc8ef4d8308fc8b03b411e2893 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -68,6 +68,8 @@ #include "sysemu/dirtylimit.h" #include "qemu/sockets.h" +#define DEFAULT_FD_MAX 4096 + static NotifierList migration_state_notifiers = NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); @@ -1712,6 +1714,31 @@ void migrate_del_blocker(Error **reasonp) } } +/* + * Kernel will expand the fdtable allocated to the qemu process when + * the number of fds held by qemu process exceeds a power of 2 (starting from 64). + * Each expansion introduces tens of ms of latency due to RCU synchronization. 
+ * The expansion is completed during qemu process initialization to avoid + * triggering this action during the migration downtime phase. + */ +static void qemu_pre_extend_fdtable(void) +{ + int buffer[DEFAULT_FD_MAX] = {0}; + int i; + + /* expand fdtable */ + for (i = 0; i < DEFAULT_FD_MAX; i++) { + buffer[i] = qemu_dup(STDIN_FILENO); + } + + /* close tmp fd */ + for (i = 0; i < DEFAULT_FD_MAX; i++) { + if (buffer[i] > 0) { + (void)qemu_close(buffer[i]); + } + } +} + void qmp_migrate_incoming(const char *uri, bool has_channels, MigrationChannelList *channels, Error **errp) { @@ -1731,6 +1758,8 @@ void qmp_migrate_incoming(const char *uri, bool has_channels, return; } + qemu_pre_extend_fdtable(); + qemu_start_incoming_migration(uri, has_channels, channels, &local_err); if (local_err) { diff --git a/migration/savevm.c b/migration/savevm.c index cc65da605e60c79aeda153a6af1186690c863ca3..030a4bf7d2e4af43bfa11d8a3fbf2b3573bf4bcb 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -2857,6 +2857,10 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) uint8_t section_type; int ret = 0; + if (qemu_mutex_iothread_locked()) { + memory_region_transaction_begin(); + } + retry: while (true) { section_type = qemu_get_byte(f); @@ -2900,6 +2904,9 @@ retry: } out: + if (qemu_mutex_iothread_locked()) { + memory_region_transaction_commit(); + } if (ret < 0) { qemu_file_set_error(f, ret); diff --git a/migration/vmstate.c b/migration/vmstate.c index bd08e390c564b1572ddfdce1e64d416488159140..e621d8ddb7159d484e54dc00f1d3616090c42f6e 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -20,6 +20,7 @@ #include "qemu/bitops.h" #include "qemu/error-report.h" #include "trace.h" +#include "exec/memory.h" static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc, @@ -184,6 +185,13 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, return ret; } if (vmsd->post_load) { + /** + * We call 
memory_region_transaction_begin in qemu_loadvm_state_main, + * so address space will not be updated during vm state loading. + * But some devices need to use address space here, force commit + * memory region transaction before calling post_load. + */ + memory_region_commit(); ret = vmsd->post_load(opaque, version_id); } trace_vmstate_load_state_end(vmsd->name, "end", ret); diff --git a/system/memory.c b/system/memory.c index 9db07fd83234d792a030b8647f59c9b5b4740e25..7858aa187837bc3d264a3d70e160b2e570f32653 100644 --- a/system/memory.c +++ b/system/memory.c @@ -856,6 +856,13 @@ static void address_space_update_ioeventfds(AddressSpace *as) return; } + view = address_space_get_flatview(as); + if (view->flags & FLATVIEW_FLAG_LAST_PROCESSED) { + flatview_unref(view); + return; + } + view->flags |= FLATVIEW_FLAG_LAST_PROCESSED; + /* * It is likely that the number of ioeventfds hasn't changed much, so use * the previous size as the starting value, with some headroom to avoid @@ -864,7 +871,6 @@ static void address_space_update_ioeventfds(AddressSpace *as) ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4); ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max); - view = address_space_get_flatview(as); FOR_EACH_FLAT_RANGE(fr, view) { for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, @@ -1111,40 +1117,64 @@ static void address_space_update_topology(AddressSpace *as) address_space_set_flatview(as); } +static void address_space_update_view(AddressSpace *as) +{ + FlatView *view; + + view = address_space_get_flatview(as); + if (view->flags & FLATVIEW_FLAG_LAST_PROCESSED) { + view->flags &= ~FLATVIEW_FLAG_LAST_PROCESSED; + } + flatview_unref(view); +} + void memory_region_transaction_begin(void) { qemu_flush_coalesced_mmio_buffer(); ++memory_region_transaction_depth; } -void memory_region_transaction_commit(void) +void memory_region_commit(void) { AddressSpace *as; + if (memory_region_update_pending) { + flatviews_reset(); + + 
MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); + + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_set_flatview(as); + address_space_update_ioeventfds(as); + } + memory_region_update_pending = false; + ioeventfd_update_pending = false; + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_update_view(as); + } + MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); + } else if (ioeventfd_update_pending) { + MEMORY_LISTENER_CALL_GLOBAL(eventfd_begin, Forward); + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_update_ioeventfds(as); + } + ioeventfd_update_pending = false; + QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { + address_space_update_view(as); + } + MEMORY_LISTENER_CALL_GLOBAL(eventfd_end, Forward); + } +} + +void memory_region_transaction_commit(void) +{ assert(memory_region_transaction_depth); assert(qemu_mutex_iothread_locked()); --memory_region_transaction_depth; if (!memory_region_transaction_depth) { - if (memory_region_update_pending) { - flatviews_reset(); - - MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); - - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - address_space_set_flatview(as); - address_space_update_ioeventfds(as); - } - memory_region_update_pending = false; - ioeventfd_update_pending = false; - MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); - } else if (ioeventfd_update_pending) { - QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { - address_space_update_ioeventfds(as); - } - ioeventfd_update_pending = false; - } - } + memory_region_commit(); + } } static void memory_region_destructor_none(MemoryRegion *mr) @@ -3142,6 +3172,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) as->name = g_strdup(name ? 
name : "anonymous"); address_space_update_topology(as); address_space_update_ioeventfds(as); + address_space_update_view(as); } static void do_address_space_destroy(AddressSpace *as) diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 2df3ff99c3423f1c0ecd88ebbd30c0c546679779..3a88e65635698ec3043a90b05cd984a2e93e7a58 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -5700,9 +5700,11 @@ void kvm_update_msi_routes_all(void *private, bool global, { int cnt = 0, vector; MSIRouteEntry *entry; + KVMRouteChange c; MSIMessage msg; PCIDevice *dev; + c = kvm_irqchip_begin_route_changes(kvm_state); /* TODO: explicit route update */ QLIST_FOREACH(entry, &msi_route_list, list) { cnt++; @@ -5719,9 +5721,9 @@ void kvm_update_msi_routes_all(void *private, bool global, */ continue; } - kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev); + kvm_irqchip_update_msi_route(&c, entry->virq, msg, dev); } - kvm_irqchip_commit_routes(kvm_state); + kvm_irqchip_commit_route_changes(&c); trace_kvm_x86_update_msi_routes(cnt); } diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 0b7d5ecd6838da416318210b39f83fbf9657277d..22c586eee02a2ad84fc1986463ca00bb9080d2ee 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -31,6 +31,7 @@ #include "../migration/savevm.h" #include "qemu/module.h" #include "io/channel-file.h" +#include "exec/memory.h" static int temp_fd; @@ -1479,6 +1480,11 @@ static void test_tmp_struct(void) g_assert_cmpint(obj.f, ==, 8); /* From the child->parent */ } +/* stub for ut */ +void memory_region_commit(void) +{ +} + int main(int argc, char **argv) { g_autofree char *temp_file = g_strdup_printf("%s/vmst.test.XXXXXX",