diff --git a/mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch b/mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch new file mode 100644 index 0000000000000000000000000000000000000000..056517fbecfe28ed5366589ccdc4cf9874b299c8 --- /dev/null +++ b/mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch @@ -0,0 +1,71 @@ +From ee07f1cc2a423040ad9a862c8054893c84db7f01 Mon Sep 17 00:00:00 2001 +From: Mattias Nissler +Date: Mon, 16 Sep 2024 10:57:08 -0700 +Subject: [PATCH 5/5] mac_dbdma: Remove leftover `dma_memory_unmap` + calls(CVE-2024-8612) + +cherry-pick from 2d0a071e625d7234e8c5623b7e7bf445e1bef72c + +These were passing a NULL buffer pointer unconditionally, which happens +to behave in a mostly benign way (except for the chance of an excess +memory region unref and a bounce buffer leak). Per the function comment, +this was never meant to be accepted though, and triggers an assertion +with the "softmmu: Support concurrent bounce buffers" change. + +Given that the code in question never sets up any mappings, just remove +the unnecessary dma_memory_unmap calls along with the DBDMA_io struct +fields that are now entirely unused. + +Signed-off-by: Mattias Nissler +Message-Id: <20240916175708.1829059-1-mnissler@rivosinc.com> +Fixes: be1e343995 ("macio: switch over to new byte-aligned DMA helpers") +Reviewed-by: Mark Cave-Ayland +Tested-by: Mark Cave-Ayland +Signed-off-by: Mark Cave-Ayland +--- + hw/ide/macio.c | 6 ------ + include/hw/ppc/mac_dbdma.h | 4 ---- + 2 files changed, 10 deletions(-) + +diff --git a/hw/ide/macio.c b/hw/ide/macio.c +index 54571fed12..460b8cfd6a 100644 +--- a/hw/ide/macio.c ++++ b/hw/ide/macio.c +@@ -118,9 +118,6 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) + return; + + done: +- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, +- io->dir, io->dma_len); +- + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); + } else { +@@ -201,9 +198,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) + return; + + done: +- dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, +- io->dir, io->dma_len); +- + if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { + if (ret < 0) { + block_acct_failed(blk_get_stats(s->blk), &s->acct); +diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h +index 26cc469de4..38c3d87964 100644 +--- a/include/hw/ppc/mac_dbdma.h ++++ b/include/hw/ppc/mac_dbdma.h +@@ -43,10 +43,6 @@ struct DBDMA_io { + DBDMA_end dma_end; + /* DMA is in progress, don't start another one */ + bool processing; +- /* DMA request */ +- void *dma_mem; +- dma_addr_t dma_len; +- DMADirection dir; + }; + + /* +-- +2.45.1.windows.1 + diff --git a/qdev-properties-add-size32-property-type.patch b/qdev-properties-add-size32-property-type.patch new file mode 100644 index 0000000000000000000000000000000000000000..deb83cc0d5c035b162696642ee521338468e343c --- /dev/null +++ b/qdev-properties-add-size32-property-type.patch @@ -0,0 +1,100 @@ +From 921e5443d80d6ed33bda10dd790e63b70d67cab0 Mon Sep 17 00:00:00 2001 +From: Roman Kagan +Date: Fri, 29 May 2020 01:55:12 +0300 +Subject: [PATCH 3/5] qdev-properties: add size32 property type + +cherry-pick from 914e74cda9a54ac860000aa18882dc40d3c8180b + +Introduce size32 property type which handles size suffixes (k, m, g) +just like size property, but is uint32_t rather than uint64_t. 
It's +going to be useful for properties that are byte sizes but are inherently +32bit, like BlkConf.opt_io_size or .discard_granularity (they are +switched to this new property type in a followup commit). + +The getter for size32 is left out for a separate patch as its benefit is +less obvious, and it affects test output; for now the regular uint32 +getter is used. + +Signed-off-by: Roman Kagan +Message-Id: <20200528225516.1676602-5-rvkagan@yandex-team.ru> +Signed-off-by: Kevin Wolf +--- + hw/core/qdev-properties.c | 40 ++++++++++++++++++++++++++++++++++++ + include/hw/qdev-properties.h | 3 +++ + 2 files changed, 43 insertions(+) + +diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c +index 02a824fc68..881138b3e6 100644 +--- a/hw/core/qdev-properties.c ++++ b/hw/core/qdev-properties.c +@@ -766,6 +766,46 @@ const PropertyInfo qdev_prop_pci_devfn = { + .set_default_value = set_default_value_int, + }; + ++/* --- 32bit unsigned int 'size' type --- */ ++ ++static void set_size32(Object *obj, Visitor *v, const char *name, void *opaque, ++ Error **errp) ++{ ++ DeviceState *dev = DEVICE(obj); ++ Property *prop = opaque; ++ uint32_t *ptr = qdev_get_prop_ptr(dev, prop); ++ uint64_t value; ++ Error *local_err = NULL; ++ ++ if (dev->realized) { ++ qdev_prop_set_after_realize(dev, name, errp); ++ return; ++ } ++ ++ visit_type_size(v, name, &value, &local_err); ++ if (local_err) { ++ error_propagate(errp, local_err); ++ return; ++ } ++ ++ if (value > UINT32_MAX) { ++ error_setg(errp, ++ "Property %s.%s doesn't take value %" PRIu64 ++ " (maximum: %u)", ++ dev->id ? : "", name, value, UINT32_MAX); ++ return; ++ } ++ ++ *ptr = value; ++} ++ ++const PropertyInfo qdev_prop_size32 = { ++ .name = "size", ++ .get = get_uint32, ++ .set = set_size32, ++ .set_default_value = set_default_value_uint, ++}; ++ + /* --- blocksize --- */ + + static void set_blocksize(Object *obj, Visitor *v, const char *name, +diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h +index 4585c200df..a2a6598905 100644 +--- a/include/hw/qdev-properties.h ++++ b/include/hw/qdev-properties.h +@@ -33,6 +33,7 @@ extern const PropertyInfo qdev_prop_drive; + extern const PropertyInfo qdev_prop_drive_iothread; + extern const PropertyInfo qdev_prop_netdev; + extern const PropertyInfo qdev_prop_pci_devfn; ++extern const PropertyInfo qdev_prop_size32; + extern const PropertyInfo qdev_prop_blocksize; + extern const PropertyInfo qdev_prop_pci_host_devaddr; + extern const PropertyInfo qdev_prop_uuid; +@@ -221,6 +222,8 @@ extern const PropertyInfo qdev_prop_pcie_link_width; + int64_t) + #define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int) ++#define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \ ++ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t) + #define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \ + DEFINE_PROP_UNSIGNED(_n, _s, _f, 0, qdev_prop_blocksize, uint16_t) + #define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \ +-- +2.45.1.windows.1 + diff --git a/qemu.spec b/qemu.spec index 826018c47db60ad0e021b4cb779178f1a2742fed..1facaac15ab185fc0436288bbfb31b66a1a23440 100644 --- a/qemu.spec +++ b/qemu.spec @@ -1,6 +1,6 @@ Name: qemu Version: 4.1.0 -Release: 87 +Release: 88 Epoch: 10 Summary: QEMU is a generic and open source machine emulator and virtualizer License: GPLv2 and BSD and MIT and CC-BY-SA-4.0 @@ -424,6 +424,11 @@ Patch0411: aio-wait.h-introduce-AIO_WAIT_WHILE_UNLOCKED.patch Patch0412: main-loop.h-introduce-qemu_in_main_thread.patch Patch0413: 
nbd-server-CVE-2024-7409-Close-stray-clients-at-serv.patch Patch0414: nbd-server-CVE-2024-7409-Avoid-use-after-free-when-c.patch +Patch0415: system-physmem-Propagate-AddressSpace-to-MapClient-h.patch +Patch0416: system-physmem-Per-AddressSpace-bounce-buffering.patch +Patch0417: qdev-properties-add-size32-property-type.patch +Patch0418: softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch +Patch0419: mac_dbdma-Remove-leftover-dma_memory_unmap-calls-CVE.patch BuildRequires: flex BuildRequires: bison @@ -824,6 +829,13 @@ getent passwd qemu >/dev/null || \ %endif %changelog +* Mon Oct 14 2024 Jiabo Feng - 10:4.1.0-88 +- mac_dbdma: Remove leftover `dma_memory_unmap` calls(CVE-2024-8612) +- softmmu: Support concurrent bounce buffers(CVE-2024-8612) +- qdev-properties: add size32 property type +- system/physmem: Per-AddressSpace bounce buffering +- system/physmem: Propagate AddressSpace to MapClient helpers + * Wed Sep 18 2024 Jiabo Feng - 10:4.1.0-87 - nbd/server: CVE-2024-7409: Avoid use-after-free when closing server diff --git a/softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch b/softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch new file mode 100644 index 0000000000000000000000000000000000000000..02cd353a722f6ba09f53b3e81592cc6a8198974d --- /dev/null +++ b/softmmu-Support-concurrent-bounce-buffers-CVE-2024-8.patch @@ -0,0 +1,286 @@ +From f622157a79ca2f7e45693ab32fa1a8114cdf3e54 Mon Sep 17 00:00:00 2001 +From: Mattias Nissler +Date: Mon, 19 Aug 2024 06:54:54 -0700 +Subject: [PATCH 4/5] softmmu: Support concurrent bounce buffers(CVE-2024-8612) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +cherry-pick from 637b0aa139565cb82a7b9269e62214f87082635c + +When DMA memory can't be directly accessed, as is the case when +running the device model in a separate process without shareable DMA +file descriptors, bounce buffering is used. + +It is not uncommon for device models to request mapping of several DMA +regions at the same time. Examples include: + * net devices, e.g. when transmitting a packet that is split across + several TX descriptors (observed with igb) + * USB host controllers, when handling a packet with multiple data TRBs + (observed with xhci) + +Previously, qemu only provided a single bounce buffer per AddressSpace +and would fail DMA map requests while the buffer was already in use. In +turn, this would cause DMA failures that ultimately manifest as hardware +errors from the guest perspective. + +This change allocates DMA bounce buffers dynamically instead of +supporting only a single buffer. Thus, multiple DMA mappings work +correctly also when RAM can't be mmap()-ed. + +The total bounce buffer allocation size is limited individually for each +AddressSpace. The default limit is 4096 bytes, matching the previous +maximum buffer size. A new x-max-bounce-buffer-size parameter is +provided to configure the limit for PCI devices. 
+ +Signed-off-by: Mattias Nissler +Reviewed-by: Philippe Mathieu-Daudé +Acked-by: Peter Xu +Link: https://lore.kernel.org/r/20240819135455.2957406-1-mnissler@rivosinc.com +Signed-off-by: Peter Xu +--- + exec.c | 78 ++++++++++++++++++++++++++++++------------- + hw/pci/pci.c | 8 +++++ + include/exec/memory.h | 14 +++----- + include/hw/pci/pci.h | 3 ++ + memory.c | 5 +-- + 5 files changed, 74 insertions(+), 34 deletions(-) + +diff --git a/exec.c b/exec.c +index ac4ebaaf27..c766939b4e 100644 +--- a/exec.c ++++ b/exec.c +@@ -3610,6 +3610,20 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len) + NULL, len, FLUSH_CACHE); + } + ++/* ++ * A magic value stored in the first 8 bytes of the bounce buffer struct. Used ++ * to detect illegal pointers passed to address_space_unmap. ++ */ ++#define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed ++ ++typedef struct { ++ uint64_t magic; ++ MemoryRegion *mr; ++ hwaddr addr; ++ size_t len; ++ uint8_t buffer[]; ++} BounceBuffer; ++ + static void + address_space_unregister_map_client_do(AddressSpaceMapClient *client) + { +@@ -3635,7 +3649,7 @@ void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) + qemu_mutex_lock(&as->map_client_list_lock); + client->bh = bh; + QLIST_INSERT_HEAD(&as->map_client_list, client, link); +- if (!atomic_read(&as->bounce.in_use)) { ++ if (atomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { + address_space_notify_map_clients_locked(as); + } + qemu_mutex_unlock(&as->map_client_list_lock); +@@ -3769,30 +3783,41 @@ void *address_space_map(AddressSpace *as, + mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); + + if (!memory_access_is_direct(mr, is_write)) { +- if (atomic_xchg(&as->bounce.in_use, true)) { +- rcu_read_unlock(); ++ size_t used = atomic_read(&as->bounce_buffer_size); ++ for (;;) { ++ hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); ++ size_t new_size = used + alloc; ++ size_t actual = ++ atomic_cmpxchg(&as->bounce_buffer_size, used, new_size); ++ if (actual == used) { ++ l = alloc; ++ break; ++ } ++ used = actual; ++ } ++ ++ if (l == 0) { + *plen = 0; + return NULL; + } +- /* Avoid unbounded allocations */ +- l = MIN(l, TARGET_PAGE_SIZE); +- as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); +- as->bounce.addr = addr; +- as->bounce.len = l; + ++ BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); ++ bounce->magic = BOUNCE_BUFFER_MAGIC; + memory_region_ref(mr); +- as->bounce.mr = mr; ++ bounce->mr = mr; ++ bounce->addr = addr; ++ bounce->len = l; ++ + if (!is_write) { + flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, +- as->bounce.buffer, l); ++ bounce->buffer, l); + } + + rcu_read_unlock(); + *plen = l; +- return as->bounce.buffer; ++ return bounce->buffer; + } + +- + memory_region_ref(mr); + *plen = flatview_extend_translation(fv, addr, len, mr, xlat, + l, is_write, attrs); +@@ -3809,12 +3834,11 @@ void *address_space_map(AddressSpace *as, + void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len) + { +- if (buffer != as->bounce.buffer) { +- MemoryRegion *mr; +- ram_addr_t addr1; ++ MemoryRegion *mr; ++ ram_addr_t addr1; + +- mr = memory_region_from_host(buffer, &addr1); +- assert(mr != NULL); ++ mr = memory_region_from_host(buffer, &addr1); ++ if (mr != NULL) { + if (is_write) { + invalidate_and_set_dirty(mr, addr1, access_len); + } +@@ -3824,14 +3848,22 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + memory_region_unref(mr); + return; + } ++ ++ ++ BounceBuffer *bounce = container_of(buffer, 
BounceBuffer, buffer); ++ assert(bounce->magic == BOUNCE_BUFFER_MAGIC); ++ + if (is_write) { +- address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED, +- as->bounce.buffer, access_len); ++ address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, ++ bounce->buffer, access_len); + } +- qemu_vfree(as->bounce.buffer); +- as->bounce.buffer = NULL; +- memory_region_unref(as->bounce.mr); +- atomic_mb_set(&as->bounce.in_use, false); ++ ++ atomic_sub(&as->bounce_buffer_size, bounce->len); ++ bounce->magic = ~BOUNCE_BUFFER_MAGIC; ++ memory_region_unref(bounce->mr); ++ g_free(bounce); ++ /* Write bounce_buffer_size before reading map_client_list. */ ++ smp_mb(); + address_space_notify_map_clients(as); + } + +diff --git a/hw/pci/pci.c b/hw/pci/pci.c +index 9f6632ae7d..4009f30e6c 100644 +--- a/hw/pci/pci.c ++++ b/hw/pci/pci.c +@@ -71,6 +71,8 @@ static Property pci_props[] = { + QEMU_PCIE_LNKSTA_DLLLA_BITNR, true), + DEFINE_PROP_BIT("x-pcie-extcap-init", PCIDevice, cap_present, + QEMU_PCIE_EXTCAP_INIT_BITNR, true), ++ DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, ++ max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), + DEFINE_PROP_END_OF_LIST() + }; + +@@ -1051,6 +1053,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, + "bus master container", UINT64_MAX); + address_space_init(&pci_dev->bus_master_as, + &pci_dev->bus_master_container_region, pci_dev->name); ++ pci_dev->bus_master_as.max_bounce_buffer_size = ++ pci_dev->max_bounce_buffer_size; + + if (qdev_hotplug) { + pci_init_bus_master(pci_dev); +@@ -2600,6 +2604,10 @@ static void pci_device_class_init(ObjectClass *klass, void *data) + k->unrealize = pci_qdev_unrealize; + k->bus_type = TYPE_PCI_BUS; + k->props = pci_props; ++ object_class_property_set_description( ++ klass, "x-max-bounce-buffer-size", ++ "Maximum buffer size allocated for bounce buffers used for mapped " ++ "access to indirect DMA memory", NULL); + } + + static void pci_device_class_base_init(ObjectClass *klass, void *data) +diff --git a/include/exec/memory.h b/include/exec/memory.h +index 84955de627..541a047990 100644 +--- a/include/exec/memory.h ++++ b/include/exec/memory.h +@@ -444,13 +444,7 @@ typedef struct AddressSpaceMapClient { + QLIST_ENTRY(AddressSpaceMapClient) link; + } AddressSpaceMapClient; + +-typedef struct { +- MemoryRegion *mr; +- void *buffer; +- hwaddr addr; +- hwaddr len; +- bool in_use; +-} BounceBuffer; ++#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096) + + /** + * AddressSpace: describes a mapping of addresses to #MemoryRegion objects +@@ -469,8 +463,10 @@ struct AddressSpace { + QTAILQ_HEAD(, MemoryListener) listeners; + QTAILQ_ENTRY(AddressSpace) address_spaces_link; + +- /* Bounce buffer to use for this address space. 
*/ +- BounceBuffer bounce; ++ /* Maximum DMA bounce buffer size used for indirect memory map requests */ ++ size_t max_bounce_buffer_size; ++ /* Total size of bounce buffers currently allocated, atomically accessed */ ++ size_t bounce_buffer_size; + /* List of callbacks to invoke when buffers free up */ + QemuMutex map_client_list_lock; + QLIST_HEAD(, AddressSpaceMapClient) map_client_list; +diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h +index aaf1b9f70d..67bcde6ef9 100644 +--- a/include/hw/pci/pci.h ++++ b/include/hw/pci/pci.h +@@ -352,6 +352,9 @@ struct PCIDevice { + MSIVectorUseNotifier msix_vector_use_notifier; + MSIVectorReleaseNotifier msix_vector_release_notifier; + MSIVectorPollNotifier msix_vector_poll_notifier; ++ ++ /* Maximum DMA bounce buffer size used for indirect memory map requests */ ++ uint32_t max_bounce_buffer_size; + }; + + void pci_register_bar(PCIDevice *pci_dev, int region_num, +diff --git a/memory.c b/memory.c +index 67f45f9d15..b77ee70356 100644 +--- a/memory.c ++++ b/memory.c +@@ -2810,7 +2810,8 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) + as->ioeventfds = NULL; + QTAILQ_INIT(&as->listeners); + QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); +- as->bounce.in_use = false; ++ as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE; ++ as->bounce_buffer_size = 0; + qemu_mutex_init(&as->map_client_list_lock); + QLIST_INIT(&as->map_client_list); + as->name = g_strdup(name ? name : "anonymous"); +@@ -2820,7 +2821,7 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) + + static void do_address_space_destroy(AddressSpace *as) + { +- assert(!atomic_read(&as->bounce.in_use)); ++ assert(atomic_read(&as->bounce_buffer_size) == 0); + assert(QLIST_EMPTY(&as->map_client_list)); + qemu_mutex_destroy(&as->map_client_list_lock); + +-- +2.45.1.windows.1 + diff --git a/system-physmem-Per-AddressSpace-bounce-buffering.patch b/system-physmem-Per-AddressSpace-bounce-buffering.patch new file mode 100644 index 0000000000000000000000000000000000000000..fead5c84f2a7223eb125e6504e9db45c0d0aab2d --- /dev/null +++ b/system-physmem-Per-AddressSpace-bounce-buffering.patch @@ -0,0 +1,263 @@ +From 5f828d2fe42344b024e769c7313db85c80c39588 Mon Sep 17 00:00:00 2001 +From: Mattias Nissler +Date: Thu, 7 Sep 2023 06:04:23 -0700 +Subject: [PATCH 2/5] system/physmem: Per-AddressSpace bounce buffering +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +cherry-pick from 69e78f1b3484e429274352a464a94fa1d78be339 + +Instead of using a single global bounce buffer, give each AddressSpace +its own bounce buffer. The MapClient callback mechanism moves to +AddressSpace accordingly. + +This is in preparation for generalizing bounce buffer handling further +to allow multiple bounce buffers, with a total allocation limit +configured per AddressSpace. 
+ +Reviewed-by: Peter Xu +Tested-by: Jonathan Cameron +Signed-off-by: Mattias Nissler +Message-ID: <20240507094210.300566-2-mnissler@rivosinc.com> +Reviewed-by: Philippe Mathieu-Daudé +[PMD: Split patch, part 2/2] +Signed-off-by: Philippe Mathieu-Daudé +--- + exec.c | 79 ++++++++++++++++--------------------------- + include/exec/memory.h | 19 +++++++++++ + memory.c | 7 ++++ + 3 files changed, 56 insertions(+), 49 deletions(-) + +diff --git a/exec.c b/exec.c +index bb549f63ce..ac4ebaaf27 100644 +--- a/exec.c ++++ b/exec.c +@@ -3610,26 +3610,8 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len) + NULL, len, FLUSH_CACHE); + } + +-typedef struct { +- MemoryRegion *mr; +- void *buffer; +- hwaddr addr; +- hwaddr len; +- bool in_use; +-} BounceBuffer; +- +-static BounceBuffer bounce; +- +-typedef struct MapClient { +- QEMUBH *bh; +- QLIST_ENTRY(MapClient) link; +-} MapClient; +- +-QemuMutex map_client_list_lock; +-static QLIST_HEAD(, MapClient) map_client_list +- = QLIST_HEAD_INITIALIZER(map_client_list); +- +-static void address_space_unregister_map_client_do(MapClient *client) ++static void ++address_space_unregister_map_client_do(AddressSpaceMapClient *client) + { + QLIST_REMOVE(client, link); + g_free(client); +@@ -3637,10 +3619,10 @@ static void address_space_unregister_map_client_do(MapClient *client) + + static void address_space_notify_map_clients_locked(AddressSpace *as) + { +- MapClient *client; ++ AddressSpaceMapClient *client; + +- while (!QLIST_EMPTY(&map_client_list)) { +- client = QLIST_FIRST(&map_client_list); ++ while (!QLIST_EMPTY(&as->map_client_list)) { ++ client = QLIST_FIRST(&as->map_client_list); + qemu_bh_schedule(client->bh); + address_space_unregister_map_client_do(client); + } +@@ -3648,15 +3630,15 @@ static void address_space_notify_map_clients_locked(AddressSpace *as) + + void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) + { +- MapClient *client = g_malloc(sizeof(*client)); ++ AddressSpaceMapClient *client = g_malloc(sizeof(*client)); + +- qemu_mutex_lock(&map_client_list_lock); ++ qemu_mutex_lock(&as->map_client_list_lock); + client->bh = bh; +- QLIST_INSERT_HEAD(&map_client_list, client, link); +- if (!atomic_read(&bounce.in_use)) { ++ QLIST_INSERT_HEAD(&as->map_client_list, client, link); ++ if (!atomic_read(&as->bounce.in_use)) { + address_space_notify_map_clients_locked(as); + } +- qemu_mutex_unlock(&map_client_list_lock); ++ qemu_mutex_unlock(&as->map_client_list_lock); + } + + void cpu_exec_init_all(void) +@@ -3672,28 +3654,27 @@ void cpu_exec_init_all(void) + finalize_target_page_bits(); + io_mem_init(); + memory_map_init(); +- qemu_mutex_init(&map_client_list_lock); + } + + void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) + { +- MapClient *client; ++ AddressSpaceMapClient *client; + +- qemu_mutex_lock(&map_client_list_lock); +- QLIST_FOREACH(client, &map_client_list, link) { ++ qemu_mutex_lock(&as->map_client_list_lock); ++ QLIST_FOREACH(client, &as->map_client_list, link) { + if (client->bh == bh) { + address_space_unregister_map_client_do(client); + break; + } + } +- qemu_mutex_unlock(&map_client_list_lock); ++ qemu_mutex_unlock(&as->map_client_list_lock); + } + + static void address_space_notify_map_clients(AddressSpace *as) + { +- qemu_mutex_lock(&map_client_list_lock); ++ qemu_mutex_lock(&as->map_client_list_lock); + address_space_notify_map_clients_locked(as); +- qemu_mutex_unlock(&map_client_list_lock); ++ qemu_mutex_unlock(&as->map_client_list_lock); + } + + static bool flatview_access_valid(FlatView *fv, 
hwaddr addr, hwaddr len, +@@ -3788,27 +3769,27 @@ void *address_space_map(AddressSpace *as, + mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); + + if (!memory_access_is_direct(mr, is_write)) { +- if (atomic_xchg(&bounce.in_use, true)) { ++ if (atomic_xchg(&as->bounce.in_use, true)) { + rcu_read_unlock(); + *plen = 0; + return NULL; + } + /* Avoid unbounded allocations */ + l = MIN(l, TARGET_PAGE_SIZE); +- bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); +- bounce.addr = addr; +- bounce.len = l; ++ as->bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); ++ as->bounce.addr = addr; ++ as->bounce.len = l; + + memory_region_ref(mr); +- bounce.mr = mr; ++ as->bounce.mr = mr; + if (!is_write) { + flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, +- bounce.buffer, l); ++ as->bounce.buffer, l); + } + + rcu_read_unlock(); + *plen = l; +- return bounce.buffer; ++ return as->bounce.buffer; + } + + +@@ -3828,7 +3809,7 @@ void *address_space_map(AddressSpace *as, + void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len) + { +- if (buffer != bounce.buffer) { ++ if (buffer != as->bounce.buffer) { + MemoryRegion *mr; + ram_addr_t addr1; + +@@ -3844,13 +3825,13 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + return; + } + if (is_write) { +- address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, +- bounce.buffer, access_len); ++ address_space_write(as, as->bounce.addr, MEMTXATTRS_UNSPECIFIED, ++ as->bounce.buffer, access_len); + } +- qemu_vfree(bounce.buffer); +- bounce.buffer = NULL; +- memory_region_unref(bounce.mr); +- atomic_mb_set(&bounce.in_use, false); ++ qemu_vfree(as->bounce.buffer); ++ as->bounce.buffer = NULL; ++ memory_region_unref(as->bounce.mr); ++ atomic_mb_set(&as->bounce.in_use, false); + address_space_notify_map_clients(as); + } + +diff --git a/include/exec/memory.h b/include/exec/memory.h +index aa35416d80..84955de627 100644 +--- a/include/exec/memory.h ++++ b/include/exec/memory.h +@@ -439,6 +439,19 @@ struct MemoryListener { + QTAILQ_ENTRY(MemoryListener) link_as; + }; + ++typedef struct AddressSpaceMapClient { ++ QEMUBH *bh; ++ QLIST_ENTRY(AddressSpaceMapClient) link; ++} AddressSpaceMapClient; ++ ++typedef struct { ++ MemoryRegion *mr; ++ void *buffer; ++ hwaddr addr; ++ hwaddr len; ++ bool in_use; ++} BounceBuffer; ++ + /** + * AddressSpace: describes a mapping of addresses to #MemoryRegion objects + */ +@@ -455,6 +468,12 @@ struct AddressSpace { + struct MemoryRegionIoeventfd *ioeventfds; + QTAILQ_HEAD(, MemoryListener) listeners; + QTAILQ_ENTRY(AddressSpace) address_spaces_link; ++ ++ /* Bounce buffer to use for this address space. */ ++ BounceBuffer bounce; ++ /* List of callbacks to invoke when buffers free up */ ++ QemuMutex map_client_list_lock; ++ QLIST_HEAD(, AddressSpaceMapClient) map_client_list; + }; + + typedef struct AddressSpaceDispatch AddressSpaceDispatch; +diff --git a/memory.c b/memory.c +index fa7053f9cb..67f45f9d15 100644 +--- a/memory.c ++++ b/memory.c +@@ -2810,6 +2810,9 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) + as->ioeventfds = NULL; + QTAILQ_INIT(&as->listeners); + QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); ++ as->bounce.in_use = false; ++ qemu_mutex_init(&as->map_client_list_lock); ++ QLIST_INIT(&as->map_client_list); + as->name = g_strdup(name ? 
name : "anonymous"); + address_space_update_topology(as); + address_space_update_ioeventfds(as); +@@ -2817,6 +2820,10 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) + + static void do_address_space_destroy(AddressSpace *as) + { ++ assert(!atomic_read(&as->bounce.in_use)); ++ assert(QLIST_EMPTY(&as->map_client_list)); ++ qemu_mutex_destroy(&as->map_client_list_lock); ++ + assert(QTAILQ_EMPTY(&as->listeners)); + + flatview_unref(as->current_map); +-- +2.45.1.windows.1 + diff --git a/system-physmem-Propagate-AddressSpace-to-MapClient-h.patch b/system-physmem-Propagate-AddressSpace-to-MapClient-h.patch new file mode 100644 index 0000000000000000000000000000000000000000..91f54d49a14e054c4bbba2cb5a535b3f533cf629 --- /dev/null +++ b/system-physmem-Propagate-AddressSpace-to-MapClient-h.patch @@ -0,0 +1,206 @@ +From 3e13a72b4b99a74767267b2e9a9e4f6cafae7e66 Mon Sep 17 00:00:00 2001 +From: Mattias Nissler +Date: Thu, 7 Sep 2023 06:04:23 -0700 +Subject: [PATCH 1/5] system/physmem: Propagate AddressSpace to MapClient + helpers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +cherry-pick from 5c62719710bab66a98f68ebdba333e2240ed6668 + +Propagate AddressSpace handler to following helpers: +- register_map_client() +- unregister_map_client() +- notify_map_clients[_locked]() + +Rename them using 'address_space_' prefix instead of 'cpu_'. + +The AddressSpace argument will be used in the next commit. + +Reviewed-by: Peter Xu +Tested-by: Jonathan Cameron +Signed-off-by: Mattias Nissler +Message-ID: <20240507094210.300566-2-mnissler@rivosinc.com> +[PMD: Split patch, part 1/2] +Signed-off-by: Philippe Mathieu-Daudé +--- + dma-helpers.c | 4 ++-- + exec.c | 24 ++++++++++++------------ + include/exec/cpu-common.h | 2 -- + include/exec/memory.h | 26 ++++++++++++++++++++++++-- + 4 files changed, 38 insertions(+), 18 deletions(-) + +diff --git a/dma-helpers.c b/dma-helpers.c +index d3871dc61e..397b437734 100644 +--- a/dma-helpers.c ++++ b/dma-helpers.c +@@ -155,7 +155,7 @@ static void dma_blk_cb(void *opaque, int ret) + if (dbs->iov.size == 0) { + trace_dma_map_wait(dbs); + dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs); +- cpu_register_map_client(dbs->bh); ++ address_space_register_map_client(dbs->sg->as, dbs->bh); + return; + } + +@@ -185,7 +185,7 @@ static void dma_aio_cancel(BlockAIOCB *acb) + } + + if (dbs->bh) { +- cpu_unregister_map_client(dbs->bh); ++ address_space_unregister_map_client(dbs->sg->as, dbs->bh); + qemu_bh_delete(dbs->bh); + dbs->bh = NULL; + } +diff --git a/exec.c b/exec.c +index 0a6ac67c84..bb549f63ce 100644 +--- a/exec.c ++++ b/exec.c +@@ -3629,24 +3629,24 @@ QemuMutex map_client_list_lock; + static QLIST_HEAD(, MapClient) map_client_list + = QLIST_HEAD_INITIALIZER(map_client_list); + +-static void cpu_unregister_map_client_do(MapClient *client) ++static void address_space_unregister_map_client_do(MapClient *client) + { + QLIST_REMOVE(client, link); + g_free(client); + } + +-static void cpu_notify_map_clients_locked(void) ++static void address_space_notify_map_clients_locked(AddressSpace *as) + { + MapClient *client; + + while (!QLIST_EMPTY(&map_client_list)) { + client = QLIST_FIRST(&map_client_list); + qemu_bh_schedule(client->bh); +- cpu_unregister_map_client_do(client); ++ address_space_unregister_map_client_do(client); + } + } + +-void cpu_register_map_client(QEMUBH *bh) ++void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) + { + MapClient *client = g_malloc(sizeof(*client)); + +@@ 
-3654,7 +3654,7 @@ void cpu_register_map_client(QEMUBH *bh) + client->bh = bh; + QLIST_INSERT_HEAD(&map_client_list, client, link); + if (!atomic_read(&bounce.in_use)) { +- cpu_notify_map_clients_locked(); ++ address_space_notify_map_clients_locked(as); + } + qemu_mutex_unlock(&map_client_list_lock); + } +@@ -3675,24 +3675,24 @@ void cpu_exec_init_all(void) + qemu_mutex_init(&map_client_list_lock); + } + +-void cpu_unregister_map_client(QEMUBH *bh) ++void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) + { + MapClient *client; + + qemu_mutex_lock(&map_client_list_lock); + QLIST_FOREACH(client, &map_client_list, link) { + if (client->bh == bh) { +- cpu_unregister_map_client_do(client); ++ address_space_unregister_map_client_do(client); + break; + } + } + qemu_mutex_unlock(&map_client_list_lock); + } + +-static void cpu_notify_map_clients(void) ++static void address_space_notify_map_clients(AddressSpace *as) + { + qemu_mutex_lock(&map_client_list_lock); +- cpu_notify_map_clients_locked(); ++ address_space_notify_map_clients_locked(as); + qemu_mutex_unlock(&map_client_list_lock); + } + +@@ -3763,8 +3763,8 @@ flatview_extend_translation(FlatView *fv, hwaddr addr, + * May map a subset of the requested range, given by and returned in *plen. + * May return NULL if resources needed to perform the mapping are exhausted. + * Use only for reads OR writes - not for read-modify-write operations. +- * Use cpu_register_map_client() to know when retrying the map operation is +- * likely to succeed. ++ * Use address_space_register_map_client() to know when retrying the map ++ * operation is likely to succeed. + */ + void *address_space_map(AddressSpace *as, + hwaddr addr, +@@ -3851,7 +3851,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + bounce.buffer = NULL; + memory_region_unref(bounce.mr); + atomic_mb_set(&bounce.in_use, false); +- cpu_notify_map_clients(); ++ address_space_notify_map_clients(as); + } + + void *cpu_physical_memory_map(hwaddr addr, +diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h +index f7dbe75fbc..6070fa83ff 100644 +--- a/include/exec/cpu-common.h ++++ b/include/exec/cpu-common.h +@@ -86,8 +86,6 @@ void *cpu_physical_memory_map(hwaddr addr, + int is_write); + void cpu_physical_memory_unmap(void *buffer, hwaddr len, + int is_write, hwaddr access_len); +-void cpu_register_map_client(QEMUBH *bh); +-void cpu_unregister_map_client(QEMUBH *bh); + + bool cpu_physical_memory_is_io(hwaddr phys_addr); + +diff --git a/include/exec/memory.h b/include/exec/memory.h +index ad260ebbd6..aa35416d80 100644 +--- a/include/exec/memory.h ++++ b/include/exec/memory.h +@@ -2072,8 +2072,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, + * May return %NULL and set *@plen to zero(0), if resources needed to perform + * the mapping are exhausted. + * Use only for reads OR writes - not for read-modify-write operations. +- * Use cpu_register_map_client() to know when retrying the map operation is +- * likely to succeed. ++ * Use address_space_register_map_client() to know when retrying the map ++ * operation is likely to succeed. 
+ * + * @as: #AddressSpace to be accessed + * @addr: address within that address space +@@ -2098,6 +2098,28 @@ void *address_space_map(AddressSpace *as, hwaddr addr, + void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, + int is_write, hwaddr access_len); + ++/* ++ * address_space_register_map_client: Register a callback to invoke when ++ * resources for address_space_map() are available again. ++ * ++ * address_space_map may fail when there are not enough resources available, ++ * such as when bounce buffer memory would exceed the limit. The callback can ++ * be used to retry the address_space_map operation. Note that the callback ++ * gets automatically removed after firing. ++ * ++ * @as: #AddressSpace to be accessed ++ * @bh: callback to invoke when address_space_map() retry is appropriate ++ */ ++void address_space_register_map_client(AddressSpace *as, QEMUBH *bh); ++ ++/* ++ * address_space_unregister_map_client: Unregister a callback that has ++ * previously been registered and not fired yet. ++ * ++ * @as: #AddressSpace to be accessed ++ * @bh: callback to unregister ++ */ ++void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh); + + /* Internal functions, part of the implementation of address_space_read. */ + MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, +-- +2.45.1.windows.1 +
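
For reference, the core of the CVE-2024-8612 fix carried by the patches above is the switch from a single per-AddressSpace in_use flag to per-AddressSpace size accounting: address_space_map() reserves bytes against max_bounce_buffer_size with a compare-and-swap loop, and address_space_unmap() releases them and wakes any registered map clients. The standalone C sketch below illustrates only that reservation pattern under simplified assumptions; ASAccounting, Bounce, bounce_map(), bounce_unmap() and MAX_BOUNCE_BUFFER_SIZE are names invented for the example and are not QEMU APIs.

/*
 * Minimal sketch of the per-address-space bounce-buffer accounting
 * introduced by "softmmu: Support concurrent bounce buffers".
 * Illustrative only; not QEMU code.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BOUNCE_BUFFER_SIZE 4096   /* default cap, matching the patch */

typedef struct {
    atomic_size_t bounce_buffer_size; /* bytes currently reserved */
    size_t max_bounce_buffer_size;    /* per-address-space cap */
} ASAccounting;

typedef struct {
    size_t len;                       /* reserved length, released on unmap */
    uint8_t buffer[];                 /* payload follows the header */
} Bounce;

/* Reserve up to 'want' bytes against the cap; returns the bytes granted. */
static size_t reserve(ASAccounting *as, size_t want)
{
    size_t used = atomic_load(&as->bounce_buffer_size);
    for (;;) {
        size_t room = as->max_bounce_buffer_size > used
                      ? as->max_bounce_buffer_size - used : 0;
        size_t grant = want < room ? want : room;
        /* On failure 'used' is refreshed with the current value; retry. */
        if (atomic_compare_exchange_weak(&as->bounce_buffer_size,
                                         &used, used + grant)) {
            return grant;             /* may be 0 when the cap is exhausted */
        }
    }
}

static Bounce *bounce_map(ASAccounting *as, size_t want)
{
    size_t granted = reserve(as, want);
    if (granted == 0) {
        return NULL;                  /* caller retries via a map-client callback */
    }
    Bounce *b = calloc(1, sizeof(*b) + granted);
    if (!b) {
        atomic_fetch_sub(&as->bounce_buffer_size, granted);
        return NULL;
    }
    b->len = granted;
    return b;
}

static void bounce_unmap(ASAccounting *as, Bounce *b)
{
    atomic_fetch_sub(&as->bounce_buffer_size, b->len);
    free(b);                          /* then notify waiting map clients */
}

int main(void)
{
    ASAccounting as = { .max_bounce_buffer_size = MAX_BOUNCE_BUFFER_SIZE };
    atomic_init(&as.bounce_buffer_size, 0);

    Bounce *a = bounce_map(&as, 3000); /* granted 3000 */
    Bounce *b = bounce_map(&as, 3000); /* truncated to the remaining 1096 */
    Bounce *c = bounce_map(&as, 3000); /* cap exhausted: NULL */

    printf("a=%zu b=%zu c=%s\n", a->len, b->len, c ? "mapped" : "NULL");

    bounce_unmap(&as, b);
    bounce_unmap(&as, a);
    return 0;
}

The design point this mirrors is that a mapping request is granted whatever headroom remains rather than failing outright as soon as one buffer is in flight; only when the cap is fully exhausted does the caller get NULL and fall back to the address_space_register_map_client() retry path introduced by the first two patches in the series.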