diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index 91663946de460c40f126184b34a3d5cea2a57daa..e1c780159882755f6eb48af497f70d74526031cb 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -41,7 +41,7 @@ build-system-ubuntu: variables: IMAGE: ubuntu2204 CONFIGURE_ARGS: --enable-docs - TARGETS: alpha-softmmu microblaze-softmmu mips64el-softmmu + TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu MAKE_CHECK_ARGS: check-build check-system-ubuntu: @@ -61,7 +61,7 @@ avocado-system-ubuntu: variables: IMAGE: ubuntu2204 MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:alpha arch:microblaze arch:mips64el + AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el build-system-debian: extends: @@ -184,7 +184,7 @@ avocado-system-centos: variables: IMAGE: centos8 MAKE_CHECK_ARGS: check-avocado - AVOCADO_TAGS: arch:ppc64 arch:or1k arch:390x arch:x86_64 arch:rx + AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx arch:sh4 arch:nios2 build-system-opensuse: @@ -647,7 +647,10 @@ pages: - mkdir -p public # HTML-ised source tree - make gtags - - htags -anT --tree-view=filetree -m qemu_init + # We unset variables to work around a bug in some htags versions + # which causes it to fail when the environment is large + - CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags + -anT --tree-view=filetree -m qemu_init -t "Welcome to the QEMU sourcecode" - mv HTML public/src # Project documentation diff --git a/.mailmap b/.mailmap index e12e19f691774c3b9ccbce44ec073e65b4049152..d94572af05b5b6346fc1014ce01cb1c44c46768f 100644 --- a/.mailmap +++ b/.mailmap @@ -81,6 +81,7 @@ Greg Kurz Huacai Chen Huacai Chen James Hogan +Juan Quintela Leif Lindholm Leif Lindholm Luc Michel diff --git a/.readthedocs.yml b/.readthedocs.yml index 7fb7b8dd61add42f8b374ba53513d205bfa710c8..0b262469ce6931e177fd203579d14a2c091253e1 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,16 +5,21 @@ # Required version: 2 +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py +# We recommend specifying your dependencies to enable reproducible builds: +# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements.txt + # We want all the document formats formats: all - -# For consistency, we require that QEMU's Sphinx extensions -# run with at least the same minimum version of Python that -# we require for other Python in our codebase (our conf.py -# enforces this, and some code needs it.) -python: - version: 3.6 diff --git a/MAINTAINERS b/MAINTAINERS index 695e0bd34fbba253d77570e5b3ef8dabe7a174b3..b406fb20c05934460067f8f92a69d7f6aee76200 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -70,7 +70,6 @@ R: Daniel P. 
Berrangé R: Thomas Huth R: Markus Armbruster R: Philippe Mathieu-Daudé -R: Juan Quintela W: https://www.qemu.org/docs/master/devel/index.html S: Odd Fixes F: docs/devel/style.rst @@ -1123,6 +1122,21 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/olimex-stm32-h405.c +STM32L4x5 SoC Family +M: Arnaud Minier +M: Inès Varhol +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/stm32l4x5_soc.c +F: include/hw/arm/stm32l4x5_soc.h + +B-L475E-IOT01A IoT Node +M: Arnaud Minier +M: Inès Varhol +L: qemu-arm@nongnu.org +S: Maintained +F: hw/arm/b-l475e-iot01a.c + SmartFusion2 M: Subbaraya Sundeep M: Peter Maydell @@ -2167,6 +2181,17 @@ F: hw/vfio/ap.c F: docs/system/s390x/vfio-ap.rst L: qemu-s390x@nongnu.org +iommufd +M: Yi Liu +M: Eric Auger +M: Zhenzhong Duan +S: Supported +F: backends/iommufd.c +F: include/sysemu/iommufd.h +F: include/qemu/chardev_open.h +F: util/chardev_open.c +F: docs/devel/vfio-iommufd.rst + vhost M: Michael S. Tsirkin S: Supported @@ -2388,8 +2413,13 @@ F: hw/net/net_tx_pkt* Vmware M: Dmitry Fleytman S: Maintained +F: docs/specs/vmw_pvscsi-spec.txt +F: hw/display/vmware_vga.c F: hw/net/vmxnet* F: hw/scsi/vmw_pvscsi* +F: pc-bios/efi-vmxnet3.rom +F: pc-bios/vgabios-vmware.bin +F: roms/config.vga-vmware F: tests/qtest/vmxnet3-test.c F: docs/specs/vwm_pvscsi-spec.rst @@ -3339,10 +3369,8 @@ S: Odd Fixes F: scripts/checkpatch.pl Migration -M: Juan Quintela M: Peter Xu M: Fabiano Rosas -R: Leonardo Bras S: Maintained F: hw/core/vmstate-if.c F: include/hw/vmstate-if.h @@ -3359,10 +3387,8 @@ F: util/userfaultfd.c X: migration/rdma* RDMA Migration -M: Juan Quintela R: Li Zhijian R: Peter Xu -R: Leonardo Bras S: Odd Fixes F: migration/rdma* diff --git a/Makefile b/Makefile index 676a4a54f48043354afb4dcd8d5e54f9e140d9ff..8f369903356ced3097b1caf1120f923728a7b8d1 100644 --- a/Makefile +++ b/Makefile @@ -202,6 +202,7 @@ clean: recurse-clean ! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \ -exec rm {} + rm -f TAGS cscope.* *~ */*~ + @$(MAKE) -Ctests/qemu-iotests clean VERSION = $(shell cat $(SRC_PATH)/VERSION) diff --git a/VERSION b/VERSION index fbb9ea12de3ae8faf54bd162988a9772eaaf7be9..11ff3cdc1ca0897e83ed0155dbbaf91a88adcd2f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -8.2.0 +8.2.50 diff --git a/accel/accel-blocker.c b/accel/accel-blocker.c index 1e7f423462df10ff7d7b09638d6006889e1e68c6..e083f24aa80747498192f19e1071949bc7b015ef 100644 --- a/accel/accel-blocker.c +++ b/accel/accel-blocker.c @@ -41,7 +41,7 @@ void accel_blocker_init(void) void accel_ioctl_begin(void) { - if (likely(qemu_mutex_iothread_locked())) { + if (likely(bql_locked())) { return; } @@ -51,7 +51,7 @@ void accel_ioctl_begin(void) void accel_ioctl_end(void) { - if (likely(qemu_mutex_iothread_locked())) { + if (likely(bql_locked())) { return; } @@ -62,7 +62,7 @@ void accel_ioctl_end(void) void accel_cpu_ioctl_begin(CPUState *cpu) { - if (unlikely(qemu_mutex_iothread_locked())) { + if (unlikely(bql_locked())) { return; } @@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu) void accel_cpu_ioctl_end(CPUState *cpu) { - if (unlikely(qemu_mutex_iothread_locked())) { + if (unlikely(bql_locked())) { return; } @@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void) * We allow to inhibit only when holding the BQL, so we can identify * when an inhibitor wants to issue an ioctl easily. */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* Block further invocations of the ioctls outside the BQL. 
*/ CPU_FOREACH(cpu) { diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index b75c919ac358677919598506ce55bacad35601f4..f4b0ec58900cf19264d2b548c1a0db8a6a55d945 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->neg.can_do_io = true; @@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { - qemu_mutex_unlock_iothread(); + bql_unlock(); #ifndef _WIN32 do { int sig; @@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg) #else qemu_sem_wait(&cpu->sem); #endif - qemu_mutex_lock_iothread(); + bql_lock(); qemu_wait_io_event(cpu); } while (!cpu->unplug); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index abe7adf7ee87c89532fdbb78d08748e1140340ae..8eabb696facb8993bc0f146c85ac377bf40f14ec 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg) hvf_vcpu_destroy(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index 6195150a0b4d2f310614d4efa18d409dd0c28044..45ff06e953842b67a65b9a1ab3d097d7aa1a0f84 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); cpu->neg.can_do_io = true; @@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg) kvm_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index e39a810a4e92333a251711a223eb57b05dc2033d..bbc60146d15a0dbba374c45a81c9a85713953df3 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -69,16 +69,6 @@ #define KVM_GUESTDBG_BLOCKIRQ 0 #endif -//#define DEBUG_KVM - -#ifdef DEBUG_KVM -#define DPRINTF(fmt, ...) \ - do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) -#else -#define DPRINTF(fmt, ...) 
\ - do { } while (0) -#endif - struct KVMParkedVcpu { unsigned long vcpu_id; int kvm_fd; @@ -98,7 +88,7 @@ bool kvm_allowed; bool kvm_readonly_mem_allowed; bool kvm_vm_attributes_allowed; bool kvm_msi_use_devid; -bool kvm_has_guest_debug; +static bool kvm_has_guest_debug; static int kvm_sstep_flags; static bool kvm_immediate_exit; static hwaddr kvm_max_slot_size = ~0; @@ -331,7 +321,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) struct KVMParkedVcpu *vcpu = NULL; int ret = 0; - DPRINTF("kvm_destroy_vcpu\n"); + trace_kvm_destroy_vcpu(); ret = kvm_arch_destroy_vcpu(cpu); if (ret < 0) { @@ -341,7 +331,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { ret = mmap_size; - DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n"); + trace_kvm_failed_get_vcpu_mmap_size(); goto err; } @@ -443,7 +433,6 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) PAGE_SIZE * KVM_DIRTY_LOG_PAGE_OFFSET); if (cpu->kvm_dirty_gfns == MAP_FAILED) { ret = -errno; - DPRINTF("mmap'ing vcpu dirty gfns failed: %d\n", ret); goto err; } } @@ -817,7 +806,7 @@ static void kvm_dirty_ring_flush(void) * should always be with BQL held, serialization is guaranteed. * However, let's be sure of it. */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* * First make sure to flush the hardware buffers by kicking all * vcpus out in a synchronous way. @@ -1402,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data) trace_kvm_dirty_ring_reaper("wakeup"); r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; - qemu_mutex_lock_iothread(); + bql_lock(); kvm_dirty_ring_reap(s, NULL); - qemu_mutex_unlock_iothread(); + bql_unlock(); r->reaper_iteration++; } @@ -2821,14 +2810,14 @@ int kvm_cpu_exec(CPUState *cpu) struct kvm_run *run = cpu->kvm_run; int ret, run_ret; - DPRINTF("kvm_cpu_exec()\n"); + trace_kvm_cpu_exec(); if (kvm_arch_process_async_events(cpu)) { qatomic_set(&cpu->exit_request, 0); return EXCP_HLT; } - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_start(cpu); do { @@ -2848,7 +2837,7 @@ int kvm_cpu_exec(CPUState *cpu) kvm_arch_pre_run(cpu, run); if (qatomic_read(&cpu->exit_request)) { - DPRINTF("interrupt exit requested\n"); + trace_kvm_interrupt_exit_request(); /* * KVM requires us to reenter the kernel after IO exits to complete * instruction emulation. 
This self-signal will ensure that we @@ -2868,17 +2857,17 @@ int kvm_cpu_exec(CPUState *cpu) #ifdef KVM_HAVE_MCE_INJECTION if (unlikely(have_sigbus_pending)) { - qemu_mutex_lock_iothread(); + bql_lock(); kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code, pending_sigbus_addr); have_sigbus_pending = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif if (run_ret < 0) { if (run_ret == -EINTR || run_ret == -EAGAIN) { - DPRINTF("io window exit\n"); + trace_kvm_io_window_exit(); kvm_eat_signals(cpu); ret = EXCP_INTERRUPT; break; @@ -2900,7 +2889,6 @@ int kvm_cpu_exec(CPUState *cpu) trace_kvm_run_exit(cpu->cpu_index, run->exit_reason); switch (run->exit_reason) { case KVM_EXIT_IO: - DPRINTF("handle_io\n"); /* Called outside BQL */ kvm_handle_io(run->io.port, attrs, (uint8_t *)run + run->io.data_offset, @@ -2910,7 +2898,6 @@ int kvm_cpu_exec(CPUState *cpu) ret = 0; break; case KVM_EXIT_MMIO: - DPRINTF("handle_mmio\n"); /* Called outside BQL */ address_space_rw(&address_space_memory, run->mmio.phys_addr, attrs, @@ -2920,11 +2907,9 @@ int kvm_cpu_exec(CPUState *cpu) ret = 0; break; case KVM_EXIT_IRQ_WINDOW_OPEN: - DPRINTF("irq_window_open\n"); ret = EXCP_INTERRUPT; break; case KVM_EXIT_SHUTDOWN: - DPRINTF("shutdown\n"); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); ret = EXCP_INTERRUPT; break; @@ -2942,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu) * still full. Got kicked by KVM_RESET_DIRTY_RINGS. */ trace_kvm_dirty_ring_full(cpu->cpu_index); - qemu_mutex_lock_iothread(); + bql_lock(); /* * We throttle vCPU by making it sleep once it exit from kernel * due to dirty ring full. In the dirtylimit scenario, reaping @@ -2954,11 +2939,12 @@ int kvm_cpu_exec(CPUState *cpu) } else { kvm_dirty_ring_reap(kvm_state, NULL); } - qemu_mutex_unlock_iothread(); + bql_unlock(); dirtylimit_vcpu_execute(cpu); ret = 0; break; case KVM_EXIT_SYSTEM_EVENT: + trace_kvm_run_exit_system_event(cpu->cpu_index, run->system_event.type); switch (run->system_event.type) { case KVM_SYSTEM_EVENT_SHUTDOWN: qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); @@ -2970,26 +2956,24 @@ int kvm_cpu_exec(CPUState *cpu) break; case KVM_SYSTEM_EVENT_CRASH: kvm_cpu_synchronize_state(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = 0; break; default: - DPRINTF("kvm_arch_handle_exit\n"); ret = kvm_arch_handle_exit(cpu, run); break; } break; default: - DPRINTF("kvm_arch_handle_exit\n"); ret = kvm_arch_handle_exit(cpu, run); break; } } while (ret == 0); cpu_exec_end(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); if (ret < 0) { cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events index 399aaeb0ec757cf50ba44943d4ab4d30e02c1a0d..a25902597b1b668f9264a92c31a5dda094abf338 100644 --- a/accel/kvm/trace-events +++ b/accel/kvm/trace-events @@ -25,4 +25,9 @@ kvm_dirty_ring_reaper(const char *s) "%s" kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)" kvm_dirty_ring_reaper_kick(const char *reason) "%s" kvm_dirty_ring_flush(int finished) "%d" - +kvm_destroy_vcpu(void) "" +kvm_failed_get_vcpu_mmap_size(void) "" +kvm_cpu_exec(void) "" +kvm_interrupt_exit_request(void) "" +kvm_io_window_exit(void) "" +kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32 diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 
c938eb96f8fd654c7325595806a5084f0b9e0c79..67eda9865ee2a89f7d5a4f3a4142efb1a869d4fd 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu) tcg_ctx->gen_tb = NULL; } #endif - if (qemu_mutex_iothread_locked()) { - qemu_mutex_unlock_iothread(); + if (bql_locked()) { + bql_unlock(); } assert_no_pages_locked(); } @@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu) #if defined(TARGET_I386) if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { X86CPU *x86_cpu = X86_CPU(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); apic_poll_irq(x86_cpu->apic_state); cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif /* TARGET_I386 */ if (!cpu_has_work(cpu)) { @@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) #else if (replay_exception()) { CPUClass *cc = CPU_GET_CLASS(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); cc->tcg_ops->do_interrupt(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu->exception_index = -1; if (unlikely(cpu->singlestep_enabled)) { @@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, if (unlikely(qatomic_read(&cpu->interrupt_request))) { int interrupt_request; - qemu_mutex_lock_iothread(); + bql_lock(); interrupt_request = cpu->interrupt_request; if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { /* Mask out external interrupts for this step. */ @@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, if (interrupt_request & CPU_INTERRUPT_DEBUG) { cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; cpu->exception_index = EXCP_DEBUG; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #if !defined(CONFIG_USER_ONLY) @@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; cpu->halted = 1; cpu->exception_index = EXCP_HLT; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #if defined(TARGET_I386) @@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0); do_cpu_init(x86_cpu); cpu->exception_index = EXCP_HALTED; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #else else if (interrupt_request & CPU_INTERRUPT_RESET) { replay_interrupt(); cpu_reset(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } #endif /* !TARGET_I386 */ @@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, */ if (unlikely(cpu->singlestep_enabled)) { cpu->exception_index = EXCP_DEBUG; - qemu_mutex_unlock_iothread(); + bql_unlock(); return true; } cpu->exception_index = -1; @@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, } /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Finally, check if we need to exit to the main loop. */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index db3f93fda990c52984f438dad15f5b198b1a4fde..3facfcbb24b6c3a874868b29fd26115a9d7d6760 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1975,7 +1975,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, * @size: number of bytes * @mmu_idx: virtual address context * @ra: return address into tcg generated code, or 0 - * Context: iothread lock held + * Context: BQL held * * Load @size bytes from @addr, which is memory-mapped i/o. 
* The bytes are concatenated in big-endian order with @ret_be. @@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + bql_lock(); ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx, type, ra, mr, mr_offset); - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + bql_lock(); a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx, MMU_DATA_LOAD, ra, mr, mr_offset); b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx, MMU_DATA_LOAD, ra, mr, mr_offset + size - 8); - qemu_mutex_unlock_iothread(); + bql_unlock(); return int128_make128(b, a); } @@ -2521,7 +2521,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr, * @size: number of bytes * @mmu_idx: virtual address context * @ra: return address into tcg generated code, or 0 - * Context: iothread lock held + * Context: BQL held * * Store @size bytes at @addr, which is memory-mapped i/o. * The bytes to store are extracted in little-endian order from @val_le; @@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + bql_lock(); ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx, ra, mr, mr_offset); - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full, section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra); mr = section->mr; - qemu_mutex_lock_iothread(); + bql_lock(); int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8, mmu_idx, ra, mr, mr_offset); ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8, size - 8, mmu_idx, ra, mr, mr_offset + 8); - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } diff --git a/accel/tcg/meson.build b/accel/tcg/meson.build index 8783edd06ee578d0332852901c92bf7ccc1e6d70..d25638d6c15d0724c134b4f98f2fccdbdb7aee1b 100644 --- a/accel/tcg/meson.build +++ b/accel/tcg/meson.build @@ -17,7 +17,9 @@ if get_option('plugins') tcg_ss.add(files('plugin-gen.c')) endif tcg_ss.add(when: libdw, if_true: files('debuginfo.c')) -tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c')) +if host_os == 'linux' + tcg_ss.add(files('perf.c')) +endif specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss) specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files( diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c index b25685fb712ecdbd7379d026c7bf83c0818be50e..9e1ae66f651dc0f867767d8cbc6378ec11114794 100644 --- a/accel/tcg/tcg-accel-ops-icount.c +++ b/accel/tcg/tcg-accel-ops-icount.c @@ -123,12 +123,12 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget) if (cpu->icount_budget == 0) { /* - * We're called without the iothread lock, so must take it while + * We're called without the BQL, so must take it while * we're calling timer handlers. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); icount_notify_aio_contexts(); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index fac80095bbd09944870a29312de0a906c63ca3c0..af7307013a54583b04de820d64a661c2a42a7bbf 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg) rcu_add_force_rcu_notifier(&force_rcu.notifier); tcg_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg) do { if (cpu_can_run(cpu)) { int r; - qemu_mutex_unlock_iothread(); + bql_unlock(); r = tcg_cpus_exec(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); switch (r) { case EXCP_DEBUG: cpu_handle_guest_debug(cpu); @@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg) */ break; case EXCP_ATOMIC: - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); default: /* Ignore everything else? */ break; @@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg) } while (!cpu->unplug || cpu_can_run(cpu)); tcg_cpus_destroy(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_remove_force_rcu_notifier(&force_rcu.notifier); rcu_unregister_thread(); return NULL; diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 611932f3c3a8c953a3ff68612139696fed0e92e1..3208035d85c868fa91b0522c56bb151cd4c86295 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -109,9 +109,9 @@ static void rr_wait_io_event(void) { CPUState *cpu; - while (all_cpu_threads_idle()) { + while (all_cpu_threads_idle() && replay_can_wait()) { rr_stop_kick_timer(); - qemu_cond_wait_iothread(first_cpu->halt_cond); + qemu_cond_wait_bql(first_cpu->halt_cond); } rr_start_kick_timer(); @@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg) rcu_add_force_rcu_notifier(&force_rcu); tcg_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); @@ -198,7 +198,7 @@ static void *rr_cpu_thread_fn(void *arg) /* wait for initial kick-off after machine start */ while (first_cpu->stopped) { - qemu_cond_wait_iothread(first_cpu->halt_cond); + qemu_cond_wait_bql(first_cpu->halt_cond); /* process any pending work */ CPU_FOREACH(cpu) { @@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg) /* Only used for icount_enabled() */ int64_t cpu_budget = 0; - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); if (icount_enabled()) { int cpu_count = rr_cpu_count(); @@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg) if (cpu_can_run(cpu)) { int r; - qemu_mutex_unlock_iothread(); + bql_unlock(); if (icount_enabled()) { icount_prepare_for_run(cpu, cpu_budget); } @@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg) if (icount_enabled()) { icount_process_data(cpu); } - qemu_mutex_lock_iothread(); + bql_lock(); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); break; } else if (r == EXCP_ATOMIC) { - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_step_atomic(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); break; } } else if (cpu->stop) { diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 1b5729068202e461dfd430828a267ecf77bed600..813065c0ecb006aea50d4bc38d1bd48f7a355d4e 
100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu) /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); cpu->interrupt_request |= mask; diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 79a88f5fb7577a2b2f04b668911660b40f85e38e..1737bb3da5831a9ed9e6b05cef4021d4dd64ff8a 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) void cpu_interrupt(CPUState *cpu, int mask) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); cpu->interrupt_request |= mask; qatomic_set(&cpu->neg.icount_decr.u16.high, -1); } diff --git a/audio/audio.c b/audio/audio.c index 8d1e4ad92271b39893c4e6454cce4d66d36458fa..af0ae33fedb1e5407e66c2a51bb2f0881976de35 100644 --- a/audio/audio.c +++ b/audio/audio.c @@ -1683,7 +1683,7 @@ static const VMStateDescription vmstate_audio = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_audio_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() } }; @@ -1744,7 +1744,7 @@ static AudioState *audio_init(Audiodev *dev, Error **errp) if (driver) { done = !audio_driver_init(s, driver, dev, errp); } else { - error_setg(errp, "Unknown audio driver `%s'\n", drvname); + error_setg(errp, "Unknown audio driver `%s'", drvname); } if (!done) { goto out; diff --git a/audio/coreaudio.m b/audio/coreaudio.m index 8cd129a27d02fa645b2d5231ee97425062e0f1bd..ab632b9bbbbdc1757c4d1fd41703fac53b18baab 100644 --- a/audio/coreaudio.m +++ b/audio/coreaudio.m @@ -299,7 +299,7 @@ static ret_type glue(coreaudio_, name)args_decl \ #undef COREAUDIO_WRAPPER_FUNC /* - * callback to feed audiooutput buffer. called without iothread lock. + * callback to feed audiooutput buffer. called without BQL. * allowed to lock "buf_mutex", but disallowed to have any other locks. */ static OSStatus audioDeviceIOProc( @@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core) } } -/* called without iothread lock. */ +/* called without BQL. 
*/ static OSStatus handle_voice_change( AudioObjectID in_object_id, UInt32 in_number_addresses, @@ -547,7 +547,7 @@ static OSStatus handle_voice_change( { coreaudioVoiceOut *core = in_client_data; - qemu_mutex_lock_iothread(); + bql_lock(); if (core->outputDeviceID) { fini_out_device(core); @@ -557,7 +557,7 @@ static OSStatus handle_voice_change( update_device_playback_state(core); } - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } diff --git a/backends/Kconfig b/backends/Kconfig index f35abc16092808b1fe5b033a346908e2d66bff0b..2cb23f62fa1526cedafedcc99a032e098075b846 100644 --- a/backends/Kconfig +++ b/backends/Kconfig @@ -1 +1,5 @@ source tpm/Kconfig + +config IOMMUFD + bool + depends on VFIO diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c index a9d8cb0acd5dbdc39b16afba80cc02eb1d937793..be6c4d8e0ae7e5ebb8c96ae5da34539ddcd56d13 100644 --- a/backends/dbus-vmstate.c +++ b/backends/dbus-vmstate.c @@ -393,7 +393,7 @@ static const VMStateDescription dbus_vmstate = { .version_id = 0, .pre_save = dbus_vmstate_pre_save, .post_load = dbus_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_size, DBusVMState), VMSTATE_VBUFFER_ALLOC_UINT32(data, DBusVMState, 0, 0, data_size), VMSTATE_END_OF_LIST() diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c index 4e162d6789e86dc0de4ff3086b6e383fea059be4..735e2e1cf84670c891ba4571f09ec44256e64f82 100644 --- a/backends/hostmem-epc.c +++ b/backends/hostmem-epc.c @@ -17,31 +17,29 @@ #include "sysemu/hostmem.h" #include "hw/i386/hostmem-epc.h" -static void +static bool sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; int fd; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); if (fd < 0) { error_setg_errno(errp, errno, "failed to open /dev/sgx_vepc to alloc SGX EPC"); - return; + return false; } name = object_get_canonical_path(OBJECT(backend)); ram_flags = (backend->share ? 
RAM_SHARED : 0) | RAM_PROTECTED; - memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), - name, backend->size, ram_flags, - fd, 0, errp); - g_free(name); + return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, + backend->size, ram_flags, fd, 0, errp); } static void sgx_epc_backend_instance_init(Object *obj) diff --git a/backends/hostmem-file.c b/backends/hostmem-file.c index 361d4a8103ef82cba0994b24bc989a65798741ef..ac3e433cbddbca395472f929fc6da8228316ef0e 100644 --- a/backends/hostmem-file.c +++ b/backends/hostmem-file.c @@ -36,24 +36,25 @@ struct HostMemoryBackendFile { OnOffAuto rom; }; -static void +static bool file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { #ifndef CONFIG_POSIX error_setg(errp, "backend '%s' not supported on this host", object_get_typename(OBJECT(backend))); + return false; #else HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend); + g_autofree gchar *name = NULL; uint32_t ram_flags; - gchar *name; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } if (!fb->mem_path) { error_setg(errp, "mem-path property not set"); - return; + return false; } switch (fb->rom) { @@ -65,18 +66,18 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) if (!fb->readonly) { error_setg(errp, "property 'rom' = 'on' is not supported with" " 'readonly' = 'off'"); - return; + return false; } break; case ON_OFF_AUTO_OFF: if (fb->readonly && backend->share) { error_setg(errp, "property 'rom' = 'off' is incompatible with" " 'readonly' = 'on' and 'share' = 'on'"); - return; + return false; } break; default: - assert(false); + g_assert_not_reached(); } name = host_memory_backend_get_name(backend); @@ -86,10 +87,9 @@ file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; ram_flags |= fb->is_pmem ? RAM_PMEM : 0; ram_flags |= RAM_NAMED_FILE; - memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name, - backend->size, fb->align, ram_flags, - fb->mem_path, fb->offset, errp); - g_free(name); + return memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name, + backend->size, fb->align, ram_flags, + fb->mem_path, fb->offset, errp); #endif } diff --git a/backends/hostmem-memfd.c b/backends/hostmem-memfd.c index 3fc85c3db81bb71176cdfe65dbf0643e1504355b..3923ea9364d5ee993d758c96c6fc1ebf2fae89ec 100644 --- a/backends/hostmem-memfd.c +++ b/backends/hostmem-memfd.c @@ -31,17 +31,17 @@ struct HostMemoryBackendMemfd { bool seal; }; -static void +static bool memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { HostMemoryBackendMemfd *m = MEMORY_BACKEND_MEMFD(backend); + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; int fd; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } fd = qemu_memfd_create(TYPE_MEMORY_BACKEND_MEMFD, backend->size, @@ -49,15 +49,14 @@ memfd_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL : 0, errp); if (fd == -1) { - return; + return false; } name = host_memory_backend_get_name(backend); ram_flags = backend->share ? RAM_SHARED : 0; ram_flags |= backend->reserve ? 
0 : RAM_NORESERVE; - memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, - backend->size, ram_flags, fd, 0, errp); - g_free(name); + return memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), name, + backend->size, ram_flags, fd, 0, errp); } static bool diff --git a/backends/hostmem-ram.c b/backends/hostmem-ram.c index b8e55cdbd0f89078b01bb1575416173ce909526b..d121249f0f457edccdb24d10b6a79da4e0c026ff 100644 --- a/backends/hostmem-ram.c +++ b/backends/hostmem-ram.c @@ -16,23 +16,23 @@ #include "qemu/module.h" #include "qom/object_interfaces.h" -static void +static bool ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) { + g_autofree char *name = NULL; uint32_t ram_flags; - char *name; if (!backend->size) { error_setg(errp, "can't create backend with size 0"); - return; + return false; } name = host_memory_backend_get_name(backend); ram_flags = backend->share ? RAM_SHARED : 0; ram_flags |= backend->reserve ? 0 : RAM_NORESERVE; - memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), name, - backend->size, ram_flags, errp); - g_free(name); + return memory_region_init_ram_flags_nomigrate(&backend->mr, OBJECT(backend), + name, backend->size, + ram_flags, errp); } static void diff --git a/backends/hostmem.c b/backends/hostmem.c index 747e7838c031c42aaab91c159cbaa9b96f7e5013..30f69b2cb5e95fd48e7f4a1061b332f65fc39587 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -219,7 +219,6 @@ static bool host_memory_backend_get_prealloc(Object *obj, Error **errp) static void host_memory_backend_set_prealloc(Object *obj, bool value, Error **errp) { - Error *local_err = NULL; HostMemoryBackend *backend = MEMORY_BACKEND(obj); if (!backend->reserve && value) { @@ -237,10 +236,8 @@ static void host_memory_backend_set_prealloc(Object *obj, bool value, void *ptr = memory_region_get_ram_ptr(&backend->mr); uint64_t sz = memory_region_size(&backend->mr); - qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, - backend->prealloc_context, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads, + backend->prealloc_context, errp)) { return; } backend->prealloc = true; @@ -324,91 +321,86 @@ host_memory_backend_memory_complete(UserCreatable *uc, Error **errp) { HostMemoryBackend *backend = MEMORY_BACKEND(uc); HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc); - Error *local_err = NULL; void *ptr; uint64_t sz; - if (bc->alloc) { - bc->alloc(backend, &local_err); - if (local_err) { - goto out; - } + if (!bc->alloc) { + return; + } + if (!bc->alloc(backend, errp)) { + return; + } - ptr = memory_region_get_ram_ptr(&backend->mr); - sz = memory_region_size(&backend->mr); + ptr = memory_region_get_ram_ptr(&backend->mr); + sz = memory_region_size(&backend->mr); - if (backend->merge) { - qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE); - } - if (!backend->dump) { - qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP); - } + if (backend->merge) { + qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE); + } + if (!backend->dump) { + qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP); + } #ifdef CONFIG_NUMA - unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES); - /* lastbit == MAX_NODES means maxnode = 0 */ - unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1); - /* ensure policy won't be ignored in case memory is preallocated - * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so - * this doesn't catch hugepage case. 
*/ - unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE; - int mode = backend->policy; - - /* check for invalid host-nodes and policies and give more verbose - * error messages than mbind(). */ - if (maxnode && backend->policy == MPOL_DEFAULT) { - error_setg(errp, "host-nodes must be empty for policy default," - " or you should explicitly specify a policy other" - " than default"); - return; - } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) { - error_setg(errp, "host-nodes must be set for policy %s", - HostMemPolicy_str(backend->policy)); - return; - } + unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES); + /* lastbit == MAX_NODES means maxnode = 0 */ + unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1); + /* ensure policy won't be ignored in case memory is preallocated + * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so + * this doesn't catch hugepage case. */ + unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE; + int mode = backend->policy; + + /* check for invalid host-nodes and policies and give more verbose + * error messages than mbind(). */ + if (maxnode && backend->policy == MPOL_DEFAULT) { + error_setg(errp, "host-nodes must be empty for policy default," + " or you should explicitly specify a policy other" + " than default"); + return; + } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) { + error_setg(errp, "host-nodes must be set for policy %s", + HostMemPolicy_str(backend->policy)); + return; + } - /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1 - * as argument to mbind() due to an old Linux bug (feature?) which - * cuts off the last specified node. This means backend->host_nodes - * must have MAX_NODES+1 bits available. - */ - assert(sizeof(backend->host_nodes) >= - BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long)); - assert(maxnode <= MAX_NODES); + /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1 + * as argument to mbind() due to an old Linux bug (feature?) which + * cuts off the last specified node. This means backend->host_nodes + * must have MAX_NODES+1 bits available. + */ + assert(sizeof(backend->host_nodes) >= + BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long)); + assert(maxnode <= MAX_NODES); #ifdef HAVE_NUMA_HAS_PREFERRED_MANY - if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) { - /* - * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below - * silently picks the first node. - */ - mode = MPOL_PREFERRED_MANY; - } + if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) { + /* + * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below + * silently picks the first node. + */ + mode = MPOL_PREFERRED_MANY; + } #endif - if (maxnode && - mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) { - if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) { - error_setg_errno(errp, errno, - "cannot bind memory to host NUMA nodes"); - return; - } + if (maxnode && + mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) { + if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) { + error_setg_errno(errp, errno, + "cannot bind memory to host NUMA nodes"); + return; } + } #endif - /* Preallocate memory after the NUMA policy has been instantiated. - * This is necessary to guarantee memory is allocated with - * specified NUMA policy in place. 
- */ - if (backend->prealloc) { - qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz, - backend->prealloc_threads, - backend->prealloc_context, &local_err); - if (local_err) { - goto out; - } - } + /* Preallocate memory after the NUMA policy has been instantiated. + * This is necessary to guarantee memory is allocated with + * specified NUMA policy in place. + */ + if (backend->prealloc && !qemu_prealloc_mem(memory_region_get_fd(&backend->mr), + ptr, sz, + backend->prealloc_threads, + backend->prealloc_context, errp)) { + return; } -out: - error_propagate(errp, local_err); } static bool diff --git a/backends/iommufd.c b/backends/iommufd.c new file mode 100644 index 0000000000000000000000000000000000000000..1ef683c7b080e688af46c5b98e61eafa73e39895 --- /dev/null +++ b/backends/iommufd.c @@ -0,0 +1,233 @@ +/* + * iommufd container backend + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "sysemu/iommufd.h" +#include "qapi/error.h" +#include "qapi/qmp/qerror.h" +#include "qemu/module.h" +#include "qom/object_interfaces.h" +#include "qemu/error-report.h" +#include "monitor/monitor.h" +#include "trace.h" +#include +#include + +static void iommufd_backend_init(Object *obj) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + + be->fd = -1; + be->users = 0; + be->owned = true; +} + +static void iommufd_backend_finalize(Object *obj) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + + if (be->owned) { + close(be->fd); + be->fd = -1; + } +} + +static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(obj); + int fd = -1; + + fd = monitor_fd_param(monitor_cur(), str, errp); + if (fd == -1) { + error_prepend(errp, "Could not parse remote object fd %s:", str); + return; + } + be->fd = fd; + be->owned = false; + trace_iommu_backend_set_fd(be->fd); +} + +static bool iommufd_backend_can_be_deleted(UserCreatable *uc) +{ + IOMMUFDBackend *be = IOMMUFD_BACKEND(uc); + + return !be->users; +} + +static void iommufd_backend_class_init(ObjectClass *oc, void *data) +{ + UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); + + ucc->can_be_deleted = iommufd_backend_can_be_deleted; + + object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd); +} + +int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp) +{ + int fd, ret = 0; + + if (be->owned && !be->users) { + fd = qemu_open_old("/dev/iommu", O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, "/dev/iommu opening failed"); + ret = fd; + goto out; + } + be->fd = fd; + } + be->users++; +out: + trace_iommufd_backend_connect(be->fd, be->owned, + be->users, ret); + return ret; +} + +void iommufd_backend_disconnect(IOMMUFDBackend *be) +{ + if (!be->users) { + goto out; + } + be->users--; + if (!be->users && be->owned) { + close(be->fd); + be->fd = -1; + } +out: + trace_iommufd_backend_disconnect(be->fd, be->users); +} + +int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, + Error **errp) +{ + int ret, fd = be->fd; + struct iommu_ioas_alloc alloc_data = { + .size = sizeof(alloc_data), + .flags = 0, + }; + + ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data); + if (ret) { + error_setg_errno(errp, errno, "Failed to allocate ioas"); + return ret; + } + + *ioas_id = alloc_data.out_ioas_id; + trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret); + + return ret; +} + +void iommufd_backend_free_id(IOMMUFDBackend *be, 
uint32_t id) +{ + int ret, fd = be->fd; + struct iommu_destroy des = { + .size = sizeof(des), + .id = id, + }; + + ret = ioctl(fd, IOMMU_DESTROY, &des); + trace_iommufd_backend_free_id(fd, id, ret); + if (ret) { + error_report("Failed to free id: %u %m", id); + } +} + +int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) +{ + int ret, fd = be->fd; + struct iommu_ioas_map map = { + .size = sizeof(map), + .flags = IOMMU_IOAS_MAP_READABLE | + IOMMU_IOAS_MAP_FIXED_IOVA, + .ioas_id = ioas_id, + .__reserved = 0, + .user_va = (uintptr_t)vaddr, + .iova = iova, + .length = size, + }; + + if (!readonly) { + map.flags |= IOMMU_IOAS_MAP_WRITEABLE; + } + + ret = ioctl(fd, IOMMU_IOAS_MAP, &map); + trace_iommufd_backend_map_dma(fd, ioas_id, iova, size, + vaddr, readonly, ret); + if (ret) { + ret = -errno; + + /* TODO: Not support mapping hardware PCI BAR region for now. */ + if (errno == EFAULT) { + warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?"); + } else { + error_report("IOMMU_IOAS_MAP failed: %m"); + } + } + return ret; +} + +int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size) +{ + int ret, fd = be->fd; + struct iommu_ioas_unmap unmap = { + .size = sizeof(unmap), + .ioas_id = ioas_id, + .iova = iova, + .length = size, + }; + + ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap); + /* + * IOMMUFD takes mapping as some kind of object, unmapping + * nonexistent mapping is treated as deleting a nonexistent + * object and return ENOENT. This is different from legacy + * backend which allows it. vIOMMU may trigger a lot of + * redundant unmapping, to avoid flush the log, treat them + * as succeess for IOMMUFD just like legacy backend. + */ + if (ret && errno == ENOENT) { + trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret); + ret = 0; + } else { + trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret); + } + + if (ret) { + ret = -errno; + error_report("IOMMU_IOAS_UNMAP failed: %m"); + } + return ret; +} + +static const TypeInfo iommufd_backend_info = { + .name = TYPE_IOMMUFD_BACKEND, + .parent = TYPE_OBJECT, + .instance_size = sizeof(IOMMUFDBackend), + .instance_init = iommufd_backend_init, + .instance_finalize = iommufd_backend_finalize, + .class_size = sizeof(IOMMUFDBackendClass), + .class_init = iommufd_backend_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_USER_CREATABLE }, + { } + } +}; + +static void register_types(void) +{ + type_register_static(&iommufd_backend_info); +} + +type_init(register_types); diff --git a/backends/meson.build b/backends/meson.build index 914c7c4afb905cfe710ad23dd1ee42907f6d1679..8b2b111497f7c6cd5cb6ca50ec6d1474a543fc9f 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -10,9 +10,13 @@ system_ss.add([files( 'confidential-guest-support.c', ), numa]) -system_ss.add(when: 'CONFIG_POSIX', if_true: files('rng-random.c')) -system_ss.add(when: 'CONFIG_POSIX', if_true: files('hostmem-file.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files('hostmem-memfd.c')) +if host_os != 'windows' + system_ss.add(files('rng-random.c')) + system_ss.add(files('hostmem-file.c')) +endif +if host_os == 'linux' + system_ss.add(files('hostmem-memfd.c')) +endif if keyutils.found() system_ss.add(keyutils, files('cryptodev-lkcf.c')) endif @@ -20,6 +24,7 @@ if have_vhost_user system_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c')) endif system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c')) 
+system_ss.add(when: 'CONFIG_IOMMUFD', if_true: files('iommufd.c')) if have_vhost_user_crypto system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c')) endif diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index f7f1b4ad7a80588c747883fe212d1207687728ed..5a8fba9bded9cfd5319395208464349f109b5c6e 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -904,7 +904,7 @@ static void tpm_emulator_vm_state_change(void *opaque, bool running, trace_tpm_emulator_vm_state_change(running, state); - if (!running || state != RUN_STATE_RUNNING || !tpm_emu->relock_storage) { + if (!running || !tpm_emu->relock_storage) { return; } @@ -939,7 +939,7 @@ static const VMStateDescription vmstate_tpm_emulator = { .version_id = 0, .pre_save = tpm_emulator_pre_save, .post_load = tpm_emulator_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state_blobs.permanent_flags, TPMEmulator), VMSTATE_UINT32(state_blobs.permanent.size, TPMEmulator), VMSTATE_VBUFFER_ALLOC_UINT32(state_blobs.permanent.buffer, diff --git a/backends/trace-events b/backends/trace-events index 652eb76a5723e2053fe97338c481309c58284d6a..d45c6e31a67ed66d94787f60eb08a525cf6ff68b 100644 --- a/backends/trace-events +++ b/backends/trace-events @@ -5,3 +5,13 @@ dbus_vmstate_pre_save(void) dbus_vmstate_post_load(int version_id) "version_id: %d" dbus_vmstate_loading(const char *id) "id: %s" dbus_vmstate_saving(const char *id) "id: %s" + +# iommufd.c +iommufd_backend_connect(int fd, bool owned, uint32_t users, int ret) "fd=%d owned=%d users=%d (%d)" +iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d" +iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d" +iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)" +iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" +iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)" +iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas, int ret) " iommufd=%d ioas=%d (%d)" +iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)" diff --git a/block.c b/block.c index bfb0861ec61d2965e1ebd1d3d05976ab25a76c7f..a097772238ddbb634b3563e157b6f42a07662e17 100644 --- a/block.c +++ b/block.c @@ -1616,16 +1616,10 @@ out: g_free(gen_node_name); } -/* - * The caller must always hold @bs AioContext lock, because this function calls - * bdrv_refresh_total_sectors() which polls when called from non-coroutine - * context. 
- */ static int no_coroutine_fn GRAPH_UNLOCKED bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, QDict *options, int open_flags, Error **errp) { - AioContext *ctx; Error *local_err = NULL; int i, ret; GLOBAL_STATE_CODE(); @@ -1673,21 +1667,15 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, bs->supported_read_flags |= BDRV_REQ_REGISTERED_BUF; bs->supported_write_flags |= BDRV_REQ_REGISTERED_BUF; - /* Get the context after .bdrv_open, it can change the context */ - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - ret = bdrv_refresh_total_sectors(bs, bs->total_sectors); if (ret < 0) { error_setg_errno(errp, -ret, "Could not refresh total sector count"); - aio_context_release(ctx); return ret; } bdrv_graph_rdlock_main_loop(); bdrv_refresh_limits(bs, NULL, &local_err); bdrv_graph_rdunlock_main_loop(); - aio_context_release(ctx); if (local_err) { error_propagate(errp, local_err); @@ -1708,12 +1696,12 @@ bdrv_open_driver(BlockDriverState *bs, BlockDriver *drv, const char *node_name, open_failed: bs->drv = NULL; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); if (bs->file != NULL) { bdrv_unref_child(bs, bs->file); assert(!bs->file); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(bs->opaque); bs->opaque = NULL; @@ -2908,7 +2896,7 @@ uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm) * Replaces the node that a BdrvChild points to without updating permissions. * * If @new_bs is non-NULL, the parent of @child must already be drained through - * @child and the caller must hold the AioContext lock for @new_bs. + * @child. */ static void GRAPH_WRLOCK bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs) @@ -3048,9 +3036,8 @@ static TransactionActionDrv bdrv_attach_child_common_drv = { * * Returns new created child. * - * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. */ static BdrvChild * GRAPH_WRLOCK bdrv_attach_child_common(BlockDriverState *child_bs, @@ -3062,7 +3049,7 @@ bdrv_attach_child_common(BlockDriverState *child_bs, Transaction *tran, Error **errp) { BdrvChild *new_child; - AioContext *parent_ctx, *new_child_ctx; + AioContext *parent_ctx; AioContext *child_ctx = bdrv_get_aio_context(child_bs); assert(child_class->get_parent_desc); @@ -3114,12 +3101,6 @@ bdrv_attach_child_common(BlockDriverState *child_bs, } } - new_child_ctx = bdrv_get_aio_context(child_bs); - if (new_child_ctx != child_ctx) { - aio_context_release(child_ctx); - aio_context_acquire(new_child_ctx); - } - bdrv_ref(child_bs); /* * Let every new BdrvChild start with a drained parent. Inserting the child @@ -3149,20 +3130,14 @@ bdrv_attach_child_common(BlockDriverState *child_bs, }; tran_add(tran, &bdrv_attach_child_common_drv, s); - if (new_child_ctx != child_ctx) { - aio_context_release(new_child_ctx); - aio_context_acquire(child_ctx); - } - return new_child; } /* * Function doesn't update permissions, caller is responsible for this. * - * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. 
+ * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. @@ -3202,9 +3177,6 @@ bdrv_attach_child_noperm(BlockDriverState *parent_bs, * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. - * - * The caller must hold the AioContext lock @child_bs, but not that of @ctx - * (unless @child_bs is already in @ctx). */ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name, @@ -3244,9 +3216,6 @@ out: * * On failure NULL is returned, errp is set and the reference to * child_bs is also dropped. - * - * If @parent_bs and @child_bs are in different AioContexts, the caller must - * hold the AioContext lock for @child_bs, but not for @parent_bs. */ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs, BlockDriverState *child_bs, @@ -3436,9 +3405,8 @@ static BdrvChildRole bdrv_backing_role(BlockDriverState *bs) * * Function doesn't update permissions, caller is responsible for this. * - * The caller must hold the AioContext lock for @child_bs. Both @parent_bs and - * @child_bs can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @parent_bs and @child_bs can move to a different AioContext in this + * function. * * After calling this function, the transaction @tran may only be completed * while holding a writer lock for the graph. @@ -3531,9 +3499,8 @@ out: } /* - * The caller must hold the AioContext lock for @backing_hd. Both @bs and - * @backing_hd can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * Both @bs and @backing_hd can move to a different AioContext in this + * function. * * If a backing child is already present (i.e. we're detaching a node), that * child node must be drained. @@ -3575,9 +3542,9 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, bdrv_ref(drain_bs); bdrv_drained_begin(drain_bs); - bdrv_graph_wrlock(backing_hd); + bdrv_graph_wrlock(); ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp); - bdrv_graph_wrunlock(backing_hd); + bdrv_graph_wrunlock(); bdrv_drained_end(drain_bs); bdrv_unref(drain_bs); @@ -3592,8 +3559,6 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, * itself, all options starting with "${bdref_key}." are considered part of the * BlockdevRef. * - * The caller must hold the main AioContext lock. - * * TODO Can this be unified with bdrv_open_image()? */ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, @@ -3605,7 +3570,6 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, int ret = 0; bool implicit_backing = false; BlockDriverState *backing_hd; - AioContext *backing_hd_ctx; QDict *options; QDict *tmp_parent_options = NULL; Error *local_err = NULL; @@ -3691,11 +3655,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, /* Hook up the backing file link; drop our reference, bs owns the * backing_hd reference now */ - backing_hd_ctx = bdrv_get_aio_context(backing_hd); - aio_context_acquire(backing_hd_ctx); ret = bdrv_set_backing_hd(bs, backing_hd, errp); bdrv_unref(backing_hd); - aio_context_release(backing_hd_ctx); if (ret < 0) { goto free_exit; @@ -3767,9 +3728,7 @@ done: * * The BlockdevRef will be removed from the options QDict. 
* - * The caller must hold the lock of the main AioContext and no other AioContext. - * @parent can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * @parent can move to a different AioContext in this function. */ BdrvChild *bdrv_open_child(const char *filename, QDict *options, const char *bdref_key, @@ -3780,7 +3739,6 @@ BdrvChild *bdrv_open_child(const char *filename, { BlockDriverState *bs; BdrvChild *child; - AioContext *ctx; GLOBAL_STATE_CODE(); @@ -3790,13 +3748,10 @@ BdrvChild *bdrv_open_child(const char *filename, return NULL; } - bdrv_graph_wrlock(NULL); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); + bdrv_graph_wrlock(); child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, errp); - aio_context_release(ctx); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); return child; } @@ -3804,9 +3759,7 @@ BdrvChild *bdrv_open_child(const char *filename, /* * Wrapper on bdrv_open_child() for most popular case: open primary child of bs. * - * The caller must hold the lock of the main AioContext and no other AioContext. - * @parent can move to a different AioContext in this function. Callers must - * make sure that their AioContext locking is still correct after this. + * @parent can move to a different AioContext in this function. */ int bdrv_open_file_child(const char *filename, QDict *options, const char *bdref_key, @@ -3881,7 +3834,6 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, int64_t total_size; QemuOpts *opts = NULL; BlockDriverState *bs_snapshot = NULL; - AioContext *ctx = bdrv_get_aio_context(bs); int ret; GLOBAL_STATE_CODE(); @@ -3890,9 +3842,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, instead of opening 'filename' directly */ /* Get the required size from the image */ - aio_context_acquire(ctx); total_size = bdrv_getlength(bs); - aio_context_release(ctx); if (total_size < 0) { error_setg_errno(errp, -total_size, "Could not get image size"); @@ -3927,10 +3877,7 @@ static BlockDriverState *bdrv_append_temp_snapshot(BlockDriverState *bs, goto out; } - aio_context_acquire(ctx); ret = bdrv_append(bs_snapshot, bs, errp); - aio_context_release(ctx); - if (ret < 0) { bs_snapshot = NULL; goto out; @@ -3955,8 +3902,6 @@ out: * The reference parameter may be used to specify an existing block device which * should be opened. If specified, neither options nor a filename may be given, * nor can an existing BDS be reused (that is, *pbs has to be NULL). - * - * The caller must always hold the main AioContext lock. */ static BlockDriverState * no_coroutine_fn bdrv_open_inherit(const char *filename, const char *reference, QDict *options, @@ -3974,7 +3919,6 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, Error *local_err = NULL; QDict *snapshot_options = NULL; int snapshot_flags = 0; - AioContext *ctx = qemu_get_aio_context(); assert(!child_class || !flags); assert(!child_class == !parent); @@ -4115,12 +4059,10 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, /* Not requesting BLK_PERM_CONSISTENT_READ because we're only * looking at the header to guess the image format. This works even * in cases where a guest would not see a consistent state. 
*/ - ctx = bdrv_get_aio_context(file_bs); - aio_context_acquire(ctx); + AioContext *ctx = bdrv_get_aio_context(file_bs); file = blk_new(ctx, 0, BLK_PERM_ALL); blk_insert_bs(file, file_bs, &local_err); bdrv_unref(file_bs); - aio_context_release(ctx); if (local_err) { goto fail; @@ -4167,13 +4109,8 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, goto fail; } - /* The AioContext could have changed during bdrv_open_common() */ - ctx = bdrv_get_aio_context(bs); - if (file) { - aio_context_acquire(ctx); blk_unref(file); - aio_context_release(ctx); file = NULL; } @@ -4231,16 +4168,13 @@ bdrv_open_inherit(const char *filename, const char *reference, QDict *options, * (snapshot_bs); thus, we have to drop the strong reference to bs * (which we obtained by calling bdrv_new()). bs will not be deleted, * though, because the overlay still has a reference to it. */ - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); bs = snapshot_bs; } return bs; fail: - aio_context_acquire(ctx); blk_unref(file); qobject_unref(snapshot_options); qobject_unref(bs->explicit_options); @@ -4249,21 +4183,17 @@ fail: bs->options = NULL; bs->explicit_options = NULL; bdrv_unref(bs); - aio_context_release(ctx); error_propagate(errp, local_err); return NULL; close_and_fail: - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); qobject_unref(snapshot_options); qobject_unref(options); error_propagate(errp, local_err); return NULL; } -/* The caller must always hold the main AioContext lock. */ BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp) { @@ -4540,12 +4470,7 @@ void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue) if (bs_queue) { BlockReopenQueueEntry *bs_entry, *next; QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { - AioContext *ctx = bdrv_get_aio_context(bs_entry->state.bs); - - aio_context_acquire(ctx); bdrv_drained_end(bs_entry->state.bs); - aio_context_release(ctx); - qobject_unref(bs_entry->state.explicit_options); qobject_unref(bs_entry->state.options); g_free(bs_entry); @@ -4577,7 +4502,6 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) { int ret = -1; BlockReopenQueueEntry *bs_entry, *next; - AioContext *ctx; Transaction *tran = tran_new(); g_autoptr(GSList) refresh_list = NULL; @@ -4586,10 +4510,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) GLOBAL_STATE_CODE(); QTAILQ_FOREACH(bs_entry, bs_queue, entry) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); ret = bdrv_flush(bs_entry->state.bs); - aio_context_release(ctx); if (ret < 0) { error_setg_errno(errp, -ret, "Error flushing drive"); goto abort; @@ -4598,10 +4519,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) QTAILQ_FOREACH(bs_entry, bs_queue, entry) { assert(bs_entry->state.bs->quiesce_counter > 0); - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); ret = bdrv_reopen_prepare(&bs_entry->state, bs_queue, tran, errp); - aio_context_release(ctx); if (ret < 0) { goto abort; } @@ -4644,24 +4562,18 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) * to first element. 
*/ QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); bdrv_reopen_commit(&bs_entry->state); - aio_context_release(ctx); } - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tran_commit(tran); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) { BlockDriverState *bs = bs_entry->state.bs; if (bs->drv->bdrv_reopen_commit_post) { - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bs->drv->bdrv_reopen_commit_post(&bs_entry->state); - aio_context_release(ctx); } } @@ -4669,16 +4581,13 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp) goto cleanup; abort: - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tran_abort(tran); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) { if (bs_entry->prepared) { - ctx = bdrv_get_aio_context(bs_entry->state.bs); - aio_context_acquire(ctx); bdrv_reopen_abort(&bs_entry->state); - aio_context_release(ctx); } } @@ -4691,24 +4600,13 @@ cleanup: int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts, Error **errp) { - AioContext *ctx = bdrv_get_aio_context(bs); BlockReopenQueue *queue; - int ret; GLOBAL_STATE_CODE(); queue = bdrv_reopen_queue(NULL, bs, opts, keep_old_opts); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } - ret = bdrv_reopen_multiple(queue, errp); - - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } - - return ret; + return bdrv_reopen_multiple(queue, errp); } int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, @@ -4743,10 +4641,7 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only, * * Return 0 on success, otherwise return < 0 and set @errp. * - * The caller must hold the AioContext lock of @reopen_state->bs. * @reopen_state->bs can move to a different AioContext in this function. - * Callers must make sure that their AioContext locking is still correct after - * this. */ static int GRAPH_UNLOCKED bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, @@ -4760,7 +4655,6 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, const char *child_name = is_backing ? "backing" : "file"; QObject *value; const char *str; - AioContext *ctx, *old_ctx; bool has_child; int ret; @@ -4844,25 +4738,13 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state, bdrv_drained_begin(old_child_bs); } - old_ctx = bdrv_get_aio_context(bs); - ctx = bdrv_get_aio_context(new_child_bs); - if (old_ctx != ctx) { - aio_context_release(old_ctx); - aio_context_acquire(ctx); - } - bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(new_child_bs); + bdrv_graph_wrlock(); ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing, tran, errp); - bdrv_graph_wrunlock_ctx(ctx); - - if (old_ctx != ctx) { - aio_context_release(ctx); - aio_context_acquire(old_ctx); - } + bdrv_graph_wrunlock(); if (old_child_bs) { bdrv_drained_end(old_child_bs); @@ -4892,8 +4774,6 @@ out_rdlock: * It is the responsibility of the caller to then call the abort() or * commit() for any other BDS that have been left in a prepare() state * - * The caller must hold the AioContext lock of @reopen_state->bs. - * * After calling this function, the transaction @change_child_tran may only be * completed while holding a writer lock for the graph. 
*/ @@ -5209,14 +5089,14 @@ static void bdrv_close(BlockDriverState *bs) bs->drv = NULL; } - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(child, &bs->children, next, next) { bdrv_unref_child(bs, child); } assert(!bs->backing); assert(!bs->file); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); g_free(bs->opaque); bs->opaque = NULL; @@ -5509,9 +5389,9 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp) bdrv_graph_rdunlock_main_loop(); bdrv_drained_begin(child_bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); ret = bdrv_replace_node_common(bs, child_bs, true, true, errp); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); bdrv_drained_end(child_bs); return ret; @@ -5528,8 +5408,6 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp) * child. * * This function does not create any image files. - * - * The caller must hold the AioContext lock for @bs_top. */ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, Error **errp) @@ -5537,7 +5415,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, int ret; BdrvChild *child; Transaction *tran = tran_new(); - AioContext *old_context, *new_context = NULL; GLOBAL_STATE_CODE(); @@ -5545,23 +5422,10 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, assert(!bs_new->backing); bdrv_graph_rdunlock_main_loop(); - old_context = bdrv_get_aio_context(bs_top); bdrv_drained_begin(bs_top); - - /* - * bdrv_drained_begin() requires that only the AioContext of the drained - * node is locked, and at this point it can still differ from the AioContext - * of bs_top. - */ - new_context = bdrv_get_aio_context(bs_new); - aio_context_release(old_context); - aio_context_acquire(new_context); bdrv_drained_begin(bs_new); - aio_context_release(new_context); - aio_context_acquire(old_context); - new_context = NULL; - bdrv_graph_wrlock(bs_top); + bdrv_graph_wrlock(); child = bdrv_attach_child_noperm(bs_new, bs_top, "backing", &child_of_bds, bdrv_backing_role(bs_new), @@ -5571,18 +5435,6 @@ int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top, goto out; } - /* - * bdrv_attach_child_noperm could change the AioContext of bs_top and - * bs_new, but at least they are in the same AioContext now. This is the - * AioContext that we need to lock for the rest of the function. 
- */ - new_context = bdrv_get_aio_context(bs_top); - - if (old_context != new_context) { - aio_context_release(old_context); - aio_context_acquire(new_context); - } - ret = bdrv_replace_node_noperm(bs_top, bs_new, true, tran, errp); if (ret < 0) { goto out; @@ -5593,16 +5445,11 @@ out: tran_finalize(tran, ret); bdrv_refresh_limits(bs_top, NULL, NULL); - bdrv_graph_wrunlock(bs_top); + bdrv_graph_wrunlock(); bdrv_drained_end(bs_top); bdrv_drained_end(bs_new); - if (new_context && old_context != new_context) { - aio_context_release(new_context); - aio_context_acquire(old_context); - } - return ret; } @@ -5620,7 +5467,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, bdrv_ref(old_bs); bdrv_drained_begin(old_bs); bdrv_drained_begin(new_bs); - bdrv_graph_wrlock(new_bs); + bdrv_graph_wrlock(); bdrv_replace_child_tran(child, new_bs, tran); @@ -5631,7 +5478,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs, tran_finalize(tran, ret); - bdrv_graph_wrunlock(new_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(old_bs); bdrv_drained_end(new_bs); bdrv_unref(old_bs); @@ -5667,9 +5514,8 @@ static void bdrv_delete(BlockDriverState *bs) * after the call (even on failure), so if the caller intends to reuse the * dictionary, it needs to use qobject_ref() before calling bdrv_open. * - * The caller holds the AioContext lock for @bs. It must make sure that @bs - * stays in the same AioContext, i.e. @options must not refer to nodes in a - * different AioContext. + * The caller must make sure that @bs stays in the same AioContext, i.e. + * @options must not refer to nodes in a different AioContext. */ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, int flags, Error **errp) @@ -5697,12 +5543,8 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, GLOBAL_STATE_CODE(); - aio_context_release(ctx); - aio_context_acquire(qemu_get_aio_context()); new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - aio_context_acquire(ctx); assert(bdrv_get_aio_context(bs) == ctx); options = NULL; /* bdrv_new_open_driver() eats options */ @@ -5718,9 +5560,9 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, bdrv_ref(bs); bdrv_drained_begin(bs); bdrv_drained_begin(new_node_bs); - bdrv_graph_wrlock(new_node_bs); + bdrv_graph_wrlock(); ret = bdrv_replace_node(bs, new_node_bs, errp); - bdrv_graph_wrunlock(new_node_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(new_node_bs); bdrv_drained_end(bs); bdrv_unref(bs); @@ -5975,7 +5817,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, bdrv_ref(top); bdrv_drained_begin(base); - bdrv_graph_wrlock(base); + bdrv_graph_wrlock(); if (!top->drv || !base->drv) { goto exit_wrlock; @@ -6015,7 +5857,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, * That's a FIXME. 
*/ bdrv_replace_node_common(top, base, false, false, &local_err); - bdrv_graph_wrunlock(base); + bdrv_graph_wrunlock(); if (local_err) { error_report_err(local_err); @@ -6052,7 +5894,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base, goto exit; exit_wrlock: - bdrv_graph_wrunlock(base); + bdrv_graph_wrunlock(); exit: bdrv_drained_end(base); bdrv_unref(top); @@ -7037,12 +6879,9 @@ void bdrv_activate_all(Error **errp) GRAPH_RDLOCK_GUARD_MAINLOOP(); for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); int ret; - aio_context_acquire(aio_context); ret = bdrv_activate(bs, errp); - aio_context_release(aio_context); if (ret < 0) { bdrv_next_cleanup(&it); return; @@ -7137,20 +6976,10 @@ int bdrv_inactivate_all(void) BlockDriverState *bs = NULL; BdrvNextIterator it; int ret = 0; - GSList *aio_ctxs = NULL, *ctx; GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - if (!g_slist_find(aio_ctxs, aio_context)) { - aio_ctxs = g_slist_prepend(aio_ctxs, aio_context); - aio_context_acquire(aio_context); - } - } - for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { /* Nodes with BDS parents are covered by recursion from the last * parent that gets inactivated. Don't inactivate them a second @@ -7161,17 +6990,10 @@ int bdrv_inactivate_all(void) ret = bdrv_inactivate_recurse(bs); if (ret < 0) { bdrv_next_cleanup(&it); - goto out; + break; } } -out: - for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) { - AioContext *aio_context = ctx->data; - aio_context_release(aio_context); - } - g_slist_free(aio_ctxs); - return ret; } @@ -7257,11 +7079,8 @@ void bdrv_unref(BlockDriverState *bs) static void bdrv_schedule_unref_bh(void *opaque) { BlockDriverState *bs = opaque; - AioContext *ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); } /* @@ -7398,8 +7217,6 @@ void bdrv_img_create(const char *filename, const char *fmt, return; } - aio_context_acquire(qemu_get_aio_context()); - /* Create parameter list */ create_opts = qemu_opts_append(create_opts, drv->create_opts); create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); @@ -7549,7 +7366,6 @@ out: qemu_opts_del(opts); qemu_opts_free(create_opts); error_propagate(errp, local_err); - aio_context_release(qemu_get_aio_context()); } AioContext *bdrv_get_aio_context(BlockDriverState *bs) @@ -7583,33 +7399,6 @@ void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx) bdrv_dec_in_flight(bs); } -void coroutine_fn bdrv_co_lock(BlockDriverState *bs) -{ - AioContext *ctx = bdrv_get_aio_context(bs); - - /* In the main thread, bs->aio_context won't change concurrently */ - assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - - /* - * We're in coroutine context, so we already hold the lock of the main - * loop AioContext. Don't lock it twice to avoid deadlocks. 
- */ - assert(qemu_in_coroutine()); - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } -} - -void coroutine_fn bdrv_co_unlock(BlockDriverState *bs) -{ - AioContext *ctx = bdrv_get_aio_context(bs); - - assert(qemu_in_coroutine()); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } -} - static void bdrv_do_remove_aio_context_notifier(BdrvAioNotifier *ban) { GLOBAL_STATE_CODE(); @@ -7728,21 +7517,8 @@ static void bdrv_set_aio_context_commit(void *opaque) BdrvStateSetAioContext *state = (BdrvStateSetAioContext *) opaque; BlockDriverState *bs = (BlockDriverState *) state->bs; AioContext *new_context = state->new_ctx; - AioContext *old_context = bdrv_get_aio_context(bs); - /* - * Take the old AioContex when detaching it from bs. - * At this point, new_context lock is already acquired, and we are now - * also taking old_context. This is safe as long as bdrv_detach_aio_context - * does not call AIO_POLL_WHILE(). - */ - if (old_context != qemu_get_aio_context()) { - aio_context_acquire(old_context); - } bdrv_detach_aio_context(bs); - if (old_context != qemu_get_aio_context()) { - aio_context_release(old_context); - } bdrv_attach_aio_context(bs, new_context); } @@ -7757,10 +7533,6 @@ static TransactionActionDrv set_aio_context = { * * Must be called from the main AioContext. * - * The caller must own the AioContext lock for the old AioContext of bs, but it - * must not own the AioContext lock for new_context (unless new_context is the - * same as the current context of bs). - * * @visited will accumulate all visited BdrvChild objects. The caller is * responsible for freeing the list afterwards. */ @@ -7813,13 +7585,6 @@ static bool bdrv_change_aio_context(BlockDriverState *bs, AioContext *ctx, * * If ignore_child is not NULL, that child (and its subgraph) will not * be touched. - * - * This function still requires the caller to take the bs current - * AioContext lock, otherwise draining will fail since AIO_WAIT_WHILE - * assumes the lock is always held if bs is in another AioContext. - * For the same reason, it temporarily also holds the new AioContext, since - * bdrv_drained_end calls BDRV_POLL_WHILE that assumes the lock is taken too. - * Therefore the new AioContext lock must not be taken by the caller. */ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, BdrvChild *ignore_child, Error **errp) @@ -7827,7 +7592,6 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, Transaction *tran; GHashTable *visited; int ret; - AioContext *old_context = bdrv_get_aio_context(bs); GLOBAL_STATE_CODE(); /* @@ -7846,8 +7610,8 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, /* * Linear phase: go through all callbacks collected in the transaction. - * Run all callbacks collected in the recursion to switch all nodes - * AioContext lock (transaction commit), or undo all changes done in the + * Run all callbacks collected in the recursion to switch every node's + * AioContext (transaction commit), or undo all changes done in the * recursion (transaction abort). */ @@ -7857,34 +7621,7 @@ int bdrv_try_change_aio_context(BlockDriverState *bs, AioContext *ctx, return -EPERM; } - /* - * Release old AioContext, it won't be needed anymore, as all - * bdrv_drained_begin() have been called already. 
- */ - if (qemu_get_aio_context() != old_context) { - aio_context_release(old_context); - } - - /* - * Acquire new AioContext since bdrv_drained_end() is going to be called - * after we switched all nodes in the new AioContext, and the function - * assumes that the lock of the bs is always taken. - */ - if (qemu_get_aio_context() != ctx) { - aio_context_acquire(ctx); - } - tran_commit(tran); - - if (qemu_get_aio_context() != ctx) { - aio_context_release(ctx); - } - - /* Re-acquire the old AioContext, since the caller takes and releases it. */ - if (qemu_get_aio_context() != old_context) { - aio_context_acquire(old_context); - } - return 0; } @@ -8006,7 +7743,6 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, const char *node_name, Error **errp) { BlockDriverState *to_replace_bs = bdrv_find_node(node_name); - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -8015,12 +7751,8 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, return NULL; } - aio_context = bdrv_get_aio_context(to_replace_bs); - aio_context_acquire(aio_context); - if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) { - to_replace_bs = NULL; - goto out; + return NULL; } /* We don't want arbitrary node of the BDS chain to be replaced only the top @@ -8033,12 +7765,9 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs, "because it cannot be guaranteed that doing so would not " "lead to an abrupt change of visible data", node_name, parent_bs->node_name); - to_replace_bs = NULL; - goto out; + return NULL; } -out: - aio_context_release(aio_context); return to_replace_bs; } diff --git a/block/backup.c b/block/backup.c index 8aae5836d760a2218514d3d805019165f9868743..ec29d6b810858affea550877271028bdd9010019 100644 --- a/block/backup.c +++ b/block/backup.c @@ -496,10 +496,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, block_copy_set_speed(bcs, speed); /* Required permissions are taken by copy-before-write filter target */ - bdrv_graph_wrlock(target); + bdrv_graph_wrlock(); block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL, &error_abort); - bdrv_graph_wrunlock(target); + bdrv_graph_wrunlock(); return &job->common; diff --git a/block/blklogwrites.c b/block/blklogwrites.c index 3678f6cf4254185a75f3c17d6560087cec6f2ad7..7207b2e7578006131e362fb98379055224ba06f4 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -251,9 +251,9 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, ret = 0; fail_log: if (ret < 0) { - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->log_file = NULL; } fail: @@ -265,10 +265,10 @@ static void blk_log_writes_close(BlockDriverState *bs) { BDRVBlkLogWritesState *s = bs->opaque; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->log_file); s->log_file = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int64_t coroutine_fn GRAPH_RDLOCK diff --git a/block/blkverify.c b/block/blkverify.c index 9b17c46644358dd0675567512e4d8fcc2cd294a8..ec45d8335ed80745101ea95f24e9c90145622a01 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -151,10 +151,10 @@ static void blkverify_close(BlockDriverState *bs) { BDRVBlkverifyState *s = bs->opaque; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->test_file); s->test_file = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int64_t coroutine_fn 
GRAPH_RDLOCK diff --git a/block/block-backend.c b/block/block-backend.c index ec21148806982ae42c5a58526b31069f3c481a71..209eb075286ac6d02f239f68889f57437155178c 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -390,8 +390,6 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) * Both sets of permissions can be changed later using blk_set_perm(). * * Return the new BlockBackend on success, null on failure. - * - * Callers must hold the AioContext lock of @bs. */ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) @@ -416,8 +414,6 @@ BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm, * Just as with bdrv_open(), after having called this function the reference to * @options belongs to the block layer (even on failure). * - * Called without holding an AioContext lock. - * * TODO: Remove @filename and @flags; it should be possible to specify a whole * BDS tree just by specifying the @options QDict (or @reference, * alternatively). At the time of adding this function, this is not possible, @@ -429,7 +425,6 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, { BlockBackend *blk; BlockDriverState *bs; - AioContext *ctx; uint64_t perm = 0; uint64_t shared = BLK_PERM_ALL; @@ -459,23 +454,18 @@ BlockBackend *blk_new_open(const char *filename, const char *reference, shared = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED; } - aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(filename, reference, options, flags, errp); - aio_context_release(qemu_get_aio_context()); if (!bs) { return NULL; } /* bdrv_open() could have moved bs to a different AioContext */ - ctx = bdrv_get_aio_context(bs); blk = blk_new(bdrv_get_aio_context(bs), perm, shared); blk->perm = perm; blk->shared_perm = shared; - aio_context_acquire(ctx); blk_insert_bs(blk, bs, errp); bdrv_unref(bs); - aio_context_release(ctx); if (!blk->root) { blk_unref(blk); @@ -577,13 +567,9 @@ void blk_remove_all_bs(void) GLOBAL_STATE_CODE(); while ((blk = blk_all_next(blk)) != NULL) { - AioContext *ctx = blk_get_aio_context(blk); - - aio_context_acquire(ctx); if (blk->root) { blk_remove_bs(blk); } - aio_context_release(ctx); } } @@ -882,14 +868,11 @@ BlockBackend *blk_by_public(BlockBackendPublic *public) /* * Disassociates the currently associated BlockDriverState from @blk. - * - * The caller must hold the AioContext lock for the BlockBackend. */ void blk_remove_bs(BlockBackend *blk) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; BdrvChild *root; - AioContext *ctx; GLOBAL_STATE_CODE(); @@ -919,30 +902,26 @@ void blk_remove_bs(BlockBackend *blk) root = blk->root; blk->root = NULL; - ctx = bdrv_get_aio_context(root->bs); - bdrv_graph_wrlock(root->bs); + bdrv_graph_wrlock(); bdrv_root_unref_child(root); - bdrv_graph_wrunlock_ctx(ctx); + bdrv_graph_wrunlock(); } /* * Associates a new BlockDriverState with @blk. - * - * Callers must hold the AioContext lock of @bs. 
*/ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - AioContext *ctx = bdrv_get_aio_context(bs); GLOBAL_STATE_CODE(); bdrv_ref(bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); blk->root = bdrv_root_attach_child(bs, "root", &child_root, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, blk->perm, blk->shared_perm, blk, errp); - bdrv_graph_wrunlock_ctx(ctx); + bdrv_graph_wrunlock(); if (blk->root == NULL) { return -EPERM; } @@ -2739,20 +2718,16 @@ int blk_commit_all(void) GRAPH_RDLOCK_GUARD_MAINLOOP(); while ((blk = blk_all_next(blk)) != NULL) { - AioContext *aio_context = blk_get_aio_context(blk); BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk)); - aio_context_acquire(aio_context); if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) { int ret; ret = bdrv_commit(unfiltered_bs); if (ret < 0) { - aio_context_release(aio_context); return ret; } } - aio_context_release(aio_context); } return 0; } diff --git a/block/commit.c b/block/commit.c index 69cc75be0c329a140c73896e820e5cbe2a8c0dc3..1dd7a65ffb8946855876b1b66d638e0c8476c64a 100644 --- a/block/commit.c +++ b/block/commit.c @@ -100,9 +100,9 @@ static void commit_abort(Job *job) bdrv_graph_rdunlock_main_loop(); bdrv_drained_begin(commit_top_backing_bs); - bdrv_graph_wrlock(commit_top_backing_bs); + bdrv_graph_wrlock(); bdrv_replace_node(s->commit_top_bs, commit_top_backing_bs, &error_abort); - bdrv_graph_wrunlock(commit_top_backing_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(commit_top_backing_bs); bdrv_unref(s->commit_top_bs); @@ -339,7 +339,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, * this is the responsibility of the interface (i.e. whoever calls * commit_start()). */ - bdrv_graph_wrlock(top); + bdrv_graph_wrlock(); s->base_overlay = bdrv_find_overlay(top, base); assert(s->base_overlay); @@ -370,19 +370,19 @@ void commit_start(const char *job_id, BlockDriverState *bs, ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, iter_shared_perms, errp); if (ret < 0) { - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); goto fail; } } if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) { - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); goto fail; } s->chain_frozen = true; ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp); - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); if (ret < 0) { goto fail; @@ -434,9 +434,9 @@ fail: * otherwise this would fail because of lack of permissions. 
*/ if (commit_top_bs) { bdrv_drained_begin(top); - bdrv_graph_wrlock(top); + bdrv_graph_wrlock(); bdrv_replace_node(commit_top_bs, top, &error_abort); - bdrv_graph_wrunlock(top); + bdrv_graph_wrunlock(); bdrv_drained_end(top); } } diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 13972879b1b381b8bfb702f1b58a5e5b64f0f228..0842a1a6dfbe6d11a72cbd848459000e339f77b1 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -412,7 +412,6 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, int64_t cluster_size; g_autoptr(BlockdevOptions) full_opts = NULL; BlockdevOptionsCbw *opts; - AioContext *ctx; int ret; full_opts = cbw_parse_options(options, errp); @@ -435,15 +434,11 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, GRAPH_RDLOCK_GUARD_MAINLOOP(); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - if (opts->bitmap) { bitmap = block_dirty_bitmap_lookup(opts->bitmap->node, opts->bitmap->name, NULL, errp); if (!bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } } s->on_cbw_error = opts->has_on_cbw_error ? opts->on_cbw_error : @@ -461,24 +456,21 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, s->bcs = block_copy_state_new(bs->file, s->target, bitmap, errp); if (!s->bcs) { error_prepend(errp, "Cannot create block-copy-state: "); - ret = -EINVAL; - goto out; + return -EINVAL; } cluster_size = block_copy_cluster_size(s->bcs); s->done_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->done_bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } bdrv_disable_dirty_bitmap(s->done_bitmap); /* s->access_bitmap starts equal to bcs bitmap */ s->access_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp); if (!s->access_bitmap) { - ret = -EINVAL; - goto out; + return -EINVAL; } bdrv_disable_dirty_bitmap(s->access_bitmap); bdrv_dirty_bitmap_merge_internal(s->access_bitmap, @@ -487,11 +479,7 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, qemu_co_mutex_init(&s->lock); QLIST_INIT(&s->frozen_read_reqs); - - ret = 0; -out: - aio_context_release(ctx); - return ret; + return 0; } static void cbw_close(BlockDriverState *bs) diff --git a/block/export/export.c b/block/export/export.c index a8f274e5268934c5ff2b0e8b3ae5242a5f8fc904..6d51ae8ed78132a638c65808fdbc394aa7723f26 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -114,7 +114,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) } ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); if (export->iothread) { IOThread *iothread; @@ -133,8 +132,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) set_context_errp = fixed_iothread ? 
errp : NULL; ret = bdrv_try_change_aio_context(bs, new_ctx, NULL, set_context_errp); if (ret == 0) { - aio_context_release(ctx); - aio_context_acquire(new_ctx); ctx = new_ctx; } else if (fixed_iothread) { goto fail; @@ -191,8 +188,6 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) assert(exp->blk != NULL); QLIST_INSERT_HEAD(&block_exports, exp, next); - - aio_context_release(ctx); return exp; fail: @@ -200,7 +195,6 @@ fail: blk_set_dev_ops(blk, NULL, NULL); blk_unref(blk); } - aio_context_release(ctx); if (exp) { g_free(exp->id); g_free(exp); @@ -218,9 +212,6 @@ void blk_exp_ref(BlockExport *exp) static void blk_exp_delete_bh(void *opaque) { BlockExport *exp = opaque; - AioContext *aio_context = exp->ctx; - - aio_context_acquire(aio_context); assert(exp->refcount == 0); QLIST_REMOVE(exp, next); @@ -230,8 +221,6 @@ static void blk_exp_delete_bh(void *opaque) qapi_event_send_block_export_deleted(exp->id); g_free(exp->id); g_free(exp); - - aio_context_release(aio_context); } void blk_exp_unref(BlockExport *exp) @@ -249,22 +238,16 @@ void blk_exp_unref(BlockExport *exp) * connections and other internally held references start to shut down. When * the function returns, there may still be active references while the export * is in the process of shutting down. - * - * Acquires exp->ctx internally. Callers must *not* hold the lock. */ void blk_exp_request_shutdown(BlockExport *exp) { - AioContext *aio_context = exp->ctx; - - aio_context_acquire(aio_context); - /* * If the user doesn't own the export any more, it is already shutting * down. We must not call .request_shutdown and decrease the refcount a * second time. */ if (!exp->user_owned) { - goto out; + return; } exp->drv->request_shutdown(exp); @@ -272,9 +255,6 @@ void blk_exp_request_shutdown(BlockExport *exp) assert(exp->user_owned); exp->user_owned = false; blk_exp_unref(exp); - -out: - aio_context_release(aio_context); } /* diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c index 16f48388d38d183c7d92d12f81ec51ea221a9a8e..50c358e8cd5eae527d9457223f7b608f34cf726f 100644 --- a/block/export/vhost-user-blk-server.c +++ b/block/export/vhost-user-blk-server.c @@ -278,7 +278,6 @@ static void vu_blk_exp_resize(void *opaque) vu_config_change_msg(&vexp->vu_server.vu_dev); } -/* Called with vexp->export.ctx acquired */ static void vu_blk_drained_begin(void *opaque) { VuBlkExport *vexp = opaque; @@ -287,7 +286,6 @@ static void vu_blk_drained_begin(void *opaque) vhost_user_server_detach_aio_context(&vexp->vu_server); } -/* Called with vexp->export.blk AioContext acquired */ static void vu_blk_drained_end(void *opaque) { VuBlkExport *vexp = opaque; @@ -300,8 +298,6 @@ static void vu_blk_drained_end(void *opaque) * Ensures that bdrv_drained_begin() waits until in-flight requests complete * and the server->co_trip coroutine has terminated. It will be restarted in * vhost_user_server_attach_aio_context(). - * - * Called with vexp->export.ctx acquired. 
*/ static bool vu_blk_drained_poll(void *opaque) { diff --git a/block/file-posix.c b/block/file-posix.c index b862406c719307d448908db87905cd4eef50ee34..35684f7e21c867e33fb9cd1ca02837ff6ac48aa7 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -712,17 +712,11 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, #ifdef CONFIG_LINUX_AIO /* Currently Linux does AIO only for files opened with O_DIRECT */ - if (s->use_linux_aio) { - if (!(s->open_flags & O_DIRECT)) { - error_setg(errp, "aio=native was specified, but it requires " - "cache.direct=on, which was not specified."); - ret = -EINVAL; - goto fail; - } - if (!aio_setup_linux_aio(bdrv_get_aio_context(bs), errp)) { - error_prepend(errp, "Unable to use native AIO: "); - goto fail; - } + if (s->use_linux_aio && !(s->open_flags & O_DIRECT)) { + error_setg(errp, "aio=native was specified, but it requires " + "cache.direct=on, which was not specified."); + ret = -EINVAL; + goto fail; } #else if (s->use_linux_aio) { @@ -733,14 +727,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, } #endif /* !defined(CONFIG_LINUX_AIO) */ -#ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { - if (!aio_setup_linux_io_uring(bdrv_get_aio_context(bs), errp)) { - error_prepend(errp, "Unable to use io_uring: "); - goto fail; - } - } -#else +#ifndef CONFIG_LINUX_IO_URING if (s->use_linux_io_uring) { error_setg(errp, "aio=io_uring was specified, but is not supported " "in this build."); @@ -2444,6 +2431,48 @@ static bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov) return true; } +#ifdef CONFIG_LINUX_IO_URING +static inline bool raw_check_linux_io_uring(BDRVRawState *s) +{ + Error *local_err = NULL; + AioContext *ctx; + + if (!s->use_linux_io_uring) { + return false; + } + + ctx = qemu_get_current_aio_context(); + if (unlikely(!aio_setup_linux_io_uring(ctx, &local_err))) { + error_reportf_err(local_err, "Unable to use linux io_uring, " + "falling back to thread pool: "); + s->use_linux_io_uring = false; + return false; + } + return true; +} +#endif + +#ifdef CONFIG_LINUX_AIO +static inline bool raw_check_linux_aio(BDRVRawState *s) +{ + Error *local_err = NULL; + AioContext *ctx; + + if (!s->use_linux_aio) { + return false; + } + + ctx = qemu_get_current_aio_context(); + if (unlikely(!aio_setup_linux_aio(ctx, &local_err))) { + error_reportf_err(local_err, "Unable to use Linux AIO, " + "falling back to thread pool: "); + s->use_linux_aio = false; + return false; + } + return true; +} +#endif + static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, uint64_t bytes, QEMUIOVector *qiov, int type) { @@ -2474,13 +2503,13 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, int64_t *offset_ptr, if (s->needs_alignment && !bdrv_qiov_is_aligned(bs, qiov)) { type |= QEMU_AIO_MISALIGNED; #ifdef CONFIG_LINUX_IO_URING - } else if (s->use_linux_io_uring) { + } else if (raw_check_linux_io_uring(s)) { assert(qiov->size == bytes); ret = luring_co_submit(bs, s->fd, offset, qiov, type); goto out; #endif #ifdef CONFIG_LINUX_AIO - } else if (s->use_linux_aio) { + } else if (raw_check_linux_aio(s)) { assert(qiov->size == bytes); ret = laio_co_submit(s->fd, offset, qiov, type, s->aio_max_batch); @@ -2567,39 +2596,13 @@ static int coroutine_fn raw_co_flush_to_disk(BlockDriverState *bs) }; #ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { + if (raw_check_linux_io_uring(s)) { return luring_co_submit(bs, s->fd, 0, NULL, QEMU_AIO_FLUSH); } #endif return 
raw_thread_pool_submit(handle_aiocb_flush, &acb); } -static void raw_aio_attach_aio_context(BlockDriverState *bs, - AioContext *new_context) -{ - BDRVRawState __attribute__((unused)) *s = bs->opaque; -#ifdef CONFIG_LINUX_AIO - if (s->use_linux_aio) { - Error *local_err = NULL; - if (!aio_setup_linux_aio(new_context, &local_err)) { - error_reportf_err(local_err, "Unable to use native AIO, " - "falling back to thread pool: "); - s->use_linux_aio = false; - } - } -#endif -#ifdef CONFIG_LINUX_IO_URING - if (s->use_linux_io_uring) { - Error *local_err = NULL; - if (!aio_setup_linux_io_uring(new_context, &local_err)) { - error_reportf_err(local_err, "Unable to use linux io_uring, " - "falling back to thread pool: "); - s->use_linux_io_uring = false; - } - } -#endif -} - static void raw_close(BlockDriverState *bs) { BDRVRawState *s = bs->opaque; @@ -3896,7 +3899,6 @@ BlockDriver bdrv_file = { .bdrv_co_copy_range_from = raw_co_copy_range_from, .bdrv_co_copy_range_to = raw_co_copy_range_to, .bdrv_refresh_limits = raw_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4266,7 +4268,6 @@ static BlockDriver bdrv_host_device = { .bdrv_co_copy_range_from = raw_co_copy_range_from, .bdrv_co_copy_range_to = raw_co_copy_range_to, .bdrv_refresh_limits = raw_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4402,7 +4403,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = raw_co_flush_to_disk, .bdrv_refresh_limits = cdrom_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, @@ -4528,7 +4528,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_co_pwritev = raw_co_pwritev, .bdrv_co_flush_to_disk = raw_co_flush_to_disk, .bdrv_refresh_limits = cdrom_refresh_limits, - .bdrv_attach_aio_context = raw_aio_attach_aio_context, .bdrv_co_truncate = raw_co_truncate, .bdrv_co_getlength = raw_co_getlength, diff --git a/block/graph-lock.c b/block/graph-lock.c index 079e878d9b4dfb2b2637a26624e8078bba1eff32..c81162b147304994a0318a19aa1455b18dab12b6 100644 --- a/block/graph-lock.c +++ b/block/graph-lock.c @@ -106,27 +106,12 @@ static uint32_t reader_count(void) return rd; } -void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs) +void no_coroutine_fn bdrv_graph_wrlock(void) { - AioContext *ctx = NULL; - GLOBAL_STATE_CODE(); assert(!qatomic_read(&has_writer)); assert(!qemu_in_coroutine()); - /* - * Release only non-mainloop AioContext. The mainloop often relies on the - * BQL and doesn't lock the main AioContext before doing things. - */ - if (bs) { - ctx = bdrv_get_aio_context(bs); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } else { - ctx = NULL; - } - } - /* Make sure that constantly arriving new I/O doesn't cause starvation */ bdrv_drain_all_begin_nopoll(); @@ -155,27 +140,13 @@ void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs) } while (reader_count() >= 1); bdrv_drain_all_end(); - - if (ctx) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } } -void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx) +void no_coroutine_fn bdrv_graph_wrunlock(void) { GLOBAL_STATE_CODE(); assert(qatomic_read(&has_writer)); - /* - * Release only non-mainloop AioContext. 
The mainloop often relies on the - * BQL and doesn't lock the main AioContext before doing things. - */ - if (ctx && ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } else { - ctx = NULL; - } - WITH_QEMU_LOCK_GUARD(&aio_context_list_lock) { /* * No need for memory barriers, this works in pair with @@ -197,17 +168,6 @@ void no_coroutine_fn bdrv_graph_wrunlock_ctx(AioContext *ctx) * progress. */ aio_bh_poll(qemu_get_aio_context()); - - if (ctx) { - aio_context_acquire(ctx); - } -} - -void no_coroutine_fn bdrv_graph_wrunlock(BlockDriverState *bs) -{ - AioContext *ctx = bs ? bdrv_get_aio_context(bs) : NULL; - - bdrv_graph_wrunlock_ctx(ctx); } void coroutine_fn bdrv_graph_co_rdlock(void) diff --git a/block/io.c b/block/io.c index 7e62fabbf57cc9865492c62b0181ad9e8396769e..8fa7670571a82fcb4a772c9f98dba43954b1abe6 100644 --- a/block/io.c +++ b/block/io.c @@ -294,8 +294,6 @@ static void bdrv_co_drain_bh_cb(void *opaque) BlockDriverState *bs = data->bs; if (bs) { - AioContext *ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); bdrv_dec_in_flight(bs); if (data->begin) { bdrv_do_drained_begin(bs, data->parent, data->poll); @@ -303,7 +301,6 @@ static void bdrv_co_drain_bh_cb(void *opaque) assert(!data->poll); bdrv_do_drained_end(bs, data->parent); } - aio_context_release(ctx); } else { assert(data->begin); bdrv_drain_all_begin(); @@ -320,8 +317,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, { BdrvCoDrainData data; Coroutine *self = qemu_coroutine_self(); - AioContext *ctx = bdrv_get_aio_context(bs); - AioContext *co_ctx = qemu_coroutine_get_aio_context(self); /* Calling bdrv_drain() from a BH ensures the current coroutine yields and * other coroutines run if they were queued by aio_co_enter(). */ @@ -340,17 +335,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, bdrv_inc_in_flight(bs); } - /* - * Temporarily drop the lock across yield or we would get deadlocks. - * bdrv_co_drain_bh_cb() reaquires the lock as needed. - * - * When we yield below, the lock for the current context will be - * released, so if this is actually the lock that protects bs, don't drop - * it a second time. - */ - if (ctx != co_ctx) { - aio_context_release(ctx); - } replay_bh_schedule_oneshot_event(qemu_get_aio_context(), bdrv_co_drain_bh_cb, &data); @@ -358,11 +342,6 @@ static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs, /* If we are resumed from some other event (such as an aio completion or a * timer callback), it is a bug in the caller that should be fixed. */ assert(data.done); - - /* Reacquire the AioContext of bs if we dropped it */ - if (ctx != co_ctx) { - aio_context_acquire(ctx); - } } static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent, @@ -478,13 +457,12 @@ static bool bdrv_drain_all_poll(void) GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); - /* bdrv_drain_poll() can't make changes to the graph and we are holding the - * main AioContext lock, so iterating bdrv_next_all_states() is safe. */ + /* + * bdrv_drain_poll() can't make changes to the graph and we hold the BQL, + * so iterating bdrv_next_all_states() is safe. + */ while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); result |= bdrv_drain_poll(bs, NULL, true); - aio_context_release(aio_context); } return result; @@ -525,11 +503,7 @@ void bdrv_drain_all_begin_nopoll(void) /* Quiesce all nodes, without polling in-flight requests yet. 
The graph * cannot change during this loop. */ while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); bdrv_do_drained_begin(bs, NULL, false); - aio_context_release(aio_context); } } @@ -588,11 +562,7 @@ void bdrv_drain_all_end(void) } while ((bs = bdrv_next_all_states(bs))) { - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); bdrv_do_drained_end(bs, NULL); - aio_context_release(aio_context); } assert(qemu_get_current_aio_context() == qemu_get_aio_context()); @@ -2368,15 +2338,10 @@ int bdrv_flush_all(void) } for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { - AioContext *aio_context = bdrv_get_aio_context(bs); - int ret; - - aio_context_acquire(aio_context); - ret = bdrv_flush(bs); + int ret = bdrv_flush(bs); if (ret < 0 && !result) { result = ret; } - aio_context_release(aio_context); } return result; diff --git a/block/meson.build b/block/meson.build index 59ff6d380c041e415e0e49f127e787c94faa301e..e1f03fd773e9e0f4c7dbec1608d88fb9dcfc5869 100644 --- a/block/meson.build +++ b/block/meson.build @@ -88,10 +88,15 @@ if get_option('parallels').allowed() block_ss.add(files('parallels.c', 'parallels-ext.c')) endif -block_ss.add(when: 'CONFIG_WIN32', if_true: files('file-win32.c', 'win32-aio.c')) -block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit]) +if host_os == 'windows' + block_ss.add(files('file-win32.c', 'win32-aio.c')) +else + block_ss.add(files('file-posix.c'), coref, iokit) +endif block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c')) -block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c')) +if host_os == 'linux' + block_ss.add(files('nvme.c')) +endif if get_option('replication').allowed() block_ss.add(files('replication.c')) endif diff --git a/block/mirror.c b/block/mirror.c index cd9d3ad4a8065ac18528f1ac07a25093d7b496cb..5145eb53e1027c487a6cd42864e375af1b88a49f 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -662,7 +662,6 @@ static int mirror_exit_common(Job *job) MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockJob *bjob = &s->common; MirrorBDSOpaque *bs_opaque; - AioContext *replace_aio_context = NULL; BlockDriverState *src; BlockDriverState *target_bs; BlockDriverState *mirror_top_bs; @@ -677,7 +676,6 @@ static int mirror_exit_common(Job *job) } s->prepared = true; - aio_context_acquire(qemu_get_aio_context()); bdrv_graph_rdlock_main_loop(); mirror_top_bs = s->mirror_top_bs; @@ -742,11 +740,6 @@ static int mirror_exit_common(Job *job) } bdrv_graph_rdunlock_main_loop(); - if (s->to_replace) { - replace_aio_context = bdrv_get_aio_context(s->to_replace); - aio_context_acquire(replace_aio_context); - } - if (s->should_complete && !abort) { BlockDriverState *to_replace = s->to_replace ?: src; bool ro = bdrv_is_read_only(to_replace); @@ -764,7 +757,7 @@ static int mirror_exit_common(Job *job) * check for an op blocker on @to_replace, and we have our own * there. 
*/ - bdrv_graph_wrlock(target_bs); + bdrv_graph_wrlock(); if (bdrv_recurse_can_replace(src, to_replace)) { bdrv_replace_node(to_replace, target_bs, &local_err); } else { @@ -773,7 +766,7 @@ static int mirror_exit_common(Job *job) "would not lead to an abrupt change of visible data", to_replace->node_name, target_bs->node_name); } - bdrv_graph_wrunlock(target_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(to_replace); if (local_err) { error_report_err(local_err); @@ -785,9 +778,6 @@ static int mirror_exit_common(Job *job) error_free(s->replace_blocker); bdrv_unref(s->to_replace); } - if (replace_aio_context) { - aio_context_release(replace_aio_context); - } g_free(s->replaces); /* @@ -796,9 +786,9 @@ static int mirror_exit_common(Job *job) * valid. */ block_job_remove_all_bdrv(bjob); - bdrv_graph_wrlock(mirror_top_bs); + bdrv_graph_wrlock(); bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); - bdrv_graph_wrunlock(mirror_top_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(target_bs); bdrv_unref(target_bs); @@ -811,8 +801,6 @@ static int mirror_exit_common(Job *job) bdrv_unref(mirror_top_bs); bdrv_unref(src); - aio_context_release(qemu_get_aio_context()); - return ret; } @@ -1191,24 +1179,17 @@ static void mirror_complete(Job *job, Error **errp) /* block all operations on to_replace bs */ if (s->replaces) { - AioContext *replace_aio_context; - s->to_replace = bdrv_find_node(s->replaces); if (!s->to_replace) { error_setg(errp, "Node name '%s' not found", s->replaces); return; } - replace_aio_context = bdrv_get_aio_context(s->to_replace); - aio_context_acquire(replace_aio_context); - /* TODO Translate this into child freeze system. */ error_setg(&s->replace_blocker, "block device is in use by block-job-complete"); bdrv_op_block_all(s->to_replace, s->replace_blocker); bdrv_ref(s->to_replace); - - aio_context_release(replace_aio_context); } s->should_complete = true; @@ -1914,13 +1895,13 @@ static BlockJob *mirror_start_job( */ bdrv_disable_dirty_bitmap(s->dirty_bitmap); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); ret = block_job_add_bdrv(&s->common, "source", bs, 0, BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE | BLK_PERM_CONSISTENT_READ, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } @@ -1965,17 +1946,17 @@ static BlockJob *mirror_start_job( ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, iter_shared_perms, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); QTAILQ_INIT(&s->ops_in_flight); @@ -2001,12 +1982,12 @@ fail: bs_opaque->stop = true; bdrv_drained_begin(bs); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); assert(mirror_top_bs->backing->bs == bs); bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, &error_abort); bdrv_replace_node(mirror_top_bs, bs, &error_abort); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); bdrv_drained_end(bs); bdrv_unref(mirror_top_bs); diff --git a/block/monitor/bitmap-qmp-cmds.c b/block/monitor/bitmap-qmp-cmds.c index 70d01a37763f3d872e14e9d6bdf4df3889d07b6b..a738e7bbf7a132fcb66a016c0e568993b5108932 100644 --- a/block/monitor/bitmap-qmp-cmds.c +++ b/block/monitor/bitmap-qmp-cmds.c @@ -95,7 +95,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; - AioContext *aio_context; if 
(!name || name[0] == '\0') { error_setg(errp, "Bitmap name cannot be empty"); @@ -107,14 +106,11 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (has_granularity) { if (granularity < 512 || !is_power_of_2(granularity)) { error_setg(errp, "Granularity must be power of 2 " "and at least 512"); - goto out; + return; } } else { /* Default to cluster size, if available: */ @@ -132,12 +128,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, if (persistent && !bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp)) { - goto out; + return; } bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp); if (bitmap == NULL) { - goto out; + return; } if (disabled) { @@ -145,9 +141,6 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, } bdrv_dirty_bitmap_set_persistence(bitmap, persistent); - -out: - aio_context_release(aio_context); } BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, @@ -157,7 +150,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -166,19 +158,14 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO, errp)) { - aio_context_release(aio_context); return NULL; } if (bdrv_dirty_bitmap_get_persistence(bitmap) && bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0) { - aio_context_release(aio_context); return NULL; } @@ -190,7 +177,6 @@ BdrvDirtyBitmap *block_dirty_bitmap_remove(const char *node, const char *name, *bitmap_bs = bs; } - aio_context_release(aio_context); return release ? 
NULL : bitmap; } diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index c729cbf1eb8ab228f35ae6b6e87f2a6809fdab3b..bdbb5cb141aeab4c65a55e86a9575b623b314dea 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -141,7 +141,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) const char *id = qdict_get_str(qdict, "id"); BlockBackend *blk; BlockDriverState *bs; - AioContext *aio_context; Error *local_err = NULL; GLOBAL_STATE_CODE(); @@ -168,14 +167,10 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) return; } - aio_context = blk_get_aio_context(blk); - aio_context_acquire(aio_context); - bs = blk_bs(blk); if (bs) { if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, &local_err)) { error_report_err(local_err); - aio_context_release(aio_context); return; } @@ -196,8 +191,6 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict) } else { blk_unref(blk); } - - aio_context_release(aio_context); } void hmp_commit(Monitor *mon, const QDict *qdict) @@ -213,7 +206,6 @@ void hmp_commit(Monitor *mon, const QDict *qdict) ret = blk_commit_all(); } else { BlockDriverState *bs; - AioContext *aio_context; blk = blk_by_name(device); if (!blk) { @@ -222,18 +214,13 @@ void hmp_commit(Monitor *mon, const QDict *qdict) } bs = bdrv_skip_implicit_filters(blk_bs(blk)); - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!blk_is_available(blk)) { error_report("Device '%s' has no medium", device); - aio_context_release(aio_context); return; } ret = bdrv_commit(bs); - - aio_context_release(aio_context); } if (ret < 0) { error_report("'commit' error for '%s': %s", device, strerror(-ret)); @@ -560,7 +547,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) BlockBackend *blk = NULL; BlockDriverState *bs = NULL; BlockBackend *local_blk = NULL; - AioContext *ctx = NULL; bool qdev = qdict_get_try_bool(qdict, "qdev", false); const char *device = qdict_get_str(qdict, "device"); const char *command = qdict_get_str(qdict, "command"); @@ -582,9 +568,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) } } - ctx = blk ? 
blk_get_aio_context(blk) : bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - if (bs) { blk = local_blk = blk_new(bdrv_get_aio_context(bs), 0, BLK_PERM_ALL); ret = blk_insert_bs(blk, bs, &err); @@ -622,11 +605,6 @@ void hmp_qemu_io(Monitor *mon, const QDict *qdict) fail: blk_unref(local_blk); - - if (ctx) { - aio_context_release(ctx); - } - hmp_handle_error(mon, err); } @@ -882,7 +860,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) int nb_sns, i; int total; int *global_snapshots; - AioContext *aio_context; typedef struct SnapshotEntry { QEMUSnapshotInfo sn; @@ -909,11 +886,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) error_report_err(err); return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); nb_sns = bdrv_snapshot_list(bs, &sn_tab); - aio_context_release(aio_context); if (nb_sns < 0) { monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns); @@ -924,9 +898,7 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) int bs1_nb_sns = 0; ImageEntry *ie; SnapshotEntry *se; - AioContext *ctx = bdrv_get_aio_context(bs1); - aio_context_acquire(ctx); if (bdrv_can_snapshot(bs1)) { sn = NULL; bs1_nb_sns = bdrv_snapshot_list(bs1, &sn); @@ -944,7 +916,6 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict) } g_free(sn); } - aio_context_release(ctx); } if (no_snapshot) { diff --git a/block/qapi-sysemu.c b/block/qapi-sysemu.c index 1618cd225a321044efd7c890ce45059392147934..e4282631d23deb3f7f780746ac2c42f52cbeed31 100644 --- a/block/qapi-sysemu.c +++ b/block/qapi-sysemu.c @@ -174,7 +174,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) { BlockBackend *blk; BlockDriverState *bs; - AioContext *aio_context; bool has_attached_device; GLOBAL_STATE_CODE(); @@ -204,13 +203,10 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_graph_rdlock_main_loop(); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) { bdrv_graph_rdunlock_main_loop(); - goto out; + return; } bdrv_graph_rdunlock_main_loop(); @@ -223,9 +219,6 @@ blockdev_remove_medium(const char *device, const char *id, Error **errp) * value passed here (i.e. false). 
*/ blk_dev_change_media_cb(blk, false, &error_abort); } - -out: - aio_context_release(aio_context); } void qmp_blockdev_remove_medium(const char *id, Error **errp) @@ -237,7 +230,6 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, BlockDriverState *bs, Error **errp) { Error *local_err = NULL; - AioContext *ctx; bool has_device; int ret; @@ -259,11 +251,7 @@ static void qmp_blockdev_insert_anon_medium(BlockBackend *blk, return; } - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); ret = blk_insert_bs(blk, bs, errp); - aio_context_release(ctx); - if (ret < 0) { return; } @@ -374,9 +362,7 @@ void qmp_blockdev_change_medium(const char *device, qdict_put_str(options, "driver", format); } - aio_context_acquire(qemu_get_aio_context()); medium_bs = bdrv_open(filename, NULL, options, bdrv_flags, errp); - aio_context_release(qemu_get_aio_context()); if (!medium_bs) { goto fail; @@ -437,20 +423,16 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) ThrottleConfig cfg; BlockDriverState *bs; BlockBackend *blk; - AioContext *aio_context; blk = qmp_get_blk(arg->device, arg->id, errp); if (!blk) { return; } - aio_context = blk_get_aio_context(blk); - aio_context_acquire(aio_context); - bs = blk_bs(blk); if (!bs) { error_setg(errp, "Device has no medium"); - goto out; + return; } throttle_config_init(&cfg); @@ -505,7 +487,7 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) } if (!throttle_is_valid(&cfg, errp)) { - goto out; + return; } if (throttle_enabled(&cfg)) { @@ -522,9 +504,6 @@ void qmp_block_set_io_throttle(BlockIOThrottle *arg, Error **errp) /* If all throttling settings are set to 0, disable I/O limits */ blk_io_limits_disable(blk); } - -out: - aio_context_release(aio_context); } void qmp_block_latency_histogram_set( diff --git a/block/qapi.c b/block/qapi.c index 82a30b38fe70b0e75f50e527bd68991ee4e8271a..9e806fa230d8e9d7361a61a79f2c06e7a64bcb1c 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -234,13 +234,11 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) int ret; Error *err = NULL; - aio_context_acquire(bdrv_get_aio_context(bs)); - size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "Can't get image size '%s'", bs->exact_filename); - goto out; + return; } bdrv_refresh_filename(bs); @@ -265,7 +263,7 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) info->format_specific = bdrv_get_specific_info(bs, &err); if (err) { error_propagate(errp, err); - goto out; + return; } backing_filename = bs->backing_file; if (backing_filename[0] != '\0') { @@ -300,11 +298,8 @@ bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp) break; default: error_propagate(errp, err); - goto out; + return; } - -out: - aio_context_release(bdrv_get_aio_context(bs)); } /** @@ -709,15 +704,10 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, /* Just to be safe if query_nodes is not always initialized */ if (has_query_nodes && query_nodes) { for (bs = bdrv_next_node(NULL); bs; bs = bdrv_next_node(bs)) { - AioContext *ctx = bdrv_get_aio_context(bs); - - aio_context_acquire(ctx); QAPI_LIST_APPEND(tail, bdrv_query_bds_stats(bs, false)); - aio_context_release(ctx); } } else { for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) { - AioContext *ctx = blk_get_aio_context(blk); BlockStats *s; char *qdev; @@ -725,7 +715,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, continue; } - aio_context_acquire(ctx); s = 
bdrv_query_bds_stats(blk_bs(blk), true); s->device = g_strdup(blk_name(blk)); @@ -737,7 +726,6 @@ BlockStatsList *qmp_query_blockstats(bool has_query_nodes, } bdrv_query_blk_stats(s->stats, blk); - aio_context_release(ctx); QAPI_LIST_APPEND(tail, s); } diff --git a/block/qcow2.c b/block/qcow2.c index 13e032bd5e2e07565bfd29cfd481560437bcd609..9bee66fff59af3017ac594d49d8e24cf3c2b6200 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -2807,9 +2807,9 @@ qcow2_do_close(BlockDriverState *bs, bool close_data_file) if (close_data_file && has_data_file(bs)) { GLOBAL_STATE_CODE(); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->data_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->data_file = NULL; bdrv_graph_rdlock_main_loop(); } diff --git a/block/quorum.c b/block/quorum.c index 505b8b3e18e71f7082d14d205d3c5ee6d5251829..db8fe891c4b84dc1724accf1d7e6d4f55788be66 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -1037,14 +1037,14 @@ static int quorum_open(BlockDriverState *bs, QDict *options, int flags, close_exit: /* cleanup on error */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { if (!opened[i]) { continue; } bdrv_unref_child(bs, s->children[i]); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->children); g_free(opened); exit: @@ -1057,11 +1057,11 @@ static void quorum_close(BlockDriverState *bs) BDRVQuorumState *s = bs->opaque; int i; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_children; i++) { bdrv_unref_child(bs, s->children[i]); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->children); } diff --git a/block/raw-format.c b/block/raw-format.c index 1111dffd54f7da49bffa43d6177208b96e929aba..ac7e8495f66ceb4b5d0879747365cca1b4face97 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -470,7 +470,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVRawState *s = bs->opaque; - AioContext *ctx; bool has_size; uint64_t offset, size; BdrvChildRole file_role; @@ -522,11 +521,7 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags, bs->file->bs->filename); } - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); ret = raw_apply_options(bs, s, offset, has_size, size, errp); - aio_context_release(ctx); - if (ret < 0) { return ret; } diff --git a/block/replication.c b/block/replication.c index 5ded5f1ca94ba7e893c30d7d525931028f65b816..ca6bd0a72055d1d376921eb46fda4f0d990da836 100644 --- a/block/replication.c +++ b/block/replication.c @@ -394,14 +394,7 @@ static void reopen_backing_file(BlockDriverState *bs, bool writable, } if (reopen_queue) { - AioContext *ctx = bdrv_get_aio_context(bs); - if (ctx != qemu_get_aio_context()) { - aio_context_release(ctx); - } bdrv_reopen_multiple(reopen_queue, errp); - if (ctx != qemu_get_aio_context()) { - aio_context_acquire(ctx); - } } } @@ -462,14 +455,11 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, BlockDriverState *top_bs; BdrvChild *active_disk, *hidden_disk, *secondary_disk; int64_t active_length, hidden_length, disk_length; - AioContext *aio_context; Error *local_err = NULL; BackupPerf perf = { .use_copy_range = true, .max_workers = 1 }; GLOBAL_STATE_CODE(); - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || @@ -479,20 +469,17 @@ static void replication_start(ReplicationState *rs, ReplicationMode 
mode, * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->stage != BLOCK_REPLICATION_NONE) { error_setg(errp, "Block replication is running or done"); - aio_context_release(aio_context); return; } if (s->mode != mode) { error_setg(errp, "The parameter mode's value is invalid, needs %d," " but got %d", s->mode, mode); - aio_context_release(aio_context); return; } @@ -505,7 +492,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!active_disk || !active_disk->bs || !active_disk->bs->backing) { error_setg(errp, "Active disk doesn't have backing file"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } @@ -513,7 +499,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!hidden_disk->bs || !hidden_disk->bs->backing) { error_setg(errp, "Hidden disk doesn't have backing file"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } @@ -521,7 +506,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) { error_setg(errp, "The secondary disk doesn't have block backend"); bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); return; } bdrv_graph_rdunlock_main_loop(); @@ -534,7 +518,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, active_length != hidden_length || hidden_length != disk_length) { error_setg(errp, "Active disk, hidden disk, secondary disk's length" " are not the same"); - aio_context_release(aio_context); return; } @@ -546,7 +529,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, !hidden_disk->bs->drv->bdrv_make_empty) { error_setg(errp, "Active disk or hidden disk doesn't support make_empty"); - aio_context_release(aio_context); bdrv_graph_rdunlock_main_loop(); return; } @@ -556,11 +538,10 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, reopen_backing_file(bs, true, &local_err); if (local_err) { error_propagate(errp, local_err); - aio_context_release(aio_context); return; } - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); bdrv_ref(hidden_disk->bs); s->hidden_disk = bdrv_attach_child(bs, hidden_disk->bs, "hidden disk", @@ -568,8 +549,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, &local_err); if (local_err) { error_propagate(errp, local_err); - bdrv_graph_wrunlock(bs); - aio_context_release(aio_context); + bdrv_graph_wrunlock(); return; } @@ -579,8 +559,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, BDRV_CHILD_DATA, &local_err); if (local_err) { error_propagate(errp, local_err); - bdrv_graph_wrunlock(bs); - aio_context_release(aio_context); + bdrv_graph_wrunlock(); return; } @@ -592,15 +571,14 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, if (!top_bs || !bdrv_is_root_node(top_bs) || !check_top_bs(top_bs, bs)) { error_setg(errp, "No top_bs or it is invalid"); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); reopen_backing_file(bs, false, NULL); - aio_context_release(aio_context); return; } bdrv_op_block_all(top_bs, s->blocker); bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker); - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); s->backup_job = backup_job_create( NULL, s->secondary_disk->bs, s->hidden_disk->bs, @@ -612,13 +590,11 @@ static void replication_start(ReplicationState 
*rs, ReplicationMode mode, if (local_err) { error_propagate(errp, local_err); backup_job_cleanup(bs); - aio_context_release(aio_context); return; } job_start(&s->backup_job->job); break; default: - aio_context_release(aio_context); abort(); } @@ -629,18 +605,12 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode, } s->error = 0; - aio_context_release(aio_context); } static void replication_do_checkpoint(ReplicationState *rs, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || s->stage == BLOCK_REPLICATION_FAILOVER) { @@ -649,38 +619,28 @@ static void replication_do_checkpoint(ReplicationState *rs, Error **errp) * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->mode == REPLICATION_MODE_SECONDARY) { secondary_do_checkpoint(bs, errp); } - aio_context_release(aio_context); } static void replication_get_error(ReplicationState *rs, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_NONE) { error_setg(errp, "Block replication is not running"); - aio_context_release(aio_context); return; } if (s->error) { error_setg(errp, "I/O error occurred"); - aio_context_release(aio_context); return; } - aio_context_release(aio_context); } static void replication_done(void *opaque, int ret) @@ -691,12 +651,12 @@ static void replication_done(void *opaque, int ret) if (ret == 0) { s->stage = BLOCK_REPLICATION_DONE; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, s->secondary_disk); s->secondary_disk = NULL; bdrv_unref_child(bs, s->hidden_disk); s->hidden_disk = NULL; - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); s->error = 0; } else { @@ -708,12 +668,7 @@ static void replication_done(void *opaque, int ret) static void replication_stop(ReplicationState *rs, bool failover, Error **errp) { BlockDriverState *bs = rs->opaque; - BDRVReplicationState *s; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - s = bs->opaque; + BDRVReplicationState *s = bs->opaque; if (s->stage == BLOCK_REPLICATION_DONE || s->stage == BLOCK_REPLICATION_FAILOVER) { @@ -722,13 +677,11 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * Ignore the request because the secondary side of replication * doesn't have to do anything anymore. */ - aio_context_release(aio_context); return; } if (s->stage != BLOCK_REPLICATION_RUNNING) { error_setg(errp, "Block replication is not running"); - aio_context_release(aio_context); return; } @@ -744,15 +697,12 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * disk, secondary disk in backup_job_completed(). 
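Alongside the AioContext cleanups, the qcow2, quorum, replication and vmdk hunks switch the graph write lock to its argument-less form: bdrv_graph_wrlock(NULL) and bdrv_graph_wrlock(bs) both become bdrv_graph_wrlock(). A sketch of the convention, assuming the caller already runs in the main loop under the BQL (example_drop_child() is a placeholder):

    #include "qemu/osdep.h"
    #include "block/block_int.h"

    /*
     * Illustrative sketch only: detaching a child under the new
     * argument-less graph write lock.
     */
    static void example_drop_child(BlockDriverState *parent, BdrvChild *child)
    {
        bdrv_graph_wrlock();            /* was bdrv_graph_wrlock(bs) or (NULL) */
        bdrv_unref_child(parent, child);
        bdrv_graph_wrunlock();          /* was bdrv_graph_wrunlock(bs) or (NULL) */
    }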
*/ if (s->backup_job) { - aio_context_release(aio_context); job_cancel_sync(&s->backup_job->job, true); - aio_context_acquire(aio_context); } if (!failover) { secondary_do_checkpoint(bs, errp); s->stage = BLOCK_REPLICATION_DONE; - aio_context_release(aio_context); return; } @@ -765,10 +715,8 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) bdrv_graph_rdunlock_main_loop(); break; default: - aio_context_release(aio_context); abort(); } - aio_context_release(aio_context); } static const char *const replication_strong_runtime_opts[] = { diff --git a/block/snapshot.c b/block/snapshot.c index ec8cf4810b06dfaca1fe58b6e6fd5f9c7dda8a81..8694fc0a3eba6717f1d2d0a404331c6d82b76eba 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -196,8 +196,10 @@ bdrv_snapshot_fallback(BlockDriverState *bs) int bdrv_can_snapshot(BlockDriverState *bs) { BlockDriver *drv = bs->drv; + GLOBAL_STATE_CODE(); - if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { + + if (!drv || !bdrv_is_inserted(bs) || !bdrv_is_writable(bs)) { return 0; } @@ -290,9 +292,9 @@ int bdrv_snapshot_goto(BlockDriverState *bs, } /* .bdrv_open() will re-attach it */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, fallback); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); ret = bdrv_snapshot_goto(fallback_bs, snapshot_id, errp); open_ret = drv->bdrv_open(bs, options, bs->open_flags, &local_err); @@ -525,9 +527,7 @@ static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs) return bdrv_has_blk(bs) || QLIST_EMPTY(&bs->parents); } -/* Group operations. All block drivers are involved. - * These functions will properly handle dataplane (take aio_context_acquire - * when appropriate for appropriate block drivers) */ +/* Group operations. All block drivers are involved. */ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, Error **errp) @@ -545,14 +545,11 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool ok = true; - aio_context_acquire(ctx); if (devices || bdrv_all_snapshots_includes_bs(bs)) { ok = bdrv_can_snapshot(bs); } - aio_context_release(ctx); if (!ok) { error_setg(errp, "Device '%s' is writable but does not support " "snapshots", bdrv_get_device_or_node_name(bs)); @@ -582,18 +579,15 @@ int bdrv_all_delete_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); QEMUSnapshotInfo sn1, *snapshot = &sn1; int ret = 0; - aio_context_acquire(ctx); if ((devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_snapshot_find(bs, snapshot, name) >= 0) { ret = bdrv_snapshot_delete(bs, snapshot->id_str, snapshot->name, errp); } - aio_context_release(ctx); if (ret < 0) { error_prepend(errp, "Could not delete snapshot '%s' on '%s': ", name, bdrv_get_device_or_node_name(bs)); @@ -628,17 +622,14 @@ int bdrv_all_goto_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool all_snapshots_includes_bs; - aio_context_acquire(ctx); bdrv_graph_rdlock_main_loop(); all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs); bdrv_graph_rdunlock_main_loop(); ret = (devices || all_snapshots_includes_bs) ? 
bdrv_snapshot_goto(bs, name, errp) : 0; - aio_context_release(ctx); if (ret < 0) { bdrv_graph_rdlock_main_loop(); error_prepend(errp, "Could not load snapshot '%s' on '%s': ", @@ -670,15 +661,12 @@ int bdrv_all_has_snapshot(const char *name, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); QEMUSnapshotInfo sn; int ret = 0; - aio_context_acquire(ctx); if (devices || bdrv_all_snapshots_includes_bs(bs)) { ret = bdrv_snapshot_find(bs, &sn, name); } - aio_context_release(ctx); if (ret < 0) { if (ret == -ENOENT) { return 0; @@ -715,10 +703,8 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); int ret = 0; - aio_context_acquire(ctx); if (bs == vm_state_bs) { sn->vm_state_size = vm_state_size; ret = bdrv_snapshot_create(bs, sn); @@ -726,7 +712,6 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn, sn->vm_state_size = 0; ret = bdrv_snapshot_create(bs, sn); } - aio_context_release(ctx); if (ret < 0) { error_setg(errp, "Could not create snapshot '%s' on '%s'", sn->name, bdrv_get_device_or_node_name(bs)); @@ -757,13 +742,10 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs, iterbdrvs = bdrvs; while (iterbdrvs) { BlockDriverState *bs = iterbdrvs->data; - AioContext *ctx = bdrv_get_aio_context(bs); bool found = false; - aio_context_acquire(ctx); found = (devices || bdrv_all_snapshots_includes_bs(bs)) && bdrv_can_snapshot(bs); - aio_context_release(ctx); if (vmstate_bs) { if (g_str_equal(vmstate_bs, diff --git a/block/stream.c b/block/stream.c index 01fe7c0f166123748ba1d7e6e64c438e732ee322..048c2d282f37c6cfe50222cff32d64c1009e768d 100644 --- a/block/stream.c +++ b/block/stream.c @@ -99,9 +99,9 @@ static int stream_prepare(Job *job) } } - bdrv_graph_wrlock(s->target_bs); + bdrv_graph_wrlock(); bdrv_set_backing_hd_drained(unfiltered_bs, base, &local_err); - bdrv_graph_wrunlock(s->target_bs); + bdrv_graph_wrunlock(); /* * This call will do I/O, so the graph can change again from here on. @@ -366,10 +366,10 @@ void stream_start(const char *job_id, BlockDriverState *bs, * already have our own plans. Also don't allow resize as the image size is * queried only at the job start and then cached. 
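In block/snapshot.c the bdrv_all_*() group operations now touch each node directly; the per-node aio_context_acquire()/release() pairs and the comment promising dataplane-aware locking are gone. Each loop body reduces to roughly the following sketch (example_all_can_snapshot() is a placeholder modelled on bdrv_all_can_snapshot(), not part of the patch):

    #include "qemu/osdep.h"
    #include "block/block.h"
    #include "block/snapshot.h"
    #include "qapi/error.h"

    /*
     * Illustrative sketch only: each node is inspected directly, with no
     * aio_context_acquire()/release() around the per-node work.
     */
    static bool example_all_can_snapshot(GList *bdrvs, Error **errp)
    {
        for (GList *l = bdrvs; l; l = l->next) {
            BlockDriverState *bs = l->data;

            if (!bdrv_can_snapshot(bs)) {
                error_setg(errp, "Device '%s' is writable but does not support "
                           "snapshots", bdrv_get_device_or_node_name(bs));
                return false;
            }
        }
        return true;
    }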
*/ - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); if (block_job_add_bdrv(&s->common, "active node", bs, 0, basic_flags | BLK_PERM_WRITE, errp)) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } @@ -389,11 +389,11 @@ void stream_start(const char *job_id, BlockDriverState *bs, ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0, basic_flags, errp); if (ret < 0) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); goto fail; } } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); s->base_overlay = base_overlay; s->above_base = above_base; diff --git a/block/vmdk.c b/block/vmdk.c index d6971c706750a5c18cbd71c1e38a179ab15e61bb..bf78e1238351d86152b5d7f205e12747ef51d357 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -272,7 +272,7 @@ static void vmdk_free_extents(BlockDriverState *bs) BDRVVmdkState *s = bs->opaque; VmdkExtent *e; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < s->num_extents; i++) { e = &s->extents[i]; g_free(e->l1_table); @@ -283,7 +283,7 @@ static void vmdk_free_extents(BlockDriverState *bs) bdrv_unref_child(bs, e->file); } } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_free(s->extents); } @@ -1247,9 +1247,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, 0, 0, 0, 0, 0, &extent, errp); if (ret < 0) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1266,9 +1266,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, g_free(buf); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1277,9 +1277,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp); if (ret) { bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); goto out; } @@ -1287,9 +1287,9 @@ vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options, } else { error_setg(errp, "Unsupported extent type '%s'", type); bdrv_graph_rdunlock_main_loop(); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(bs, extent_file); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_graph_rdlock_main_loop(); ret = -ENOTSUP; goto out; diff --git a/block/write-threshold.c b/block/write-threshold.c index 76d8885677e266af57574020c403ee4901e9134c..56fe88de811344046eb87b43784c5ebf8042aebd 100644 --- a/block/write-threshold.c +++ b/block/write-threshold.c @@ -33,7 +33,6 @@ void qmp_block_set_write_threshold(const char *node_name, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; bs = bdrv_find_node(node_name); if (!bs) { @@ -41,12 +40,7 @@ void qmp_block_set_write_threshold(const char *node_name, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_write_threshold_set(bs, threshold_bytes); - - aio_context_release(aio_context); } void bdrv_write_threshold_check_write(BlockDriverState *bs, int64_t offset, diff --git a/blockdev.c b/blockdev.c index c91f49e7b62c04a3bd8155826294692a4ebfa5fc..3a5e7222eca6b6ff3a9fd5f0d7179cd902fdfc7c 100644 --- a/blockdev.c +++ b/blockdev.c 
@@ -662,7 +662,6 @@ err_no_opts: /* Takes the ownership of bs_opts */ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) { - BlockDriverState *bs; int bdrv_flags = 0; GLOBAL_STATE_CODE(); @@ -677,11 +676,7 @@ BlockDriverState *bds_tree_init(QDict *bs_opts, Error **errp) bdrv_flags |= BDRV_O_INACTIVE; } - aio_context_acquire(qemu_get_aio_context()); - bs = bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); - aio_context_release(qemu_get_aio_context()); - - return bs; + return bdrv_open(NULL, NULL, bs_opts, bdrv_flags, errp); } void blockdev_close_all_bdrv_states(void) @@ -690,11 +685,7 @@ void blockdev_close_all_bdrv_states(void) GLOBAL_STATE_CODE(); QTAILQ_FOREACH_SAFE(bs, &monitor_bdrv_states, monitor_list, next_bs) { - AioContext *ctx = bdrv_get_aio_context(bs); - - aio_context_acquire(ctx); bdrv_unref(bs); - aio_context_release(ctx); } } @@ -1048,7 +1039,6 @@ fail: static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; GRAPH_RDLOCK_GUARD_MAINLOOP(); @@ -1062,16 +1052,11 @@ static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp) return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - if (!bdrv_is_inserted(bs)) { error_setg(errp, "Device has no medium"); bs = NULL; } - aio_context_release(aio_context); - return bs; } @@ -1141,7 +1126,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, Error **errp) { BlockDriverState *bs; - AioContext *aio_context; QEMUSnapshotInfo sn; Error *local_err = NULL; SnapshotInfo *info = NULL; @@ -1154,39 +1138,35 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, if (!bs) { return NULL; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!id && !name) { error_setg(errp, "Name or id must be provided"); - goto out_aio_context; + return NULL; } if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE, errp)) { - goto out_aio_context; + return NULL; } ret = bdrv_snapshot_find_by_id_and_name(bs, id, name, &sn, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out_aio_context; + return NULL; } if (!ret) { error_setg(errp, "Snapshot with id '%s' and name '%s' does not exist on " "device '%s'", STR_OR_NULL(id), STR_OR_NULL(name), device); - goto out_aio_context; + return NULL; } bdrv_snapshot_delete(bs, id, name, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out_aio_context; + return NULL; } - aio_context_release(aio_context); - info = g_new0(SnapshotInfo, 1); info->id = g_strdup(sn.id_str); info->name = g_strdup(sn.name); @@ -1201,10 +1181,6 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device, } return info; - -out_aio_context: - aio_context_release(aio_context); - return NULL; } /* internal snapshot private data */ @@ -1232,7 +1208,6 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, bool ret; int64_t rt; InternalSnapshotState *state = g_new0(InternalSnapshotState, 1); - AioContext *aio_context; int ret1; GLOBAL_STATE_CODE(); @@ -1248,33 +1223,30 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - state->bs = bs; /* Paired with .clean() */ bdrv_drained_begin(bs); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_INTERNAL_SNAPSHOT, errp)) { - goto out; + return; } if (bdrv_is_read_only(bs)) { error_setg(errp, "Device '%s' is read only", 
device); - goto out; + return; } if (!bdrv_can_snapshot(bs)) { error_setg(errp, "Block format '%s' used by device '%s' " "does not support internal snapshots", bs->drv->format_name, device); - goto out; + return; } if (!strlen(name)) { error_setg(errp, "Name is empty"); - goto out; + return; } /* check whether a snapshot with name exist */ @@ -1282,12 +1254,12 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } else if (ret) { error_setg(errp, "Snapshot with name '%s' already exists on device '%s'", name, device); - goto out; + return; } /* 3. take the snapshot */ @@ -1308,14 +1280,11 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal, error_setg_errno(errp, -ret1, "Failed to create snapshot '%s' on device '%s'", name, device); - goto out; + return; } /* 4. succeed, mark a snapshot is created */ state->created = true; - -out: - aio_context_release(aio_context); } static void internal_snapshot_abort(void *opaque) @@ -1323,7 +1292,6 @@ static void internal_snapshot_abort(void *opaque) InternalSnapshotState *state = opaque; BlockDriverState *bs = state->bs; QEMUSnapshotInfo *sn = &state->sn; - AioContext *aio_context; Error *local_error = NULL; GLOBAL_STATE_CODE(); @@ -1333,9 +1301,6 @@ static void internal_snapshot_abort(void *opaque) return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - if (bdrv_snapshot_delete(bs, sn->id_str, sn->name, &local_error) < 0) { error_reportf_err(local_error, "Failed to delete snapshot with id '%s' and " @@ -1343,25 +1308,17 @@ static void internal_snapshot_abort(void *opaque) sn->id_str, sn->name, bdrv_get_device_name(bs)); } - - aio_context_release(aio_context); } static void internal_snapshot_clean(void *opaque) { g_autofree InternalSnapshotState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } /* external snapshot private data */ @@ -1395,7 +1352,6 @@ static void external_snapshot_action(TransactionAction *action, /* File name of the new image (for 'blockdev-snapshot-sync') */ const char *new_image_file; ExternalSnapshotState *state = g_new0(ExternalSnapshotState, 1); - AioContext *aio_context; uint64_t perm, shared; /* TODO We'll eventually have to take a writer lock in this function */ @@ -1435,26 +1391,23 @@ static void external_snapshot_action(TransactionAction *action, return; } - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); - /* Paired with .clean() */ bdrv_drained_begin(state->old_bs); if (!bdrv_is_inserted(state->old_bs)) { error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, device); - goto out; + return; } if (bdrv_op_is_blocked(state->old_bs, BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT, errp)) { - goto out; + return; } if (!bdrv_is_read_only(state->old_bs)) { if (bdrv_flush(state->old_bs)) { error_setg(errp, QERR_IO_ERROR); - goto out; + return; } } @@ -1466,13 +1419,13 @@ static void external_snapshot_action(TransactionAction *action, if (node_name && !snapshot_node_name) { error_setg(errp, "New overlay node-name missing"); - goto out; + return; } if (snapshot_node_name && bdrv_lookup_bs(snapshot_node_name, snapshot_node_name, NULL)) { error_setg(errp, "New overlay node-name already in use"); - goto out; + return; } flags = state->old_bs->open_flags; @@ -1485,20 +1438,18 @@ 
static void external_snapshot_action(TransactionAction *action, int64_t size = bdrv_getlength(state->old_bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } bdrv_refresh_filename(state->old_bs); - aio_context_release(aio_context); bdrv_img_create(new_image_file, format, state->old_bs->filename, state->old_bs->drv->format_name, NULL, size, flags, false, &local_err); - aio_context_acquire(aio_context); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } } @@ -1508,20 +1459,15 @@ static void external_snapshot_action(TransactionAction *action, } qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); - aio_context_acquire(qemu_get_aio_context()); state->new_bs = bdrv_open(new_image_file, snapshot_ref, options, flags, errp); - aio_context_release(qemu_get_aio_context()); /* We will manually add the backing_hd field to the bs later */ if (!state->new_bs) { return; } - aio_context_acquire(aio_context); - /* * Allow attaching a backing file to an overlay that's already in use only * if the parents don't assume that they are already seeing a valid image. @@ -1530,41 +1476,34 @@ static void external_snapshot_action(TransactionAction *action, bdrv_get_cumulative_perm(state->new_bs, &perm, &shared); if (perm & BLK_PERM_CONSISTENT_READ) { error_setg(errp, "The overlay is already in use"); - goto out; + return; } if (state->new_bs->drv->is_filter) { error_setg(errp, "Filters cannot be used as overlays"); - goto out; + return; } if (bdrv_cow_child(state->new_bs)) { error_setg(errp, "The overlay already has a backing image"); - goto out; + return; } if (!state->new_bs->drv->supports_backing) { error_setg(errp, "The overlay does not support backing images"); - goto out; + return; } ret = bdrv_append(state->new_bs, state->old_bs, errp); if (ret < 0) { - goto out; + return; } state->overlay_appended = true; - -out: - aio_context_release(aio_context); } static void external_snapshot_commit(void *opaque) { ExternalSnapshotState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); /* We don't need (or want) to use the transactional * bdrv_reopen_multiple() across all the entries at once, because we @@ -1572,8 +1511,6 @@ static void external_snapshot_commit(void *opaque) if (!qatomic_read(&state->old_bs->copy_on_read)) { bdrv_reopen_set_read_only(state->old_bs, true, NULL); } - - aio_context_release(aio_context); } static void external_snapshot_abort(void *opaque) @@ -1586,7 +1523,6 @@ static void external_snapshot_abort(void *opaque) int ret; aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd() close state->old_bs; we need it */ @@ -1599,26 +1535,18 @@ static void external_snapshot_abort(void *opaque) */ tmp_context = bdrv_get_aio_context(state->old_bs); if (aio_context != tmp_context) { - aio_context_release(aio_context); - aio_context_acquire(tmp_context); - ret = bdrv_try_change_aio_context(state->old_bs, aio_context, NULL, NULL); assert(ret == 0); - - aio_context_release(tmp_context); - aio_context_acquire(aio_context); } bdrv_drained_begin(state->new_bs); - bdrv_graph_wrlock(state->old_bs); + bdrv_graph_wrlock(); bdrv_replace_node(state->new_bs, state->old_bs, &error_abort); - bdrv_graph_wrunlock(state->old_bs); + bdrv_graph_wrunlock(); bdrv_drained_end(state->new_bs); bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */ - 
- aio_context_release(aio_context); } } } @@ -1626,19 +1554,13 @@ static void external_snapshot_abort(void *opaque) static void external_snapshot_clean(void *opaque) { g_autofree ExternalSnapshotState *state = opaque; - AioContext *aio_context; if (!state->old_bs) { return; } - aio_context = bdrv_get_aio_context(state->old_bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->old_bs); bdrv_unref(state->new_bs); - - aio_context_release(aio_context); } typedef struct DriveBackupState { @@ -1670,7 +1592,6 @@ static void drive_backup_action(DriveBackup *backup, BlockDriverState *target_bs; BlockDriverState *source = NULL; AioContext *aio_context; - AioContext *old_context; const char *format; QDict *options; Error *local_err = NULL; @@ -1698,7 +1619,6 @@ static void drive_backup_action(DriveBackup *backup, } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); state->bs = bs; /* Paired with .clean() */ @@ -1713,7 +1633,7 @@ static void drive_backup_action(DriveBackup *backup, bdrv_graph_rdlock_main_loop(); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { bdrv_graph_rdunlock_main_loop(); - goto out; + return; } flags = bs->open_flags | BDRV_O_RDWR; @@ -1744,7 +1664,7 @@ static void drive_backup_action(DriveBackup *backup, size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } if (backup->mode != NEW_IMAGE_MODE_EXISTING) { @@ -1770,7 +1690,7 @@ static void drive_backup_action(DriveBackup *backup, if (local_err) { error_propagate(errp, local_err); - goto out; + return; } options = qdict_new(); @@ -1779,30 +1699,18 @@ static void drive_backup_action(DriveBackup *backup, if (format) { qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); - aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(backup->target, NULL, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - if (!target_bs) { return; } - /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
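The backup hunks also drop the "honor bdrv_try_change_aio_context() context acquisition requirements" dance: the target's old AioContext is no longer acquired before moving the node into the source's context, and the call now runs under the BQL alone. Sketch of the reduced call site (example_move_target() is a placeholder name):

    #include "qemu/osdep.h"
    #include "block/aio.h"
    #include "block/block.h"

    /*
     * Illustrative sketch only: moving a freshly opened target into the
     * source's AioContext without first acquiring either context.
     */
    static int example_move_target(BlockDriverState *target_bs,
                                   AioContext *aio_context, Error **errp)
    {
        int ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp);
        if (ret < 0) {
            bdrv_unref(target_bs);
        }
        return ret;
    }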
*/ - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); - ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); - if (set_backing_hd) { if (bdrv_set_backing_hd(target_bs, source, errp) < 0) { goto unref; @@ -1815,22 +1723,14 @@ static void drive_backup_action(DriveBackup *backup, unref: bdrv_unref(target_bs); -out: - aio_context_release(aio_context); } static void drive_backup_commit(void *opaque) { DriveBackupState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); assert(state->job); job_start(&state->job->job); - - aio_context_release(aio_context); } static void drive_backup_abort(void *opaque) @@ -1845,18 +1745,12 @@ static void drive_backup_abort(void *opaque) static void drive_backup_clean(void *opaque) { g_autofree DriveBackupState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } typedef struct BlockdevBackupState { @@ -1881,7 +1775,6 @@ static void blockdev_backup_action(BlockdevBackup *backup, BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; - AioContext *old_context; int ret; tran_add(tran, &blockdev_backup_drv, state); @@ -1898,17 +1791,12 @@ static void blockdev_backup_action(BlockdevBackup *backup, /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ aio_context = bdrv_get_aio_context(bs); - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); state->bs = bs; /* Paired with .clean() */ @@ -1917,22 +1805,14 @@ static void blockdev_backup_action(BlockdevBackup *backup, state->job = do_backup_common(qapi_BlockdevBackup_base(backup), bs, target_bs, aio_context, block_job_txn, errp); - - aio_context_release(aio_context); } static void blockdev_backup_commit(void *opaque) { BlockdevBackupState *state = opaque; - AioContext *aio_context; - - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); assert(state->job); job_start(&state->job->job); - - aio_context_release(aio_context); } static void blockdev_backup_abort(void *opaque) @@ -1947,18 +1827,12 @@ static void blockdev_backup_abort(void *opaque) static void blockdev_backup_clean(void *opaque) { g_autofree BlockdevBackupState *state = opaque; - AioContext *aio_context; if (!state->bs) { return; } - aio_context = bdrv_get_aio_context(state->bs); - aio_context_acquire(aio_context); - bdrv_drained_end(state->bs); - - aio_context_release(aio_context); } typedef struct BlockDirtyBitmapState { @@ -2390,18 +2264,13 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name, return; } - bdrv_co_lock(bs); bdrv_drained_begin(bs); - bdrv_co_unlock(bs); old_ctx = bdrv_co_enter(bs); blk_co_truncate(blk, size, false, PREALLOC_MODE_OFF, 0, errp); bdrv_co_leave(bs, old_ctx); - bdrv_co_lock(bs); bdrv_drained_end(bs); - bdrv_co_unlock(bs); - blk_co_unref(blk); } @@ -2454,7 +2323,6 @@ void qmp_block_stream(const char *job_id, const char *device, } aio_context = 
bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); bdrv_graph_rdlock_main_loop(); if (base) { @@ -2521,7 +2389,7 @@ void qmp_block_stream(const char *job_id, const char *device, if (!base_bs && backing_file) { error_setg(errp, "backing file specified, but streaming the " "entire chain"); - goto out; + return; } if (has_auto_finalize && !auto_finalize) { @@ -2536,18 +2404,14 @@ void qmp_block_stream(const char *job_id, const char *device, filter_node_name, &local_err); if (local_err) { error_propagate(errp, local_err); - goto out; + return; } trace_qmp_block_stream(bs); - -out: - aio_context_release(aio_context); return; out_rdlock: bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); } void qmp_block_commit(const char *job_id, const char *device, @@ -2606,10 +2470,9 @@ void qmp_block_commit(const char *job_id, const char *device, } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, errp)) { - goto out; + return; } /* default top_bs is the active layer */ @@ -2617,16 +2480,16 @@ void qmp_block_commit(const char *job_id, const char *device, if (top_node && top) { error_setg(errp, "'top-node' and 'top' are mutually exclusive"); - goto out; + return; } else if (top_node) { top_bs = bdrv_lookup_bs(NULL, top_node, errp); if (top_bs == NULL) { - goto out; + return; } if (!bdrv_chain_contains(bs, top_bs)) { error_setg(errp, "'%s' is not in this backing file chain", top_node); - goto out; + return; } } else if (top) { /* This strcmp() is just a shortcut, there is no need to @@ -2640,35 +2503,35 @@ void qmp_block_commit(const char *job_id, const char *device, if (top_bs == NULL) { error_setg(errp, "Top image file %s not found", top ? top : "NULL"); - goto out; + return; } assert(bdrv_get_aio_context(top_bs) == aio_context); if (base_node && base) { error_setg(errp, "'base-node' and 'base' are mutually exclusive"); - goto out; + return; } else if (base_node) { base_bs = bdrv_lookup_bs(NULL, base_node, errp); if (base_bs == NULL) { - goto out; + return; } if (!bdrv_chain_contains(top_bs, base_bs)) { error_setg(errp, "'%s' is not in this backing file chain", base_node); - goto out; + return; } } else if (base) { base_bs = bdrv_find_backing_image(top_bs, base); if (base_bs == NULL) { error_setg(errp, "Can't find '%s' in the backing chain", base); - goto out; + return; } } else { base_bs = bdrv_find_base(top_bs); if (base_bs == NULL) { error_setg(errp, "There is no backimg image"); - goto out; + return; } } @@ -2678,14 +2541,14 @@ void qmp_block_commit(const char *job_id, const char *device, iter = bdrv_filter_or_cow_bs(iter)) { if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { - goto out; + return; } } /* Do not allow attempts to commit an image into itself */ if (top_bs == base_bs) { error_setg(errp, "cannot commit an image into itself"); - goto out; + return; } /* @@ -2708,7 +2571,7 @@ void qmp_block_commit(const char *job_id, const char *device, error_setg(errp, "'backing-file' specified, but 'top' has a " "writer on it"); } - goto out; + return; } if (!job_id) { /* @@ -2724,7 +2587,7 @@ void qmp_block_commit(const char *job_id, const char *device, } else { BlockDriverState *overlay_bs = bdrv_find_overlay(bs, top_bs); if (bdrv_op_is_blocked(overlay_bs, BLOCK_OP_TYPE_COMMIT_TARGET, errp)) { - goto out; + return; } commit_start(job_id, bs, base_bs, top_bs, job_flags, speed, on_error, backing_file, @@ -2732,11 +2595,8 @@ void qmp_block_commit(const char *job_id, const char 
*device, } if (local_err != NULL) { error_propagate(errp, local_err); - goto out; + return; } - -out: - aio_context_release(aio_context); } /* Common QMP interface for drive-backup and blockdev-backup */ @@ -2985,8 +2845,6 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, if (replaces) { BlockDriverState *to_replace_bs; - AioContext *aio_context; - AioContext *replace_aio_context; int64_t bs_size, replace_size; bs_size = bdrv_getlength(bs); @@ -3000,19 +2858,7 @@ static void blockdev_mirror_common(const char *job_id, BlockDriverState *bs, return; } - aio_context = bdrv_get_aio_context(bs); - replace_aio_context = bdrv_get_aio_context(to_replace_bs); - /* - * bdrv_getlength() is a co-wrapper and uses AIO_WAIT_WHILE. Be sure not - * to acquire the same AioContext twice. - */ - if (replace_aio_context != aio_context) { - aio_context_acquire(replace_aio_context); - } replace_size = bdrv_getlength(to_replace_bs); - if (replace_aio_context != aio_context) { - aio_context_release(replace_aio_context); - } if (replace_size < 0) { error_setg_errno(errp, -replace_size, @@ -3041,7 +2887,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) BlockDriverState *bs; BlockDriverState *target_backing_bs, *target_bs; AioContext *aio_context; - AioContext *old_context; BlockMirrorBackingMode backing_mode; Error *local_err = NULL; QDict *options = NULL; @@ -3064,7 +2909,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) } aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (!arg->has_mode) { arg->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; @@ -3088,14 +2932,14 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) size = bdrv_getlength(bs); if (size < 0) { error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; + return; } if (arg->replaces) { if (!arg->node_name) { error_setg(errp, "a node-name must be provided when replacing a" " named node of the graph"); - goto out; + return; } } @@ -3143,7 +2987,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) if (local_err) { error_propagate(errp, local_err); - goto out; + return; } options = qdict_new(); @@ -3153,15 +2997,11 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) if (format) { qdict_put_str(options, "driver", format); } - aio_context_release(aio_context); /* Mirroring takes care of copy-on-write using the source's backing * file. */ - aio_context_acquire(qemu_get_aio_context()); target_bs = bdrv_open(arg->target, NULL, options, flags, errp); - aio_context_release(qemu_get_aio_context()); - if (!target_bs) { return; } @@ -3173,20 +3013,12 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) bdrv_graph_rdunlock_main_loop(); - /* Honor bdrv_try_change_aio_context() context acquisition requirements. 
*/ - old_context = bdrv_get_aio_context(target_bs); - aio_context_acquire(old_context); - ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); if (ret < 0) { bdrv_unref(target_bs); - aio_context_release(old_context); return; } - aio_context_release(old_context); - aio_context_acquire(aio_context); - blockdev_mirror_common(arg->job_id, bs, target_bs, arg->replaces, arg->sync, backing_mode, zero_target, @@ -3202,8 +3034,6 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) arg->has_auto_dismiss, arg->auto_dismiss, errp); bdrv_unref(target_bs); -out: - aio_context_release(aio_context); } void qmp_blockdev_mirror(const char *job_id, @@ -3226,7 +3056,6 @@ void qmp_blockdev_mirror(const char *job_id, BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; - AioContext *old_context; BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN; bool zero_target; int ret; @@ -3243,18 +3072,11 @@ void qmp_blockdev_mirror(const char *job_id, zero_target = (sync == MIRROR_SYNC_MODE_FULL); - /* Honor bdrv_try_change_aio_context() context acquisition requirements. */ - old_context = bdrv_get_aio_context(target_bs); aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(old_context); ret = bdrv_try_change_aio_context(target_bs, aio_context, NULL, errp); - - aio_context_release(old_context); - aio_context_acquire(aio_context); - if (ret < 0) { - goto out; + return; } blockdev_mirror_common(job_id, bs, target_bs, @@ -3269,8 +3091,6 @@ void qmp_blockdev_mirror(const char *job_id, has_auto_finalize, auto_finalize, has_auto_dismiss, auto_dismiss, errp); -out: - aio_context_release(aio_context); } /* @@ -3433,7 +3253,6 @@ void qmp_change_backing_file(const char *device, Error **errp) { BlockDriverState *bs = NULL; - AioContext *aio_context; BlockDriverState *image_bs = NULL; Error *local_err = NULL; bool ro; @@ -3444,9 +3263,6 @@ void qmp_change_backing_file(const char *device, return; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - bdrv_graph_rdlock_main_loop(); image_bs = bdrv_lookup_bs(NULL, image_node_name, &local_err); @@ -3485,7 +3301,7 @@ void qmp_change_backing_file(const char *device, if (ro) { if (bdrv_reopen_set_read_only(image_bs, false, errp) != 0) { - goto out; + return; } } @@ -3503,14 +3319,10 @@ void qmp_change_backing_file(const char *device, if (ro) { bdrv_reopen_set_read_only(image_bs, true, errp); } - -out: - aio_context_release(aio_context); return; out_rdlock: bdrv_graph_rdunlock_main_loop(); - aio_context_release(aio_context); } void qmp_blockdev_add(BlockdevOptions *options, Error **errp) @@ -3550,7 +3362,6 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) for (; reopen_list != NULL; reopen_list = reopen_list->next) { BlockdevOptions *options = reopen_list->value; BlockDriverState *bs; - AioContext *ctx; QObject *obj; Visitor *v; QDict *qdict; @@ -3578,12 +3389,7 @@ void qmp_blockdev_reopen(BlockdevOptionsList *reopen_list, Error **errp) qdict_flatten(qdict); - ctx = bdrv_get_aio_context(bs); - aio_context_acquire(ctx); - queue = bdrv_reopen_queue(queue, bs, qdict, false); - - aio_context_release(ctx); } /* Perform the reopen operation */ @@ -3596,7 +3402,6 @@ fail: void qmp_blockdev_del(const char *node_name, Error **errp) { - AioContext *aio_context; BlockDriverState *bs; GLOBAL_STATE_CODE(); @@ -3611,30 +3416,25 @@ void qmp_blockdev_del(const char *node_name, Error **errp) error_setg(errp, "Node %s is in use", node_name); return; } - aio_context = 
bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_DRIVE_DEL, errp)) { - goto out; + return; } if (!QTAILQ_IN_USE(bs, monitor_list)) { error_setg(errp, "Node %s is not owned by the monitor", bs->node_name); - goto out; + return; } if (bs->refcnt > 1) { error_setg(errp, "Block device %s is in use", bdrv_get_device_or_node_name(bs)); - goto out; + return; } QTAILQ_REMOVE(&monitor_bdrv_states, bs, monitor_list); bdrv_unref(bs); - -out: - aio_context_release(aio_context); } static BdrvChild * GRAPH_RDLOCK @@ -3657,7 +3457,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, BlockDriverState *parent_bs, *new_bs = NULL; BdrvChild *p_child; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); parent_bs = bdrv_lookup_bs(parent, parent, errp); if (!parent_bs) { @@ -3693,7 +3493,7 @@ void qmp_x_blockdev_change(const char *parent, const char *child, } out: - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } BlockJobInfoList *qmp_query_block_jobs(Error **errp) @@ -3724,7 +3524,6 @@ BlockJobInfoList *qmp_query_block_jobs(Error **errp) void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, bool has_force, bool force, Error **errp) { - AioContext *old_context; AioContext *new_context; BlockDriverState *bs; @@ -3756,12 +3555,7 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread, new_context = qemu_get_aio_context(); } - old_context = bdrv_get_aio_context(bs); - aio_context_acquire(old_context); - bdrv_try_change_aio_context(bs, new_context, NULL, errp); - - aio_context_release(old_context); } QemuOptsList qemu_common_drive_opts = { diff --git a/blockjob.c b/blockjob.c index b7a29052b94fcb1a8a2ffb33ac8ac23962bd5c7d..d5f29e14af2ac2835e1939467e61ae9900ec85c5 100644 --- a/blockjob.c +++ b/blockjob.c @@ -198,9 +198,7 @@ void block_job_remove_all_bdrv(BlockJob *job) * one to make sure that such a concurrent access does not attempt * to process an already freed BdrvChild. 
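blockjob.c follows suit: block_job_remove_all_bdrv() no longer releases and re-acquires job->job.aio_context around the graph write lock, and block_job_add_bdrv() stops switching AioContexts before attaching a child. A sketch of the simplified node teardown (example_job_drop_nodes() is a placeholder; per-node op unblocking is elided for brevity):

    #include "qemu/osdep.h"
    #include "block/block_int.h"
    #include "block/blockjob_int.h"

    /*
     * Illustrative sketch only: the graph write lock is taken directly,
     * without dropping job->job.aio_context around it.
     */
    static void example_job_drop_nodes(BlockJob *job)
    {
        bdrv_graph_wrlock();
        while (job->nodes) {
            GSList *l = job->nodes;
            BdrvChild *c = l->data;

            job->nodes = l->next;
            bdrv_root_unref_child(c);
            g_slist_free_1(l);
        }
        bdrv_graph_wrunlock();
    }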
*/ - aio_context_release(job->job.aio_context); - bdrv_graph_wrlock(NULL); - aio_context_acquire(job->job.aio_context); + bdrv_graph_wrlock(); while (job->nodes) { GSList *l = job->nodes; BdrvChild *c = l->data; @@ -212,7 +210,7 @@ void block_job_remove_all_bdrv(BlockJob *job) g_slist_free_1(l); } - bdrv_graph_wrunlock_ctx(job->job.aio_context); + bdrv_graph_wrunlock(); } bool block_job_has_bdrv(BlockJob *job, BlockDriverState *bs) @@ -234,28 +232,12 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs, uint64_t perm, uint64_t shared_perm, Error **errp) { BdrvChild *c; - AioContext *ctx = bdrv_get_aio_context(bs); - bool need_context_ops; GLOBAL_STATE_CODE(); bdrv_ref(bs); - need_context_ops = ctx != job->job.aio_context; - - if (need_context_ops) { - if (job->job.aio_context != qemu_get_aio_context()) { - aio_context_release(job->job.aio_context); - } - aio_context_acquire(ctx); - } c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job, errp); - if (need_context_ops) { - aio_context_release(ctx); - if (job->job.aio_context != qemu_get_aio_context()) { - aio_context_acquire(job->job.aio_context); - } - } if (c == NULL) { return -EPERM; } @@ -514,7 +496,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, int ret; GLOBAL_STATE_CODE(); - bdrv_graph_wrlock(bs); + bdrv_graph_wrlock(); if (job_id == NULL && !(flags & JOB_INTERNAL)) { job_id = bdrv_get_device_name(bs); @@ -523,7 +505,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs), flags, cb, opaque, errp); if (job == NULL) { - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); return NULL; } @@ -563,11 +545,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver, goto fail; } - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); return job; fail: - bdrv_graph_wrunlock(bs); + bdrv_graph_wrunlock(); job_early_fail(&job->job); return NULL; } diff --git a/bsd-user/main.c b/bsd-user/main.c index e6014f517ee0664b450a096e457f2a7114864160..4de226d211b2cdb5201885352dd0faeebf1bd400 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -378,10 +378,7 @@ int main(int argc, char **argv) } else if (!strcmp(r, "cpu")) { cpu_model = argv[optind++]; if (is_help_option(cpu_model)) { - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) - cpu_list(); -#endif + list_cpus(); exit(1); } } else if (!strcmp(r, "B")) { diff --git a/bsd-user/meson.build b/bsd-user/meson.build index c6bfd3b2b539cdcef9bbc82bdc2d5aab59caf51f..39bad0ae33e7a27d91b8e9069cf96ee0fc4cc293 100644 --- a/bsd-user/meson.build +++ b/bsd-user/meson.build @@ -24,6 +24,6 @@ kvm = cc.find_library('kvm', required: true) bsd_user_ss.add(elf, procstat, kvm) # Pull in the OS-specific build glue, if any -subdir(targetos) +subdir(host_os) specific_ss.add_all(when: 'CONFIG_BSD_USER', if_true: bsd_user_ss) diff --git a/chardev/char-fe.c b/chardev/char-fe.c index 7789f7be9c873928be895d618e98978b504005d5..20222a4cad5c0c5a1f7b3eb9de5aa5f370bd03a3 100644 --- a/chardev/char-fe.c +++ b/chardev/char-fe.c @@ -211,7 +211,7 @@ bool qemu_chr_fe_init(CharBackend *b, Chardev *s, Error **errp) } } - b->fe_open = false; + b->fe_is_open = false; b->tag = tag; b->chr = s; return true; @@ -257,7 +257,7 @@ void qemu_chr_fe_set_handlers_full(CharBackend *b, bool sync_state) { Chardev *s; - int fe_open; + bool fe_open; s = b->chr; if (!s) { @@ -265,10 +265,10 @@ void 
qemu_chr_fe_set_handlers_full(CharBackend *b, } if (!opaque && !fd_can_read && !fd_read && !fd_event) { - fe_open = 0; + fe_open = false; remove_fd_in_watch(s); } else { - fe_open = 1; + fe_open = true; } b->chr_can_read = fd_can_read; b->chr_read = fd_read; @@ -336,7 +336,7 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo) } } -void qemu_chr_fe_set_open(CharBackend *be, int fe_open) +void qemu_chr_fe_set_open(CharBackend *be, bool is_open) { Chardev *chr = be->chr; @@ -344,12 +344,12 @@ void qemu_chr_fe_set_open(CharBackend *be, int fe_open) return; } - if (be->fe_open == fe_open) { + if (be->fe_is_open == is_open) { return; } - be->fe_open = fe_open; + be->fe_is_open = is_open; if (CHARDEV_GET_CLASS(chr)->chr_set_fe_open) { - CHARDEV_GET_CLASS(chr)->chr_set_fe_open(chr, fe_open); + CHARDEV_GET_CLASS(chr)->chr_set_fe_open(chr, is_open); } } diff --git a/chardev/char.c b/chardev/char.c index 996a024c7a2cdb9f59e13d92c4da15fc2cf0acdb..3c43fb1278f8b2915764ddd6cd68f7e644794d01 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -171,6 +171,18 @@ int qemu_chr_write(Chardev *s, const uint8_t *buf, int len, bool write_all) return res; } + if (replay_mode == REPLAY_MODE_RECORD) { + /* + * When recording we don't want temporary conditions to + * perturb the result. By ensuring we write everything we can + * while recording we avoid playback being out of sync if it + * doesn't encounter the same temporary conditions (usually + * triggered by external programs not reading the chardev fast + * enough and pipes filling up). + */ + write_all = true; + } + res = qemu_chr_write_buffer(s, buf, len, &offset, write_all); if (qemu_chr_replay(s) && replay_mode == REPLAY_MODE_RECORD) { @@ -518,7 +530,7 @@ static const ChardevClass *char_get_class(const char *driver, Error **errp) if (object_class_is_abstract(oc)) { error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver", - "an abstract device type"); + "a non-abstract device type"); return NULL; } @@ -750,7 +762,7 @@ static int qmp_query_chardev_foreach(Object *obj, void *data) value->label = g_strdup(chr->label); value->filename = g_strdup(chr->filename); - value->frontend_open = chr->be && chr->be->fe_open; + value->frontend_open = chr->be && chr->be->fe_is_open; QAPI_LIST_PREPEND(*list, value); diff --git a/chardev/meson.build b/chardev/meson.build index 6d56ad32fdb07e211a6d9b8760b1cf83f49c6495..c80337d15fa58b987461ce19fd965bdfbc86ba99 100644 --- a/chardev/meson.build +++ b/chardev/meson.build @@ -12,21 +12,23 @@ chardev_ss.add(files( 'char-udp.c', 'char.c', )) -chardev_ss.add(when: 'CONFIG_POSIX', if_true: [files( - 'char-fd.c', - 'char-pty.c', -), util]) -if targetos in ['linux', 'gnu/kfreebsd', 'freebsd', 'dragonfly'] - chardev_ss.add(files('char-parallel.c')) +if host_os == 'windows' + chardev_ss.add(files( + 'char-console.c', + 'char-win-stdio.c', + 'char-win.c', + )) +else + chardev_ss.add(files( + 'char-fd.c', + 'char-pty.c', + ), util) + if host_os in ['linux', 'gnu/kfreebsd', 'freebsd', 'dragonfly'] + chardev_ss.add(files('char-parallel.c')) + endif endif -chardev_ss.add(when: 'CONFIG_WIN32', if_true: files( - 'char-console.c', - 'char-win-stdio.c', - 'char-win.c', -)) - -chardev_ss = chardev_ss.apply(config_targetos, strict: false) +chardev_ss = chardev_ss.apply({}) system_ss.add(files( 'char-hmp-cmds.c', diff --git a/configs/devices/arm-softmmu/default.mak b/configs/devices/arm-softmmu/default.mak index 980c48a7d99573ae236cb4ab387d9bf09857bd9e..023faa2f750ef596c62d1d3963a21fd4f5ebaf78 100644 --- a/configs/devices/arm-softmmu/default.mak +++ 
b/configs/devices/arm-softmmu/default.mak @@ -19,6 +19,7 @@ CONFIG_ARM_VIRT=y # CONFIG_NSERIES=n # CONFIG_STELLARIS=n # CONFIG_STM32VLDISCOVERY=n +# CONFIG_B_L475E_IOT01A=n # CONFIG_REALVIEW=n # CONFIG_VERSATILE=n # CONFIG_VEXPRESS=n diff --git a/configure b/configure index bdda912f3626000ed0006276a36919bcf30821c1..21ab9a64e98dddf0e1fd606cd54ecc448c69271b 100755 --- a/configure +++ b/configure @@ -334,30 +334,30 @@ EOF } if check_define __linux__ ; then - targetos=linux + host_os=linux elif check_define _WIN32 ; then - targetos=windows + host_os=windows elif check_define __OpenBSD__ ; then - targetos=openbsd + host_os=openbsd elif check_define __sun__ ; then - targetos=sunos + host_os=sunos elif check_define __HAIKU__ ; then - targetos=haiku + host_os=haiku elif check_define __FreeBSD__ ; then - targetos=freebsd + host_os=freebsd elif check_define __FreeBSD_kernel__ && check_define __GLIBC__; then - targetos=gnu/kfreebsd + host_os=gnu/kfreebsd elif check_define __DragonFly__ ; then - targetos=dragonfly + host_os=dragonfly elif check_define __NetBSD__; then - targetos=netbsd + host_os=netbsd elif check_define __APPLE__; then - targetos=darwin + host_os=darwin else # This is a fatal error, but don't report it yet, because we # might be going to just print the --help text, or it might # be the result of a missing compiler. - targetos=bogus + host_os=bogus fi if test ! -z "$cpu" ; then @@ -573,13 +573,13 @@ do fi done -if test "$targetos" = "windows" ; then +if test "$host_os" = "windows" ; then EXESUF=".exe" fi meson_option_build_array() { printf '[' - (if test "$targetos" = windows; then + (if test "$host_os" = windows; then IFS=\; else IFS=: @@ -802,7 +802,7 @@ mak_wilds="" if [ -n "$host_arch" ] && [ -d "$source_path/common-user/host/$host_arch" ]; then if [ "$linux_user" != no ]; then - if [ "$targetos" = linux ]; then + if [ "$host_os" = linux ]; then linux_user=yes elif [ "$linux_user" = yes ]; then error_exit "linux-user not supported on this architecture" @@ -813,9 +813,9 @@ if [ -n "$host_arch" ] && [ -d "$source_path/common-user/host/$host_arch" ]; the fi if [ "$bsd_user" != no ]; then if [ "$bsd_user" = "" ]; then - test $targetos = freebsd && bsd_user=yes + test $host_os = freebsd && bsd_user=yes fi - if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$targetos" ]; then + if [ "$bsd_user" = yes ] && ! [ -d "$source_path/bsd-user/$host_os" ]; then error_exit "bsd-user not supported on this host OS" fi if [ "$bsd_user" = "yes" ]; then @@ -998,7 +998,7 @@ if test -z "$ninja"; then fi fi -if test "$targetos" = "bogus"; then +if test "$host_os" = "bogus"; then # Now that we know that we're not printing the help and that # the compiler works (so the results of the check_defines we used # to identify the OS are reliable), if we didn't recognize the @@ -1007,7 +1007,7 @@ if test "$targetos" = "bogus"; then fi # test for any invalid configuration combinations -if test "$targetos" = "windows" && ! has "$dlltool"; then +if test "$host_os" = "windows" && ! 
has "$dlltool"; then if test "$plugins" = "yes"; then error_exit "TCG plugins requires dlltool to build on Windows platforms" fi @@ -1041,7 +1041,7 @@ static THREAD int tls_var; int main(void) { return tls_var; } EOF -if test "$targetos" = windows || test "$targetos" = haiku; then +if test "$host_os" = windows || test "$host_os" = haiku; then if test "$pie" = "yes"; then error_exit "PIE not available due to missing OS support" fi @@ -1231,6 +1231,7 @@ probe_target_compiler() { got_cross_cc=no container_image= container_hosts= + container_cross_prefix= container_cross_cc= container_cross_ar= container_cross_as= @@ -1272,16 +1273,33 @@ probe_target_compiler() { test "$container" != no || continue test "$host" = "$cpu" || continue case $target_arch in + # debian-all-test-cross architectures + + hppa|m68k|mips|riscv64|sparc64) + container_image=debian-all-test-cross + ;; + mips64) + container_image=debian-all-test-cross + container_cross_prefix=mips64-linux-gnuabi64- + ;; + ppc|ppc64|ppc64le) + container_image=debian-all-test-cross + container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- + ;; + + # debian-legacy-test-cross architectures (need Debian 11) + # - libc6.1-dev-alpha-cross: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1054412 + # - sh4-linux-user: binaries don't run with bookworm compiler + + alpha|sh4) + container_image=debian-legacy-test-cross + ;; + + # architectures with individual containers + aarch64) # We don't have any bigendian build tools so we only use this for AArch64 container_image=debian-arm64-cross - container_cross_prefix=aarch64-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; - alpha) - container_image=debian-legacy-test-cross - container_cross_prefix=alpha-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc ;; arm) # We don't have any bigendian build tools so we only use this for ARM @@ -1290,18 +1308,11 @@ probe_target_compiler() { ;; cris) container_image=fedora-cris-cross - container_cross_prefix=cris-linux-gnu- ;; hexagon) - container_image=debian-hexagon-cross container_cross_prefix=hexagon-unknown-linux-musl- container_cross_cc=${container_cross_prefix}clang ;; - hppa) - container_image=debian-all-test-cross - container_cross_prefix=hppa-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; i386) container_image=debian-i686-cross container_cross_prefix=i686-linux-gnu- @@ -1310,59 +1321,19 @@ probe_target_compiler() { container_image=debian-loongarch-cross container_cross_prefix=loongarch64-unknown-linux-gnu- ;; - m68k) - container_image=debian-all-test-cross - container_cross_prefix=m68k-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; microblaze) - container_image=debian-microblaze-cross container_cross_prefix=microblaze-linux-musl- ;; mips64el) container_image=debian-mips64el-cross container_cross_prefix=mips64el-linux-gnuabi64- ;; - mips64) - container_image=debian-all-test-cross - container_cross_prefix=mips64-linux-gnuabi64- - ;; - mips) - container_image=debian-all-test-cross - container_cross_prefix=mips-linux-gnu- - ;; - nios2) - container_image=debian-nios2-cross - container_cross_prefix=nios2-linux-gnu- - ;; - ppc) - container_image=debian-all-test-cross - container_cross_prefix=powerpc-linux-gnu- - container_cross_cc=${container_cross_prefix}gcc - ;; - ppc64|ppc64le) - container_image=debian-all-test-cross - container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- - ;; - riscv64) - container_image=debian-all-test-cross - container_cross_prefix=riscv64-linux-gnu- - ;; - sh4) - 
container_image=debian-legacy-test-cross - container_cross_prefix=sh4-linux-gnu- - ;; - sparc64) - container_image=debian-all-test-cross - container_cross_prefix=sparc64-linux-gnu- - ;; tricore) container_image=debian-tricore-cross container_cross_prefix=tricore- ;; x86_64) container_image=debian-amd64-cross - container_cross_prefix=x86_64-linux-gnu- ;; xtensa*) container_image=debian-xtensa-cross @@ -1370,12 +1341,10 @@ probe_target_compiler() { # default to the dc232b cpu container_cross_prefix=/opt/2020.07/xtensa-dc232b-elf/bin/xtensa-dc232b-elf- ;; - *) - # Debian and GNU architecture names usually match - container_image=debian-$target_arch-cross - container_cross_prefix=$target_arch-linux-gnu- - ;; esac + # Debian and GNU architecture names usually match + : ${container_image:=debian-$target_arch-cross} + : ${container_cross_prefix:=$target_arch-linux-gnu-} : ${container_cross_cc:=${container_cross_prefix}gcc} : ${container_cross_ar:=${container_cross_prefix}ar} : ${container_cross_as:=${container_cross_prefix}as} @@ -1387,8 +1356,8 @@ probe_target_compiler() { done try=cross - # For softmmu/roms we might be able to use the host compiler - if [ "${1%softmmu}" != "$1" ]; then + # For softmmu/roms also look for a bi-endian or multilib-enabled host compiler + if [ "${1%softmmu}" != "$1" ] || test "$target_arch" = "$cpu"; then case "$target_arch:$cpu" in aarch64_be:aarch64 | \ armeb:arm | \ @@ -1559,7 +1528,7 @@ LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS pc-bios/vof/Makefile" LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit LINKS="$LINKS tests/avocado tests/data" -LINKS="$LINKS tests/qemu-iotests/check" +LINKS="$LINKS tests/qemu-iotests/check tests/qemu-iotests/Makefile" LINKS="$LINKS python" LINKS="$LINKS contrib/plugins/Makefile " for f in $LINKS ; do @@ -1572,8 +1541,8 @@ echo "# Automatically generated by configure - do not modify" > Makefile.prereqs # Mac OS X ships with a broken assembler if have_target i386-softmmu x86_64-softmmu && \ - test "$targetos" != "darwin" && test "$targetos" != "sunos" && \ - test "$targetos" != "haiku" && \ + test "$host_os" != "darwin" && test "$host_os" != "sunos" && \ + test "$host_os" != "haiku" && \ probe_target_compiler i386-softmmu; then subdirs="$subdirs pc-bios/optionrom" config_mak=pc-bios/optionrom/config.mak @@ -1637,7 +1606,7 @@ echo "NINJA=$ninja" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak # use included Linux headers for KVM architectures -if test "$targetos" = "linux" && test -n "$linux_arch"; then +if test "$host_os" = "linux" && test -n "$linux_arch"; then symlink "$source_path/linux-headers/asm-$linux_arch" linux-headers/asm fi @@ -1660,21 +1629,20 @@ echo "SRC_PATH=$source_path/contrib/plugins" >> contrib/plugins/$config_host_mak echo "PKG_CONFIG=${pkg_config}" >> contrib/plugins/$config_host_mak echo "CC=$cc $CPU_CFLAGS" >> contrib/plugins/$config_host_mak echo "CFLAGS=${CFLAGS-$default_cflags} $EXTRA_CFLAGS" >> contrib/plugins/$config_host_mak -if test "$targetos" = windows; then +if test "$host_os" = windows; then echo "DLLTOOL=$dlltool" >> contrib/plugins/$config_host_mak fi -if test "$targetos" = darwin; then +if test "$host_os" = darwin; then echo "CONFIG_DARWIN=y" >> contrib/plugins/$config_host_mak fi -if test "$targetos" = windows; then +if test "$host_os" = windows; then echo "CONFIG_WIN32=y" >> contrib/plugins/$config_host_mak fi # tests/tcg configuration -(config_host_mak=tests/tcg/config-host.mak mkdir -p tests/tcg -echo "# Automatically generated by configure - 
do not modify" > $config_host_mak -echo "SRC_PATH=$source_path" >> $config_host_mak +echo "# Automatically generated by configure - do not modify" > tests/tcg/$config_host_mak +echo "SRC_PATH=$source_path" >> tests/tcg/$config_host_mak tcg_tests_targets= for target in $target_list; do @@ -1717,9 +1685,8 @@ for target in $target_list; do done if test "$tcg" = "enabled"; then - echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> config-host.mak + echo "TCG_TESTS_TARGETS=$tcg_tests_targets" >> $config_host_mak fi -) if test "$skip_meson" = no; then cross="config-meson.cross.new" @@ -1748,7 +1715,7 @@ if test "$skip_meson" = no; then echo "# environment defaults, can still be overridden on " >> $cross echo "# the command line" >> $cross if test -e "$source_path/.git" && \ - { test "$targetos" = linux || test "$targetos" = "windows"; }; then + { test "$host_os" = linux || test "$host_os" = "windows"; }; then echo 'werror = true' >> $cross fi echo "[project options]" >> $cross @@ -1785,7 +1752,7 @@ if test "$skip_meson" = no; then echo "windmc = [$(meson_quote $windmc)]" >> $cross if test "$cross_compile" = "yes"; then echo "[host_machine]" >> $cross - echo "system = '$targetos'" >> $cross + echo "system = '$host_os'" >> $cross case "$cpu" in i386) echo "cpu_family = 'x86'" >> $cross @@ -1811,8 +1778,8 @@ if test "$skip_meson" = no; then fi mv $cross config-meson.cross meson_add_machine_file config-meson.cross - if test -f "$source_path/configs/meson/$targetos.txt"; then - meson_add_machine_file $source_path/configs/meson/$targetos.txt + if test -f "$source_path/configs/meson/$host_os.txt"; then + meson_add_machine_file $source_path/configs/meson/$host_os.txt fi rm -rf meson-private meson-info meson-logs diff --git a/contrib/ivshmem-client/meson.build b/contrib/ivshmem-client/meson.build index ce8dcca84ddb23de1d8eaa813f5a2faf6f82df38..3c8b09af4bf05a32a740c763079b9a1c065b75ab 100644 --- a/contrib/ivshmem-client/meson.build +++ b/contrib/ivshmem-client/meson.build @@ -1,4 +1,4 @@ executable('ivshmem-client', files('ivshmem-client.c', 'main.c'), genh, dependencies: glib, - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/ivshmem-server/meson.build b/contrib/ivshmem-server/meson.build index c6c3c82e89f363e1929bfa08e318f5b22906ef0a..1c8fea6594d6364c44ab039d224c578a3f9a11ba 100644 --- a/contrib/ivshmem-server/meson.build +++ b/contrib/ivshmem-server/meson.build @@ -1,4 +1,4 @@ executable('ivshmem-server', files('ivshmem-server.c', 'main.c'), genh, dependencies: [qemuutil, rt], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/vhost-user-blk/meson.build b/contrib/vhost-user-blk/meson.build index dcb9e2ffcd08420130efb73633383afb388ad706..ac1eece37a450eef7906a4c64076d6c438cfcd94 100644 --- a/contrib/vhost-user-blk/meson.build +++ b/contrib/vhost-user-blk/meson.build @@ -1,4 +1,4 @@ executable('vhost-user-blk', files('vhost-user-blk.c'), dependencies: [qemuutil, vhost_user], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) diff --git a/contrib/vhost-user-input/meson.build b/contrib/vhost-user-input/meson.build index 21a9ed4f15ec239b29118bc28e6ef130ec1b3995..840d866594bf754b94e167f651c686008a4f2640 100644 --- a/contrib/vhost-user-input/meson.build +++ b/contrib/vhost-user-input/meson.build @@ -1,4 +1,4 @@ executable('vhost-user-input', files('main.c'), dependencies: [qemuutil, vhost_user], - build_by_default: targetos == 'linux', + 
build_by_default: host_os == 'linux', install: false) diff --git a/contrib/vhost-user-scsi/meson.build b/contrib/vhost-user-scsi/meson.build index cc893f6f203297f31fe6954f9588f1c79c6d0123..44be04853e4da1b34a3a49aa5f3fa383e919a890 100644 --- a/contrib/vhost-user-scsi/meson.build +++ b/contrib/vhost-user-scsi/meson.build @@ -1,6 +1,6 @@ if libiscsi.found() executable('vhost-user-scsi', files('vhost-user-scsi.c'), dependencies: [qemuutil, libiscsi, vhost_user], - build_by_default: targetos == 'linux', + build_by_default: host_os == 'linux', install: false) endif diff --git a/cpu-common.c b/cpu-common.c index c81fd72d16d5c9e5f6f8af2d10b6f8f20e9cd3a6..ce78273af5971dda7ce4e3b1bcb68b3e41e66f91 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -351,11 +351,11 @@ void process_queued_cpu_work(CPUState *cpu) * BQL, so it goes to sleep; start_exclusive() is sleeping too, so * neither CPU can proceed. */ - qemu_mutex_unlock_iothread(); + bql_unlock(); start_exclusive(); wi->func(cpu, wi->data); end_exclusive(); - qemu_mutex_lock_iothread(); + bql_lock(); } else { wi->func(cpu, wi->data); } diff --git a/cpu-target.c b/cpu-target.c index 508013e23d2603f4e6f5275b5b657b7d19c114fa..5eecd7ea2d75d1e5edc7d2770db844b6ad596764 100644 --- a/cpu-target.c +++ b/cpu-target.c @@ -24,6 +24,7 @@ #include "hw/qdev-core.h" #include "hw/qdev-properties.h" #include "qemu/error-report.h" +#include "qemu/qemu-print.h" #include "migration/vmstate.h" #ifdef CONFIG_USER_ONLY #include "qemu.h" @@ -87,7 +88,7 @@ static const VMStateDescription vmstate_cpu_common_exception_index = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_common_exception_index_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(exception_index, CPUState), VMSTATE_END_OF_LIST() } @@ -105,7 +106,7 @@ static const VMStateDescription vmstate_cpu_common_crash_occurred = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_common_crash_occurred_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(crash_occurred, CPUState), VMSTATE_END_OF_LIST() } @@ -117,12 +118,12 @@ const VMStateDescription vmstate_cpu_common = { .minimum_version_id = 1, .pre_load = cpu_common_pre_load, .post_load = cpu_common_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(halted, CPUState), VMSTATE_UINT32(interrupt_request, CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_cpu_common_exception_index, &vmstate_cpu_common_crash_occurred, NULL @@ -241,6 +242,21 @@ void cpu_exec_initfn(CPUState *cpu) #endif } +char *cpu_model_from_type(const char *typename) +{ + const char *suffix = "-" CPU_RESOLVING_TYPE; + + if (!object_class_by_name(typename)) { + return NULL; + } + + if (g_str_has_suffix(typename, suffix)) { + return g_strndup(typename, strlen(typename) - strlen(suffix)); + } + + return g_strdup(typename); +} + const char *parse_cpu_option(const char *cpu_option) { ObjectClass *oc; @@ -268,12 +284,34 @@ const char *parse_cpu_option(const char *cpu_option) return cpu_type; } +#ifndef cpu_list +static void cpu_list_entry(gpointer data, gpointer user_data) +{ + CPUClass *cc = CPU_CLASS(OBJECT_CLASS(data)); + const char *typename = object_class_get_name(OBJECT_CLASS(data)); + g_autofree char *model = cpu_model_from_type(typename); + + if (cc->deprecation_note) { + qemu_printf(" %s (deprecated)\n", model); + } else { + qemu_printf(" %s\n", model); + } +} + +static 
void cpu_list(void) +{ + GSList *list; + + list = object_class_get_list_sorted(TYPE_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, cpu_list_entry, NULL); + g_slist_free(list); +} +#endif + void list_cpus(void) { - /* XXX: implement xxx_cpu_list for targets that still miss it */ -#if defined(cpu_list) cpu_list(); -#endif } #if defined(CONFIG_USER_ONLY) diff --git a/disas/riscv.c b/disas/riscv.c index e9458e574b9b7f07d865d3c8402cbb05269a9cc8..8a546d5ea53167d8239ac16a185e9fd0705a90d9 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -903,6 +903,9 @@ typedef enum { rv_op_vwsll_vv = 872, rv_op_vwsll_vx = 873, rv_op_vwsll_vi = 874, + rv_op_amocas_w = 875, + rv_op_amocas_d = 876, + rv_op_amocas_q = 877, } rv_op; /* register names */ @@ -2090,6 +2093,9 @@ const rv_opcode_data rvi_opcode_data[] = { { "vwsll.vv", rv_codec_v_r, rv_fmt_vd_vs2_vs1_vm, NULL, 0, 0, 0 }, { "vwsll.vx", rv_codec_v_r, rv_fmt_vd_vs2_rs1_vm, NULL, 0, 0, 0 }, { "vwsll.vi", rv_codec_v_i, rv_fmt_vd_vs2_uimm_vm, NULL, 0, 0, 0 }, + { "amocas.w", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amocas.d", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, + { "amocas.q", rv_codec_r_a, rv_fmt_aqrl_rd_rs2_rs1, NULL, 0, 0, 0 }, }; /* CSR names */ @@ -2841,6 +2847,9 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 34: op = rv_op_amoxor_w; break; case 35: op = rv_op_amoxor_d; break; case 36: op = rv_op_amoxor_q; break; + case 42: op = rv_op_amocas_w; break; + case 43: op = rv_op_amocas_d; break; + case 44: op = rv_op_amocas_q; break; case 66: op = rv_op_amoor_w; break; case 67: op = rv_op_amoor_d; break; case 68: op = rv_op_amoor_q; break; diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 43d6005881e0f4f44c50bf766ab442ba3d12297f..09caf2f8e199d67884f408a369146ae5e42b7360 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -256,21 +256,6 @@ Target-independent emulator sourcesets: ``system_ss`` only in system emulators, ``user_ss`` only in user-mode emulators. - Target-independent sourcesets must exercise particular care when using - ``if_false`` rules. The ``if_false`` rule will be used correctly when linking - emulator binaries; however, when *compiling* target-independent files - into .o files, Meson may need to pick *both* the ``if_true`` and - ``if_false`` sides to cater for targets that want either side. To - achieve that, you can add a special rule using the ``CONFIG_ALL`` - symbol:: - - # Some targets have CONFIG_ACPI, some don't, so this is not enough - system_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi.c'), - if_false: files('acpi-stub.c')) - - # This is required as well: - system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c')) - Target-dependent emulator sourcesets: In the target-dependent set lives CPU emulation, some device emulation and much glue code. This sometimes also has to be compiled multiple times, diff --git a/docs/devel/clocks.rst b/docs/devel/clocks.rst index 675fbeb6abea15a01cdefe3065bd7bac9b5e00b5..c4d14bde048b6479f68d8ddd37280aa19777d2bc 100644 --- a/docs/devel/clocks.rst +++ b/docs/devel/clocks.rst @@ -502,7 +502,7 @@ This is typically used to migrate an input clock state. 
For example: VMStateDescription my_device_vmstate = { .name = "my_device", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { [...], /* other migrated fields */ VMSTATE_CLOCK(clk, MyDeviceState), VMSTATE_END_OF_LIST() diff --git a/docs/devel/index-internals.rst b/docs/devel/index-internals.rst index 6f81df92bcaba790477aff1ccb51048409331950..5636e9cf1d78bfb859cd8282778e777ac6fe18ea 100644 --- a/docs/devel/index-internals.rst +++ b/docs/devel/index-internals.rst @@ -11,12 +11,12 @@ Details about QEMU's various subsystems including how to add features to them. block-coroutine-wrapper clocks ebpf_rss - migration + migration/index multi-process reset s390-cpu-topology s390-dasd-ipl tracing - vfio-migration + vfio-iommufd writing-monitor-commands virtio-backends diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index 73f52de10677c0a3eaa8720ccf4763f74dfe5834..ccb9a46bd7724e937221acc0086009c2a7b1f3ca 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -316,6 +316,6 @@ variable:: host_kconfig = \ (have_tpm ? ['CONFIG_TPM=y'] : []) + \ - (targetos == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ + (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ ... diff --git a/docs/devel/migration/best-practices.rst b/docs/devel/migration/best-practices.rst new file mode 100644 index 0000000000000000000000000000000000000000..d7c34a30149d68065cb547637e878777863586dd --- /dev/null +++ b/docs/devel/migration/best-practices.rst @@ -0,0 +1,48 @@ +============== +Best practices +============== + +Debugging +========= + +The migration stream can be analyzed thanks to ``scripts/analyze-migration.py``. + +Example usage: + +.. code-block:: shell + + $ qemu-system-x86_64 -display none -monitor stdio + (qemu) migrate "exec:cat > mig" + (qemu) q + $ ./scripts/analyze-migration.py -f mig + { + "ram (3)": { + "section sizes": { + "pc.ram": "0x0000000008000000", + ... + +See also ``analyze-migration.py -h`` help for more options. + +Firmware +======== + +Migration migrates the copies of RAM and ROM, and thus when running +on the destination it includes the firmware from the source. Even after +resetting a VM, the old firmware is used. Only once QEMU has been restarted +is the new firmware in use. + +- Changes in firmware size can cause changes in the required RAMBlock size + to hold the firmware and thus migration can fail. In practice it's best + to pad firmware images to convenient powers of 2 with plenty of space + for growth. + +- Care should be taken with device emulation code so that newer + emulation code can work with older firmware to allow forward migration. + +- Care should be taken with newer firmware so that backward migration + to older systems with older device emulation code will work. + +In some cases it may be best to tie specific firmware versions to specific +versioned machine types to cut down on the combinations that will need +support. This is also useful when newer versions of firmware outgrow +the padding. diff --git a/docs/devel/migration/compatibility.rst b/docs/devel/migration/compatibility.rst new file mode 100644 index 0000000000000000000000000000000000000000..5a5417ef069ef38f2039eb94dc5fcc6370e69af3 --- /dev/null +++ b/docs/devel/migration/compatibility.rst @@ -0,0 +1,517 @@ +Backwards compatibility +======================= + +How backwards compatibility works +--------------------------------- + +When we do migration, we have two QEMU processes: the source and the +target. 
There are two cases: either they are the same version or they are
+different versions.  The easy case is when they are the same version.
+The difficult one is when they are different versions.
+
+There are two things that are different, but they have very similar
+names and sometimes get confused:
+
+- QEMU version
+- machine type version
+
+Let's start with a practical example; we start with:
+
+- qemu-system-x86_64 (v5.2), from now on qemu-5.2.
+- qemu-system-x86_64 (v5.1), from now on qemu-5.1.
+
+Related to this are the "latest" machine types defined on each of
+them:
+
+- pc-q35-5.2 (the newer one in qemu-5.2), from now on pc-5.2
+- pc-q35-5.1 (the newer one in qemu-5.1), from now on pc-5.1
+
+First of all, migration is only supposed to work if you use the same
+machine type in both source and destination.  The QEMU hardware
+configuration also needs to be the same on source and destination.
+Most aspects of the backend configuration can be changed at will,
+except for a few cases where the backend features influence frontend
+device feature exposure.  But that is not relevant for this section.
+
+I am going to list the combinations that we can have.  Let's start
+with the trivial ones, where QEMU is the same on source and
+destination:
+
+1 - qemu-5.2 -M pc-5.2 -> migrates to -> qemu-5.2 -M pc-5.2
+
+    This is the latest QEMU with the latest machine type.
+    This has to work, and if it doesn't work it is a bug.
+
+2 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1
+
+    Exactly the same case as the previous one, but for 5.1.
+    Nothing to see here either.
+
+These are the easiest ones; we will not talk more about them in this
+section.
+
+Now we start with the more interesting cases.  Consider the case where
+we have the same QEMU version on both sides (qemu-5.2), but instead of
+the latest machine type for that version (pc-5.2) we are using one of
+an older QEMU version, in this case pc-5.1.
+
+3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1
+
+    It needs to use the definition of pc-5.1 and the devices as they
+    were configured on 5.1, but this should be easy in the sense that
+    both sides are the same QEMU and both sides have exactly the same
+    idea of what the pc-5.1 machine is.
+
+4 - qemu-5.1 -M pc-5.2 -> migrates to -> qemu-5.1 -M pc-5.2
+
+    This combination is not possible, as qemu-5.1 doesn't understand
+    the pc-5.2 machine type.  So there is nothing to worry about here.
+
+Now come the interesting ones, when the two QEMU processes are
+different.  Notice also that the machine type needs to be pc-5.1,
+because we have the limitation that qemu-5.1 doesn't know pc-5.2.  So
+the possible cases are:
+
+5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1
+
+    This migration is known as newer to older.  We need to make sure
+    that while we are developing 5.2 we take care not to break
+    migration to qemu-5.1.  Notice that we can't make updates to
+    qemu-5.1 to understand whatever qemu-5.2 decides to change, so it
+    is up to the qemu-5.2 side to make the relevant changes.
+
+6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1
+
+    This migration is known as older to newer.  We need to make sure
+    that we are able to receive migrations from qemu-5.1.  The problem
+    is similar to the previous one.
+
+If qemu-5.1 and qemu-5.2 were the same, there would not be any
+compatibility problems.  But the reason that we create qemu-5.2 is to
+get new features, devices, defaults, etc.
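+
+For illustration only (the device, its state structure and the
+property below are invented; ``DEFINE_PROP_BOOL()`` itself is one of
+the real property macros discussed later in this section), a "new
+feature" is often nothing more than a device property whose default is
+enabled in the newer release::
+
+    /* hypothetical qemu-5.2 device: a new guest-visible feature,
+     * enabled by default for newly defined machine types */
+    DEFINE_PROP_BOOL("fast-doorbell", MyDeviceState, fast_doorbell, true),
+
+A qemu-5.1 binary knows nothing about such a property, so a migration
+stream produced with it enabled may not be loadable there.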
+
+If we get a device that has a new feature, or change a default value,
+we have a problem when we try to migrate between different QEMU
+versions.
+
+So we need a way to tell qemu-5.2 that when we are using machine type
+pc-5.1, it needs to **not** use the feature, to be able to migrate to
+real qemu-5.1.
+
+And the equivalent is needed when migrating from qemu-5.1 to qemu-5.2:
+qemu-5.2 has to expect that it is not going to get data for the new
+feature, because qemu-5.1 doesn't know about it.
+
+How do we tell QEMU about these device feature changes?  In the
+hw/core/machine.c:hw_compat_X_Y arrays.
+
+If we change a default value, we need to put back the old value in
+that array.  And the device, during initialization, needs to look at
+that array to see what value it needs to use for that feature.  What
+we put in that array is the value of a property.
+
+To create a property for a device, we need to use one of the
+DEFINE_PROP_*() macros.  See include/hw/qdev-properties.h to find the
+macros that exist.  With it, we set the default value for that
+property, and that is what it is going to get in the latest released
+version.  But if we want a different value for a previous version, we
+can change that in the hw_compat_X_Y arrays.
+
+hw_compat_X_Y is an array of records that have the format:
+
+- name_device
+- name_property
+- value
+
+Let's see a practical example.
+
+In qemu-5.2 virtio-blk-device got multi queue support.  This is a
+change that is not backward compatible.  In qemu-5.1 it has one
+queue.  In qemu-5.2 it has the same number of queues as the number of
+cpus in the system.
+
+When we are doing migration, if we migrate from a device that has 4
+queues to a device that has only one queue, we don't know where to
+put the extra information for the other 3 queues, and migration
+fails.
+
+There is a similar problem when we migrate from qemu-5.1, which has
+only one queue, to qemu-5.2: we only sent information for one queue,
+but the destination has 4, so 3 queues are not properly initialized
+and anything can happen.
+
+So, how can we address this problem?  Easy, just convince qemu-5.2
+that when it is running pc-5.1, it needs to set the number of queues
+for virtio-blk devices to 1.
+
+That way we fix cases 5 and 6.
+
+5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1
+
+    qemu-5.2 -M pc-5.1 sets the number of queues to be 1.
+    qemu-5.1 -M pc-5.1 expects the number of queues to be 1.
+
+    Correct.  Migration works.
+
+6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1
+
+    qemu-5.1 -M pc-5.1 sets the number of queues to be 1.
+    qemu-5.2 -M pc-5.1 expects the number of queues to be 1.
+
+    Correct.  Migration works.
+
+And now the other interesting case, case 3.  In this case we have:
+
+3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1
+
+    Here we have the same QEMU on both sides.  So it doesn't matter a
+    lot if we have set the number of queues to 1 or not, because
+    they are the same.
+
+    WRONG!
+
+    Think about what happens if we do one of these double migrations:
+
+    A -> migrates -> B -> migrates -> C
+
+    where:
+
+    A: qemu-5.1 -M pc-5.1
+    B: qemu-5.2 -M pc-5.1
+    C: qemu-5.2 -M pc-5.1
+
+    migration A -> B is case 6, so the number of queues needs to be 1.
+
+    migration B -> C is case 3, so we don't care.  But actually we
+    care because we haven't started the guest in qemu-5.2, it came
+    migrated from qemu-5.1.  So to be on the safe side, we need to
+    always use a number of queues of 1 when we are using pc-5.1.
+
+Now, how was this done in reality? 
The following commit shows how it was done::
+
+    commit 9445e1e15e66c19e42bea942ba810db28052cd05
+    Author: Stefan Hajnoczi
+    Date:   Tue Aug 18 15:33:47 2020 +0100
+
+    virtio-blk-pci: default num_queues to -smp N
+
+The relevant parts for migration are::
+
+    @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = {
+     #endif
+         DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
+                         true),
+    -    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
+    +    DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues,
+    +                       VIRTIO_BLK_AUTO_NUM_QUEUES),
+         DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
+
+It changes the default value of num_queues.  But it fixes it up for
+old machine types to have the right value::
+
+    @@ -31,6 +31,7 @@
+     GlobalProperty hw_compat_5_1[] = {
+         ...
+    +    { "virtio-blk-device", "num-queues", "1"},
+         ...
+     };
+
+A device with different features on both sides
+----------------------------------------------
+
+Let's assume that we are using the same QEMU binary on both sides,
+just to make things easier.  But we have a device that has different
+features on each side of the migration.  That can be because the
+devices are different, or because the kernel drivers of the devices
+have different features, or whatever else.
+
+How can we get this to work with migration?  The way to do that is
+"theoretically" easy.  You take the features that the device has on
+the source of the migration and the features that it has on the
+target of the migration, you compute the intersection of both sets of
+features, and that is the way that you should launch QEMU.
+
+Notice that this is not completely related to QEMU.  The most
+important thing here is that this should be handled by the managing
+application that launches QEMU.  If QEMU is configured correctly, the
+migration will succeed.
+
+That said, actually doing it is complicated.  Almost all devices are
+bad at being launched with only some features enabled.  With one big
+exception: cpus.
+
+You can read the documentation for QEMU x86 cpu models here:
+
+https://qemu-project.gitlab.io/qemu/system/qemu-cpu-models.html
+
+When they talk about migration, they recommend that one chooses the
+newest cpu model that is supported by all the hosts involved.
+
+Let's say that we have:
+
+Host A:
+
+Device X has the feature Y
+
+Host B:
+
+Device X does not have the feature Y
+
+If we try to migrate without any care from host A to host B, it will
+fail because when migration tries to load the feature Y on the
+destination, it will find that the hardware is not there.
+
+Doing this would be the equivalent of doing the following with cpus:
+
+Host A:
+
+$ qemu-system-x86_64 -cpu host
+
+Host B:
+
+$ qemu-system-x86_64 -cpu host
+
+When both hosts have different cpu features this is guaranteed to
+fail.  Especially if Host B has fewer features than host A.  If host A
+has fewer features than host B, sometimes it works.  The important
+word in the last sentence is "sometimes".
+
+So, forgetting about cpu models and continuing with the -cpu host
+example, let's say that the difference between the cpus is that Host A
+and Host B have the following features:
+
+Features:   'pcid'  'stibp' 'taa-no'
+Host A:        X       X
+Host B:                        X
+
+If we want to migrate between them, the way to configure both QEMU
+cpus will be:
+
+Host A:
+
+$ qemu-system-x86_64 -cpu host,pcid=off,stibp=off
+
+Host B:
+
+$ qemu-system-x86_64 -cpu host,taa-no=off
+
+And you would be able to migrate between them. 
It is the responsibility of the management application or of the user
+to make sure that the configuration is correct.  QEMU doesn't know how
+to check this kind of feature in general.
+
+Notice that we don't recommend using -cpu host for migration.  It is
+used in this example because it makes the example simpler.
+
+Other devices have worse control over individual features.  If they
+want to be able to migrate between hosts that show different features,
+the device needs a way to configure which ones it is going to use.
+
+In this section we have considered that we are using the same QEMU
+binary on both sides of the migration.  If we use different QEMU
+versions, then we also need to take into account all the other
+differences and the examples become even more complicated.
+
+How to mitigate when we have a backward compatibility error
+-----------------------------------------------------------
+
+We broke migration for old machine types continuously during
+development.  But as soon as we find that there is a problem, we fix
+it.  The problem is what happens when we only detect, after we have
+done a release, that something has gone wrong.
+
+Let's see how this worked with one example.
+
+After the release of qemu-8.0 we found a problem when doing migration
+of the machine type pc-7.2.
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2
+
+   This migration works
+
+- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2
+
+   This migration works
+
+- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2
+
+   This migration fails
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2
+
+   This migration fails
+
+So clearly something fails when migrating between qemu-7.2 and
+qemu-8.0 with machine type pc-7.2.  The error messages and a git
+bisect pointed to this commit.
+
+In qemu-8.0 we got this commit::
+
+    commit 010746ae1db7f52700cb2e2c46eb94f299cfa0d2
+    Author: Jonathan Cameron
+    Date:   Thu Mar 2 13:37:02 2023 +0000
+
+    hw/pci/aer: Implement PCI_ERR_UNCOR_MASK register
+
+The relevant bits of the commit for our example are these ones::
+
+    --- a/hw/pci/pcie_aer.c
+    +++ b/hw/pci/pcie_aer.c
+    @@ -112,6 +112,10 @@ int pcie_aer_init(PCIDevice *dev,
+
+         pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
+                      PCI_ERR_UNC_SUPPORTED);
+    +    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
+    +                 PCI_ERR_UNC_MASK_DEFAULT);
+    +    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
+    +                 PCI_ERR_UNC_SUPPORTED);
+
+         pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
+                      PCI_ERR_UNC_SEVERITY_DEFAULT);
+
+The patch changes how we configure the PCI space for AER.  But QEMU
+fails when the PCI space configuration is different between source and
+destination.
+
+The following commit shows how this got fixed::
+
+    commit 5ed3dabe57dd9f4c007404345e5f5bf0e347317f
+    Author: Leonardo Bras
+    Date:   Tue May 2 21:27:02 2023 -0300
+
+    hw/pci: Disable PCI_ERR_UNCOR_MASK register for machine type < 8.0
+
+    [...] 
+
+The relevant parts of the fix in QEMU are as follows:
+
+First, we create a new property for the device to be able to configure
+the old behaviour or the new behaviour::
+
+    diff --git a/hw/pci/pci.c b/hw/pci/pci.c
+    index 8a87ccc8b0..5153ad63d6 100644
+    --- a/hw/pci/pci.c
+    +++ b/hw/pci/pci.c
+    @@ -79,6 +79,8 @@ static Property pci_props[] = {
+         DEFINE_PROP_STRING("failover_pair_id", PCIDevice,
+                            failover_pair_id),
+         DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0),
+    +    DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present,
+    +                    QEMU_PCIE_ERR_UNC_MASK_BITNR, true),
+         DEFINE_PROP_END_OF_LIST()
+     };
+
+Notice that we enable the feature for new machine types.
+
+Now we see how the fix is done.  This is going to depend on what kind
+of breakage happens, but in this case it is quite simple::
+
+    diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c
+    index 103667c368..374d593ead 100644
+    --- a/hw/pci/pcie_aer.c
+    +++ b/hw/pci/pcie_aer.c
+    @@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver,
+    uint16_t offset,
+
+         pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
+                      PCI_ERR_UNC_SUPPORTED);
+    -    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
+    -                 PCI_ERR_UNC_MASK_DEFAULT);
+    -    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
+    -                 PCI_ERR_UNC_SUPPORTED);
+    +
+    +    if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) {
+    +        pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK,
+    +                     PCI_ERR_UNC_MASK_DEFAULT);
+    +        pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK,
+    +                     PCI_ERR_UNC_SUPPORTED);
+    +    }
+
+         pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
+                      PCI_ERR_UNC_SEVERITY_DEFAULT);
+
+I.e. if the property bit is enabled, we configure it as we did for
+qemu-8.0.  If the property bit is not set, we configure it as it was in 7.2.
+
+And now, all that is missing is to disable the feature for old
+machine types::
+
+    diff --git a/hw/core/machine.c b/hw/core/machine.c
+    index 47a34841a5..07f763eb2e 100644
+    --- a/hw/core/machine.c
+    +++ b/hw/core/machine.c
+    @@ -48,6 +48,7 @@ GlobalProperty hw_compat_7_2[] = {
+         { "e1000e", "migrate-timadj", "off" },
+         { "virtio-mem", "x-early-migration", "false" },
+         { "migration", "x-preempt-pre-7-2", "true" },
+    +    { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" },
+     };
+     const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2);
+
+And now, when qemu-8.0.1 is released with this fix, all combinations
+are going to work as expected.
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works)
+- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works)
+- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works)
+- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works)
+
+So normality has been restored and everything is OK, right?
+
+Not really: now our matrix is much bigger.  We started with the easy
+cases, migration from the same version to the same version always
+works:
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2
+- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2
+- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2
+
+Now the interesting ones, when the QEMU process versions are
+different.  The first pair fail and there is nothing we can do about
+it: both versions are already released and we can't change anything.
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2
+- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2
+
+These two are the ones that work. 
The whole point of making the change in the qemu-8.0.1 release was to
+fix this issue:
+
+- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2
+- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2
+
+But now we have found that qemu-8.0 can migrate neither to qemu-7.2
+nor to qemu-8.0.1.
+
+- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2
+- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0 -M pc-7.2
+
+So, if we start a pc-7.2 machine in qemu-8.0 we can't migrate it to
+anything except qemu-8.0.
+
+Can we do better?
+
+Yes.  If we know that we are going to do this migration:
+
+- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2
+
+we can launch the appropriate devices with::
+
+    --device...,x-pcie-err-unc-mask=on
+
+And now we can receive a migration from 8.0.  And from now on, we can
+keep doing that migration to newer QEMU versions if we remember to
+enable that property for pc-7.2.  Notice that we really need to
+remember: it is not enough to know that the source of the migration is
+qemu-8.0.  Think of this example:
+
+$ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -> qemu-8.2 -M pc-7.2
+
+In the second migration, the source is not qemu-8.0, but we still have
+that "problem" and have that property enabled.  Notice that we need to
+keep this mark/property until the machine is rebooted.  But it is not
+a normal reboot (which doesn't reload QEMU) that we need: the machine
+has to be powered off and on again on a fixed QEMU.  From then on we
+can use the proper real machine.
diff --git a/docs/devel/migration/dirty-limit.rst b/docs/devel/migration/dirty-limit.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8f32329d5fda2e239d378c0bc4662fd8cf6d7d23
--- /dev/null
+++ b/docs/devel/migration/dirty-limit.rst
@@ -0,0 +1,71 @@
+Dirty limit
+===========
+
+The dirty limit, short for dirty page rate upper limit, is a new capability
+introduced in the 8.1 QEMU release that uses a new algorithm based on the KVM
+dirty ring to throttle down the guest during live migration.
+
+The algorithm framework is as follows:
+
+::
+
+    ------------------------------------------------------------------------------
+    main   --------------> throttle thread ------------> PREPARE(1) <--------
+    thread  \                                                |              |
+             \                                               |              |
+              \                                              V              |
+               -\                                        CALCULATE(2)       |
+                 \                                           |              |
+                  \                                          |              |
+                   \                                         V              |
+                    \                                    SET PENALTY(3) -----
+                     -\                                      |
+                       \                                     |
+                        \                                    V
+                         -> virtual CPU thread -------> ACCEPT PENALTY(4)
+    ------------------------------------------------------------------------------
+
+When the qmp command qmp_set_vcpu_dirty_limit is called for the first time,
+the QEMU main thread starts the throttle thread. The throttle thread, once
+launched, executes the loop, which consists of three steps:
+
+  - PREPARE (1)
+
+    The entire work of PREPARE (1) is preparation for the second stage,
+    CALCULATE(2), as the name implies. It involves preparing the dirty
+    page rate value and the corresponding upper limit of the VM:
+    the dirty page rate is calculated via the KVM dirty ring mechanism,
+    which tells QEMU how many dirty pages a virtual CPU has had since the
+    last KVM_EXIT_DIRTY_RING_FULL exception; the dirty page rate upper
+    limit is specified by the caller, so it is fetched directly.
+
+  - CALCULATE (2)
+
+    Calculate a suitable sleep period for each virtual CPU, which will be
+    used to determine the penalty for the target virtual CPU. The
+    computation must be done carefully in order to reduce the dirty page
+    rate progressively down to the upper limit without oscillation. 
To + achieve this, two strategies are provided: the first is to add or + subtract sleep time based on the ratio of the current dirty page rate + to the limit, which is used when the current dirty page rate is far + from the limit; the second is to add or subtract a fixed time when + the current dirty page rate is close to the limit. + + - SET PENALTY (3) + + Set the sleep time for each virtual CPU that should be penalized based + on the results of the calculation supplied by step CALCULATE (2). + +After completing the three above stages, the throttle thread loops back +to step PREPARE (1) until the dirty limit is reached. + +On the other hand, each virtual CPU thread reads the sleep duration and +sleeps in the path of the KVM_EXIT_DIRTY_RING_FULL exception handler, that +is ACCEPT PENALTY (4). Virtual CPUs tied with writing processes will +obviously exit to the path and get penalized, whereas virtual CPUs involved +with read processes will not. + +In summary, thanks to the KVM dirty ring technology, the dirty limit +algorithm will restrict virtual CPUs as needed to keep their dirty page +rate inside the limit. This leads to more steady reading performance during +live migration and can aid in improving large guest responsiveness. diff --git a/docs/devel/migration/features.rst b/docs/devel/migration/features.rst new file mode 100644 index 0000000000000000000000000000000000000000..a9acaf618eec0c6efbc76549a3488842994a3fef --- /dev/null +++ b/docs/devel/migration/features.rst @@ -0,0 +1,12 @@ +Migration features +================== + +Migration has plenty of features to support different use cases. + +.. toctree:: + :maxdepth: 2 + + postcopy + dirty-limit + vfio + virtio diff --git a/docs/devel/migration/index.rst b/docs/devel/migration/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..2aa294d6314bd4cb18623e0caf9adc6d93fd030e --- /dev/null +++ b/docs/devel/migration/index.rst @@ -0,0 +1,13 @@ +Migration +========= + +This is the main entry for QEMU migration documentations. It explains how +QEMU live migration works. + +.. toctree:: + :maxdepth: 2 + + main + features + compatibility + best-practices diff --git a/docs/devel/migration.rst b/docs/devel/migration/main.rst similarity index 37% rename from docs/devel/migration.rst rename to docs/devel/migration/main.rst index ec55089b2530a0296f858b1f078b557d12e154b7..00b9c3d32f549527ab25872ad39a79514bd7cecd 100644 --- a/docs/devel/migration.rst +++ b/docs/devel/migration/main.rst @@ -1,6 +1,6 @@ -========= -Migration -========= +=================== +Migration framework +=================== QEMU has code to load/save the state of the guest that it is running. These are two complementary operations. Saving the state just does @@ -52,27 +52,6 @@ All these migration protocols use the same infrastructure to save/restore state devices. This infrastructure is shared with the savevm/loadvm functionality. -Debugging -========= - -The migration stream can be analyzed thanks to ``scripts/analyze-migration.py``. - -Example usage: - -.. code-block:: shell - - $ qemu-system-x86_64 -display none -monitor stdio - (qemu) migrate "exec:cat > mig" - (qemu) q - $ ./scripts/analyze-migration.py -f mig - { - "ram (3)": { - "section sizes": { - "pc.ram": "0x0000000008000000", - ... - -See also ``analyze-migration.py -h`` help for more options. 
- Common infrastructure ===================== @@ -158,7 +137,7 @@ An example (from hw/input/pckbd.c) .name = "pckbd", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(write_cmd, KBDState), VMSTATE_UINT8(status, KBDState), VMSTATE_UINT8(mode, KBDState), @@ -294,7 +273,7 @@ Example: .pre_save = ide_drive_pio_pre_save, .post_load = ide_drive_pio_post_load, .needed = ide_drive_pio_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(req_nb_sectors, IDEState), VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, vmstate_info_uint8, uint8_t), @@ -312,11 +291,11 @@ Example: .version_id = 3, .minimum_version_id = 0, .post_load = ide_drive_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { .... several fields .... VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ide_drive_pio_state, NULL } @@ -594,921 +573,3 @@ path. Return path - opened by main thread, written by main thread AND postcopy thread (protected by rp_mutex) -Dirty limit -===================== -The dirty limit, short for dirty page rate upper limit, is a new capability -introduced in the 8.1 QEMU release that uses a new algorithm based on the KVM -dirty ring to throttle down the guest during live migration. - -The algorithm framework is as follows: - -:: - - ------------------------------------------------------------------------------ - main --------------> throttle thread ------------> PREPARE(1) <-------- - thread \ | | - \ | | - \ V | - -\ CALCULATE(2) | - \ | | - \ | | - \ V | - \ SET PENALTY(3) ----- - -\ | - \ | - \ V - -> virtual CPU thread -------> ACCEPT PENALTY(4) - ------------------------------------------------------------------------------ - -When the qmp command qmp_set_vcpu_dirty_limit is called for the first time, -the QEMU main thread starts the throttle thread. The throttle thread, once -launched, executes the loop, which consists of three steps: - - - PREPARE (1) - - The entire work of PREPARE (1) is preparation for the second stage, - CALCULATE(2), as the name implies. It involves preparing the dirty - page rate value and the corresponding upper limit of the VM: - The dirty page rate is calculated via the KVM dirty ring mechanism, - which tells QEMU how many dirty pages a virtual CPU has had since the - last KVM_EXIT_DIRTY_RING_FULL exception; The dirty page rate upper - limit is specified by caller, therefore fetch it directly. - - - CALCULATE (2) - - Calculate a suitable sleep period for each virtual CPU, which will be - used to determine the penalty for the target virtual CPU. The - computation must be done carefully in order to reduce the dirty page - rate progressively down to the upper limit without oscillation. To - achieve this, two strategies are provided: the first is to add or - subtract sleep time based on the ratio of the current dirty page rate - to the limit, which is used when the current dirty page rate is far - from the limit; the second is to add or subtract a fixed time when - the current dirty page rate is close to the limit. - - - SET PENALTY (3) - - Set the sleep time for each virtual CPU that should be penalized based - on the results of the calculation supplied by step CALCULATE (2). - -After completing the three above stages, the throttle thread loops back -to step PREPARE (1) until the dirty limit is reached. 
- -On the other hand, each virtual CPU thread reads the sleep duration and -sleeps in the path of the KVM_EXIT_DIRTY_RING_FULL exception handler, that -is ACCEPT PENALTY (4). Virtual CPUs tied with writing processes will -obviously exit to the path and get penalized, whereas virtual CPUs involved -with read processes will not. - -In summary, thanks to the KVM dirty ring technology, the dirty limit -algorithm will restrict virtual CPUs as needed to keep their dirty page -rate inside the limit. This leads to more steady reading performance during -live migration and can aid in improving large guest responsiveness. - -Postcopy -======== - -'Postcopy' migration is a way to deal with migrations that refuse to converge -(or take too long to converge) its plus side is that there is an upper bound on -the amount of migration traffic and time it takes, the down side is that during -the postcopy phase, a failure of *either* side causes the guest to be lost. - -In postcopy the destination CPUs are started before all the memory has been -transferred, and accesses to pages that are yet to be transferred cause -a fault that's translated by QEMU into a request to the source QEMU. - -Postcopy can be combined with precopy (i.e. normal migration) so that if precopy -doesn't finish in a given time the switch is made to postcopy. - -Enabling postcopy ------------------ - -To enable postcopy, issue this command on the monitor (both source and -destination) prior to the start of migration: - -``migrate_set_capability postcopy-ram on`` - -The normal commands are then used to start a migration, which is still -started in precopy mode. Issuing: - -``migrate_start_postcopy`` - -will now cause the transition from precopy to postcopy. -It can be issued immediately after migration is started or any -time later on. Issuing it after the end of a migration is harmless. - -Blocktime is a postcopy live migration metric, intended to show how -long the vCPU was in state of interruptible sleep due to pagefault. -That metric is calculated both for all vCPUs as overlapped value, and -separately for each vCPU. These values are calculated on destination -side. To enable postcopy blocktime calculation, enter following -command on destination monitor: - -``migrate_set_capability postcopy-blocktime on`` - -Postcopy blocktime can be retrieved by query-migrate qmp command. -postcopy-blocktime value of qmp command will show overlapped blocking -time for all vCPU, postcopy-vcpu-blocktime will show list of blocking -time per vCPU. - -.. note:: - During the postcopy phase, the bandwidth limits set using - ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that - the destination is waiting for). - -Postcopy device transfer ------------------------- - -Loading of device data may cause the device emulation to access guest RAM -that may trigger faults that have to be resolved by the source, as such -the migration stream has to be able to respond with page data *during* the -device load, and hence the device data has to be read from the stream completely -before the device load begins to free the stream up. This is achieved by -'packaging' the device data into a blob that's read in one go. - -Source behaviour ----------------- - -Until postcopy is entered the migration stream is identical to normal -precopy, except for the addition of a 'postcopy advise' command at -the beginning, to tell the destination that postcopy might happen. 
-When postcopy starts the source sends the page discard data and then -forms the 'package' containing: - - - Command: 'postcopy listen' - - The device state - - A series of sections, identical to the precopy streams device state stream - containing everything except postcopiable devices (i.e. RAM) - - Command: 'postcopy run' - -The 'package' is sent as the data part of a Command: ``CMD_PACKAGED``, and the -contents are formatted in the same way as the main migration stream. - -During postcopy the source scans the list of dirty pages and sends them -to the destination without being requested (in much the same way as precopy), -however when a page request is received from the destination, the dirty page -scanning restarts from the requested location. This causes requested pages -to be sent quickly, and also causes pages directly after the requested page -to be sent quickly in the hope that those pages are likely to be used -by the destination soon. - -Destination behaviour ---------------------- - -Initially the destination looks the same as precopy, with a single thread -reading the migration stream; the 'postcopy advise' and 'discard' commands -are processed to change the way RAM is managed, but don't affect the stream -processing. - -:: - - ------------------------------------------------------------------------------ - 1 2 3 4 5 6 7 - main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN ) - thread | | - | (page request) - | \___ - v \ - listen thread: --- page -- page -- page -- page -- page -- - - a b c - ------------------------------------------------------------------------------ - -- On receipt of ``CMD_PACKAGED`` (1) - - All the data associated with the package - the ( ... ) section in the diagram - - is read into memory, and the main thread recurses into qemu_loadvm_state_main - to process the contents of the package (2) which contains commands (3,6) and - devices (4...) - -- On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package) - - a new thread (a) is started that takes over servicing the migration stream, - while the main thread carries on loading the package. It loads normal - background page data (b) but if during a device load a fault happens (5) - the returned page (c) is loaded by the listen thread allowing the main - threads device load to carry on. - -- The last thing in the ``CMD_PACKAGED`` is a 'RUN' command (6) - - letting the destination CPUs start running. At the end of the - ``CMD_PACKAGED`` (7) the main thread returns to normal running behaviour and - is no longer used by migration, while the listen thread carries on servicing - page data until the end of migration. - -Postcopy Recovery ------------------ - -Comparing to precopy, postcopy is special on error handlings. When any -error happens (in this case, mostly network errors), QEMU cannot easily -fail a migration because VM data resides in both source and destination -QEMU instances. On the other hand, when issue happens QEMU on both sides -will go into a paused state. It'll need a recovery phase to continue a -paused postcopy migration. - -The recovery phase normally contains a few steps: - - - When network issue occurs, both QEMU will go into PAUSED state - - - When the network is recovered (or a new network is provided), the admin - can setup the new channel for migration using QMP command - 'migrate-recover' on destination node, preparing for a resume. 
- - - On source host, the admin can continue the interrupted postcopy - migration using QMP command 'migrate' with resume=true flag set. - - - After the connection is re-established, QEMU will continue the postcopy - migration on both sides. - -During a paused postcopy migration, the VM can logically still continue -running, and it will not be impacted from any page access to pages that -were already migrated to destination VM before the interruption happens. -However, if any of the missing pages got accessed on destination VM, the VM -thread will be halted waiting for the page to be migrated, it means it can -be halted until the recovery is complete. - -The impact of accessing missing pages can be relevant to different -configurations of the guest. For example, when with async page fault -enabled, logically the guest can proactively schedule out the threads -accessing missing pages. - -Postcopy states ---------------- - -Postcopy moves through a series of states (see postcopy_state) from -ADVISE->DISCARD->LISTEN->RUNNING->END - - - Advise - - Set at the start of migration if postcopy is enabled, even - if it hasn't had the start command; here the destination - checks that its OS has the support needed for postcopy, and performs - setup to ensure the RAM mappings are suitable for later postcopy. - The destination will fail early in migration at this point if the - required OS support is not present. - (Triggered by reception of POSTCOPY_ADVISE command) - - - Discard - - Entered on receipt of the first 'discard' command; prior to - the first Discard being performed, hugepages are switched off - (using madvise) to ensure that no new huge pages are created - during the postcopy phase, and to cause any huge pages that - have discards on them to be broken. - - - Listen - - The first command in the package, POSTCOPY_LISTEN, switches - the destination state to Listen, and starts a new thread - (the 'listen thread') which takes over the job of receiving - pages off the migration stream, while the main thread carries - on processing the blob. With this thread able to process page - reception, the destination now 'sensitises' the RAM to detect - any access to missing pages (on Linux using the 'userfault' - system). - - - Running - - POSTCOPY_RUN causes the destination to synchronise all - state and start the CPUs and IO devices running. The main - thread now finishes processing the migration package and - now carries on as it would for normal precopy migration - (although it can't do the cleanup it would do as it - finishes a normal migration). - - - Paused - - Postcopy can run into a paused state (normally on both sides when - happens), where all threads will be temporarily halted mostly due to - network errors. When reaching paused state, migration will make sure - the qemu binary on both sides maintain the data without corrupting - the VM. To continue the migration, the admin needs to fix the - migration channel using the QMP command 'migrate-recover' on the - destination node, then resume the migration using QMP command 'migrate' - again on source node, with resume=true flag set. - - - End - - The listen thread can now quit, and perform the cleanup of migration - state, the migration is now complete. - -Source side page map --------------------- - -The 'migration bitmap' in postcopy is basically the same as in the precopy, -where each of the bit to indicate that page is 'dirty' - i.e. needs -sending. 
During the precopy phase this is updated as the CPU dirties -pages, however during postcopy the CPUs are stopped and nothing should -dirty anything any more. Instead, dirty bits are cleared when the relevant -pages are sent during postcopy. - -Postcopy with hugepages ------------------------ - -Postcopy now works with hugetlbfs backed memory: - - a) The linux kernel on the destination must support userfault on hugepages. - b) The huge-page configuration on the source and destination VMs must be - identical; i.e. RAMBlocks on both sides must use the same page size. - c) Note that ``-mem-path /dev/hugepages`` will fall back to allocating normal - RAM if it doesn't have enough hugepages, triggering (b) to fail. - Using ``-mem-prealloc`` enforces the allocation using hugepages. - d) Care should be taken with the size of hugepage used; postcopy with 2MB - hugepages works well, however 1GB hugepages are likely to be problematic - since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link, - and until the full page is transferred the destination thread is blocked. - -Postcopy with shared memory ---------------------------- - -Postcopy migration with shared memory needs explicit support from the other -processes that share memory and from QEMU. There are restrictions on the type of -memory that userfault can support shared. - -The Linux kernel userfault support works on ``/dev/shm`` memory and on ``hugetlbfs`` -(although the kernel doesn't provide an equivalent to ``madvise(MADV_DONTNEED)`` -for hugetlbfs which may be a problem in some configurations). - -The vhost-user code in QEMU supports clients that have Postcopy support, -and the ``vhost-user-bridge`` (in ``tests/``) and the DPDK package have changes -to support postcopy. - -The client needs to open a userfaultfd and register the areas -of memory that it maps with userfault. The client must then pass the -userfaultfd back to QEMU together with a mapping table that allows -fault addresses in the clients address space to be converted back to -RAMBlock/offsets. The client's userfaultfd is added to the postcopy -fault-thread and page requests are made on behalf of the client by QEMU. -QEMU performs 'wake' operations on the client's userfaultfd to allow it -to continue after a page has arrived. - -.. note:: - There are two future improvements that would be nice: - a) Some way to make QEMU ignorant of the addresses in the clients - address space - b) Avoiding the need for QEMU to perform ufd-wake calls after the - pages have arrived - -Retro-fitting postcopy to existing clients is possible: - a) A mechanism is needed for the registration with userfault as above, - and the registration needs to be coordinated with the phases of - postcopy. In vhost-user extra messages are added to the existing - control channel. - b) Any thread that can block due to guest memory accesses must be - identified and the implication understood; for example if the - guest memory access is made while holding a lock then all other - threads waiting for that lock will also be blocked. - -Postcopy Preemption Mode ------------------------- - -Postcopy preempt is a new capability introduced in 8.0 QEMU release, it -allows urgent pages (those got page fault requested from destination QEMU -explicitly) to be sent in a separate preempt channel, rather than queued in -the background migration channel. Anyone who cares about latencies of page -faults during a postcopy migration should enable this feature. By default, -it's not enabled. 
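To make the hugepage guidance above concrete, here is a minimal sketch of backing guest RAM with preallocated hugepages using the options the text mentions; the memory size and the hugetlbfs mount point are placeholders, and the same backing must be used on source and destination:

.. code-block:: bash

   # Sketch: back guest RAM with preallocated 2MB hugepages on both hosts.
   # /dev/hugepages is assumed to be a mounted hugetlbfs; -mem-prealloc avoids
   # silently falling back to normal RAM when hugepages run out.
   $ qemu-system-x86_64 -m 4G -mem-path /dev/hugepages -mem-prealloc ...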
- -Firmware -======== - -Migration migrates the copies of RAM and ROM, and thus when running -on the destination it includes the firmware from the source. Even after -resetting a VM, the old firmware is used. Only once QEMU has been restarted -is the new firmware in use. - -- Changes in firmware size can cause changes in the required RAMBlock size - to hold the firmware and thus migration can fail. In practice it's best - to pad firmware images to convenient powers of 2 with plenty of space - for growth. - -- Care should be taken with device emulation code so that newer - emulation code can work with older firmware to allow forward migration. - -- Care should be taken with newer firmware so that backward migration - to older systems with older device emulation code will work. - -In some cases it may be best to tie specific firmware versions to specific -versioned machine types to cut down on the combinations that will need -support. This is also useful when newer versions of firmware outgrow -the padding. - - -Backwards compatibility -======================= - -How backwards compatibility works ---------------------------------- - -When we do migration, we have two QEMU processes: the source and the -target. There are two cases, they are the same version or they are -different versions. The easy case is when they are the same version. -The difficult one is when they are different versions. - -There are two things that are different, but they have very similar -names and sometimes get confused: - -- QEMU version -- machine type version - -Let's start with a practical example, we start with: - -- qemu-system-x86_64 (v5.2), from now on qemu-5.2. -- qemu-system-x86_64 (v5.1), from now on qemu-5.1. - -Related to this are the "latest" machine types defined on each of -them: - -- pc-q35-5.2 (newer one in qemu-5.2) from now on pc-5.2 -- pc-q35-5.1 (newer one in qemu-5.1) from now on pc-5.1 - -First of all, migration is only supposed to work if you use the same -machine type in both source and destination. The QEMU hardware -configuration needs to be the same also on source and destination. -Most aspects of the backend configuration can be changed at will, -except for a few cases where the backend features influence frontend -device feature exposure. But that is not relevant for this section. - -I am going to list the number of combinations that we can have. Let's -start with the trivial ones, QEMU is the same on source and -destination: - -1 - qemu-5.2 -M pc-5.2 -> migrates to -> qemu-5.2 -M pc-5.2 - - This is the latest QEMU with the latest machine type. - This have to work, and if it doesn't work it is a bug. - -2 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - Exactly the same case than the previous one, but for 5.1. - Nothing to see here either. - -This are the easiest ones, we will not talk more about them in this -section. - -Now we start with the more interesting cases. Consider the case where -we have the same QEMU version in both sides (qemu-5.2) but we are using -the latest machine type for that version (pc-5.2) but one of an older -QEMU version, in this case pc-5.1. - -3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - It needs to use the definition of pc-5.1 and the devices as they - were configured on 5.1, but this should be easy in the sense that - both sides are the same QEMU and both sides have exactly the same - idea of what the pc-5.1 machine is. 
- -4 - qemu-5.1 -M pc-5.2 -> migrates to -> qemu-5.1 -M pc-5.2 - - This combination is not possible as the qemu-5.1 doesn't understand - pc-5.2 machine type. So nothing to worry here. - -Now it comes the interesting ones, when both QEMU processes are -different. Notice also that the machine type needs to be pc-5.1, -because we have the limitation than qemu-5.1 doesn't know pc-5.2. So -the possible cases are: - -5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - This migration is known as newer to older. We need to make sure - when we are developing 5.2 we need to take care about not to break - migration to qemu-5.1. Notice that we can't make updates to - qemu-5.1 to understand whatever qemu-5.2 decides to change, so it is - in qemu-5.2 side to make the relevant changes. - -6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - This migration is known as older to newer. We need to make sure - than we are able to receive migrations from qemu-5.1. The problem is - similar to the previous one. - -If qemu-5.1 and qemu-5.2 were the same, there will not be any -compatibility problems. But the reason that we create qemu-5.2 is to -get new features, devices, defaults, etc. - -If we get a device that has a new feature, or change a default value, -we have a problem when we try to migrate between different QEMU -versions. - -So we need a way to tell qemu-5.2 that when we are using machine type -pc-5.1, it needs to **not** use the feature, to be able to migrate to -real qemu-5.1. - -And the equivalent part when migrating from qemu-5.1 to qemu-5.2. -qemu-5.2 has to expect that it is not going to get data for the new -feature, because qemu-5.1 doesn't know about it. - -How do we tell QEMU about these device feature changes? In -hw/core/machine.c:hw_compat_X_Y arrays. - -If we change a default value, we need to put back the old value on -that array. And the device, during initialization needs to look at -that array to see what value it needs to get for that feature. And -what are we going to put in that array, the value of a property. - -To create a property for a device, we need to use one of the -DEFINE_PROP_*() macros. See include/hw/qdev-properties.h to find the -macros that exist. With it, we set the default value for that -property, and that is what it is going to get in the latest released -version. But if we want a different value for a previous version, we -can change that in the hw_compat_X_Y arrays. - -hw_compat_X_Y is an array of registers that have the format: - -- name_device -- name_property -- value - -Let's see a practical example. - -In qemu-5.2 virtio-blk-device got multi queue support. This is a -change that is not backward compatible. In qemu-5.1 it has one -queue. In qemu-5.2 it has the same number of queues as the number of -cpus in the system. - -When we are doing migration, if we migrate from a device that has 4 -queues to a device that have only one queue, we don't know where to -put the extra information for the other 3 queues, and we fail -migration. - -Similar problem when we migrate from qemu-5.1 that has only one queue -to qemu-5.2, we only sent information for one queue, but destination -has 4, and we have 3 queues that are not properly initialized and -anything can happen. - -So, how can we address this problem. Easy, just convince qemu-5.2 -that when it is running pc-5.1, it needs to set the number of queues -for virtio-blk-devices to 1. - -That way we fix the cases 5 and 6. 
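Purely as an illustration of what that means in practice (a sketch, not the actual fix, which is the hw_compat entry discussed below), the same effect could be forced from the command line with a global property:

.. code-block:: bash

   # Hypothetical manual equivalent of the compat entry described below:
   # pin the virtio-blk queue count to the pc-5.1 value on the 5.2 binary.
   $ qemu-system-x86_64 -M pc-q35-5.1 \
       -global virtio-blk-device.num-queues=1 ...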
- -5 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.1 -M pc-5.1 - - qemu-5.2 -M pc-5.1 sets number of queues to be 1. - qemu-5.1 -M pc-5.1 expects number of queues to be 1. - - correct. migration works. - -6 - qemu-5.1 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - qemu-5.1 -M pc-5.1 sets number of queues to be 1. - qemu-5.2 -M pc-5.1 expects number of queues to be 1. - - correct. migration works. - -And now the other interesting case, case 3. In this case we have: - -3 - qemu-5.2 -M pc-5.1 -> migrates to -> qemu-5.2 -M pc-5.1 - - Here we have the same QEMU in both sides. So it doesn't matter a - lot if we have set the number of queues to 1 or not, because - they are the same. - - WRONG! - - Think what happens if we do one of this double migrations: - - A -> migrates -> B -> migrates -> C - - where: - - A: qemu-5.1 -M pc-5.1 - B: qemu-5.2 -M pc-5.1 - C: qemu-5.2 -M pc-5.1 - - migration A -> B is case 6, so number of queues needs to be 1. - - migration B -> C is case 3, so we don't care. But actually we - care because we haven't started the guest in qemu-5.2, it came - migrated from qemu-5.1. So to be in the safe place, we need to - always use number of queues 1 when we are using pc-5.1. - -Now, how was this done in reality? The following commit shows how it -was done:: - - commit 9445e1e15e66c19e42bea942ba810db28052cd05 - Author: Stefan Hajnoczi - Date: Tue Aug 18 15:33:47 2020 +0100 - - virtio-blk-pci: default num_queues to -smp N - -The relevant parts for migration are:: - - @@ -1281,7 +1284,8 @@ static Property virtio_blk_properties[] = { - #endif - DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0, - true), - - DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1), - + DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, - + VIRTIO_BLK_AUTO_NUM_QUEUES), - DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256), - -It changes the default value of num_queues. But it fishes it for old -machine types to have the right value:: - - @@ -31,6 +31,7 @@ - GlobalProperty hw_compat_5_1[] = { - ... - + { "virtio-blk-device", "num-queues", "1"}, - ... - }; - -A device with different features on both sides ----------------------------------------------- - -Let's assume that we are using the same QEMU binary on both sides, -just to make the things easier. But we have a device that has -different features on both sides of the migration. That can be -because the devices are different, because the kernel driver of both -devices have different features, whatever. - -How can we get this to work with migration. The way to do that is -"theoretically" easy. You have to get the features that the device -has in the source of the migration. The features that the device has -on the target of the migration, you get the intersection of the -features of both sides, and that is the way that you should launch -QEMU. - -Notice that this is not completely related to QEMU. The most -important thing here is that this should be handled by the managing -application that launches QEMU. If QEMU is configured correctly, the -migration will succeed. - -That said, actually doing it is complicated. Almost all devices are -bad at being able to be launched with only some features enabled. -With one big exception: cpus. - -You can read the documentation for QEMU x86 cpu models here: - -https://qemu-project.gitlab.io/qemu/system/qemu-cpu-models.html - -See when they talk about migration they recommend that one chooses the -newest cpu model that is supported for all cpus. 
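As a sketch of that recommendation (the model name here is only an example and must be one that both hosts actually support), the management application would start both sides with the same named model instead of ``-cpu host``:

.. code-block:: bash

   # Example only: use an identical named CPU model on source and destination,
   # chosen as the newest model supported by both hosts.
   $ qemu-system-x86_64 -cpu Skylake-Client ...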
- -Let's say that we have: - -Host A: - -Device X has the feature Y - -Host B: - -Device X has not the feature Y - -If we try to migrate without any care from host A to host B, it will -fail because when migration tries to load the feature Y on -destination, it will find that the hardware is not there. - -Doing this would be the equivalent of doing with cpus: - -Host A: - -$ qemu-system-x86_64 -cpu host - -Host B: - -$ qemu-system-x86_64 -cpu host - -When both hosts have different cpu features this is guaranteed to -fail. Especially if Host B has less features than host A. If host A -has less features than host B, sometimes it works. Important word of -last sentence is "sometimes". - -So, forgetting about cpu models and continuing with the -cpu host -example, let's see that the differences of the cpus is that Host A and -B have the following features: - -Features: 'pcid' 'stibp' 'taa-no' -Host A: X X -Host B: X - -And we want to migrate between them, the way configure both QEMU cpu -will be: - -Host A: - -$ qemu-system-x86_64 -cpu host,pcid=off,stibp=off - -Host B: - -$ qemu-system-x86_64 -cpu host,taa-no=off - -And you would be able to migrate between them. It is responsibility -of the management application or of the user to make sure that the -configuration is correct. QEMU doesn't know how to look at this kind -of features in general. - -Notice that we don't recommend to use -cpu host for migration. It is -used in this example because it makes the example simpler. - -Other devices have worse control about individual features. If they -want to be able to migrate between hosts that show different features, -the device needs a way to configure which ones it is going to use. - -In this section we have considered that we are using the same QEMU -binary in both sides of the migration. If we use different QEMU -versions process, then we need to have into account all other -differences and the examples become even more complicated. - -How to mitigate when we have a backward compatibility error ------------------------------------------------------------ - -We broke migration for old machine types continuously during -development. But as soon as we find that there is a problem, we fix -it. The problem is what happens when we detect after we have done a -release that something has gone wrong. - -Let see how it worked with one example. - -After the release of qemu-8.0 we found a problem when doing migration -of the machine type pc-7.2. - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - - This migration works - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - - This migration works - -- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - - This migration fails - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - - This migration fails - -So clearly something fails when migration between qemu-7.2 and -qemu-8.0 with machine type pc-7.2. The error messages, and git bisect -pointed to this commit. 
- -In qemu-8.0 we got this commit:: - - commit 010746ae1db7f52700cb2e2c46eb94f299cfa0d2 - Author: Jonathan Cameron - Date: Thu Mar 2 13:37:02 2023 +0000 - - hw/pci/aer: Implement PCI_ERR_UNCOR_MASK register - - -The relevant bits of the commit for our example are this ones:: - - --- a/hw/pci/pcie_aer.c - +++ b/hw/pci/pcie_aer.c - @@ -112,6 +112,10 @@ int pcie_aer_init(PCIDevice *dev, - - pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, - PCI_ERR_UNC_SUPPORTED); - + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_MASK_DEFAULT); - + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_SUPPORTED); - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, - PCI_ERR_UNC_SEVERITY_DEFAULT); - -The patch changes how we configure PCI space for AER. But QEMU fails -when the PCI space configuration is different between source and -destination. - -The following commit shows how this got fixed:: - - commit 5ed3dabe57dd9f4c007404345e5f5bf0e347317f - Author: Leonardo Bras - Date: Tue May 2 21:27:02 2023 -0300 - - hw/pci: Disable PCI_ERR_UNCOR_MASK register for machine type < 8.0 - - [...] - -The relevant parts of the fix in QEMU are as follow: - -First, we create a new property for the device to be able to configure -the old behaviour or the new behaviour:: - - diff --git a/hw/pci/pci.c b/hw/pci/pci.c - index 8a87ccc8b0..5153ad63d6 100644 - --- a/hw/pci/pci.c - +++ b/hw/pci/pci.c - @@ -79,6 +79,8 @@ static Property pci_props[] = { - DEFINE_PROP_STRING("failover_pair_id", PCIDevice, - failover_pair_id), - DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), - + DEFINE_PROP_BIT("x-pcie-err-unc-mask", PCIDevice, cap_present, - + QEMU_PCIE_ERR_UNC_MASK_BITNR, true), - DEFINE_PROP_END_OF_LIST() - }; - -Notice that we enable the feature for new machine types. - -Now we see how the fix is done. This is going to depend on what kind -of breakage happens, but in this case it is quite simple:: - - diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c - index 103667c368..374d593ead 100644 - --- a/hw/pci/pcie_aer.c - +++ b/hw/pci/pcie_aer.c - @@ -112,10 +112,13 @@ int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, - uint16_t offset, - - pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS, - PCI_ERR_UNC_SUPPORTED); - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - - PCI_ERR_UNC_MASK_DEFAULT); - - pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - - PCI_ERR_UNC_SUPPORTED); - + - + if (dev->cap_present & QEMU_PCIE_ERR_UNC_MASK) { - + pci_set_long(dev->config + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_MASK_DEFAULT); - + pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_MASK, - + PCI_ERR_UNC_SUPPORTED); - + } - - pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER, - PCI_ERR_UNC_SEVERITY_DEFAULT); - -I.e. If the property bit is enabled, we configure it as we did for -qemu-8.0. If the property bit is not set, we configure it as it was in 7.2. 
- -And now, everything that is missing is disabling the feature for old -machine types:: - - diff --git a/hw/core/machine.c b/hw/core/machine.c - index 47a34841a5..07f763eb2e 100644 - --- a/hw/core/machine.c - +++ b/hw/core/machine.c - @@ -48,6 +48,7 @@ GlobalProperty hw_compat_7_2[] = { - { "e1000e", "migrate-timadj", "off" }, - { "virtio-mem", "x-early-migration", "false" }, - { "migration", "x-preempt-pre-7-2", "true" }, - + { TYPE_PCI_DEVICE, "x-pcie-err-unc-mask", "off" }, - }; - const size_t hw_compat_7_2_len = G_N_ELEMENTS(hw_compat_7_2); - -And now, when qemu-8.0.1 is released with this fix, all combinations -are going to work as supposed. - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) -- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 (works) -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 (works) - -So the normality has been restored and everything is ok, no? - -Not really, now our matrix is much bigger. We started with the easy -cases, migration from the same version to the same version always -works: - -- $ qemu-7.2 -M pc-7.2 -> qemu-7.2 -M pc-7.2 -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 - -Now the interesting ones. When the QEMU processes versions are -different. For the 1st set, their fail and we can do nothing, both -versions are released and we can't change anything. - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0 -M pc-7.2 -- $ qemu-8.0 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - -This two are the ones that work. The whole point of making the -change in qemu-8.0.1 release was to fix this issue: - -- $ qemu-7.2 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-7.2 -M pc-7.2 - -But now we found that qemu-8.0 neither can migrate to qemu-7.2 not -qemu-8.0.1. - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -- $ qemu-8.0.1 -M pc-7.2 -> qemu-8.0 -M pc-7.2 - -So, if we start a pc-7.2 machine in qemu-8.0 we can't migrate it to -anything except to qemu-8.0. - -Can we do better? - -Yeap. If we know that we are going to do this migration: - -- $ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 - -We can launch the appropriate devices with:: - - --device...,x-pci-e-err-unc-mask=on - -And now we can receive a migration from 8.0. And from now on, we can -do that migration to new machine types if we remember to enable that -property for pc-7.2. Notice that we need to remember, it is not -enough to know that the source of the migration is qemu-8.0. Think of -this example: - -$ qemu-8.0 -M pc-7.2 -> qemu-8.0.1 -M pc-7.2 -> qemu-8.2 -M pc-7.2 - -In the second migration, the source is not qemu-8.0, but we still have -that "problem" and have that property enabled. Notice that we need to -continue having this mark/property until we have this machine -rebooted. But it is not a normal reboot (that don't reload QEMU) we -need the machine to poweroff/poweron on a fixed QEMU. And from now -on we can use the proper real machine. diff --git a/docs/devel/migration/postcopy.rst b/docs/devel/migration/postcopy.rst new file mode 100644 index 0000000000000000000000000000000000000000..6c51e96d798418ecb048a71bba8b0847d971bcc7 --- /dev/null +++ b/docs/devel/migration/postcopy.rst @@ -0,0 +1,313 @@ +======== +Postcopy +======== + +.. 
contents:: + +'Postcopy' migration is a way to deal with migrations that refuse to converge +(or take too long to converge) its plus side is that there is an upper bound on +the amount of migration traffic and time it takes, the down side is that during +the postcopy phase, a failure of *either* side causes the guest to be lost. + +In postcopy the destination CPUs are started before all the memory has been +transferred, and accesses to pages that are yet to be transferred cause +a fault that's translated by QEMU into a request to the source QEMU. + +Postcopy can be combined with precopy (i.e. normal migration) so that if precopy +doesn't finish in a given time the switch is made to postcopy. + +Enabling postcopy +================= + +To enable postcopy, issue this command on the monitor (both source and +destination) prior to the start of migration: + +``migrate_set_capability postcopy-ram on`` + +The normal commands are then used to start a migration, which is still +started in precopy mode. Issuing: + +``migrate_start_postcopy`` + +will now cause the transition from precopy to postcopy. +It can be issued immediately after migration is started or any +time later on. Issuing it after the end of a migration is harmless. + +Blocktime is a postcopy live migration metric, intended to show how +long the vCPU was in state of interruptible sleep due to pagefault. +That metric is calculated both for all vCPUs as overlapped value, and +separately for each vCPU. These values are calculated on destination +side. To enable postcopy blocktime calculation, enter following +command on destination monitor: + +``migrate_set_capability postcopy-blocktime on`` + +Postcopy blocktime can be retrieved by query-migrate qmp command. +postcopy-blocktime value of qmp command will show overlapped blocking +time for all vCPU, postcopy-vcpu-blocktime will show list of blocking +time per vCPU. + +.. note:: + During the postcopy phase, the bandwidth limits set using + ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that + the destination is waiting for). + +Postcopy internals +================== + +State machine +------------- + +Postcopy moves through a series of states (see postcopy_state) from +ADVISE->DISCARD->LISTEN->RUNNING->END + + - Advise + + Set at the start of migration if postcopy is enabled, even + if it hasn't had the start command; here the destination + checks that its OS has the support needed for postcopy, and performs + setup to ensure the RAM mappings are suitable for later postcopy. + The destination will fail early in migration at this point if the + required OS support is not present. + (Triggered by reception of POSTCOPY_ADVISE command) + + - Discard + + Entered on receipt of the first 'discard' command; prior to + the first Discard being performed, hugepages are switched off + (using madvise) to ensure that no new huge pages are created + during the postcopy phase, and to cause any huge pages that + have discards on them to be broken. + + - Listen + + The first command in the package, POSTCOPY_LISTEN, switches + the destination state to Listen, and starts a new thread + (the 'listen thread') which takes over the job of receiving + pages off the migration stream, while the main thread carries + on processing the blob. With this thread able to process page + reception, the destination now 'sensitises' the RAM to detect + any access to missing pages (on Linux using the 'userfault' + system). 
+ + - Running + + POSTCOPY_RUN causes the destination to synchronise all + state and start the CPUs and IO devices running. The main + thread now finishes processing the migration package and + now carries on as it would for normal precopy migration + (although it can't do the cleanup it would do as it + finishes a normal migration). + + - Paused + + Postcopy can run into a paused state (normally on both sides when + happens), where all threads will be temporarily halted mostly due to + network errors. When reaching paused state, migration will make sure + the qemu binary on both sides maintain the data without corrupting + the VM. To continue the migration, the admin needs to fix the + migration channel using the QMP command 'migrate-recover' on the + destination node, then resume the migration using QMP command 'migrate' + again on source node, with resume=true flag set. + + - End + + The listen thread can now quit, and perform the cleanup of migration + state, the migration is now complete. + +Device transfer +--------------- + +Loading of device data may cause the device emulation to access guest RAM +that may trigger faults that have to be resolved by the source, as such +the migration stream has to be able to respond with page data *during* the +device load, and hence the device data has to be read from the stream completely +before the device load begins to free the stream up. This is achieved by +'packaging' the device data into a blob that's read in one go. + +Source behaviour +---------------- + +Until postcopy is entered the migration stream is identical to normal +precopy, except for the addition of a 'postcopy advise' command at +the beginning, to tell the destination that postcopy might happen. +When postcopy starts the source sends the page discard data and then +forms the 'package' containing: + + - Command: 'postcopy listen' + - The device state + + A series of sections, identical to the precopy streams device state stream + containing everything except postcopiable devices (i.e. RAM) + - Command: 'postcopy run' + +The 'package' is sent as the data part of a Command: ``CMD_PACKAGED``, and the +contents are formatted in the same way as the main migration stream. + +During postcopy the source scans the list of dirty pages and sends them +to the destination without being requested (in much the same way as precopy), +however when a page request is received from the destination, the dirty page +scanning restarts from the requested location. This causes requested pages +to be sent quickly, and also causes pages directly after the requested page +to be sent quickly in the hope that those pages are likely to be used +by the destination soon. + +Destination behaviour +--------------------- + +Initially the destination looks the same as precopy, with a single thread +reading the migration stream; the 'postcopy advise' and 'discard' commands +are processed to change the way RAM is managed, but don't affect the stream +processing. + +:: + + ------------------------------------------------------------------------------ + 1 2 3 4 5 6 7 + main -----DISCARD-CMD_PACKAGED ( LISTEN DEVICE DEVICE DEVICE RUN ) + thread | | + | (page request) + | \___ + v \ + listen thread: --- page -- page -- page -- page -- page -- + + a b c + ------------------------------------------------------------------------------ + +- On receipt of ``CMD_PACKAGED`` (1) + + All the data associated with the package - the ( ... 
) section in the diagram - + is read into memory, and the main thread recurses into qemu_loadvm_state_main + to process the contents of the package (2) which contains commands (3,6) and + devices (4...) + +- On receipt of 'postcopy listen' - 3 -(i.e. the 1st command in the package) + + a new thread (a) is started that takes over servicing the migration stream, + while the main thread carries on loading the package. It loads normal + background page data (b) but if during a device load a fault happens (5) + the returned page (c) is loaded by the listen thread allowing the main + threads device load to carry on. + +- The last thing in the ``CMD_PACKAGED`` is a 'RUN' command (6) + + letting the destination CPUs start running. At the end of the + ``CMD_PACKAGED`` (7) the main thread returns to normal running behaviour and + is no longer used by migration, while the listen thread carries on servicing + page data until the end of migration. + +Source side page bitmap +----------------------- + +The 'migration bitmap' in postcopy is basically the same as in the precopy, +where each of the bit to indicate that page is 'dirty' - i.e. needs +sending. During the precopy phase this is updated as the CPU dirties +pages, however during postcopy the CPUs are stopped and nothing should +dirty anything any more. Instead, dirty bits are cleared when the relevant +pages are sent during postcopy. + +Postcopy features +================= + +Postcopy recovery +----------------- + +Comparing to precopy, postcopy is special on error handlings. When any +error happens (in this case, mostly network errors), QEMU cannot easily +fail a migration because VM data resides in both source and destination +QEMU instances. On the other hand, when issue happens QEMU on both sides +will go into a paused state. It'll need a recovery phase to continue a +paused postcopy migration. + +The recovery phase normally contains a few steps: + + - When network issue occurs, both QEMU will go into PAUSED state + + - When the network is recovered (or a new network is provided), the admin + can setup the new channel for migration using QMP command + 'migrate-recover' on destination node, preparing for a resume. + + - On source host, the admin can continue the interrupted postcopy + migration using QMP command 'migrate' with resume=true flag set. + + - After the connection is re-established, QEMU will continue the postcopy + migration on both sides. + +During a paused postcopy migration, the VM can logically still continue +running, and it will not be impacted from any page access to pages that +were already migrated to destination VM before the interruption happens. +However, if any of the missing pages got accessed on destination VM, the VM +thread will be halted waiting for the page to be migrated, it means it can +be halted until the recovery is complete. + +The impact of accessing missing pages can be relevant to different +configurations of the guest. For example, when with async page fault +enabled, logically the guest can proactively schedule out the threads +accessing missing pages. + +Postcopy with hugepages +----------------------- + +Postcopy now works with hugetlbfs backed memory: + + a) The linux kernel on the destination must support userfault on hugepages. + b) The huge-page configuration on the source and destination VMs must be + identical; i.e. RAMBlocks on both sides must use the same page size. 
+ c) Note that ``-mem-path /dev/hugepages`` will fall back to allocating normal + RAM if it doesn't have enough hugepages, triggering (b) to fail. + Using ``-mem-prealloc`` enforces the allocation using hugepages. + d) Care should be taken with the size of hugepage used; postcopy with 2MB + hugepages works well, however 1GB hugepages are likely to be problematic + since it takes ~1 second to transfer a 1GB hugepage across a 10Gbps link, + and until the full page is transferred the destination thread is blocked. + +Postcopy with shared memory +--------------------------- + +Postcopy migration with shared memory needs explicit support from the other +processes that share memory and from QEMU. There are restrictions on the type of +memory that userfault can support shared. + +The Linux kernel userfault support works on ``/dev/shm`` memory and on ``hugetlbfs`` +(although the kernel doesn't provide an equivalent to ``madvise(MADV_DONTNEED)`` +for hugetlbfs which may be a problem in some configurations). + +The vhost-user code in QEMU supports clients that have Postcopy support, +and the ``vhost-user-bridge`` (in ``tests/``) and the DPDK package have changes +to support postcopy. + +The client needs to open a userfaultfd and register the areas +of memory that it maps with userfault. The client must then pass the +userfaultfd back to QEMU together with a mapping table that allows +fault addresses in the clients address space to be converted back to +RAMBlock/offsets. The client's userfaultfd is added to the postcopy +fault-thread and page requests are made on behalf of the client by QEMU. +QEMU performs 'wake' operations on the client's userfaultfd to allow it +to continue after a page has arrived. + +.. note:: + There are two future improvements that would be nice: + a) Some way to make QEMU ignorant of the addresses in the clients + address space + b) Avoiding the need for QEMU to perform ufd-wake calls after the + pages have arrived + +Retro-fitting postcopy to existing clients is possible: + a) A mechanism is needed for the registration with userfault as above, + and the registration needs to be coordinated with the phases of + postcopy. In vhost-user extra messages are added to the existing + control channel. + b) Any thread that can block due to guest memory accesses must be + identified and the implication understood; for example if the + guest memory access is made while holding a lock then all other + threads waiting for that lock will also be blocked. + +Postcopy preemption mode +------------------------ + +Postcopy preempt is a new capability introduced in 8.0 QEMU release, it +allows urgent pages (those got page fault requested from destination QEMU +explicitly) to be sent in a separate preempt channel, rather than queued in +the background migration channel. Anyone who cares about latencies of page +faults during a postcopy migration should enable this feature. By default, +it's not enabled. 
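For example, assuming the capability is named ``postcopy-preempt`` (a sketch only; as with ``postcopy-ram``, it would be issued on both source and destination monitors before migration starts):

.. code-block:: none

   migrate_set_capability postcopy-ram on
   migrate_set_capability postcopy-preempt on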
diff --git a/docs/devel/vfio-migration.rst b/docs/devel/migration/vfio.rst similarity index 99% rename from docs/devel/vfio-migration.rst rename to docs/devel/migration/vfio.rst index 605fe60e9695a50813b1294bb970ce2c39d2ba07..c49482eab66d8e831ea1c2c791fc895b51893e4d 100644 --- a/docs/devel/vfio-migration.rst +++ b/docs/devel/migration/vfio.rst @@ -1,5 +1,5 @@ ===================== -VFIO device Migration +VFIO device migration ===================== Migration of virtual machine involves saving the state for each device that diff --git a/docs/devel/virtio-migration.txt b/docs/devel/migration/virtio.rst similarity index 37% rename from docs/devel/virtio-migration.txt rename to docs/devel/migration/virtio.rst index 98a6b0ffb5731034be51158bba0ebdd04ddb07bd..611a18b821519e51fc5ddc84b1b5049a8219387a 100644 --- a/docs/devel/virtio-migration.txt +++ b/docs/devel/migration/virtio.rst @@ -1,5 +1,6 @@ -Virtio devices and migration -============================ +======================= +Virtio device migration +======================= Copyright 2015 IBM Corp. @@ -8,91 +9,97 @@ the COPYING file in the top-level directory. Saving and restoring the state of virtio devices is a bit of a twisty maze, for several reasons: + - state is distributed between several parts: + - virtio core, for common fields like features, number of queues, ... + - virtio transport (pci, ccw, ...), for the different proxy devices and transport specific state (msix vectors, indicators, ...) + - virtio device (net, blk, ...), for the different device types and their state (mac address, request queue, ...) + - most fields are saved via the stream interface; subsequently, subsections have been added to make cross-version migration possible This file attempts to document the current procedure and point out some caveats. 
- Save state procedure ==================== -virtio core virtio transport virtio device ------------ ---------------- ------------- - - save() function registered - via VMState wrapper on - device class -virtio_save() <---------- - ------> save_config() - - save proxy device - - save transport-specific - device fields -- save common device - fields -- save common virtqueue - fields - ------> save_queue() - - save transport-specific - virtqueue fields - ------> save_device() - - save device-specific - fields -- save subsections - - device endianness, - if changed from - default endianness - - 64 bit features, if - any high feature bit - is set - - virtio-1 virtqueue - fields, if VERSION_1 - is set +:: + virtio core virtio transport virtio device + ----------- ---------------- ------------- + + save() function registered + via VMState wrapper on + device class + virtio_save() <---------- + ------> save_config() + - save proxy device + - save transport-specific + device fields + - save common device + fields + - save common virtqueue + fields + ------> save_queue() + - save transport-specific + virtqueue fields + ------> save_device() + - save device-specific + fields + - save subsections + - device endianness, + if changed from + default endianness + - 64 bit features, if + any high feature bit + is set + - virtio-1 virtqueue + fields, if VERSION_1 + is set Load state procedure ==================== -virtio core virtio transport virtio device ------------ ---------------- ------------- - - load() function registered - via VMState wrapper on - device class -virtio_load() <---------- - ------> load_config() - - load proxy device - - load transport-specific - device fields -- load common device - fields -- load common virtqueue - fields - ------> load_queue() - - load transport-specific - virtqueue fields -- notify guest - ------> load_device() - - load device-specific - fields -- load subsections - - device endianness - - 64 bit features - - virtio-1 virtqueue - fields -- sanitize endianness -- sanitize features -- virtqueue index sanity - check - - feature-dependent setup +:: + virtio core virtio transport virtio device + ----------- ---------------- ------------- + + load() function registered + via VMState wrapper on + device class + virtio_load() <---------- + ------> load_config() + - load proxy device + - load transport-specific + device fields + - load common device + fields + - load common virtqueue + fields + ------> load_queue() + - load transport-specific + virtqueue fields + - notify guest + ------> load_device() + - load device-specific + fields + - load subsections + - device endianness + - 64 bit features + - virtio-1 virtqueue + fields + - sanitize endianness + - sanitize features + - virtqueue index sanity + check + - feature-dependent setup Implications of this setup ========================== diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst index c9541a7b20a433ef8034d39212284d1824033dfd..7302c3bf5348859e778f7f143252aaf96cc75fbf 100644 --- a/docs/devel/multi-thread-tcg.rst +++ b/docs/devel/multi-thread-tcg.rst @@ -226,10 +226,9 @@ instruction. This could be a future optimisation. Emulated hardware state ----------------------- -Currently thanks to KVM work any access to IO memory is automatically -protected by the global iothread mutex, also known as the BQL (Big -QEMU Lock). Any IO region that doesn't use global mutex is expected to -do its own locking. 
+Currently thanks to KVM work any access to IO memory is automatically protected +by the BQL (Big QEMU Lock). Any IO region that doesn't use the BQL is expected +to do its own locking. However IO memory isn't the only way emulated hardware state can be modified. Some architectures have model specific registers that diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt index a3e949f6b3addd7005626940946a08137dbb19a4..de85767b124b19a9fcc0538903ee6c5b300b1bd6 100644 --- a/docs/devel/multiple-iothreads.txt +++ b/docs/devel/multiple-iothreads.txt @@ -5,7 +5,7 @@ the COPYING file in the top-level directory. This document explains the IOThread feature and how to write code that runs -outside the QEMU global mutex. +outside the BQL. The main loop and IOThreads --------------------------- @@ -29,13 +29,13 @@ scalability bottleneck on hosts with many CPUs. Work can be spread across several IOThreads instead of just one main loop. When set up correctly this can improve I/O latency and reduce jitter seen by the guest. -The main loop is also deeply associated with the QEMU global mutex, which is a -scalability bottleneck in itself. vCPU threads and the main loop use the QEMU -global mutex to serialize execution of QEMU code. This mutex is necessary -because a lot of QEMU's code historically was not thread-safe. +The main loop is also deeply associated with the BQL, which is a +scalability bottleneck in itself. vCPU threads and the main loop use the BQL +to serialize execution of QEMU code. This mutex is necessary because a lot of +QEMU's code historically was not thread-safe. The fact that all I/O processing is done in a single main loop and that the -QEMU global mutex is contended by all vCPU threads and the main loop explain +BQL is contended by all vCPU threads and the main loop explain why it is desirable to place work into IOThreads. The experimental virtio-blk data-plane implementation has been benchmarked and @@ -66,7 +66,7 @@ There are several old APIs that use the main loop AioContext: Since they implicitly work on the main loop they cannot be used in code that runs in an IOThread. They might cause a crash or deadlock if called from an -IOThread since the QEMU global mutex is not held. +IOThread since the BQL is not held. Instead, use the AioContext functions directly (see include/block/aio.h): * aio_set_fd_handler() - monitor a file descriptor @@ -88,27 +88,18 @@ loop, depending on which AioContext instance the caller passes in. How to synchronize with an IOThread ----------------------------------- -AioContext is not thread-safe so some rules must be followed when using file -descriptors, event notifiers, timers, or BHs across threads: +Variables that can be accessed by multiple threads require some form of +synchronization such as qemu_mutex_lock(), rcu_read_lock(), etc. -1. AioContext functions can always be called safely. They handle their -own locking internally. - -2. Other threads wishing to access the AioContext must use -aio_context_acquire()/aio_context_release() for mutual exclusion. Once the -context is acquired no other thread can access it or run event loop iterations -in this AioContext. - -Legacy code sometimes nests aio_context_acquire()/aio_context_release() calls. -Do not use nesting anymore, it is incompatible with the BDRV_POLL_WHILE() macro -used in the block layer and can lead to hangs. - -There is currently no lock ordering rule if a thread needs to acquire multiple -AioContexts simultaneously. 
Therefore, it is only safe for code holding the -QEMU global mutex to acquire other AioContexts. +AioContext functions like aio_set_fd_handler(), aio_set_event_notifier(), +aio_bh_new(), and aio_timer_new() are thread-safe. They can be used to trigger +activity in an IOThread. Side note: the best way to schedule a function call across threads is to call -aio_bh_schedule_oneshot(). No acquire/release or locking is needed. +aio_bh_schedule_oneshot(). + +The main loop thread can wait synchronously for a condition using +AIO_WAIT_WHILE(). AioContext and the block layer ------------------------------ @@ -124,22 +115,16 @@ Block layer code must therefore expect to run in an IOThread and avoid using old APIs that implicitly use the main loop. See the "How to program for IOThreads" above for information on how to do that. -If main loop code such as a QMP function wishes to access a BlockDriverState -it must first call aio_context_acquire(bdrv_get_aio_context(bs)) to ensure -that callbacks in the IOThread do not run in parallel. - Code running in the monitor typically needs to ensure that past requests from the guest are completed. When a block device is running in an IOThread, the IOThread can also process requests from the guest (via ioeventfd). To achieve both objects, wrap the code between bdrv_drained_begin() and bdrv_drained_end(), thus creating a "drained -section". The functions must be called between aio_context_acquire() -and aio_context_release(). You can freely release and re-acquire the -AioContext within a drained section. - -Long-running jobs (usually in the form of coroutines) are best scheduled in -the BlockDriverState's AioContext to avoid the need to acquire/release around -each bdrv_*() call. The functions bdrv_add/remove_aio_context_notifier, -or alternatively blk_add/remove_aio_context_notifier if you use BlockBackends, -can be used to get a notification whenever bdrv_try_change_aio_context() moves a +section". + +Long-running jobs (usually in the form of coroutines) are often scheduled in +the BlockDriverState's AioContext. The functions +bdrv_add/remove_aio_context_notifier, or alternatively +blk_add/remove_aio_context_notifier if you use BlockBackends, can be used to +get a notification whenever bdrv_try_change_aio_context() moves a BlockDriverState to a different AioContext. diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index 7f78183cd489d3266e08a4133840f0dd96a55536..ea8228518cacfc53e19c8c433a4766a64251b01d 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -594,7 +594,7 @@ blocking the guest and other background operations. Coroutine safety can be hard to prove, similar to thread safety. Common pitfalls are: -- The global mutex isn't held across ``qemu_coroutine_yield()``, so +- The BQL isn't held across ``qemu_coroutine_yield()``, so operations that used to assume that they execute atomically may have to be more careful to protect against changes in the global state. diff --git a/docs/devel/replay.rst b/docs/devel/replay.rst index 0244be8b9c4d1696f42f7106a9f72f4e8fffc353..effd856f0c6b17583e01be00b2c5a0c7aeb0eec3 100644 --- a/docs/devel/replay.rst +++ b/docs/devel/replay.rst @@ -184,7 +184,7 @@ modes. Reading and writing requests are created by CPU thread of QEMU. Later these requests proceed to block layer which creates "bottom halves". Bottom halves consist of callback and its parameters. They are processed when -main loop locks the global mutex. These locks are not synchronized with +main loop locks the BQL. 
These locks are not synchronized with
replaying process because main loop also processes the events that do not
affect the virtual machine state (like user interaction with monitor).
diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst
index 38ed1790f7c01c523786c86f8d03b05ccc4a5809..d4e79718baccf3a8ce73f42a2d3af523d359d8bf 100644
--- a/docs/devel/reset.rst
+++ b/docs/devel/reset.rst
@@ -19,7 +19,7 @@ Triggering reset
 This section documents the APIs which "users" of a resettable object should use
 to control it. All resettable control functions must be called while holding
-the iothread lock.
+the BQL.
 You can apply a reset to an object using ``resettable_assert_reset()``. You
 need to call ``resettable_release_reset()`` to release the object from reset. To
diff --git a/docs/devel/vfio-iommufd.rst b/docs/devel/vfio-iommufd.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3d1c11f175e5968e9f1519da70c9a0a6ced03995
--- /dev/null
+++ b/docs/devel/vfio-iommufd.rst
@@ -0,0 +1,166 @@
+===============================
+IOMMUFD BACKEND usage with VFIO
+===============================
+
+(The terms backend, container and BE are used interchangeably below.)
+
+With the introduction of iommufd, the Linux kernel provides a generic
+interface for user space drivers to propagate their DMA mappings to the kernel
+for assigned devices. While the legacy kernel interface is group-centric, the
+new iommufd interface is device-centric, relying on a device fd and an iommufd.
+
+To support both interfaces in the QEMU VFIO device, a base container is
+introduced to abstract the parts common to the legacy and iommufd containers,
+so that the generic VFIO code can use either one.
+
+The base container implements generic functions such as memory_listener and
+address space management, whereas the derived containers implement callbacks
+specific to either legacy VFIO or iommufd. Each container has its own way to
+set up a secure context and a DMA management interface. The diagram below
+shows how this looks with both containers.
+ +:: + + VFIO AddressSpace/Memory + +-------+ +----------+ +-----+ +-----+ + | pci | | platform | | ap | | ccw | + +---+---+ +----+-----+ +--+--+ +--+--+ +----------------------+ + | | | | | AddressSpace | + | | | | +------------+---------+ + +---V-----------V-----------V--------V----+ / + | VFIOAddressSpace | <------------+ + | | | MemoryListener + | VFIOContainerBase list | + +-------+----------------------------+----+ + | | + | | + +-------V------+ +--------V----------+ + | iommufd | | vfio legacy | + | container | | container | + +-------+------+ +--------+----------+ + | | + | /dev/iommu | /dev/vfio/vfio + | /dev/vfio/devices/vfioX | /dev/vfio/$group_id + Userspace | | + ============+============================+=========================== + Kernel | device fd | + +---------------+ | group/container fd + | (BIND_IOMMUFD | | (SET_CONTAINER/SET_IOMMU) + | ATTACH_IOAS) | | device fd + | | | + | +-------V------------V-----------------+ + iommufd | | vfio | + (map/unmap | +---------+--------------------+-------+ + ioas_copy) | | | map/unmap + | | | + +------V------+ +-----V------+ +------V--------+ + | iommfd core | | device | | vfio iommu | + +-------------+ +------------+ +---------------+ + +* Secure Context setup + + - iommufd BE: uses device fd and iommufd to setup secure context + (bind_iommufd, attach_ioas) + - vfio legacy BE: uses group fd and container fd to setup secure context + (set_container, set_iommu) + +* Device access + + - iommufd BE: device fd is opened through ``/dev/vfio/devices/vfioX`` + - vfio legacy BE: device fd is retrieved from group fd ioctl + +* DMA Mapping flow + + 1. VFIOAddressSpace receives MemoryRegion add/del via MemoryListener + 2. VFIO populates DMA map/unmap via the container BEs + * iommufd BE: uses iommufd + * vfio legacy BE: uses container fd + +Example configuration +===================== + +Step 1: configure the host device +--------------------------------- + +It's exactly same as the VFIO device with legacy VFIO container. + +Step 2: configure QEMU +---------------------- + +Interactions with the ``/dev/iommu`` are abstracted by a new iommufd +object (compiled in with the ``CONFIG_IOMMUFD`` option). + +Any QEMU device (e.g. VFIO device) wishing to use ``/dev/iommu`` must +be linked with an iommufd object. It gets a new optional property +named iommufd which allows to pass an iommufd object. Take ``vfio-pci`` +device for example: + +.. code-block:: bash + + -object iommufd,id=iommufd0 + -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0 + +Note the ``/dev/iommu`` and VFIO cdev can be externally opened by a +management layer. In such a case the fd is passed, the fd supports a +string naming the fd or a number, for example: + +.. code-block:: bash + + -object iommufd,id=iommufd0,fd=22 + -device vfio-pci,iommufd=iommufd0,fd=23 + +If the ``fd`` property is not passed, the fd is opened by QEMU. + +If no ``iommufd`` object is passed to the ``vfio-pci`` device, iommufd +is not used and the user gets the behavior based on the legacy VFIO +container: + +.. code-block:: bash + + -device vfio-pci,host=0000:02:00.0 + +Supported platform +================== + +Supports x86, ARM and s390x currently. + +Caveats +======= + +Dirty page sync +--------------- + +Dirty page sync with iommufd backend is unsupported yet, live migration is +disabled by default. But it can be force enabled like below, low efficient +though. + +.. 
code-block:: bash + + -object iommufd,id=iommufd0 + -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0,enable-migration=on + +P2P DMA +------- + +PCI p2p DMA is unsupported as IOMMUFD doesn't support mapping hardware PCI +BAR region yet. Below warning shows for assigned PCI device, it's not a bug. + +.. code-block:: none + + qemu-system-x86_64: warning: IOMMU_IOAS_MAP failed: Bad address, PCI BAR? + qemu-system-x86_64: vfio_container_dma_map(0x560cb6cb1620, 0xe000000021000, 0x3000, 0x7f32ed55c000) = -14 (Bad address) + +FD passing with mdev +-------------------- + +``vfio-pci`` device checks sysfsdev property to decide if backend is a mdev. +If FD passing is used, there is no way to know that and the mdev is treated +like a real PCI device. There is an error as below if user wants to enable +RAM discarding for mdev. + +.. code-block:: none + + qemu-system-x86_64: -device vfio-pci,iommufd=iommufd0,x-balloon-allowed=on,fd=9: vfio VFIO_FD9: x-balloon-allowed only potentially compatible with mdev devices + +``vfio-ap`` and ``vfio-ccw`` devices don't have same issue as their backend +devices are always mdev and RAM discarding is force enabled. diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..691e5218ec7bfaf31472555b22f8890e487f179c --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,2 @@ +sphinx==5.3.0 +sphinx_rtd_theme==1.1.1 diff --git a/docs/system/arm/b-l475e-iot01a.rst b/docs/system/arm/b-l475e-iot01a.rst new file mode 100644 index 0000000000000000000000000000000000000000..2b128e6b847dc998973d42f678320739826eee41 --- /dev/null +++ b/docs/system/arm/b-l475e-iot01a.rst @@ -0,0 +1,46 @@ +B-L475E-IOT01A IoT Node (``b-l475e-iot01a``) +============================================ + +The B-L475E-IOT01A IoT Node uses the STM32L475VG SoC which is based on +ARM Cortex-M4F core. It is part of STMicroelectronics +:doc:`STM32 boards ` and more specifically the STM32L4 +ultra-low power series. The STM32L4x5 chip runs at up to 80 MHz and +integrates 128 KiB of SRAM and up to 1MiB of Flash. The B-L475E-IOT01A board +namely features 64 Mibit QSPI Flash, BT, WiFi and RF connectivity, +USART, I2C, SPI, CAN and USB OTG, as well as a variety of sensors. + +Supported devices +""""""""""""""""" + +Currently, B-L475E-IOT01A machine's implementation is minimal, +it only supports the following device: + +- Cortex-M4F based STM32L4x5 SoC + +Missing devices +""""""""""""""" + +The B-L475E-IOT01A does *not* support the following devices: + +- Extended interrupts and events controller (EXTI) +- Reset and clock control (RCC) +- Serial ports (UART) +- System configuration controller (SYSCFG) +- General-purpose I/Os (GPIO) +- Analog to Digital Converter (ADC) +- SPI controller +- Timer controller (TIMER) + +See the complete list of unimplemented peripheral devices +in the STM32L4x5 module : ``./hw/arm/stm32l4x5_soc.c`` + +Boot options +"""""""""""" + +The B-L475E-IOT01A machine can be started using the ``-kernel`` +option to load a firmware. Example: + +.. 
code-block:: bash + + $ qemu-system-arm -M b-l475e-iot01a -kernel firmware.bin + diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 0b604f90059a94fae26342767590fbd6dac40486..f67aea2d8364202e33aa31a1cf6764bf4fb33553 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -63,6 +63,8 @@ the following architecture extensions: - FEAT_MTE (Memory Tagging Extension) - FEAT_MTE2 (Memory Tagging Extension) - FEAT_MTE3 (MTE Asymmetric Fault Handling) +- FEAT_NV (Nested Virtualization) +- FEAT_NV2 (Enhanced nested virtualization support) - FEAT_PACIMP (Pointer authentication - IMPLEMENTATION DEFINED algorithm) - FEAT_PACQARMA3 (Pointer authentication - QARMA3 algorithm) - FEAT_PACQARMA5 (Pointer authentication - QARMA5 algorithm) diff --git a/docs/system/arm/palm.rst b/docs/system/arm/palm.rst index 47ff9b36d46161bca5d21e0078c0543b2fe72c87..61bc8d34f40b5d91d5beef018365321dd90119d3 100644 --- a/docs/system/arm/palm.rst +++ b/docs/system/arm/palm.rst @@ -14,7 +14,7 @@ following elements: - On-chip Real Time Clock - TI TSC2102i touchscreen controller / analog-digital converter / - Audio CODEC, connected through MicroWire and |I2S| busses + Audio CODEC, connected through MicroWire and |I2S| buses - GPIO-connected matrix keypad diff --git a/docs/system/arm/stm32.rst b/docs/system/arm/stm32.rst index d7265b763d47abcf9fc6f1c6f9bfc4ee55e2466c..3b640f3ee07ea07927ba1587195af79e135884bb 100644 --- a/docs/system/arm/stm32.rst +++ b/docs/system/arm/stm32.rst @@ -16,11 +16,13 @@ based on this chip : - ``netduino2`` Netduino 2 board with STM32F205RFT6 microcontroller -The STM32F4 series is based on ARM Cortex-M4F core. This series is pin-to-pin -compatible with STM32F2 series. The following machines are based on this chip : +The STM32F4 series is based on ARM Cortex-M4F core, as well as the STM32L4 +ultra-low-power series. The STM32F4 series is pin-to-pin compatible with STM32F2 series. +The following machines are based on this ARM Cortex-M4F chip : - ``netduinoplus2`` Netduino Plus 2 board with STM32F405RGT6 microcontroller - ``olimex-stm32-h405`` Olimex STM32 H405 board with STM32F405RGT6 microcontroller +- ``b-l475e-iot01a`` :doc:`B-L475E-IOT01A IoT Node ` board with STM32L475VG microcontroller There are many other STM32 series that are currently not supported by QEMU. diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index d2d1b266926d885ac64b6365e3d7d9bd13525e72..0bafc76469de6e42e5892dbd087424c3d3ecd8f0 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -194,7 +194,7 @@ To use a different index value, N, from default of 0, add: .. code-block:: bash - -global xlnx,bbram-ctrl.drive-index=N + -global driver=xlnx.bbram-ctrl,property=drive-index,value=N eFUSE File Backend """""""""""""""""" @@ -212,7 +212,7 @@ To use a different index value, N, from default of 1, add: .. code-block:: bash - -global xlnx,efuse.drive-index=N + -global xlnx-efuse.drive-index=N .. warning:: In actual physical Versal, BBRAM and eFUSE contain sensitive data. 
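For readers who want to try the corrected ``-global`` spellings from the two hunks above, the sketch below shows one possible full invocation. It is illustrative only and not part of this patch: the ``-drive if=pflash,index=...`` backends, the file names and the index values are assumptions based on the BBRAM/eFUSE file-backend description in ``xlnx-versal-virt.rst``.

.. code-block:: bash

    # Illustrative only: back BBRAM and eFUSE with raw files on non-default
    # drive indexes, using the corrected -global option spellings.
    qemu-system-aarch64 -M xlnx-versal-virt \
        -drive if=pflash,index=2,file=bbram.bin,format=raw \
        -global driver=xlnx.bbram-ctrl,property=drive-index,value=2 \
        -drive if=pflash,index=3,file=efuse.bin,format=raw \
        -global xlnx-efuse.drive-index=3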
diff --git a/docs/system/arm/xscale.rst b/docs/system/arm/xscale.rst index d2d5949e1026e3f6d868ad5aea13369ef20d329f..e239136c3c71f7d49df7335f5b2fbc7a7738df2c 100644 --- a/docs/system/arm/xscale.rst +++ b/docs/system/arm/xscale.rst @@ -32,4 +32,4 @@ The clamshell PDA models emulation includes the following peripherals: - Three on-chip UARTs -- WM8750 audio CODEC on |I2C| and |I2S| busses +- WM8750 audio CODEC on |I2C| and |I2S| buses diff --git a/docs/system/devices/can.rst b/docs/system/devices/can.rst index 0af3d9912a69479e884c184906835829289ee11e..09121836fdbb0c3e8b9fdd849c05bcb534e8a833 100644 --- a/docs/system/devices/can.rst +++ b/docs/system/devices/can.rst @@ -1,12 +1,12 @@ CAN Bus Emulation Support ========================= The CAN bus emulation provides mechanism to connect multiple -emulated CAN controller chips together by one or multiple CAN busses -(the controller device "canbus" parameter). The individual busses +emulated CAN controller chips together by one or multiple CAN buses +(the controller device "canbus" parameter). The individual buses can be connected to host system CAN API (at this time only Linux SocketCAN is supported). -The concept of busses is generic and different CAN controllers +The concept of buses is generic and different CAN controllers can be implemented. The initial submission implemented SJA1000 controller which diff --git a/docs/system/invocation.rst b/docs/system/invocation.rst index 4ba38fc23d2d412de8ca2229dab344e5bc0f7db6..14b7db1c102bc859b03af0903cac503d72f9f6de 100644 --- a/docs/system/invocation.rst +++ b/docs/system/invocation.rst @@ -10,6 +10,11 @@ Invocation disk_image is a raw hard disk image for IDE hard disk 0. Some targets do not need a disk image. +When dealing with options parameters as arbitrary strings containing +commas, such as in "file=my,file" and "string=a,b", it's necessary to +double the commas. For instance,"-fw_cfg name=z,string=a,,b" will be +parsed as "-fw_cfg name=z,string=a,b". + .. hxtool-doc:: qemu-options.hx Device URL Syntax diff --git a/docs/system/qemu-manpage.rst b/docs/system/qemu-manpage.rst index c47a4127582f2946664f3e2cdbef77846b87c1db..3ade4ee45b5e15fb0aba9c947f39b3feef6fce20 100644 --- a/docs/system/qemu-manpage.rst +++ b/docs/system/qemu-manpage.rst @@ -31,6 +31,11 @@ Options disk_image is a raw hard disk image for IDE hard disk 0. Some targets do not need a disk image. +When dealing with options parameters as arbitrary strings containing +commas, such as in "file=my,file" and "string=a,b", it's necessary to +double the commas. For instance,"-fw_cfg name=z,string=a,,b" will be +parsed as "-fw_cfg name=z,string=a,b". + .. hxtool-doc:: qemu-options.hx .. include:: keys.rst.inc diff --git a/docs/system/riscv/sifive_u.rst b/docs/system/riscv/sifive_u.rst index 7b166567f9762b5cd0b323448a09cc8ab65acc4a..8f55ae8e313057081adfa24a9eaea4c4d4ebeaa6 100644 --- a/docs/system/riscv/sifive_u.rst +++ b/docs/system/riscv/sifive_u.rst @@ -210,7 +210,7 @@ command line options with ``qemu-system-riscv32``. Running U-Boot -------------- -U-Boot mainline v2021.07 release is tested at the time of writing. To build a +U-Boot mainline v2024.01 release is tested at the time of writing. 
To build a U-Boot mainline bootloader that can be booted by the ``sifive_u`` machine, use the sifive_unleashed_defconfig with similar commands as described above for Linux: @@ -325,15 +325,10 @@ configuration of U-Boot: $ export CROSS_COMPILE=riscv64-linux- $ make sifive_unleashed_defconfig - $ make menuconfig - -then manually select the following configuration: - - * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB - -and unselect the following configuration: - - * Library routines ---> Allow access to binman information in the device tree + $ ./scripts/config --enable OF_BOARD + $ ./scripts/config --disable BINMAN_FDT + $ ./scripts/config --disable SPL + $ make olddefconfig This changes U-Boot to use the QEMU generated device tree blob, and bypass running the U-Boot SPL stage. @@ -352,17 +347,13 @@ It's possible to create a 32-bit U-Boot S-mode image as well. $ export CROSS_COMPILE=riscv64-linux- $ make sifive_unleashed_defconfig - $ make menuconfig - -then manually update the following configuration in U-Boot: - - * Device Tree Control ---> Provider of DTB for DT Control ---> Prior Stage bootloader DTB - * RISC-V architecture ---> Base ISA ---> RV32I - * Boot options ---> Boot images ---> Text Base ---> 0x80400000 - -and unselect the following configuration: - - * Library routines ---> Allow access to binman information in the device tree + $ ./scripts/config --disable ARCH_RV64I + $ ./scripts/config --enable ARCH_RV32I + $ ./scripts/config --set-val TEXT_BASE 0x80400000 + $ ./scripts/config --enable OF_BOARD + $ ./scripts/config --disable BINMAN_FDT + $ ./scripts/config --disable SPL + $ make olddefconfig Use the same command line options to boot the 32-bit U-Boot S-mode image: diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst index f5fa7b8b29e6d9a08e9f4f8deb3e6196d1a8b1d7..9a06f95a3444c1eb2e49b09d6821c3a943980687 100644 --- a/docs/system/riscv/virt.rst +++ b/docs/system/riscv/virt.rst @@ -95,6 +95,11 @@ The following machine-specific options are supported: SiFive CLINT. When not specified, this option is assumed to be "off". This option is restricted to the TCG accelerator. +- acpi=[on|off|auto] + + When this option is "on" (which is the default), ACPI tables are generated and + exposed as firmware tables etc/acpi/rsdp and etc/acpi/tables. + - aia=[none|aplic|aplic-imsic] This option allows selecting interrupt controller defined by the AIA diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst index 790ac1b8a2b5aa3052cfc93e97e8bb2ac5a1772a..c9d7c0dda7e89f8ed3abf191ff1c7b4ecb332f91 100644 --- a/docs/system/target-arm.rst +++ b/docs/system/target-arm.rst @@ -84,6 +84,7 @@ undocumented; you can get a complete list by running arm/vexpress arm/aspeed arm/bananapi_m2u.rst + arm/b-l475e-iot01a.rst arm/sabrelite arm/digic arm/cubieboard diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index 4459c065f1942b7061a3e8b54352df2004978094..3653adb963ee17f5165b46b053935e3c42e1445e 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -406,7 +406,7 @@ Command description: Compare exits with ``0`` in case the images are equal and with ``1`` in case the images differ. Other exit codes mean an error occurred during execution and standard error output should contain an error message. 
- The following table sumarizes all exit codes of the compare subcommand: + The following table summarizes all exit codes of the compare subcommand: 0 Images are identical (or requested help was printed) diff --git a/dump/dump.c b/dump/dump.c index 481905076493c7d723e01f84a08e76ea0e5550e3..84064d890d2cf6df5c7e92e974f6e823988ad855 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -108,11 +108,11 @@ static int dump_cleanup(DumpState *s) s->guest_note = NULL; if (s->resume) { if (s->detached) { - qemu_mutex_lock_iothread(); + bql_lock(); } vm_start(); if (s->detached) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } migrate_del_blocker(&dump_migration_blocker); diff --git a/fsdev/meson.build b/fsdev/meson.build index 1bec0659245a2d08631c43c4b77e6748c8b6fa51..e20d7255e1e195c50e015bb2e17df2fd46e9ce7b 100644 --- a/fsdev/meson.build +++ b/fsdev/meson.build @@ -1,13 +1,13 @@ fsdev_ss = ss.source_set() fsdev_ss.add(files('qemu-fsdev-opts.c', 'qemu-fsdev-throttle.c')) -fsdev_ss.add(when: 'CONFIG_ALL', if_true: files('qemu-fsdev-dummy.c')) fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files( '9p-iov-marshal.c', '9p-marshal.c', 'qemu-fsdev.c', ), if_false: files('qemu-fsdev-dummy.c')) -system_ss.add_all(when: 'CONFIG_LINUX', if_true: fsdev_ss) -system_ss.add_all(when: 'CONFIG_DARWIN', if_true: fsdev_ss) +if host_os in ['linux', 'darwin'] + system_ss.add_all(fsdev_ss) +endif if have_virtfs_proxy_helper executable('virtfs-proxy-helper', diff --git a/gdbstub/meson.build b/gdbstub/meson.build index e5bccba34e5b37ddd89bd955b62f78b172648af5..da5721d8452b00742da50f74be78ce1615ced8d1 100644 --- a/gdbstub/meson.build +++ b/gdbstub/meson.build @@ -14,8 +14,8 @@ gdb_system_ss = ss.source_set() gdb_user_ss.add(files('gdbstub.c', 'user.c')) gdb_system_ss.add(files('gdbstub.c', 'system.c')) -gdb_user_ss = gdb_user_ss.apply(config_targetos, strict: false) -gdb_system_ss = gdb_system_ss.apply(config_targetos, strict: false) +gdb_user_ss = gdb_user_ss.apply({}) +gdb_system_ss = gdb_system_ss.apply({}) libgdb_user = static_library('gdb_user', gdb_user_ss.sources() + genh, diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index 2944ea63c38cfa2a59f4ec7bff028ab039313ac1..f1b62fa8c8056690d28865fbb263e0d03504373f 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -13,8 +13,11 @@ fs_ss.add(files( 'coth.c', 'coxattr.c', )) -fs_ss.add(when: 'CONFIG_LINUX', if_true: files('9p-util-linux.c')) -fs_ss.add(when: 'CONFIG_DARWIN', if_true: files('9p-util-darwin.c')) +if host_os == 'darwin' + fs_ss.add(files('9p-util-darwin.c')) +elif host_os == 'linux' + fs_ss.add(files('9p-util-linux.c')) +endif fs_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-9p-backend.c')) system_ss.add_all(when: 'CONFIG_FSDEV_9P', if_true: fs_ss) diff --git a/hw/9pfs/virtio-9p-device.c b/hw/9pfs/virtio-9p-device.c index 5f522e68e9f2a9b7030ebe9883f16c6a25c1ea89..efa41cfd73f9b366c708160bdfdd16ad20d16421 100644 --- a/hw/9pfs/virtio-9p-device.c +++ b/hw/9pfs/virtio-9p-device.c @@ -237,7 +237,7 @@ static const VMStateDescription vmstate_virtio_9p = { .name = "virtio-9p", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index 011d2c6c2ddf94f1b3f9af99436f99c11655deea..2d81c1e7908a67b74460be9e09897c00d4cd092a 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -297,7 +297,7 @@ static const VMStateDescription vmstate_cpuhp_sts = { .name = "CPU hotplug device state", .version_id = 1, 
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_inserting, AcpiCpuStatus), VMSTATE_BOOL(is_removing, AcpiCpuStatus), VMSTATE_UINT32(ost_event, AcpiCpuStatus), @@ -310,7 +310,7 @@ const VMStateDescription vmstate_cpu_hotplug = { .name = "CPU hotplug state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, CPUHotplugState), VMSTATE_UINT8(command, CPUHotplugState), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count, diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 634bbecb319c1e63d063b5c461aa368bc18fb7a4..6f78db0ccbec63310c6626f56fb25f1ac8a9454b 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -59,7 +59,8 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { }, }; -static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu) +static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu, + bool *swtchd_to_modern) { CPUClass *k = CPU_GET_CLASS(cpu); int64_t cpu_id; @@ -68,23 +69,34 @@ static void acpi_set_cpu_present_bit(AcpiCpuHotplug *g, CPUState *cpu) if ((cpu_id / 8) >= ACPI_GPE_PROC_LEN) { object_property_set_bool(g->device, "cpu-hotplug-legacy", false, &error_abort); + *swtchd_to_modern = true; return; } + *swtchd_to_modern = false; g->sts[cpu_id / 8] |= (1 << (cpu_id % 8)); } void legacy_acpi_cpu_plug_cb(HotplugHandler *hotplug_dev, AcpiCpuHotplug *g, DeviceState *dev, Error **errp) { - acpi_set_cpu_present_bit(g, CPU(dev)); - acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); + bool swtchd_to_modern; + Error *local_err = NULL; + + acpi_set_cpu_present_bit(g, CPU(dev), &swtchd_to_modern); + if (swtchd_to_modern) { + /* propagate the hotplug to the modern interface */ + hotplug_handler_plug(hotplug_dev, dev, &local_err); + } else { + acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS); + } } void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, AcpiCpuHotplug *gpe_cpu, uint16_t base) { CPUState *cpu; + bool swtchd_to_modern; memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops, gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN); @@ -92,7 +104,7 @@ void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner, gpe_cpu->device = owner; CPU_FOREACH(cpu) { - acpi_set_cpu_present_bit(gpe_cpu, cpu); + acpi_set_cpu_present_bit(gpe_cpu, cpu, &swtchd_to_modern); } } diff --git a/hw/acpi/erst.c b/hw/acpi/erst.c index ba751dc60e26b581315448f83fd572d97d738cae..b2f1b1363012e241fa8635538bbfe7ec83ab23fb 100644 --- a/hw/acpi/erst.c +++ b/hw/acpi/erst.c @@ -932,7 +932,7 @@ static const VMStateDescription erst_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = erst_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(operation, ERSTDeviceState), VMSTATE_UINT8(busy_status, ERSTDeviceState), VMSTATE_UINT8(command_status, ERSTDeviceState), diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index a3d31631fe0dba5b797baecbf85432c4a8e7e8e7..2d6e91b124e59292e51fd4d899db195568ae5cf1 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -312,7 +312,7 @@ static const VMStateDescription vmstate_memhp_state = { .name = "acpi-ged/memhp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(memhp_state, AcpiGedState), VMSTATE_END_OF_LIST() } @@ -322,7 +322,7 @@ 
static const VMStateDescription vmstate_ged_state = { .name = "acpi-ged-state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sel, GEDState), VMSTATE_END_OF_LIST() } @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_ghes = { .name = "acpi-ghes", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ghes_addr_le, AcpiGhesState), VMSTATE_END_OF_LIST() }, @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_ghes_state = { .version_id = 1, .minimum_version_id = 1, .needed = ghes_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ghes_state, AcpiGedState, 1, vmstate_ghes, AcpiGhesState), VMSTATE_END_OF_LIST() @@ -360,11 +360,11 @@ static const VMStateDescription vmstate_acpi_ged = { .name = "acpi-ged", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ged_state, AcpiGedState, 1, vmstate_ged_state, GEDState), VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_ghes_state, NULL diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c index 25e2c7243e0464985c27dce13a69fc0fceca667a..573d032e8e55adda7643df772361de7f0e205abf 100644 --- a/hw/acpi/ich9.c +++ b/hw/acpi/ich9.c @@ -164,7 +164,7 @@ static const VMStateDescription vmstate_memhp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_memhp, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() } @@ -181,7 +181,7 @@ static const VMStateDescription vmstate_tco_io_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_tco, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tco_regs, ICH9LPCPMRegs, 1, vmstate_tco_io_sts, TCOIORegs), VMSTATE_END_OF_LIST() @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_cpuhp_state = { .minimum_version_id = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU_HOTPLUG(cpuhp_state, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() } @@ -226,7 +226,7 @@ static const VMStateDescription vmstate_pcihp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_pcihp, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_HOTPLUG(acpi_pci_hotplug, ICH9LPCPMRegs, NULL, NULL), @@ -239,7 +239,7 @@ const VMStateDescription vmstate_ich9_pm = { .version_id = 1, .minimum_version_id = 1, .post_load = ich9_pm_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(acpi_regs.pm1.evt.sts, ICH9LPCPMRegs), VMSTATE_UINT16(acpi_regs.pm1.evt.en, ICH9LPCPMRegs), VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, ICH9LPCPMRegs), @@ -251,7 +251,7 @@ const VMStateDescription vmstate_ich9_pm = { VMSTATE_UINT32(smi_sts, ICH9LPCPMRegs), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_tco_io_state, &vmstate_cpuhp_state, diff --git a/hw/acpi/ich9_tco.c b/hw/acpi/ich9_tco.c index 1540f4fd4614365c74b2a0b18d281bdf40c16bf5..81606219f7328fe65f91b5ce39032f96d42ed6ba 100644 --- a/hw/acpi/ich9_tco.c +++ b/hw/acpi/ich9_tco.c @@ 
-254,7 +254,7 @@ const VMStateDescription vmstate_tco_io_sts = { .name = "tco io device status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(tco.rld, TCOIORegs), VMSTATE_UINT8(tco.din, TCOIORegs), VMSTATE_UINT8(tco.dout, TCOIORegs), diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index 0b883df81336abed56e9f16ccfb6c1e50b24c58c..de6f974ebbaede806a0484e2a04e5912d252a8bf 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -317,7 +317,7 @@ static const VMStateDescription vmstate_memhp_sts = { .name = "memory hotplug device state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_enabled, MemStatus), VMSTATE_BOOL(is_inserting, MemStatus), VMSTATE_UINT32(ost_event, MemStatus), @@ -330,7 +330,7 @@ const VMStateDescription vmstate_memory_hotplug = { .name = "memory hotplug state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, MemHotplugState), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, MemHotplugState, dev_count, vmstate_memhp_sts, MemStatus), diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index fc1b952379a88a74f336e6ae50614ee3c25eb2d1..5441c9b1e4c124b092a326f0f56fd0617b2d393f 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -33,9 +33,4 @@ endif system_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) system_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_false: files('pci-bridge-stub.c')) system_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) -system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', - 'acpi-x86-stub.c', 'ipmi-stub.c', 'ghes-stub.c', - 'acpi-mem-hotplug-stub.c', 'acpi-cpu-hotplug-stub.c', - 'acpi-pci-hotplug-stub.c', 'acpi-nvdimm-stub.c', - 'cxl-stub.c', 'pci-bridge-stub.c')) system_ss.add(files('acpi-qmp-cmds.c')) diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 4f75c873e267734f5aa26a9cd2665d5e0ef27457..5f79c9016b40cb08a27b597fc6892fc1e2ad0719 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -517,7 +517,7 @@ const VMStateDescription vmstate_acpi_pcihp_pci_status = { .name = "acpi_pcihp_pci_status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(up, AcpiPciHpPciStatus), VMSTATE_UINT32(down, AcpiPciHpPciStatus), VMSTATE_END_OF_LIST() diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c index dd523d2e4ced509c38d0b95405a39d1c33d9f0f6..debe1adb8463906a52fc46e8bae9d0def5c3d5a1 100644 --- a/hw/acpi/piix4.c +++ b/hw/acpi/piix4.c @@ -147,7 +147,7 @@ static const VMStateDescription vmstate_gpe = { .name = "gpe", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_GPE_ARRAY(sts, ACPIGPE), VMSTATE_GPE_ARRAY(en, ACPIGPE), VMSTATE_END_OF_LIST() @@ -158,7 +158,7 @@ static const VMStateDescription vmstate_pci_status = { .name = "pci_status", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(up, struct AcpiPciHpPciStatus), VMSTATE_UINT32(down, struct AcpiPciHpPciStatus), VMSTATE_END_OF_LIST() @@ -189,7 +189,7 @@ static const VMStateDescription vmstate_memhp_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_test_use_memhp, - .fields = (VMStateField[]) { + .fields = (const 
VMStateField[]) { VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, PIIX4PMState), VMSTATE_END_OF_LIST() } @@ -214,7 +214,7 @@ static const VMStateDescription vmstate_cpuhp_state = { .minimum_version_id = 1, .needed = vmstate_test_use_cpuhp, .pre_load = vmstate_cpuhp_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU_HOTPLUG(cpuhp_state, PIIX4PMState), VMSTATE_END_OF_LIST() } @@ -247,7 +247,7 @@ static const VMStateDescription vmstate_acpi = { .version_id = 3, .minimum_version_id = 3, .post_load = vmstate_acpi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PIIX4PMState), VMSTATE_UINT16(ar.pm1.evt.sts, PIIX4PMState), VMSTATE_UINT16(ar.pm1.evt.en, PIIX4PMState), @@ -269,7 +269,7 @@ static const VMStateDescription vmstate_acpi = { vmstate_test_migrate_acpi_index), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_memhp_state, &vmstate_cpuhp_state, NULL diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c index a39315c1b353ebcfe81452650de9b92c3c7d9784..e63c8af4c3fdc5adc9c0b257fe62f7570fc6baa2 100644 --- a/hw/acpi/vmgenid.c +++ b/hw/acpi/vmgenid.c @@ -178,7 +178,7 @@ static const VMStateDescription vmstate_vmgenid = { .version_id = 1, .minimum_version_id = 1, .post_load = vmgenid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(vmgenid_addr_le, VmGenIdState, sizeof(uint64_t)), VMSTATE_END_OF_LIST() }, diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c index 0d2966312954f03d07e29ef3d7b9d08a2fa61555..68bdbc73b0e66be18ce876fcc4b99d0c229aa193 100644 --- a/hw/adc/aspeed_adc.c +++ b/hw/adc/aspeed_adc.c @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_aspeed_adc_engine = { .name = TYPE_ASPEED_ADC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedADCEngineState, ASPEED_ADC_NR_REGS), VMSTATE_END_OF_LIST(), } diff --git a/hw/adc/max111x.c b/hw/adc/max111x.c index e8bf4cccd44529f956be04e20bca8bb58d2c8d96..957d177e1ce2f362b6ef806073083880a116a2c3 100644 --- a/hw/adc/max111x.c +++ b/hw/adc/max111x.c @@ -96,7 +96,7 @@ static const VMStateDescription vmstate_max111x = { .name = "max111x", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(parent_obj, MAX111xState), VMSTATE_UINT8(tb1, MAX111xState), VMSTATE_UINT8(rb2, MAX111xState), diff --git a/hw/adc/npcm7xx_adc.c b/hw/adc/npcm7xx_adc.c index bc6f3f55e643b26669feba2077a478d7cf6a2840..c6647eec6d746171e9521311eb89c7bb7e8912bd 100644 --- a/hw/adc/npcm7xx_adc.c +++ b/hw/adc/npcm7xx_adc.c @@ -253,7 +253,7 @@ static const VMStateDescription vmstate_npcm7xx_adc = { .name = "npcm7xx-adc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(conv_timer, NPCM7xxADCState), VMSTATE_UINT32(con, NPCM7xxADCState), VMSTATE_UINT32(data, NPCM7xxADCState), diff --git a/hw/adc/stm32f2xx_adc.c b/hw/adc/stm32f2xx_adc.c index 01a0b14e69d6c7de7112036ebc4143af582300e9..e9df6ea53f3d88d2827ee22922b4c115aa86888b 100644 --- a/hw/adc/stm32f2xx_adc.c +++ b/hw/adc/stm32f2xx_adc.c @@ -254,7 +254,7 @@ static const VMStateDescription vmstate_stm32f2xx_adc = { .name = TYPE_STM32F2XX_ADC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT32(adc_sr, STM32F2XXADCState), VMSTATE_UINT32(adc_cr1, STM32F2XXADCState), VMSTATE_UINT32(adc_cr2, STM32F2XXADCState), diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c index 032e19cbd0af2582e5c9fc71bdb4257973f9a169..34268319a40768cccd39cf4eafbf7c04d5338159 100644 --- a/hw/adc/zynq-xadc.c +++ b/hw/adc/zynq-xadc.c @@ -269,7 +269,7 @@ static const VMStateDescription vmstate_zynq_xadc = { .name = "zynq-xadc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ZynqXADCState, ZYNQ_XADC_NUM_IO_REGS), VMSTATE_UINT16_ARRAY(xadc_regs, ZynqXADCState, ZYNQ_XADC_NUM_ADC_REGS), diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 3ada335a24374d4d543bc14c19b0a0b0bd1505b3..39d255425b1ad5d5c19a7af0866eda11fa8b4f0f 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -8,6 +8,7 @@ config ARM_VIRT imply TPM_TIS_SYSBUS imply TPM_TIS_I2C imply NVDIMM + imply IOMMUFD select ARM_GIC select ACPI select ARM_SMMUV3 @@ -448,6 +449,17 @@ config STM32F405_SOC select STM32F4XX_SYSCFG select STM32F4XX_EXTI +config B_L475E_IOT01A + bool + default y + depends on TCG && ARM + select STM32L4X5_SOC + +config STM32L4X5_SOC + bool + select ARM_V7M + select OR_IRQ + config XLNX_ZYNQMP_ARM bool default y if PIXMAN @@ -536,6 +548,7 @@ config FSL_IMX6 select IMX_I2C select IMX_USBPHY select WDT_IMX2 + select PL310 # cache controller select SDHCI config ASPEED_SOC diff --git a/hw/arm/armsse.c b/hw/arm/armsse.c index 31acbf73471df862a9310f88158983020ea507a4..91502d157a913c9673483661823a8f16e24d5909 100644 --- a/hw/arm/armsse.c +++ b/hw/arm/armsse.c @@ -1022,10 +1022,8 @@ static void armsse_realize(DeviceState *dev, Error **errp) * later if necessary. */ if (extract32(info->cpuwait_rst, i, 1)) { - if (!object_property_set_bool(cpuobj, "start-powered-off", true, - errp)) { - return; - } + object_property_set_bool(cpuobj, "start-powered-off", true, + &error_abort); } if (!s->cpu_fpu[i]) { if (!object_property_set_bool(cpuobj, "vfp", false, errp)) { @@ -1677,7 +1675,7 @@ static const VMStateDescription armsse_vmstate = { .name = "iotkit", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(mainclk, ARMSSE), VMSTATE_CLOCK(s32kclk, ARMSSE), VMSTATE_UINT32(nsccfg, ARMSSE), diff --git a/hw/arm/armv7m.c b/hw/arm/armv7m.c index d10abb36a8ea75dd3372a9fa93ccad8a55482fb7..1f218277734b5bc71ca074c98f3634901faa85a4 100644 --- a/hw/arm/armv7m.c +++ b/hw/arm/armv7m.c @@ -256,6 +256,8 @@ static void armv7m_instance_init(Object *obj) object_initialize_child(obj, "nvic", &s->nvic, TYPE_NVIC); object_property_add_alias(obj, "num-irq", OBJECT(&s->nvic), "num-irq"); + object_property_add_alias(obj, "num-prio-bits", + OBJECT(&s->nvic), "num-prio-bits"); object_initialize_child(obj, "systick-reg-ns", &s->systick[M_REG_NS], TYPE_SYSTICK); @@ -318,12 +320,6 @@ static void armv7m_realize(DeviceState *dev, Error **errp) return; } } - if (object_property_find(OBJECT(s->cpu), "start-powered-off")) { - if (!object_property_set_bool(OBJECT(s->cpu), "start-powered-off", - s->start_powered_off, errp)) { - return; - } - } if (object_property_find(OBJECT(s->cpu), "vfp")) { if (!object_property_set_bool(OBJECT(s->cpu), "vfp", s->vfp, errp)) { return; @@ -334,6 +330,8 @@ static void armv7m_realize(DeviceState *dev, Error **errp) return; } } + object_property_set_bool(OBJECT(s->cpu), "start-powered-off", + s->start_powered_off, &error_abort); /* * Real M-profile hardware can be configured with a different 
number of @@ -559,7 +557,7 @@ static const VMStateDescription vmstate_armv7m = { .name = "armv7m", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(refclk, ARMv7MState), VMSTATE_CLOCK(cpuclk, ARMv7MState), VMSTATE_END_OF_LIST() diff --git a/hw/arm/aspeed_ast2400.c b/hw/arm/aspeed_ast2400.c index a4334c81b8f320eb2a63a09499a4ac50bcc34648..0baa2ff96e46b377a0ebf78ace2460bcea48bfc9 100644 --- a/hw/arm/aspeed_ast2400.c +++ b/hw/arm/aspeed_ast2400.c @@ -247,7 +247,6 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) Aspeed2400SoCState *a = ASPEED2400_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - Error *err = NULL; g_autofree char *sram_name = NULL; /* Default boot region (SPI memory or ROMs) */ @@ -276,9 +275,8 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) /* SRAM */ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); - memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { return; } memory_region_add_subregion(s->memory, diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index b965fbab5eeed00d74981044e8fcb4106a604619..3a9a303ab8bacbf98581aa1a570142386df2251a 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -282,7 +282,6 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) Aspeed2600SoCState *a = ASPEED2600_SOC(dev); AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - Error *err = NULL; qemu_irq irq; g_autofree char *sram_name = NULL; @@ -355,9 +354,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) /* SRAM */ sram_name = g_strdup_printf("aspeed.sram.%d", CPU(&a->cpu[0])->cpu_index); - memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, + errp)) { return; } memory_region_add_subregion(s->memory, diff --git a/hw/arm/b-l475e-iot01a.c b/hw/arm/b-l475e-iot01a.c new file mode 100644 index 0000000000000000000000000000000000000000..6ecde2db15c4cce6237533dd9bbeb327d6426dda --- /dev/null +++ b/hw/arm/b-l475e-iot01a.c @@ -0,0 +1,72 @@ +/* + * B-L475E-IOT01A Discovery Kit machine + * (B-L475E-IOT01A IoT Node) + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is heavily inspired by the netduinoplus2 by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics UM2153 User manual + * Discovery kit for IoT node, multi-channel communication with STM32L4. 
+ * https://www.st.com/en/evaluation-tools/b-l475e-iot01a.html#documentation + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-clock.h" +#include "qemu/error-report.h" +#include "hw/arm/stm32l4x5_soc.h" +#include "hw/arm/boot.h" + +/* Main SYSCLK frequency in Hz (80MHz) */ +#define MAIN_SYSCLK_FREQ_HZ 80000000ULL + +static void b_l475e_iot01a_init(MachineState *machine) +{ + const Stm32l4x5SocClass *sc; + DeviceState *dev; + Clock *sysclk; + + /* This clock doesn't need migration because it is fixed-frequency */ + sysclk = clock_new(OBJECT(machine), "SYSCLK"); + clock_set_hz(sysclk, MAIN_SYSCLK_FREQ_HZ); + + dev = qdev_new(TYPE_STM32L4X5XG_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); + qdev_connect_clock_in(dev, "sysclk", sysclk); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + sc = STM32L4X5_SOC_GET_CLASS(dev); + armv7m_load_kernel(ARM_CPU(first_cpu), + machine->kernel_filename, + 0, sc->flash_size); +} + +static void b_l475e_iot01a_machine_init(MachineClass *mc) +{ + static const char *machine_valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), + NULL + }; + mc->desc = "B-L475E-IOT01A Discovery Kit (Cortex-M4)"; + mc->init = b_l475e_iot01a_init; + mc->valid_cpu_types = machine_valid_cpu_types; + + /* SRAM pre-allocated as part of the SoC instantiation */ + mc->default_ram_size = 0; +} + +DEFINE_MACHINE("b-l475e-iot01a", b_l475e_iot01a_machine_init) diff --git a/hw/arm/bananapi_m2u.c b/hw/arm/bananapi_m2u.c index 8f24b18d8ca9805f79bb5acaeae2753bc0052b52..0a4b6f29b1cdbffc184edfc3cd80a2ec2fc04d3d 100644 --- a/hw/arm/bananapi_m2u.c +++ b/hw/arm/bananapi_m2u.c @@ -71,12 +71,6 @@ static void bpim2u_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A7 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { - error_report("This board can only be used with cortex-a7 CPU"); - exit(1); - } - r40 = AW_R40(object_new(TYPE_AW_R40)); object_property_add_child(OBJECT(machine), "soc", OBJECT(r40)); object_unref(OBJECT(r40)); @@ -133,12 +127,18 @@ static void bpim2u_init(MachineState *machine) static void bpim2u_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + NULL + }; + mc->desc = "Bananapi M2U (Cortex-A7)"; mc->init = bpim2u_init; mc->min_cpus = AW_R40_NUM_CPUS; mc->max_cpus = AW_R40_NUM_CPUS; mc->default_cpus = AW_R40_NUM_CPUS; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "bpim2u.ram"; } diff --git a/hw/arm/bcm2836.c b/hw/arm/bcm2836.c index 166dc896c09e49bb7f545b14eb78d69a6643c47a..b0674a22a6c26f212a196da1593cb040c6e5843a 100644 --- a/hw/arm/bcm2836.c +++ b/hw/arm/bcm2836.c @@ -127,22 +127,16 @@ static void bcm2836_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in_named(DEVICE(&s->control), "gpu-fiq", 0)); for (n = 0; n < BCM283X_NCPUS; n++) { - /* TODO: this should be converted to a property of ARM_CPU */ - s->cpu[n].core.mp_affinity = (bc->clusterid << 8) | n; + object_property_set_int(OBJECT(&s->cpu[n].core), "mp-affinity", + (bc->clusterid << 8) | n, &error_abort); /* set periphbase/CBAR value for CPU-local registers */ - if (!object_property_set_int(OBJECT(&s->cpu[n].core), "reset-cbar", - bc->peri_base, errp)) { - return; - } + object_property_set_int(OBJECT(&s->cpu[n].core), "reset-cbar", + bc->peri_base, &error_abort); /* start powered off 
if not enabled */ - if (!object_property_set_bool(OBJECT(&s->cpu[n].core), - "start-powered-off", - n >= s->enabled_cpus, - errp)) { - return; - } + object_property_set_bool(OBJECT(&s->cpu[n].core), "start-powered-off", + n >= s->enabled_cpus, &error_abort); if (!qdev_realize(DEVICE(&s->cpu[n].core), NULL, errp)) { return; diff --git a/hw/arm/cubieboard.c b/hw/arm/cubieboard.c index 29146f50181c259b8a27c78510c1e04edf048d2b..b976727eefdad470231ebb367ffee62ac25e57f9 100644 --- a/hw/arm/cubieboard.c +++ b/hw/arm/cubieboard.c @@ -52,12 +52,6 @@ static void cubieboard_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A8 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a8")) != 0) { - error_report("This board can only be used with cortex-a8 CPU"); - exit(1); - } - a10 = AW_A10(object_new(TYPE_AW_A10)); object_property_add_child(OBJECT(machine), "soc", OBJECT(a10)); object_unref(OBJECT(a10)); @@ -114,8 +108,14 @@ static void cubieboard_init(MachineState *machine) static void cubieboard_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a8"), + NULL + }; + mc->desc = "cubietech cubieboard (Cortex-A8)"; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a8"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->init = cubieboard_init; mc->block_default_type = IF_IDE; diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c index 9aabbf7f5870b60990b473b1e1d9973bcdc76161..9d2fb75a689702b3b4a802a3b2b69a028ae68526 100644 --- a/hw/arm/fsl-imx25.c +++ b/hw/arm/fsl-imx25.c @@ -81,7 +81,6 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) { FslIMX25State *s = FSL_IMX25(dev); uint8_t i; - Error *err = NULL; if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { return; @@ -281,28 +280,22 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) FSL_IMX25_WDT_IRQ)); /* initialize 2 x 16 KB ROM */ - memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", - FSL_IMX25_ROM0_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom[0], OBJECT(dev), "imx25.rom0", + FSL_IMX25_ROM0_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM0_ADDR, &s->rom[0]); - memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", - FSL_IMX25_ROM1_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom[1], OBJECT(dev), "imx25.rom1", + FSL_IMX25_ROM1_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_ROM1_ADDR, &s->rom[1]); /* initialize internal RAM (128 KB) */ - memory_region_init_ram(&s->iram, NULL, "imx25.iram", FSL_IMX25_IRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->iram, NULL, "imx25.iram", + FSL_IMX25_IRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX25_IRAM_ADDR, diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c index def27bb913605a96919258149e4db5f033bdf689..c0584e4dfcdc7a9dd03d26576f8bf459ea1d4650 100644 --- a/hw/arm/fsl-imx31.c +++ b/hw/arm/fsl-imx31.c @@ -63,7 +63,6 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp) { FslIMX31State *s = FSL_IMX31(dev); uint16_t i; - Error *err = NULL; if (!qdev_realize(DEVICE(&s->cpu), NULL, errp)) { return; @@ -188,30 +187,24 @@ static void fsl_imx31_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->wdt), 0, FSL_IMX31_WDT_ADDR); /* On a real system, the first 16k is a `secure boot 
rom' */ - memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", - FSL_IMX31_SECURE_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->secure_rom, OBJECT(dev), "imx31.secure_rom", + FSL_IMX31_SECURE_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_SECURE_ROM_ADDR, &s->secure_rom); /* There is also a 16k ROM */ - memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", - FSL_IMX31_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom, OBJECT(dev), "imx31.rom", + FSL_IMX31_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_ROM_ADDR, &s->rom); /* initialize internal RAM (16 KB) */ - memory_region_init_ram(&s->iram, NULL, "imx31.iram", FSL_IMX31_IRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->iram, NULL, "imx31.iram", + FSL_IMX31_IRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX31_IRAM_ADDR, diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 7dc42cbfe64903baa6f74fdddce6753064ccb63f..af2e982b0527539bc386b01d046a3ec4cdb60abc 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -109,7 +109,6 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) MachineState *ms = MACHINE(qdev_get_machine()); FslIMX6State *s = FSL_IMX6(dev); uint16_t i; - Error *err = NULL; unsigned int smp_cpus = ms->smp.cpus; if (smp_cpus > FSL_IMX6_NUM_CPUS) { @@ -155,6 +154,9 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) qdev_get_gpio_in(DEVICE(&s->cpu[i]), ARM_CPU_FIQ)); } + /* L2 cache controller */ + sysbus_create_simple("l2x0", FSL_IMX6_PL310_ADDR, NULL); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->ccm), errp)) { return; } @@ -423,30 +425,24 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) } /* ROM memory */ - memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", - FSL_IMX6_ROM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->rom, OBJECT(dev), "imx6.rom", + FSL_IMX6_ROM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_ROM_ADDR, &s->rom); /* CAAM memory */ - memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", - FSL_IMX6_CAAM_MEM_SIZE, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom(&s->caam, OBJECT(dev), "imx6.caam", + FSL_IMX6_CAAM_MEM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_CAAM_MEM_ADDR, &s->caam); /* OCRAM memory */ - memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", FSL_IMX6_OCRAM_SIZE, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->ocram, NULL, "imx6.ocram", + FSL_IMX6_OCRAM_SIZE, errp)) { return; } memory_region_add_subregion(get_system_memory(), FSL_IMX6_OCRAM_ADDR, diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c index f12aacea6b8629008eb35c5bb3e46c07e1c4173e..c21e18d08fdb54f7cdd08148972fc19222845027 100644 --- a/hw/arm/highbank.c +++ b/hw/arm/highbank.c @@ -112,7 +112,7 @@ static const VMStateDescription vmstate_highbank_regs = { .name = "highbank-regs", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, HighbankRegsState, NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/arm/integratorcp.c b/hw/arm/integratorcp.c index d176e9af7eee1063c6c97724de2c94e44d834e37..1830e1d785088a647a970e139ad0893c178dc22f 100644 --- 
a/hw/arm/integratorcp.c +++ b/hw/arm/integratorcp.c @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_integratorcm = { .name = "integratorcm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cm_osc, IntegratorCMState), VMSTATE_UINT32(cm_ctrl, IntegratorCMState), VMSTATE_UINT32(cm_lock, IntegratorCMState), @@ -291,12 +291,9 @@ static void integratorcm_realize(DeviceState *d, Error **errp) { IntegratorCMState *s = INTEGRATOR_CM(d); SysBusDevice *dev = SYS_BUS_DEVICE(d); - Error *local_err = NULL; - memory_region_init_ram(&s->flash, OBJECT(d), "integrator.flash", 0x100000, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram(&s->flash, OBJECT(d), "integrator.flash", + 0x100000, errp)) { return; } @@ -346,7 +343,7 @@ static const VMStateDescription vmstate_icp_pic = { .name = "icp_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, icp_pic_state), VMSTATE_UINT32(irq_enabled, icp_pic_state), VMSTATE_UINT32(fiq_enabled, icp_pic_state), @@ -488,7 +485,7 @@ static const VMStateDescription vmstate_icp_control = { .name = "icp_control", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intreg_state, ICPCtrlRegsState), VMSTATE_END_OF_LIST() } diff --git a/hw/arm/meson.build b/hw/arm/meson.build index 68245d3ad10b5ca581d6ecc77e3375fe2792f952..bb92b27db3e6bb43d3b9aae3e8e69790ecd30619 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -42,6 +42,8 @@ arm_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2836.c', 'raspi.c')) arm_ss.add(when: 'CONFIG_STM32F100_SOC', if_true: files('stm32f100_soc.c')) arm_ss.add(when: 'CONFIG_STM32F205_SOC', if_true: files('stm32f205_soc.c')) arm_ss.add(when: 'CONFIG_STM32F405_SOC', if_true: files('stm32f405_soc.c')) +arm_ss.add(when: 'CONFIG_B_L475E_IOT01A', if_true: files('b-l475e-iot01a.c')) +arm_ss.add(when: 'CONFIG_STM32L4X5_SOC', if_true: files('stm32l4x5_soc.c')) arm_ss.add(when: 'CONFIG_XLNX_ZYNQMP_ARM', if_true: files('xlnx-zynqmp.c', 'xlnx-zcu102.c')) arm_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal.c', 'xlnx-versal-virt.c')) arm_ss.add(when: 'CONFIG_FSL_IMX25', if_true: files('fsl-imx25.c', 'imx25_pdk.c')) diff --git a/hw/arm/mps2-tz.c b/hw/arm/mps2-tz.c index 668db5ed61962e4bbc2c0a3945e8ba704e16b88d..5d8cdc1a4c5eacf2fa169957613a17ecf8b6b5b6 100644 --- a/hw/arm/mps2-tz.c +++ b/hw/arm/mps2-tz.c @@ -813,12 +813,6 @@ static void mps2tz_common_init(MachineState *machine) int num_ppcs; int i; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); error_report("Invalid RAM size, should be %s", sz); @@ -1318,6 +1312,10 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS2 with AN505 FPGA image for Cortex-M33"; mc->default_cpus = 1; @@ -1325,6 +1323,7 @@ static void mps2tz_an505_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN505; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + 
mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045050; mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1347,6 +1346,10 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS2 with AN521 FPGA image for dual Cortex-M33"; mc->default_cpus = 2; @@ -1354,6 +1357,7 @@ static void mps2tz_an521_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN521; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045210; mmc->sysclk_frq = 20 * 1000 * 1000; /* 20MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1376,6 +1380,10 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->desc = "ARM MPS3 with AN524 FPGA image for dual Cortex-M33"; mc->default_cpus = 2; @@ -1383,6 +1391,7 @@ static void mps3tz_an524_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN524; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045240; mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ mmc->apb_periph_frq = mmc->sysclk_frq; @@ -1410,6 +1419,10 @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2TZMachineClass *mmc = MPS2TZ_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m55"), + NULL + }; mc->desc = "ARM MPS3 with AN547 FPGA image for Cortex-M55"; mc->default_cpus = 1; @@ -1417,6 +1430,7 @@ static void mps3tz_an547_class_init(ObjectClass *oc, void *data) mc->max_cpus = mc->default_cpus; mmc->fpga_type = FPGA_AN547; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m55"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41055470; mmc->sysclk_frq = 32 * 1000 * 1000; /* 32MHz */ mmc->apb_periph_frq = 25 * 1000 * 1000; /* 25MHz */ diff --git a/hw/arm/mps2.c b/hw/arm/mps2.c index 292a180ad27eac0800f265e9a39f17d173058b35..bd873cc5ded3de93a20ad274c3e7b62b01ffaae2 100644 --- a/hw/arm/mps2.c +++ b/hw/arm/mps2.c @@ -142,12 +142,6 @@ static void mps2_common_init(MachineState *machine) QList *oscclk; int i; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - if (machine->ram_size != mc->default_ram_size) { char *sz = size_to_str(mc->default_ram_size); error_report("Invalid RAM size, should be %s", sz); @@ -484,10 +478,15 @@ static void mps2_an385_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; mc->desc = "ARM MPS2 with AN385 FPGA image for Cortex-M3"; mmc->fpga_type = FPGA_AN385; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41043850; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; @@ -498,10 +497,15 @@ static void mps2_an386_class_init(ObjectClass *oc, void *data) { MachineClass 
*mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m4"), + NULL + }; mc->desc = "ARM MPS2 with AN386 FPGA image for Cortex-M4"; mmc->fpga_type = FPGA_AN386; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m4"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41043860; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; @@ -512,10 +516,15 @@ static void mps2_an500_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m7"), + NULL + }; mc->desc = "ARM MPS2 with AN500 FPGA image for Cortex-M7"; mmc->fpga_type = FPGA_AN500; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m7"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045000; mmc->psram_base = 0x60000000; mmc->ethernet_base = 0xa0000000; @@ -526,10 +535,15 @@ static void mps2_an511_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); MPS2MachineClass *mmc = MPS2_MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; mc->desc = "ARM MPS2 with AN511 DesignStart FPGA image for Cortex-M3"; mmc->fpga_type = FPGA_AN511; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; mmc->scc_id = 0x41045110; mmc->psram_base = 0x21000000; mmc->ethernet_base = 0x40200000; diff --git a/hw/arm/msf2-som.c b/hw/arm/msf2-som.c index 7b3106c790c17accc3ff7c7046e598b24901a86c..a269cf044b9df62575fae1dbb48fbba0733c302b 100644 --- a/hw/arm/msf2-som.c +++ b/hw/arm/msf2-som.c @@ -55,17 +55,12 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) MemoryRegion *ddr = g_new(MemoryRegion, 1); Clock *m3clk; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - memory_region_init_ram(ddr, NULL, "ddr-ram", DDR_SIZE, &error_fatal); memory_region_add_subregion(sysmem, DDR_BASE_ADDRESS, ddr); dev = qdev_new(TYPE_MSF2_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_prop_set_string(dev, "part-name", "M2S010"); qdev_prop_set_string(dev, "cpu-type", mc->default_cpu_type); @@ -106,9 +101,15 @@ static void emcraft_sf2_s2s010_init(MachineState *machine) static void emcraft_sf2_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m3"), + NULL + }; + mc->desc = "SmartFusion2 SOM kit from Emcraft (M2S010)"; mc->init = emcraft_sf2_s2s010_init; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m3"); + mc->valid_cpu_types = valid_cpu_types; } DEFINE_MACHINE("emcraft-sf2", emcraft_sf2_machine_init) diff --git a/hw/arm/musca.c b/hw/arm/musca.c index 6eeee57c9ddb90c0a0bcb4d3141a6021d1c36904..770ec1a15ca5f675fe1e4e8673b3ebd11f7eb43a 100644 --- a/hw/arm/musca.c +++ b/hw/arm/musca.c @@ -355,7 +355,6 @@ static void musca_init(MachineState *machine) { MuscaMachineState *mms = MUSCA_MACHINE(machine); MuscaMachineClass *mmc = MUSCA_MACHINE_GET_CLASS(mms); - MachineClass *mc = MACHINE_GET_CLASS(machine); MemoryRegion *system_memory = get_system_memory(); DeviceState *ssedev; DeviceState *dev_splitter; @@ -366,12 +365,6 @@ static void musca_init(MachineState *machine) assert(mmc->num_irqs <= MUSCA_NUMIRQ_MAX); assert(mmc->num_mpcs <= MUSCA_MPC_MAX); - if (strcmp(machine->cpu_type, 
mc->default_cpu_type) != 0) { - error_report("This board can only be used with CPU %s", - mc->default_cpu_type); - exit(1); - } - mms->sysclk = clock_new(OBJECT(machine), "SYSCLK"); clock_set_hz(mms->sysclk, SYSCLK_FRQ); mms->s32kclk = clock_new(OBJECT(machine), "S32KCLK"); @@ -604,11 +597,16 @@ static void musca_init(MachineState *machine) static void musca_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-m33"), + NULL + }; mc->default_cpus = 2; mc->min_cpus = mc->default_cpus; mc->max_cpus = mc->default_cpus; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-m33"); + mc->valid_cpu_types = valid_cpu_types; mc->init = musca_init; } diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 9703bfb97fb4d389ab07f8562df902ebd3be754b..3200c9f68ac9b0109ab985d986b79aeb20051c1b 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -275,7 +275,7 @@ static const VMStateDescription musicpal_lcd_vmsd = { .name = "musicpal_lcd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(brightness, musicpal_lcd_state), VMSTATE_UINT32(mode, musicpal_lcd_state), VMSTATE_UINT32(irqctrl, musicpal_lcd_state), @@ -400,7 +400,7 @@ static const VMStateDescription mv88w8618_pic_vmsd = { .name = "mv88w8618_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, mv88w8618_pic_state), VMSTATE_UINT32(enabled, mv88w8618_pic_state), VMSTATE_END_OF_LIST() @@ -583,7 +583,7 @@ static const VMStateDescription mv88w8618_timer_vmsd = { .name = "timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, mv88w8618_timer_state), VMSTATE_UINT32(limit, mv88w8618_timer_state), VMSTATE_END_OF_LIST() @@ -594,7 +594,7 @@ static const VMStateDescription mv88w8618_pit_vmsd = { .name = "mv88w8618_pit", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(timer, mv88w8618_pit_state, 4, 1, mv88w8618_timer_vmsd, mv88w8618_timer_state), VMSTATE_END_OF_LIST() @@ -681,7 +681,7 @@ static const VMStateDescription mv88w8618_flashcfg_vmsd = { .name = "mv88w8618_flashcfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cfgr0, mv88w8618_flashcfg_state), VMSTATE_END_OF_LIST() } @@ -1015,7 +1015,7 @@ static const VMStateDescription musicpal_gpio_vmsd = { .name = "musicpal_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(lcd_brightness, musicpal_gpio_state), VMSTATE_UINT32(out_state, musicpal_gpio_state), VMSTATE_UINT32(in_state, musicpal_gpio_state), @@ -1174,7 +1174,7 @@ static const VMStateDescription musicpal_key_vmsd = { .name = "musicpal_key", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(kbd_extended, musicpal_key_state), VMSTATE_UINT32(pressed_keys, musicpal_key_state), VMSTATE_END_OF_LIST() diff --git a/hw/arm/netduino2.c b/hw/arm/netduino2.c index 501f63a77f9a9f67f8d76c4145cdc1a34d1633fd..8b1a9a24379080f82e657d7ba84526df89356f6d 100644 --- a/hw/arm/netduino2.c +++ b/hw/arm/netduino2.c @@ -44,6 +44,7 @@ static void netduino2_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = 
qdev_new(TYPE_STM32F205_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/netduinoplus2.c b/hw/arm/netduinoplus2.c index 2e589849478c84389124e21570fd2d964685dfd9..bccd1003549e33bd0034f77b7888f5c761846d7b 100644 --- a/hw/arm/netduinoplus2.c +++ b/hw/arm/netduinoplus2.c @@ -44,6 +44,7 @@ static void netduinoplus2_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F405_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index 2aef579aacc20dbb339bf8a31c7c4643489722c0..2999b8b96d0ff6498a2c0f5e3a774e314b2a9e0c 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -121,15 +121,8 @@ static NPCM7xxState *npcm7xx_create_soc(MachineState *machine, uint32_t hw_straps) { NPCM7xxMachineClass *nmc = NPCM7XX_MACHINE_GET_CLASS(machine); - MachineClass *mc = MACHINE_CLASS(nmc); Object *obj; - if (strcmp(machine->cpu_type, mc->default_cpu_type) != 0) { - error_report("This board can only be used with %s", - mc->default_cpu_type); - exit(1); - } - obj = object_new_with_props(nmc->soc_type, OBJECT(machine), "soc", &error_abort, NULL); object_property_set_uint(obj, "power-on-straps", hw_straps, &error_abort); @@ -463,12 +456,17 @@ static void npcm7xx_set_soc_type(NPCM7xxMachineClass *nmc, const char *type) static void npcm7xx_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a9"), + NULL + }; mc->no_floppy = 1; mc->no_cdrom = 1; mc->no_parallel = 1; mc->default_ram_id = "ram"; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a9"); + mc->valid_cpu_types = valid_cpu_types; } /* diff --git a/hw/arm/nrf51_soc.c b/hw/arm/nrf51_soc.c index 34da0d62f00ec43a2306d0cff94196aa1543a569..ac53441630fd284aea87436c8b889f8734f99aad 100644 --- a/hw/arm/nrf51_soc.c +++ b/hw/arm/nrf51_soc.c @@ -58,7 +58,6 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) { NRF51State *s = NRF51_SOC(dev_soc); MemoryRegion *mr; - Error *err = NULL; uint8_t i = 0; hwaddr base_addr = 0; @@ -92,10 +91,8 @@ static void nrf51_soc_realize(DeviceState *dev_soc, Error **errp) memory_region_add_subregion_overlap(&s->container, 0, s->board_memory, -1); - memory_region_init_ram(&s->sram, OBJECT(s), "nrf51.sram", s->sram_size, - &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_ram(&s->sram, OBJECT(s), "nrf51.sram", s->sram_size, + errp)) { return; } memory_region_add_subregion(&s->container, NRF51_SRAM_BASE, &s->sram); diff --git a/hw/arm/olimex-stm32-h405.c b/hw/arm/olimex-stm32-h405.c index d793de7c97f2547a3358cc0f79dae16afafb0224..4ad7b043be014b98ae87408704c1b2597e6d9b20 100644 --- a/hw/arm/olimex-stm32-h405.c +++ b/hw/arm/olimex-stm32-h405.c @@ -47,6 +47,7 @@ static void olimex_stm32_h405_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F405_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/orangepi.c b/hw/arm/orangepi.c index f3784d45cafa145137e2798e5ef681abefbbbd5d..77e328191d73a9b4f0b9227cea41503b331e58f7 100644 --- 
a/hw/arm/orangepi.c +++ b/hw/arm/orangepi.c @@ -49,12 +49,6 @@ static void orangepi_init(MachineState *machine) exit(1); } - /* Only allow Cortex-A7 for this board */ - if (strcmp(machine->cpu_type, ARM_CPU_TYPE_NAME("cortex-a7")) != 0) { - error_report("This board can only be used with cortex-a7 CPU"); - exit(1); - } - h3 = AW_H3(object_new(TYPE_AW_H3)); object_property_add_child(OBJECT(machine), "soc", OBJECT(h3)); object_unref(OBJECT(h3)); @@ -111,6 +105,11 @@ static void orangepi_init(MachineState *machine) static void orangepi_machine_init(MachineClass *mc) { + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a7"), + NULL + }; + mc->desc = "Orange Pi PC (Cortex-A7)"; mc->init = orangepi_init; mc->block_default_type = IF_SD; @@ -119,6 +118,7 @@ static void orangepi_machine_init(MachineClass *mc) mc->max_cpus = AW_H3_NUM_CPUS; mc->default_cpus = AW_H3_NUM_CPUS; mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7"); + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_size = 1 * GiB; mc->default_ram_id = "orangepi.ram"; } diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c index f0bf407e664db9d4c68a45cbb0d0b74f3c086fdc..6b2e54473b3ec60af76e09d2b47edd0a53551f62 100644 --- a/hw/arm/pxa2xx.c +++ b/hw/arm/pxa2xx.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_pxa2xx_pm = { .name = "pxa2xx_pm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(pm_regs, PXA2xxState, 0x40), VMSTATE_END_OF_LIST() } @@ -238,7 +238,7 @@ static const VMStateDescription vmstate_pxa2xx_cm = { .name = "pxa2xx_cm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(cm_regs, PXA2xxState, 4), VMSTATE_UINT32(clkcfg, PXA2xxState), VMSTATE_UINT32(pmnc, PXA2xxState), @@ -465,7 +465,7 @@ static const VMStateDescription vmstate_pxa2xx_mm = { .name = "pxa2xx_mm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mm_regs, PXA2xxState, 0x1a), VMSTATE_END_OF_LIST() } @@ -510,7 +510,7 @@ static const VMStateDescription vmstate_pxa2xx_ssp = { .name = "pxa2xx-ssp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, PXA2xxSSPState), VMSTATE_UINT32_ARRAY(sscr, PXA2xxSSPState, 2), VMSTATE_UINT32(sspsp, PXA2xxSSPState), @@ -1200,7 +1200,7 @@ static const VMStateDescription vmstate_pxa2xx_rtc_regs = { .minimum_version_id = 0, .pre_save = pxa2xx_rtc_pre_save, .post_load = pxa2xx_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rttr, PXA2xxRTCState), VMSTATE_UINT32(rtsr, PXA2xxRTCState), VMSTATE_UINT32(rtar, PXA2xxRTCState), @@ -1464,7 +1464,7 @@ static const VMStateDescription vmstate_pxa2xx_i2c_slave = { .name = "pxa2xx_i2c_slave", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, PXA2xxI2CSlaveState), VMSTATE_END_OF_LIST() } @@ -1474,7 +1474,7 @@ static const VMStateDescription vmstate_pxa2xx_i2c = { .name = "pxa2xx_i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(control, PXA2xxI2CState), VMSTATE_UINT16(status, PXA2xxI2CState), VMSTATE_UINT8(ibmr, PXA2xxI2CState), @@ -1728,7 +1728,7 @@ static const VMStateDescription vmstate_pxa2xx_i2s = { .name = "pxa2xx_i2s", 
.version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(control, PXA2xxI2SState, 2), VMSTATE_UINT32(status, PXA2xxI2SState), VMSTATE_UINT32(mask, PXA2xxI2SState), @@ -2027,7 +2027,7 @@ static const VMStateDescription pxa2xx_fir_vmsd = { .name = "pxa2xx-fir", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, PXA2xxFIrState), VMSTATE_UINT8_ARRAY(control, PXA2xxFIrState, 3), VMSTATE_UINT8_ARRAY(status, PXA2xxFIrState, 2), diff --git a/hw/arm/pxa2xx_gpio.c b/hw/arm/pxa2xx_gpio.c index c8db5e8e2b74f6952b93365242324c96141eb88b..41dca036fbb6f7f0c02988b67da715f9732cd543 100644 --- a/hw/arm/pxa2xx_gpio.c +++ b/hw/arm/pxa2xx_gpio.c @@ -320,7 +320,7 @@ static const VMStateDescription vmstate_pxa2xx_gpio_regs = { .name = "pxa2xx-gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ilevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), VMSTATE_UINT32_ARRAY(olevel, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), VMSTATE_UINT32_ARRAY(dir, PXA2xxGPIOInfo, PXA2XX_GPIO_BANKS), diff --git a/hw/arm/pxa2xx_pic.c b/hw/arm/pxa2xx_pic.c index 1373a0d275fa331589f643e1b8fb63eba869253b..f54546cd4df4fe8a578ddf88904d96bf97bd2aa7 100644 --- a/hw/arm/pxa2xx_pic.c +++ b/hw/arm/pxa2xx_pic.c @@ -316,7 +316,7 @@ static const VMStateDescription vmstate_pxa2xx_pic_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = pxa2xx_pic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(int_enabled, PXA2xxPICState, 2), VMSTATE_UINT32_ARRAY(int_pending, PXA2xxPICState, 2), VMSTATE_UINT32_ARRAY(is_fiq, PXA2xxPICState, 2), diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index f3c970469396b2fe2897c2ce537099c8c3bd9a1e..477dca06373aa501fd83a488239cdff3b75c304d 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -145,27 +145,6 @@ static const int sbsa_ref_irqmap[] = { [SBSA_GWDT_WS0] = 16, }; -static const char * const valid_cpus[] = { - ARM_CPU_TYPE_NAME("cortex-a57"), - ARM_CPU_TYPE_NAME("cortex-a72"), - ARM_CPU_TYPE_NAME("neoverse-n1"), - ARM_CPU_TYPE_NAME("neoverse-v1"), - ARM_CPU_TYPE_NAME("neoverse-n2"), - ARM_CPU_TYPE_NAME("max"), -}; - -static bool cpu_type_valid(const char *cpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { - if (strcmp(cpu, valid_cpus[i]) == 0) { - return true; - } - } - return false; -} - static uint64_t sbsa_ref_cpu_mp_affinity(SBSAMachineState *sms, int idx) { uint8_t clustersz = ARM_DEFAULT_CPUS_PER_CLUSTER; @@ -733,11 +712,6 @@ static void sbsa_ref_init(MachineState *machine) const CPUArchIdList *possible_cpus; int n, sbsa_max_cpus; - if (!cpu_type_valid(machine->cpu_type)) { - error_report("sbsa-ref: CPU type %s not supported", machine->cpu_type); - exit(1); - } - if (kvm_enabled()) { error_report("sbsa-ref: KVM is not supported for this machine"); exit(1); @@ -898,10 +872,20 @@ static void sbsa_ref_instance_init(Object *obj) static void sbsa_ref_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + ARM_CPU_TYPE_NAME("cortex-a57"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("neoverse-n1"), + ARM_CPU_TYPE_NAME("neoverse-v1"), + ARM_CPU_TYPE_NAME("neoverse-n2"), + ARM_CPU_TYPE_NAME("max"), + NULL, + }; mc->init = sbsa_ref_init; mc->desc = "QEMU 'SBSA Reference' ARM Virtual Machine"; mc->default_cpu_type = 
ARM_CPU_TYPE_NAME("neoverse-n1"); + mc->valid_cpu_types = valid_cpu_types; mc->max_cpus = 512; mc->pci_allow_0_address = true; mc->minimum_page_bits = 12; diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index c3871ae067ffb74a315025a8f54513e6e81b2a11..68eeef3e1d4c3866e75b6f6be19c8e2fe19b26ec 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1768,7 +1768,7 @@ static const VMStateDescription vmstate_smmuv3_queue = { .name = "smmuv3_queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, SMMUQueue), VMSTATE_UINT32(prod, SMMUQueue), VMSTATE_UINT32(cons, SMMUQueue), @@ -1790,7 +1790,7 @@ static const VMStateDescription vmstate_gbpa = { .version_id = 1, .minimum_version_id = 1, .needed = smmuv3_gbpa_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gbpa, SMMUv3State), VMSTATE_END_OF_LIST() } @@ -1801,7 +1801,7 @@ static const VMStateDescription vmstate_smmuv3 = { .version_id = 1, .minimum_version_id = 1, .priority = MIG_PRI_IOMMU, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(features, SMMUv3State), VMSTATE_UINT8(sid_size, SMMUv3State), VMSTATE_UINT8(sid_split, SMMUv3State), @@ -1826,7 +1826,7 @@ static const VMStateDescription vmstate_smmuv3 = { VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gbpa, NULL } diff --git a/hw/arm/spitz.c b/hw/arm/spitz.c index cc268c6ac0b78c2bff7ed86768d7e5b1e477a869..1d680b61e243087b1dfde98c899704a4e3f6c681 100644 --- a/hw/arm/spitz.c +++ b/hw/arm/spitz.c @@ -1143,7 +1143,7 @@ static const VMStateDescription vmstate_sl_nand_info = { .name = "sl-nand", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ctl, SLNANDState), VMSTATE_STRUCT(ecc, SLNANDState, 0, vmstate_ecc_state, ECCState), VMSTATE_END_OF_LIST(), @@ -1180,7 +1180,7 @@ static const VMStateDescription vmstate_spitz_kbd = { .version_id = 1, .minimum_version_id = 0, .post_load = spitz_keyboard_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(sense_state, SpitzKeyboardState), VMSTATE_UINT16(strobe_state, SpitzKeyboardState), VMSTATE_UNUSED_TEST(is_version_0, 5), @@ -1208,7 +1208,7 @@ static const VMStateDescription vmstate_corgi_ssp_regs = { .name = "corgi-ssp", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, CorgiSSPState), VMSTATE_UINT32_ARRAY(enable, CorgiSSPState, 3), VMSTATE_END_OF_LIST(), @@ -1236,7 +1236,7 @@ static const VMStateDescription vmstate_spitz_lcdtg_regs = { .name = "spitz-lcdtg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, SpitzLCDTG), VMSTATE_UINT32(bl_intensity, SpitzLCDTG), VMSTATE_UINT32(bl_power, SpitzLCDTG), diff --git a/hw/arm/stellaris.c b/hw/arm/stellaris.c index dd90f686bfab1492a4a0e5f3becf356ad6bce2d3..d18b1144af5006f4ffb3cffe11db822e9a2d8dc5 100644 --- a/hw/arm/stellaris.c +++ b/hw/arm/stellaris.c @@ -47,6 +47,7 @@ #define BP_GAMEPAD 0x04 #define NUM_IRQ_LINES 64 +#define NUM_PRIO_BITS 3 typedef const struct { const char *name; @@ -419,7 +420,7 @@ static const VMStateDescription vmstate_stellaris_sys = { .version_id = 2, .minimum_version_id = 1, .post_load = stellaris_sys_post_load, - .fields = (VMStateField[]) { + .fields 
= (const VMStateField[]) { VMSTATE_UINT32(pborctl, ssys_state), VMSTATE_UINT32(ldopctl, ssys_state), VMSTATE_UINT32(int_mask, ssys_state), @@ -631,7 +632,7 @@ static const VMStateDescription vmstate_stellaris_i2c = { .name = "stellaris_i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(msa, stellaris_i2c_state), VMSTATE_UINT32(mcs, stellaris_i2c_state), VMSTATE_UINT32(mdr, stellaris_i2c_state), @@ -901,7 +902,7 @@ static const VMStateDescription vmstate_stellaris_adc = { .name = "stellaris_adc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(actss, StellarisADCState), VMSTATE_UINT32(ris, StellarisADCState), VMSTATE_UINT32(im, StellarisADCState), @@ -1067,6 +1068,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board) nvic = qdev_new(TYPE_ARMV7M); qdev_prop_set_uint32(nvic, "num-irq", NUM_IRQ_LINES); + qdev_prop_set_uint8(nvic, "num-prio-bits", NUM_PRIO_BITS); qdev_prop_set_string(nvic, "cpu-type", ms->cpu_type); qdev_prop_set_bit(nvic, "enable-bitband", true); qdev_connect_clock_in(nvic, "cpuclk", diff --git a/hw/arm/stm32f100_soc.c b/hw/arm/stm32f100_soc.c index b90d440d7aa26b8216aa690173c0865e9bfa9030..808b783515d90e61d42cd776da4789a57b2c1e2e 100644 --- a/hw/arm/stm32f100_soc.c +++ b/hw/arm/stm32f100_soc.c @@ -115,6 +115,7 @@ static void stm32f100_soc_realize(DeviceState *dev_soc, Error **errp) /* Init ARMv7m */ armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 61); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index 1a548646f6ecd3c6a28cd0585a052abc946f9aba..a451e21f59c311b489176f1a7d7721c34525f6c7 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -127,6 +127,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m3")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32f405_soc.c b/hw/arm/stm32f405_soc.c index a65bbe298d2bcd0446c911983db42e6158eb5bc4..2ad5b79a069f257c4e3a313c8ea30e9859de7253 100644 --- a/hw/arm/stm32f405_soc.c +++ b/hw/arm/stm32f405_soc.c @@ -149,6 +149,7 @@ static void stm32f405_soc_realize(DeviceState *dev_soc, Error **errp) armv7m = DEVICE(&s->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 96); + qdev_prop_set_uint8(armv7m, "num-prio-bits", 4); qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4")); qdev_prop_set_bit(armv7m, "enable-bitband", true); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); diff --git a/hw/arm/stm32l4x5_soc.c b/hw/arm/stm32l4x5_soc.c new file mode 100644 index 0000000000000000000000000000000000000000..159d5315c99576fb66ea641767cb46d82ab4a9ff --- /dev/null +++ b/hw/arm/stm32l4x5_soc.c @@ -0,0 +1,266 @@ +/* + * STM32L4x5 SoC family + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ *
+ * This work is heavily inspired by the stm32f405_soc by Alistair Francis.
+ * Original code is licensed under the MIT License:
+ *
+ * Copyright (c) 2014 Alistair Francis
+ */
+
+/*
+ * The reference used is the STMicroElectronics RM0351 Reference manual
+ * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs.
+ * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "exec/address-spaces.h"
+#include "sysemu/sysemu.h"
+#include "hw/arm/stm32l4x5_soc.h"
+#include "hw/qdev-clock.h"
+#include "hw/misc/unimp.h"
+
+#define FLASH_BASE_ADDRESS 0x08000000
+#define SRAM1_BASE_ADDRESS 0x20000000
+#define SRAM1_SIZE (96 * KiB)
+#define SRAM2_BASE_ADDRESS 0x10000000
+#define SRAM2_SIZE (32 * KiB)
+
+static void stm32l4x5_soc_initfn(Object *obj)
+{
+    Stm32l4x5SocState *s = STM32L4X5_SOC(obj);
+
+    s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0);
+    s->refclk = qdev_init_clock_in(DEVICE(s), "refclk", NULL, NULL, 0);
+}
+
+static void stm32l4x5_soc_realize(DeviceState *dev_soc, Error **errp)
+{
+    ERRP_GUARD();
+    Stm32l4x5SocState *s = STM32L4X5_SOC(dev_soc);
+    const Stm32l4x5SocClass *sc = STM32L4X5_SOC_GET_CLASS(dev_soc);
+    MemoryRegion *system_memory = get_system_memory();
+    DeviceState *armv7m;
+
+    /*
+     * We use s->refclk internally and only define it with qdev_init_clock_in()
+     * so it is correctly parented and not leaked on an init/deinit; it is not
+     * intended as an externally exposed clock.
+     */
+    if (clock_has_source(s->refclk)) {
+        error_setg(errp, "refclk clock must not be wired up by the board code");
+        return;
+    }
+
+    if (!clock_has_source(s->sysclk)) {
+        error_setg(errp, "sysclk clock must be wired up by the board code");
+        return;
+    }
+
+    /*
+     * TODO: ideally we should model the SoC RCC and its ability to
+     * change the sysclk frequency and define different sysclk sources.
+     */
+
+    /* The refclk always runs at frequency HCLK / 8 */
+    clock_set_mul_div(s->refclk, 8, 1);
+    clock_set_source(s->refclk, s->sysclk);
+
+    if (!memory_region_init_rom(&s->flash, OBJECT(dev_soc), "flash",
+                                sc->flash_size, errp)) {
+        return;
+    }
+    memory_region_init_alias(&s->flash_alias, OBJECT(dev_soc),
+                             "flash_boot_alias", &s->flash, 0,
+                             sc->flash_size);
+
+    memory_region_add_subregion(system_memory, FLASH_BASE_ADDRESS, &s->flash);
+    memory_region_add_subregion(system_memory, 0, &s->flash_alias);
+
+    if (!memory_region_init_ram(&s->sram1, OBJECT(dev_soc), "SRAM1", SRAM1_SIZE,
+                                errp)) {
+        return;
+    }
+    memory_region_add_subregion(system_memory, SRAM1_BASE_ADDRESS, &s->sram1);
+
+    if (!memory_region_init_ram(&s->sram2, OBJECT(dev_soc), "SRAM2", SRAM2_SIZE,
+                                errp)) {
+        return;
+    }
+    memory_region_add_subregion(system_memory, SRAM2_BASE_ADDRESS, &s->sram2);
+
+    object_initialize_child(OBJECT(dev_soc), "armv7m", &s->armv7m, TYPE_ARMV7M);
+    armv7m = DEVICE(&s->armv7m);
+    qdev_prop_set_uint32(armv7m, "num-irq", 96);
+    qdev_prop_set_uint32(armv7m, "num-prio-bits", 4);
+    qdev_prop_set_string(armv7m, "cpu-type", ARM_CPU_TYPE_NAME("cortex-m4"));
+    qdev_prop_set_bit(armv7m, "enable-bitband", true);
+    qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk);
+    qdev_connect_clock_in(armv7m, "refclk", s->refclk);
+    object_property_set_link(OBJECT(&s->armv7m), "memory",
+                             OBJECT(system_memory), &error_abort);
+    if (!sysbus_realize(SYS_BUS_DEVICE(&s->armv7m), errp)) {
+        return;
+    }
+
+    /* APB1 BUS */
+    create_unimplemented_device("TIM2", 0x40000000, 0x400);
+    create_unimplemented_device("TIM3", 0x40000400, 0x400);
+    create_unimplemented_device("TIM4", 0x40000800, 0x400);
+    create_unimplemented_device("TIM5", 0x40000C00, 0x400);
+    create_unimplemented_device("TIM6", 0x40001000, 0x400);
+    create_unimplemented_device("TIM7", 0x40001400, 0x400);
+    /* RESERVED: 0x40001800, 0x1000 */
+    create_unimplemented_device("RTC", 0x40002800, 0x400);
+    create_unimplemented_device("WWDG", 0x40002C00, 0x400);
+    create_unimplemented_device("IWDG", 0x40003000, 0x400);
+    /* RESERVED: 0x40003400, 0x400 */
+    create_unimplemented_device("SPI2", 0x40003800, 0x400);
+    create_unimplemented_device("SPI3", 0x40003C00, 0x400);
+    /* RESERVED: 0x40004000, 0x400 */
+    create_unimplemented_device("USART2", 0x40004400, 0x400);
+    create_unimplemented_device("USART3", 0x40004800, 0x400);
+    create_unimplemented_device("UART4", 0x40004C00, 0x400);
+    create_unimplemented_device("UART5", 0x40005000, 0x400);
+    create_unimplemented_device("I2C1", 0x40005400, 0x400);
+    create_unimplemented_device("I2C2", 0x40005800, 0x400);
+    create_unimplemented_device("I2C3", 0x40005C00, 0x400);
+    /* RESERVED: 0x40006000, 0x400 */
+    create_unimplemented_device("CAN1", 0x40006400, 0x400);
+    /* RESERVED: 0x40006800, 0x400 */
+    create_unimplemented_device("PWR", 0x40007000, 0x400);
+    create_unimplemented_device("DAC1", 0x40007400, 0x400);
+    create_unimplemented_device("OPAMP", 0x40007800, 0x400);
+    create_unimplemented_device("LPTIM1", 0x40007C00, 0x400);
+    create_unimplemented_device("LPUART1", 0x40008000, 0x400);
+    /* RESERVED: 0x40008400, 0x400 */
+    create_unimplemented_device("SWPMI1", 0x40008800, 0x400);
+    /* RESERVED: 0x40008C00, 0x800 */
+    create_unimplemented_device("LPTIM2", 0x40009400, 0x400);
+    /* RESERVED: 0x40009800, 0x6800 */
+
+    /* APB2 BUS */
+    create_unimplemented_device("SYSCFG", 0x40010000, 0x30);
+    create_unimplemented_device("VREFBUF", 0x40010030, 0x1D0);
+    create_unimplemented_device("COMP", 0x40010200, 0x200);
create_unimplemented_device("EXTI", 0x40010400, 0x400); + /* RESERVED: 0x40010800, 0x1400 */ + create_unimplemented_device("FIREWALL", 0x40011C00, 0x400); + /* RESERVED: 0x40012000, 0x800 */ + create_unimplemented_device("SDMMC1", 0x40012800, 0x400); + create_unimplemented_device("TIM1", 0x40012C00, 0x400); + create_unimplemented_device("SPI1", 0x40013000, 0x400); + create_unimplemented_device("TIM8", 0x40013400, 0x400); + create_unimplemented_device("USART1", 0x40013800, 0x400); + /* RESERVED: 0x40013C00, 0x400 */ + create_unimplemented_device("TIM15", 0x40014000, 0x400); + create_unimplemented_device("TIM16", 0x40014400, 0x400); + create_unimplemented_device("TIM17", 0x40014800, 0x400); + /* RESERVED: 0x40014C00, 0x800 */ + create_unimplemented_device("SAI1", 0x40015400, 0x400); + create_unimplemented_device("SAI2", 0x40015800, 0x400); + /* RESERVED: 0x40015C00, 0x400 */ + create_unimplemented_device("DFSDM1", 0x40016000, 0x400); + /* RESERVED: 0x40016400, 0x9C00 */ + + /* AHB1 BUS */ + create_unimplemented_device("DMA1", 0x40020000, 0x400); + create_unimplemented_device("DMA2", 0x40020400, 0x400); + /* RESERVED: 0x40020800, 0x800 */ + create_unimplemented_device("RCC", 0x40021000, 0x400); + /* RESERVED: 0x40021400, 0xC00 */ + create_unimplemented_device("FLASH", 0x40022000, 0x400); + /* RESERVED: 0x40022400, 0xC00 */ + create_unimplemented_device("CRC", 0x40023000, 0x400); + /* RESERVED: 0x40023400, 0x400 */ + create_unimplemented_device("TSC", 0x40024000, 0x400); + + /* RESERVED: 0x40024400, 0x7FDBC00 */ + + /* AHB2 BUS */ + create_unimplemented_device("GPIOA", 0x48000000, 0x400); + create_unimplemented_device("GPIOB", 0x48000400, 0x400); + create_unimplemented_device("GPIOC", 0x48000800, 0x400); + create_unimplemented_device("GPIOD", 0x48000C00, 0x400); + create_unimplemented_device("GPIOE", 0x48001000, 0x400); + create_unimplemented_device("GPIOF", 0x48001400, 0x400); + create_unimplemented_device("GPIOG", 0x48001800, 0x400); + create_unimplemented_device("GPIOH", 0x48001C00, 0x400); + /* RESERVED: 0x48002000, 0x7FDBC00 */ + create_unimplemented_device("OTG_FS", 0x50000000, 0x40000); + create_unimplemented_device("ADC", 0x50040000, 0x400); + /* RESERVED: 0x50040400, 0x20400 */ + create_unimplemented_device("RNG", 0x50060800, 0x400); + + /* AHB3 BUS */ + create_unimplemented_device("FMC", 0xA0000000, 0x1000); + create_unimplemented_device("QUADSPI", 0xA0001000, 0x400); +} + +static void stm32l4x5_soc_class_init(ObjectClass *klass, void *data) +{ + + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = stm32l4x5_soc_realize; + /* Reason: Mapped at fixed location on the system bus */ + dc->user_creatable = false; + /* No vmstate or reset required: device has no internal state */ +} + +static void stm32l4x5xc_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 256 * KiB; +} + +static void stm32l4x5xe_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 512 * KiB; +} + +static void stm32l4x5xg_soc_class_init(ObjectClass *oc, void *data) +{ + Stm32l4x5SocClass *ssc = STM32L4X5_SOC_CLASS(oc); + + ssc->flash_size = 1 * MiB; +} + +static const TypeInfo stm32l4x5_soc_types[] = { + { + .name = TYPE_STM32L4X5XC_SOC, + .parent = TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xc_soc_class_init, + }, { + .name = TYPE_STM32L4X5XE_SOC, + .parent = TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xe_soc_class_init, + }, { + .name = TYPE_STM32L4X5XG_SOC, + .parent = 
TYPE_STM32L4X5_SOC, + .class_init = stm32l4x5xg_soc_class_init, + }, { + .name = TYPE_STM32L4X5_SOC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Stm32l4x5SocState), + .instance_init = stm32l4x5_soc_initfn, + .class_size = sizeof(Stm32l4x5SocClass), + .class_init = stm32l4x5_soc_class_init, + .abstract = true, + } +}; + +DEFINE_TYPES(stm32l4x5_soc_types) diff --git a/hw/arm/stm32vldiscovery.c b/hw/arm/stm32vldiscovery.c index 190db6118b9675f92ef1b7068f7fb47b987e6f1b..cc41935160536ae451a88892566bc080875ab32a 100644 --- a/hw/arm/stm32vldiscovery.c +++ b/hw/arm/stm32vldiscovery.c @@ -47,6 +47,7 @@ static void stm32vldiscovery_init(MachineState *machine) clock_set_hz(sysclk, SYSCLK_FRQ); dev = qdev_new(TYPE_STM32F100_SOC); + object_property_add_child(OBJECT(machine), "soc", OBJECT(dev)); qdev_connect_clock_in(dev, "sysclk", sysclk); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/arm/strongarm.c b/hw/arm/strongarm.c index cc73145053a62d8eba1589a382becbe6706a2555..fef3638acaa3dafec48308f079ae2b73e460df13 100644 --- a/hw/arm/strongarm.c +++ b/hw/arm/strongarm.c @@ -211,7 +211,7 @@ static const VMStateDescription vmstate_strongarm_pic_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_pic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pending, StrongARMPICState), VMSTATE_UINT32(enabled, StrongARMPICState), VMSTATE_UINT32(is_fiq, StrongARMPICState), @@ -439,7 +439,7 @@ static const VMStateDescription vmstate_strongarm_rtc_regs = { .minimum_version_id = 0, .pre_save = strongarm_rtc_pre_save, .post_load = strongarm_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rttr, StrongARMRTCState), VMSTATE_UINT32(rtsr, StrongARMRTCState), VMSTATE_UINT32(rtar, StrongARMRTCState), @@ -677,7 +677,7 @@ static const VMStateDescription vmstate_strongarm_gpio_regs = { .name = "strongarm-gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ilevel, StrongARMGPIOInfo), VMSTATE_UINT32(olevel, StrongARMGPIOInfo), VMSTATE_UINT32(dir, StrongARMGPIOInfo), @@ -846,7 +846,7 @@ static const VMStateDescription vmstate_strongarm_ppc_regs = { .name = "strongarm-ppc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ilevel, StrongARMPPCInfo), VMSTATE_UINT32(olevel, StrongARMPPCInfo), VMSTATE_UINT32(dir, StrongARMPPCInfo), @@ -1300,7 +1300,7 @@ static const VMStateDescription vmstate_strongarm_uart_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(utcr0, StrongARMUARTState), VMSTATE_UINT16(brd, StrongARMUARTState), VMSTATE_UINT8(utcr3, StrongARMUARTState), @@ -1558,7 +1558,7 @@ static const VMStateDescription vmstate_strongarm_ssp_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = strongarm_ssp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(sscr, StrongARMSSPState, 2), VMSTATE_UINT16(sssr, StrongARMSSPState), VMSTATE_UINT16_ARRAY(rx_fifo, StrongARMSSPState, 8), diff --git a/hw/arm/versatilepb.c b/hw/arm/versatilepb.c index 2f22dc890f4cfa7ce5414bbb574dd83c0f32dff4..4b2257787b21d4d146d03fe622a7c5e468aaabd5 100644 --- a/hw/arm/versatilepb.c +++ b/hw/arm/versatilepb.c @@ -52,7 +52,7 @@ static const VMStateDescription vmstate_vpb_sic = { 
.name = "versatilepb_sic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, vpb_sic_state), VMSTATE_UINT32(mask, vpb_sic_state), VMSTATE_UINT32(pic_enable, vpb_sic_state), diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 8bc35a483c9233f4b718bd3a66c5ec807d048b1b..a22a2f43a569d2b27872e9a18d4a67a42d7a4bc8 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -35,7 +35,7 @@ #include "target/arm/cpu.h" #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" -#include "hw/nvram/fw_cfg.h" +#include "hw/nvram/fw_cfg_acpi.h" #include "hw/acpi/bios-linker-loader.h" #include "hw/acpi/aml-build.h" #include "hw/acpi/utils.h" @@ -58,6 +58,7 @@ #include "migration/vmstate.h" #include "hw/acpi/ghes.h" #include "hw/acpi/viot.h" +#include "hw/virtio/virtio-acpi.h" #define ARM_SPI_BASE 32 @@ -94,21 +95,6 @@ static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, aml_append(scope, dev); } -static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) -{ - Aml *dev = aml_device("FWCF"); - aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, - fw_cfg_memmap->size, AML_READ_WRITE)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); -} - static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) { Aml *dev, *crs; @@ -133,32 +119,6 @@ static void acpi_dsdt_add_flash(Aml *scope, const MemMapEntry *flash_memmap) aml_append(scope, dev); } -static void acpi_dsdt_add_virtio(Aml *scope, - const MemMapEntry *virtio_mmio_memmap, - uint32_t mmio_irq, int num) -{ - hwaddr base = virtio_mmio_memmap->base; - hwaddr size = virtio_mmio_memmap->size; - int i; - - for (i = 0; i < num; i++) { - uint32_t irq = mmio_irq + i; - Aml *dev = aml_device("VR%02u", i); - aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); - aml_append(dev, aml_name_decl("_UID", aml_int(i))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); - aml_append(crs, - aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, - AML_EXCLUSIVE, &irq, 1)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); - base += size; - } -} - static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap, uint32_t irq, VirtMachineState *vms) { @@ -864,9 +824,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) if (vmc->acpi_expose_flash) { acpi_dsdt_add_flash(scope, &memmap[VIRT_FLASH]); } - acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); - acpi_dsdt_add_virtio(scope, &memmap[VIRT_MMIO], - (irqmap[VIRT_MMIO] + ARM_SPI_BASE), NUM_VIRTIO_TRANSPORTS); + fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]); + virtio_acpi_dsdt_add(scope, memmap[VIRT_MMIO].base, memmap[VIRT_MMIO].size, + (irqmap[VIRT_MMIO] + ARM_SPI_BASE), + 0, NUM_VIRTIO_TRANSPORTS); acpi_dsdt_add_pci(scope, memmap, irqmap[VIRT_PCIE] + ARM_SPI_BASE, vms); if (vms->acpi_dev) { build_ged_aml(scope, "\\_SB."GED_DEVICE, @@ -1100,7 +1061,7 @@ static const VMStateDescription vmstate_virt_acpi_build = { .name = "virt_acpi_build", .version_id = 1, .minimum_version_id = 1, - 
.fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/arm/virt.c b/hw/arm/virt.c index be2856c018aa14c6ffda283cd7f99fb0d8e7a803..2793121cb41231493f49014f661bf1b58a03f6bb 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -204,38 +204,6 @@ static const int a15irqmap[] = { [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */ }; -static const char *valid_cpus[] = { -#ifdef CONFIG_TCG - ARM_CPU_TYPE_NAME("cortex-a7"), - ARM_CPU_TYPE_NAME("cortex-a15"), - ARM_CPU_TYPE_NAME("cortex-a35"), - ARM_CPU_TYPE_NAME("cortex-a55"), - ARM_CPU_TYPE_NAME("cortex-a72"), - ARM_CPU_TYPE_NAME("cortex-a76"), - ARM_CPU_TYPE_NAME("cortex-a710"), - ARM_CPU_TYPE_NAME("a64fx"), - ARM_CPU_TYPE_NAME("neoverse-n1"), - ARM_CPU_TYPE_NAME("neoverse-v1"), - ARM_CPU_TYPE_NAME("neoverse-n2"), -#endif - ARM_CPU_TYPE_NAME("cortex-a53"), - ARM_CPU_TYPE_NAME("cortex-a57"), - ARM_CPU_TYPE_NAME("host"), - ARM_CPU_TYPE_NAME("max"), -}; - -static bool cpu_type_valid(const char *cpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(valid_cpus); i++) { - if (strcmp(cpu, valid_cpus[i]) == 0) { - return true; - } - } - return false; -} - static void create_randomness(MachineState *ms, const char *node) { struct { @@ -1998,13 +1966,14 @@ static void virt_cpu_post_init(VirtMachineState *vms, MemoryRegion *sysmem) if (pmu) { assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU)); if (kvm_irqchip_in_kernel()) { - kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ); + kvm_arm_pmu_set_irq(ARM_CPU(cpu), VIRTUAL_PMU_IRQ); } - kvm_arm_pmu_init(cpu); + kvm_arm_pmu_init(ARM_CPU(cpu)); } if (steal_time) { - kvm_arm_pvtime_init(cpu, pvtime_reg_base + - cpu->cpu_index * PVTIME_SIZE_PER_CPU); + kvm_arm_pvtime_init(ARM_CPU(cpu), pvtime_reg_base + + cpu->cpu_index + * PVTIME_SIZE_PER_CPU); } } } else { @@ -2039,11 +2008,6 @@ static void machvirt_init(MachineState *machine) unsigned int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; - if (!cpu_type_valid(machine->cpu_type)) { - error_report("mach-virt: CPU type %s not supported", machine->cpu_type); - exit(1); - } - possible_cpus = mc->possible_cpu_arch_ids(machine); /* @@ -2937,6 +2901,28 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); + static const char * const valid_cpu_types[] = { +#ifdef CONFIG_TCG + ARM_CPU_TYPE_NAME("cortex-a7"), + ARM_CPU_TYPE_NAME("cortex-a15"), + ARM_CPU_TYPE_NAME("cortex-a35"), + ARM_CPU_TYPE_NAME("cortex-a55"), + ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("cortex-a76"), + ARM_CPU_TYPE_NAME("cortex-a710"), + ARM_CPU_TYPE_NAME("a64fx"), + ARM_CPU_TYPE_NAME("neoverse-n1"), + ARM_CPU_TYPE_NAME("neoverse-v1"), + ARM_CPU_TYPE_NAME("neoverse-n2"), +#endif + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), +#if defined(CONFIG_KVM) || defined(CONFIG_HVF) + ARM_CPU_TYPE_NAME("host"), +#endif + ARM_CPU_TYPE_NAME("max"), + NULL + }; mc->init = machvirt_init; /* Start with max_cpus set to 512, which is the maximum supported by KVM. 
@@ -2963,6 +2949,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) #else mc->default_cpu_type = ARM_CPU_TYPE_NAME("max"); #endif + mc->valid_cpu_types = valid_cpu_types; mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; mc->kvm_type = virt_kvm_type; assert(!mc->get_hotplug_handler); @@ -3180,10 +3167,17 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); +static void virt_machine_9_0_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(9, 0) + static void virt_machine_8_2_options(MachineClass *mc) { + virt_machine_9_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_VIRT_MACHINE_AS_LATEST(8, 2) +DEFINE_VIRT_MACHINE(8, 2) static void virt_machine_8_1_options(MachineClass *mc) { diff --git a/hw/arm/z2.c b/hw/arm/z2.c index d9a08fa67b23d930214709cfd0219a3abe6b9632..83741a490920d94d285303250738cce660afbb34 100644 --- a/hw/arm/z2.c +++ b/hw/arm/z2.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_zipit_lcd_state = { .name = "zipit-lcd", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, ZipitLCD), VMSTATE_INT32(selected, ZipitLCD), VMSTATE_INT32(enabled, ZipitLCD), @@ -274,7 +274,7 @@ static const VMStateDescription vmstate_aer915_state = { .name = "aer915", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(len, AER915State), VMSTATE_BUFFER(buf, AER915State), VMSTATE_END_OF_LIST(), diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index 6a7a2dc80c4f7b6f85648e716af78fad035721fd..3f0053f94de38af01211cec26332b1628ca3f63a 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -1094,7 +1094,7 @@ static const VMStateDescription vmstate_ac97_bm_regs = { .name = "ac97_bm_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(bdbar, AC97BusMasterRegs), VMSTATE_UINT8(civ, AC97BusMasterRegs), VMSTATE_UINT8(lvi, AC97BusMasterRegs), @@ -1142,7 +1142,7 @@ static const VMStateDescription vmstate_ac97 = { .version_id = 3, .minimum_version_id = 2, .post_load = ac97_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, AC97LinkState), VMSTATE_UINT32(glob_cnt, AC97LinkState), VMSTATE_UINT32(glob_sta, AC97LinkState), diff --git a/hw/audio/asc.c b/hw/audio/asc.c index 0f36b4ce9b6f9617fc8f82fc236d2d203aef574d..87b56243262ef36a9b60073655feb0e72b250491 100644 --- a/hw/audio/asc.c +++ b/hw/audio/asc.c @@ -555,7 +555,7 @@ static const VMStateDescription vmstate_asc_fifo = { .name = "apple-sound-chip.fifo", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(fifo, ASCFIFOState, ASC_FIFO_SIZE), VMSTATE_UINT8(int_status, ASCFIFOState), VMSTATE_INT32(cnt, ASCFIFOState), @@ -575,7 +575,7 @@ static const VMStateDescription vmstate_asc = { .version_id = 0, .minimum_version_id = 0, .post_load = asc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(fifos, ASCState, 2, 0, vmstate_asc_fifo, ASCFIFOState), VMSTATE_UINT8_ARRAY(regs, ASCState, ASC_REG_SIZE), diff --git a/hw/audio/cs4231.c b/hw/audio/cs4231.c index aefc3edea1812967335270234de3e3aa3aeba21e..967caa7fcbd5ff8d1e3b08a97b6235cd892cd906 100644 --- a/hw/audio/cs4231.c +++ b/hw/audio/cs4231.c @@ -142,7 +142,7 @@ static const VMStateDescription 
vmstate_cs4231 = { .name ="cs4231", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CSState, CS_REGS), VMSTATE_UINT8_ARRAY(dregs, CSState, CS_DREGS), VMSTATE_END_OF_LIST() diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 3aa105748d36c1b8efa3c1bd2bc81a353365b7d5..9ef57f042d112122a153db06eace336590d6f539 100644 --- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -637,7 +637,7 @@ static const VMStateDescription vmstate_cs4231a = { .minimum_version_id = 1, .pre_load = cs4231a_pre_load, .post_load = cs4231a_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY (regs, CSState, CS_REGS), VMSTATE_BUFFER (dregs, CSState), VMSTATE_INT32 (dma_running, CSState), diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index fad5541211903579d4b1f5476372bcbe1e46d031..4ab61d3b9da3e1cb60fa679823f8be8ea1eb1b80 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -765,7 +765,7 @@ static const VMStateDescription vmstate_es1370_channel = { .name = "es1370_channel", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32 (shift, struct chan), VMSTATE_UINT32 (leftover, struct chan), VMSTATE_UINT32 (scount, struct chan), @@ -808,7 +808,7 @@ static const VMStateDescription vmstate_es1370 = { .version_id = 2, .minimum_version_id = 2, .post_load = es1370_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE (dev, ES1370State), VMSTATE_STRUCT_ARRAY (chan, ES1370State, NB_CHANNELS, 2, vmstate_es1370_channel, struct chan), diff --git a/hw/audio/gus.c b/hw/audio/gus.c index 6c2b586ca716475f15dd56dddca5b97bf6f1bbe6..4beb3fd74e2efa088a16a86a289fd298fffe8be9 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -209,7 +209,7 @@ static const VMStateDescription vmstate_gus = { .name = "gus", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32 (pos, GUSState), VMSTATE_INT32 (left, GUSState), VMSTATE_INT32 (shift, GUSState), diff --git a/hw/audio/hda-codec.c b/hw/audio/hda-codec.c index 0bc20d49f6cf43ea37141d4543c2d79d8e2bd68a..b22e486fda98841ba98b7b07263a0b74e368f9db 100644 --- a/hw/audio/hda-codec.c +++ b/hw/audio/hda-codec.c @@ -812,7 +812,7 @@ static const VMStateDescription vmstate_hda_audio_stream_buf = { .name = "hda-audio-stream/buffer", .version_id = 1, .needed = vmstate_hda_audio_stream_buf_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(buf, HDAAudioStream), VMSTATE_INT64(rpos, HDAAudioStream), VMSTATE_INT64(wpos, HDAAudioStream), @@ -825,7 +825,7 @@ static const VMStateDescription vmstate_hda_audio_stream_buf = { static const VMStateDescription vmstate_hda_audio_stream = { .name = "hda-audio-stream", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(stream, HDAAudioStream), VMSTATE_UINT32(channel, HDAAudioStream), VMSTATE_UINT32(format, HDAAudioStream), @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_hda_audio_stream = { VMSTATE_BUFFER(compat_buf, HDAAudioStream), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_hda_audio_stream_buf, NULL } @@ -847,7 +847,7 @@ static const VMStateDescription vmstate_hda_audio = { .name = "hda-audio", .version_id = 2, .post_load = hda_audio_post_load, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(st, HDAAudioState, 4, 0, vmstate_hda_audio_stream, HDAAudioStream), diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 78ff9f9a6807f60a36f880e04988c56e032fb7bc..9c54e60b7183f694ea768e32a4d99cc23d77d07a 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -1158,7 +1158,7 @@ static int intel_hda_post_load(void *opaque, int version) static const VMStateDescription vmstate_intel_hda_stream = { .name = "intel-hda-stream", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctl, IntelHDAStream), VMSTATE_UINT32(lpib, IntelHDAStream), VMSTATE_UINT32(cbl, IntelHDAStream), @@ -1174,7 +1174,7 @@ static const VMStateDescription vmstate_intel_hda = { .name = "intel-hda", .version_id = 1, .post_load = intel_hda_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, IntelHDAState), /* registers */ diff --git a/hw/audio/lm4549.c b/hw/audio/lm4549.c index e7bfcc4b9fe8049d68e47cac0169edd79e3ddc98..a4a77c8dc6a2b0ddf97b4380fe443b25a624a973 100644 --- a/hw/audio/lm4549.c +++ b/hw/audio/lm4549.c @@ -329,7 +329,7 @@ const VMStateDescription vmstate_lm4549_state = { .version_id = 1, .minimum_version_id = 1, .post_load = lm4549_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(voice_is_active, lm4549_state), VMSTATE_UINT16_ARRAY(regfile, lm4549_state, 128), VMSTATE_UINT16_ARRAY(buffer, lm4549_state, LM4549_BUFFER_SIZE), diff --git a/hw/audio/marvell_88w8618.c b/hw/audio/marvell_88w8618.c index e6c09bdb8e3eb33bcffd22fe0fd3df81b48584a3..cc285444bcefbb44008a78dbb094c8afa0ab6c1b 100644 --- a/hw/audio/marvell_88w8618.c +++ b/hw/audio/marvell_88w8618.c @@ -273,7 +273,7 @@ static const VMStateDescription mv88w8618_audio_vmsd = { .name = "mv88w8618_audio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(playback_mode, mv88w8618_audio_state), VMSTATE_UINT32(status, mv88w8618_audio_state), VMSTATE_UINT32(irq_enable, mv88w8618_audio_state), diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index fe7f07ced211b53c5d0038115182384a14b078c1..a4b89f17682137cc41f37cb730d6fe113e9375fa 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_spk = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(data_on, PCSpkState), VMSTATE_UINT8(dummy_refresh_clock, PCSpkState), VMSTATE_END_OF_LIST() diff --git a/hw/audio/pl041.c b/hw/audio/pl041.c index 868dffbfd321bae6ec9295e2f79eb823fb854b7f..b435208c2421abec84c0dfbdb964b699ac142ba6 100644 --- a/hw/audio/pl041.c +++ b/hw/audio/pl041.c @@ -571,7 +571,7 @@ static const VMStateDescription vmstate_pl041_regfile = { .name = "pl041_regfile", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { #define REGISTER(name, offset) VMSTATE_UINT32(name, pl041_regfile), #include "pl041.hx" #undef REGISTER @@ -583,7 +583,7 @@ static const VMStateDescription vmstate_pl041_fifo = { .name = "pl041_fifo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, pl041_fifo), VMSTATE_UINT32_ARRAY(data, pl041_fifo, MAX_FIFO_DEPTH), VMSTATE_END_OF_LIST() @@ -594,7 +594,7 @@ static const VMStateDescription 
vmstate_pl041_channel = { .name = "pl041_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tx_fifo, pl041_channel, 0, vmstate_pl041_fifo, pl041_fifo), VMSTATE_UINT8(tx_enabled, pl041_channel), @@ -613,7 +613,7 @@ static const VMStateDescription vmstate_pl041 = { .name = "pl041", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(fifo_depth, PL041State), VMSTATE_STRUCT(regs, PL041State, 0, vmstate_pl041_regfile, pl041_regfile), diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index 18f6d252db3aaf2473e1e0ce211c743337f4f579..fd76e78d180ce7c5845ff989e355083088d8d760 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -1324,12 +1324,12 @@ static const VMStateDescription vmstate_sb16 = { .version_id = 1, .minimum_version_id = 1, .post_load = sb16_post_load, - .fields = (VMStateField[]) { - VMSTATE_UINT32 (irq, SB16State), - VMSTATE_UINT32 (dma, SB16State), - VMSTATE_UINT32 (hdma, SB16State), - VMSTATE_UINT32 (port, SB16State), - VMSTATE_UINT32 (ver, SB16State), + .fields = (const VMStateField[]) { + VMSTATE_UNUSED( 4 /* irq */ + + 4 /* dma */ + + 4 /* hdma */ + + 4 /* port */ + + 4 /* ver */), VMSTATE_INT32 (in_index, SB16State), VMSTATE_INT32 (out_data_len, SB16State), VMSTATE_INT32 (fmt_stereo, SB16State), diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c index 137fa77a01ce85803962f3027743353b991f179e..ea2aeaef14347010c9601f4466f99949502463f4 100644 --- a/hw/audio/virtio-snd.c +++ b/hw/audio/virtio-snd.c @@ -72,7 +72,7 @@ static const VMStateDescription vmstate_virtio_snd = { .unmigratable = 1, .minimum_version_id = VIRTIO_SOUND_VM_VERSION, .version_id = VIRTIO_SOUND_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/audio/wm8750.c b/hw/audio/wm8750.c index 57954a63144238abe8aa720e52193f6274b4506a..ec2c4e13743dc7930801a90cf3a1fe5e75c92792 100644 --- a/hw/audio/wm8750.c +++ b/hw/audio/wm8750.c @@ -592,7 +592,7 @@ static const VMStateDescription vmstate_wm8750 = { .minimum_version_id = 0, .pre_save = wm8750_pre_save, .post_load = wm8750_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(i2c_data, WM8750State, 2), VMSTATE_INT32(i2c_len, WM8750State), VMSTATE_INT32(enable, WM8750State), diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index f83bb0f116b208b42764b03c74475749ce1aafda..ba22732497043280268e3d6279b3e8db3992ce83 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -32,13 +32,11 @@ struct VirtIOBlockDataPlane { VirtIOBlkConf *conf; VirtIODevice *vdev; - /* Note that these EventNotifiers are assigned by value. This is - * fine as long as you do not call event_notifier_cleanup on them - * (because you don't own the file descriptor or handle; you just - * use it). + /* + * The AioContext for each virtqueue. The BlockDriverState will use the + * first element as its AioContext. 
*/ - IOThread *iothread; - AioContext *ctx; + AioContext **vq_aio_context; }; /* Raise an interrupt to signal guest, if necessary */ @@ -47,7 +45,46 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq) virtio_notify_irqfd(s->vdev, vq); } -/* Context: QEMU global mutex held */ +/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */ +static void +apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list, + AioContext **vq_aio_context, uint16_t num_queues) +{ + IOThreadVirtQueueMappingList *node; + size_t num_iothreads = 0; + size_t cur_iothread = 0; + + for (node = iothread_vq_mapping_list; node; node = node->next) { + num_iothreads++; + } + + for (node = iothread_vq_mapping_list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + AioContext *ctx = iothread_get_aio_context(iothread); + + /* Released in virtio_blk_data_plane_destroy() */ + object_ref(OBJECT(iothread)); + + if (node->value->vqs) { + uint16List *vq; + + /* Explicit vq:IOThread assignment */ + for (vq = node->value->vqs; vq; vq = vq->next) { + vq_aio_context[vq->value] = ctx; + } + } else { + /* Round-robin vq:IOThread assignment */ + for (unsigned i = cur_iothread; i < num_queues; + i += num_iothreads) { + vq_aio_context[i] = ctx; + } + } + + cur_iothread++; + } +} + +/* Context: BQL held */ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, VirtIOBlockDataPlane **dataplane, Error **errp) @@ -58,7 +95,7 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, *dataplane = NULL; - if (conf->iothread) { + if (conf->iothread || conf->iothread_vq_mapping_list) { if (!k->set_guest_notifiers || !k->ioeventfd_assign) { error_setg(errp, "device is incompatible with iothread " @@ -86,13 +123,24 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, s = g_new0(VirtIOBlockDataPlane, 1); s->vdev = vdev; s->conf = conf; + s->vq_aio_context = g_new(AioContext *, conf->num_queues); + + if (conf->iothread_vq_mapping_list) { + apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context, + conf->num_queues); + } else if (conf->iothread) { + AioContext *ctx = iothread_get_aio_context(conf->iothread); + for (unsigned i = 0; i < conf->num_queues; i++) { + s->vq_aio_context[i] = ctx; + } - if (conf->iothread) { - s->iothread = conf->iothread; - object_ref(OBJECT(s->iothread)); - s->ctx = iothread_get_aio_context(s->iothread); + /* Released in virtio_blk_data_plane_destroy() */ + object_ref(OBJECT(conf->iothread)); } else { - s->ctx = qemu_get_aio_context(); + AioContext *ctx = qemu_get_aio_context(); + for (unsigned i = 0; i < conf->num_queues; i++) { + s->vq_aio_context[i] = ctx; + } } *dataplane = s; @@ -100,10 +148,11 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, return true; } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) { VirtIOBlock *vblk; + VirtIOBlkConf *conf; if (!s) { return; @@ -111,20 +160,32 @@ void virtio_blk_data_plane_destroy(VirtIOBlockDataPlane *s) vblk = VIRTIO_BLK(s->vdev); assert(!vblk->dataplane_started); - if (s->iothread) { - object_unref(OBJECT(s->iothread)); + conf = s->conf; + + if (conf->iothread_vq_mapping_list) { + IOThreadVirtQueueMappingList *node; + + for (node = conf->iothread_vq_mapping_list; node; node = node->next) { + IOThread *iothread = iothread_by_id(node->value->iothread); + object_unref(OBJECT(iothread)); + } + } + + if 
(conf->iothread) { + object_unref(OBJECT(conf->iothread)); } + + g_free(s->vq_aio_context); g_free(s); } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ int virtio_blk_data_plane_start(VirtIODevice *vdev) { VirtIOBlock *vblk = VIRTIO_BLK(vdev); VirtIOBlockDataPlane *s = vblk->dataplane; BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vblk))); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); - AioContext *old_context; unsigned i; unsigned nvqs = s->conf->num_queues; Error *local_err = NULL; @@ -178,22 +239,13 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) trace_virtio_blk_data_plane_start(s); - old_context = blk_get_aio_context(s->conf->conf.blk); - aio_context_acquire(old_context); - r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); - aio_context_release(old_context); + r = blk_set_aio_context(s->conf->conf.blk, s->vq_aio_context[0], + &local_err); if (r < 0) { error_report_err(local_err); goto fail_aio_context; } - /* Kick right away to begin processing requests already in vring */ - for (i = 0; i < nvqs; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); - - event_notifier_set(virtio_queue_get_host_notifier(vq)); - } - /* * These fields must be visible to the IOThread when it processes the * virtqueue, otherwise it will think dataplane has not started yet. @@ -208,13 +260,15 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) /* Get this show started by hooking up our callbacks */ if (!blk_in_drain(s->conf->conf.blk)) { - aio_context_acquire(s->ctx); for (i = 0; i < nvqs; i++) { VirtQueue *vq = virtio_get_queue(s->vdev, i); + AioContext *ctx = s->vq_aio_context[i]; - virtio_queue_aio_attach_host_notifier(vq, s->ctx); + /* Kick right away to begin processing requests already in vring */ + event_notifier_set(virtio_queue_get_host_notifier(vq)); + + virtio_queue_aio_attach_host_notifier(vq, ctx); } - aio_context_release(s->ctx); } return 0; @@ -242,26 +296,21 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) * * Context: BH in IOThread */ -static void virtio_blk_data_plane_stop_bh(void *opaque) +static void virtio_blk_data_plane_stop_vq_bh(void *opaque) { - VirtIOBlockDataPlane *s = opaque; - unsigned i; - - for (i = 0; i < s->conf->num_queues; i++) { - VirtQueue *vq = virtio_get_queue(s->vdev, i); - EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); + VirtQueue *vq = opaque; + EventNotifier *host_notifier = virtio_queue_get_host_notifier(vq); - virtio_queue_aio_detach_host_notifier(vq, s->ctx); + virtio_queue_aio_detach_host_notifier(vq, qemu_get_current_aio_context()); - /* - * Test and clear notifier after disabling event, in case poll callback - * didn't have time to run. - */ - virtio_queue_host_notifier_read(host_notifier); - } + /* + * Test and clear notifier after disabling event, in case poll callback + * didn't have time to run. 
+ */ + virtio_queue_host_notifier_read(host_notifier); } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_blk_data_plane_stop(VirtIODevice *vdev) { VirtIOBlock *vblk = VIRTIO_BLK(vdev); @@ -285,7 +334,12 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) trace_virtio_blk_data_plane_stop(s); if (!blk_in_drain(s->conf->conf.blk)) { - aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); + for (i = 0; i < nvqs; i++) { + VirtQueue *vq = virtio_get_queue(s->vdev, i); + AioContext *ctx = s->vq_aio_context[i]; + + aio_wait_bh_oneshot(ctx, virtio_blk_data_plane_stop_vq_bh, vq); + } } /* @@ -314,8 +368,6 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) */ vblk->dataplane_started = false; - aio_context_acquire(s->ctx); - /* Wait for virtio_blk_dma_restart_bh() and in flight I/O to complete */ blk_drain(s->conf->conf.blk); @@ -325,10 +377,28 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) */ blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL); - aio_context_release(s->ctx); - /* Clean up guest notifier (irq) */ k->set_guest_notifiers(qbus->parent, nvqs, false); s->stopping = false; } + +void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev); + + for (uint16_t i = 0; i < s->conf->num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]); + } +} + +void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(s->vdev); + + for (uint16_t i = 0; i < s->conf->num_queues; i++) { + VirtQueue *vq = virtio_get_queue(vdev, i); + virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]); + } +} diff --git a/hw/block/dataplane/virtio-blk.h b/hw/block/dataplane/virtio-blk.h index 5e18bb99aeb8ea8b535178c746fde2c14988dd74..1a806fe447e6c4fec234ebdfb080126a8fe448f8 100644 --- a/hw/block/dataplane/virtio-blk.h +++ b/hw/block/dataplane/virtio-blk.h @@ -28,4 +28,7 @@ void virtio_blk_data_plane_notify(VirtIOBlockDataPlane *s, VirtQueue *vq); int virtio_blk_data_plane_start(VirtIODevice *vdev); void virtio_blk_data_plane_stop(VirtIODevice *vdev); +void virtio_blk_data_plane_detach(VirtIOBlockDataPlane *s); +void virtio_blk_data_plane_attach(VirtIOBlockDataPlane *s); + #endif /* HW_DATAPLANE_VIRTIO_BLK_H */ diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index c4bb28c66fede684b4a6ee02a2b89009f64869bc..98501e6885e76b91871f13bc187d20e0f13edc5d 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -260,8 +260,6 @@ static void xen_block_complete_aio(void *opaque, int ret) XenBlockRequest *request = opaque; XenBlockDataPlane *dataplane = request->dataplane; - aio_context_acquire(dataplane->ctx); - if (ret != 0) { error_report("%s I/O error", request->req.operation == BLKIF_OP_READ ? 
@@ -273,10 +271,10 @@ static void xen_block_complete_aio(void *opaque, int ret) if (request->presync) { request->presync = 0; xen_block_do_aio(request); - goto done; + return; } if (request->aio_inflight > 0) { - goto done; + return; } switch (request->req.operation) { @@ -318,9 +316,6 @@ static void xen_block_complete_aio(void *opaque, int ret) if (dataplane->more_work) { qemu_bh_schedule(dataplane->bh); } - -done: - aio_context_release(dataplane->ctx); } static bool xen_block_split_discard(XenBlockRequest *request, @@ -601,9 +596,7 @@ static void xen_block_dataplane_bh(void *opaque) { XenBlockDataPlane *dataplane = opaque; - aio_context_acquire(dataplane->ctx); xen_block_handle_requests(dataplane); - aio_context_release(dataplane->ctx); } static bool xen_block_dataplane_event(void *opaque) @@ -703,10 +696,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) xen_block_dataplane_detach(dataplane); } - aio_context_acquire(dataplane->ctx); /* Xen doesn't have multiple users for nodes, so this can't fail */ blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); - aio_context_release(dataplane->ctx); /* * Now that the context has been moved onto the main thread, cancel @@ -752,7 +743,6 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, { ERRP_GUARD(); XenDevice *xendev = dataplane->xendev; - AioContext *old_context; unsigned int ring_size; unsigned int i; @@ -836,11 +826,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, goto stop; } - old_context = blk_get_aio_context(dataplane->blk); - aio_context_acquire(old_context); /* If other users keep the BlockBackend in the iothread, that's ok */ blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); - aio_context_release(old_context); if (!blk_in_drain(dataplane->blk)) { xen_block_dataplane_attach(dataplane); diff --git a/hw/block/ecc.c b/hw/block/ecc.c index 6e0d63842c1412641f104df3fdfc5df822222706..ed889a4184f4f790957e8b83031985ab09d7c5d2 100644 --- a/hw/block/ecc.c +++ b/hw/block/ecc.c @@ -82,7 +82,7 @@ const VMStateDescription vmstate_ecc_state = { .name = "ecc-state", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cp, ECCState), VMSTATE_UINT16_ARRAY(lp, ECCState, 2), VMSTATE_UINT16(count, ECCState), diff --git a/hw/block/fdc-isa.c b/hw/block/fdc-isa.c index 7ec075e470a7a305d06670ed39120d7250f46984..ad0921c7d38bdd853c132fb2fa1219beca6f991e 100644 --- a/hw/block/fdc-isa.c +++ b/hw/block/fdc-isa.c @@ -259,7 +259,7 @@ static const VMStateDescription vmstate_isa_fdc = { .name = "fdc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, FDCtrlISABus, 0, vmstate_fdc, FDCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/fdc-sysbus.c b/hw/block/fdc-sysbus.c index 86ea51d003465c9ccb1873522b9754855b8f15ef..266bc4d1451d8995b7f373114260b0c0d3e5861f 100644 --- a/hw/block/fdc-sysbus.c +++ b/hw/block/fdc-sysbus.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_sysbus_fdc = { .name = "fdc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, FDCtrlSysBus, 0, vmstate_fdc, FDCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/fdc.c b/hw/block/fdc.c index d7cc4d3ec19bcb40f5fcdfeb56ab9c83655066f6..6dd94e98bc36e8e91e23e0ea6e4ccd628e9a2764 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -854,7 +854,7 @@ static const VMStateDescription 
vmstate_fdrive_media_changed = { .version_id = 1, .minimum_version_id = 1, .needed = fdrive_media_changed_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(media_changed, FDrive), VMSTATE_END_OF_LIST() } @@ -864,7 +864,7 @@ static const VMStateDescription vmstate_fdrive_media_rate = { .name = "fdrive/media_rate", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(media_rate, FDrive), VMSTATE_END_OF_LIST() } @@ -882,7 +882,7 @@ static const VMStateDescription vmstate_fdrive_perpendicular = { .version_id = 1, .minimum_version_id = 1, .needed = fdrive_perpendicular_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(perpendicular, FDrive), VMSTATE_END_OF_LIST() } @@ -899,13 +899,13 @@ static const VMStateDescription vmstate_fdrive = { .version_id = 1, .minimum_version_id = 1, .post_load = fdrive_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(head, FDrive), VMSTATE_UINT8(track, FDrive), VMSTATE_UINT8(sect, FDrive), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fdrive_media_changed, &vmstate_fdrive_media_rate, &vmstate_fdrive_perpendicular, @@ -977,7 +977,7 @@ static const VMStateDescription vmstate_fdc_reset_sensei = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_reset_sensei_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(reset_sensei, FDCtrl), VMSTATE_END_OF_LIST() } @@ -995,7 +995,7 @@ static const VMStateDescription vmstate_fdc_result_timer = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_result_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(result_timer, FDCtrl), VMSTATE_END_OF_LIST() } @@ -1013,7 +1013,7 @@ static const VMStateDescription vmstate_fdc_phase = { .version_id = 1, .minimum_version_id = 1, .needed = fdc_phase_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(phase, FDCtrl), VMSTATE_END_OF_LIST() } @@ -1026,7 +1026,7 @@ const VMStateDescription vmstate_fdc = { .pre_save = fdc_pre_save, .pre_load = fdc_pre_load, .post_load = fdc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Controller State */ VMSTATE_UINT8(sra, FDCtrl), VMSTATE_UINT8(srb, FDCtrl), @@ -1057,7 +1057,7 @@ const VMStateDescription vmstate_fdc = { vmstate_fdrive, FDrive), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fdc_reset_sensei, &vmstate_fdc_result_timer, &vmstate_fdc_phase, diff --git a/hw/block/m25p80.c b/hw/block/m25p80.c index afc3fdf4d60b16a5720b53fcd0d43c409495a886..26ce89562823df256d2450dfd47a39485e01b6a5 100644 --- a/hw/block/m25p80.c +++ b/hw/block/m25p80.c @@ -1684,7 +1684,7 @@ static const VMStateDescription vmstate_m25p80_data_read_loop = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_data_read_loop_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(data_read_loop, Flash), VMSTATE_END_OF_LIST() } @@ -1702,7 +1702,7 @@ static const VMStateDescription vmstate_m25p80_aai_enable = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_aai_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(aai_enable, Flash), VMSTATE_END_OF_LIST() } 
@@ -1720,7 +1720,7 @@ static const VMStateDescription vmstate_m25p80_write_protect = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_wp_level_srwd_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(wp_level, Flash), VMSTATE_BOOL(status_register_write_disabled, Flash), VMSTATE_END_OF_LIST() @@ -1743,7 +1743,7 @@ static const VMStateDescription vmstate_m25p80_block_protect = { .version_id = 1, .minimum_version_id = 1, .needed = m25p80_block_protect_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(block_protect0, Flash), VMSTATE_BOOL(block_protect1, Flash), VMSTATE_BOOL(block_protect2, Flash), @@ -1759,7 +1759,7 @@ static const VMStateDescription vmstate_m25p80 = { .minimum_version_id = 0, .pre_save = m25p80_pre_save, .pre_load = m25p80_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(state, Flash), VMSTATE_UINT8_ARRAY(data, Flash, M25P80_INTERNAL_DATA_BUFFER_SZ), VMSTATE_UINT32(len, Flash), @@ -1781,7 +1781,7 @@ static const VMStateDescription vmstate_m25p80 = { VMSTATE_UINT8(spansion_cr4nv, Flash), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_m25p80_data_read_loop, &vmstate_m25p80_aai_enable, &vmstate_m25p80_write_protect, diff --git a/hw/block/nand.c b/hw/block/nand.c index 9c1b89cfa66f3696e342baa16c59a57a47833338..d1435f2207f86eb950cac3945c74b9bf0eeb9905 100644 --- a/hw/block/nand.c +++ b/hw/block/nand.c @@ -345,7 +345,7 @@ static const VMStateDescription vmstate_nand = { .minimum_version_id = 1, .pre_save = nand_pre_save, .post_load = nand_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cle, NANDFlashState), VMSTATE_UINT8(ale, NANDFlashState), VMSTATE_UINT8(ce, NANDFlashState), diff --git a/hw/block/onenand.c b/hw/block/onenand.c index 50d3d1c9856813773976e8d60d4e0ee1b6a967e0..d8a6944027a7a1995eee30c892aa040e8dd856a2 100644 --- a/hw/block/onenand.c +++ b/hw/block/onenand.c @@ -179,7 +179,7 @@ static const VMStateDescription vmstate_onenand = { .minimum_version_id = 1, .pre_save = onenand_pre_save, .post_load = onenand_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(current_direction, OneNANDState), VMSTATE_INT32(cycle, OneNANDState), VMSTATE_INT32(otpmode, OneNANDState), diff --git a/hw/block/pflash_cfi01.c b/hw/block/pflash_cfi01.c index 62056b1d741ee4ff49f7dcce76f18c70f376ec4b..3e2dc08bd78fe0bc3ab14db87a4dbf2ad0c41481 100644 --- a/hw/block/pflash_cfi01.c +++ b/hw/block/pflash_cfi01.c @@ -95,7 +95,7 @@ static const VMStateDescription vmstate_pflash = { .version_id = 1, .minimum_version_id = 1, .post_load = pflash_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(wcycle, PFlashCFI01), VMSTATE_UINT8(cmd, PFlashCFI01), VMSTATE_UINT8(status, PFlashCFI01), diff --git a/hw/block/swim.c b/hw/block/swim.c index fd65c59f8a10dfb9371f5ca20baf86f2fff3e450..44761c11cbc637309163ac5ad1850f6a94bbc4d7 100644 --- a/hw/block/swim.c +++ b/hw/block/swim.c @@ -516,7 +516,7 @@ static const VMStateDescription vmstate_fdrive = { .name = "fdrive", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, }; @@ -525,7 +525,7 @@ static const VMStateDescription vmstate_swim = { .name = "swim", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = 
(const VMStateField[]) { VMSTATE_INT32(mode, SWIMCtrl), /* IWM mode */ VMSTATE_INT32(iwm_switch, SWIMCtrl), @@ -545,7 +545,7 @@ static const VMStateDescription vmstate_swim = { static const VMStateDescription vmstate_sysbus_swim = { .name = "SWIM", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ctrl, Swim, 0, vmstate_swim, SWIMCtrl), VMSTATE_END_OF_LIST() } diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 2863d80d150a8e72c86504287bf7061e70518e53..6a856ad51a94ba0177073390156aa0977bc52551 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -554,7 +554,7 @@ static const VMStateDescription vmstate_vhost_user_blk = { .name = "vhost-user-blk", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index a1f8e155227591fb428f065d11658f133e348a6b..b7a344ca9791ab1a562c8860fe2a92d05901d2d0 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -82,8 +82,11 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, /* Break the link as the next request is going to be parsed from the * ring again. Otherwise we may end up doing a double completion! */ req->mr_next = NULL; - req->next = s->rq; - s->rq = req; + + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + req->next = s->rq; + s->rq = req; + } } else if (action == BLOCK_ERROR_ACTION_REPORT) { virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR); if (acct_failed) { @@ -102,7 +105,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret) VirtIOBlock *s = next->dev; VirtIODevice *vdev = VIRTIO_DEVICE(s); - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); while (next) { VirtIOBlockReq *req = next; next = req->mr_next; @@ -135,7 +137,6 @@ static void virtio_blk_rw_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); } - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_flush_complete(void *opaque, int ret) @@ -143,19 +144,13 @@ static void virtio_blk_flush_complete(void *opaque, int ret) VirtIOBlockReq *req = opaque; VirtIOBlock *s = req->dev; - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); - if (ret) { - if (virtio_blk_handle_rw_error(req, -ret, 0, true)) { - goto out; - } + if (ret && virtio_blk_handle_rw_error(req, -ret, 0, true)) { + return; } virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); - -out: - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) @@ -165,11 +160,8 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) bool is_write_zeroes = (virtio_ldl_p(VIRTIO_DEVICE(s), &req->out.type) & ~VIRTIO_BLK_T_BARRIER) == VIRTIO_BLK_T_WRITE_ZEROES; - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); - if (ret) { - if (virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { - goto out; - } + if (ret && virtio_blk_handle_rw_error(req, -ret, false, is_write_zeroes)) { + return; } virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); @@ -177,9 +169,6 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->blk), &req->acct); } virtio_blk_free_request(req); - -out: - 
aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } #ifdef __linux__ @@ -226,10 +215,8 @@ static void virtio_blk_ioctl_complete(void *opaque, int status) virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len); out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(ioctl_req); } @@ -669,7 +656,6 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) { ZoneCmdData *data = opaque; VirtIOBlockReq *req = data->req; - VirtIOBlock *s = req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); struct iovec *in_iov = data->in_iov; unsigned in_num = data->in_num; @@ -760,10 +746,8 @@ static void virtio_blk_zone_report_complete(void *opaque, int ret) } out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(data->zone_report_data.zones); g_free(data); } @@ -826,10 +810,8 @@ static void virtio_blk_zone_mgmt_complete(void *opaque, int ret) err_status = VIRTIO_BLK_S_ZONE_INVALID_CMD; } - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static int virtio_blk_handle_zone_mgmt(VirtIOBlockReq *req, BlockZoneOp op) @@ -879,7 +861,6 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) { ZoneCmdData *data = opaque; VirtIOBlockReq *req = data->req; - VirtIOBlock *s = req->dev; VirtIODevice *vdev = VIRTIO_DEVICE(req->dev); int64_t append_sector, n; uint8_t err_status = VIRTIO_BLK_S_OK; @@ -902,10 +883,8 @@ static void virtio_blk_zone_append_complete(void *opaque, int ret) trace_virtio_blk_zone_append_complete(vdev, req, append_sector, ret); out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); g_free(data); } @@ -941,10 +920,8 @@ static int virtio_blk_handle_zone_append(VirtIOBlockReq *req, return 0; out: - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); virtio_blk_req_complete(req, err_status); virtio_blk_free_request(req); - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); return err_status; } @@ -1134,7 +1111,6 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) MultiReqBuffer mrb = {}; bool suppress_notifications = virtio_queue_get_notification(vq); - aio_context_acquire(blk_get_aio_context(s->blk)); defer_call_begin(); do { @@ -1160,7 +1136,6 @@ void virtio_blk_handle_vq(VirtIOBlock *s, VirtQueue *vq) } defer_call_end(); - aio_context_release(blk_get_aio_context(s->blk)); } static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) @@ -1176,6 +1151,7 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) return; } } + virtio_blk_handle_vq(s, vq); } @@ -1183,12 +1159,14 @@ static void virtio_blk_dma_restart_bh(void *opaque) { VirtIOBlock *s = opaque; - VirtIOBlockReq *req = s->rq; + VirtIOBlockReq *req; MultiReqBuffer mrb = {}; - s->rq = NULL; + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + req = s->rq; + s->rq = NULL; + } - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); while (req) { VirtIOBlockReq *next = req->next; if (virtio_blk_handle_request(req, &mrb)) { @@ -1212,8 +1190,6 @@ static void 
virtio_blk_dma_restart_bh(void *opaque) /* Paired with inc in virtio_blk_dma_restart_cb() */ blk_dec_in_flight(s->conf.conf.blk); - - aio_context_release(blk_get_aio_context(s->conf.conf.blk)); } static void virtio_blk_dma_restart_cb(void *opaque, bool running, @@ -1235,25 +1211,28 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running, static void virtio_blk_reset(VirtIODevice *vdev) { VirtIOBlock *s = VIRTIO_BLK(vdev); - AioContext *ctx; VirtIOBlockReq *req; - ctx = blk_get_aio_context(s->blk); - aio_context_acquire(ctx); + /* Dataplane has stopped... */ + assert(!s->dataplane_started); + + /* ...but requests may still be in flight. */ blk_drain(s->blk); /* We drop queued requests after blk_drain() because blk_drain() itself can * produce them. */ - while (s->rq) { - req = s->rq; - s->rq = req->next; - virtqueue_detach_element(req->vq, &req->elem, 0); - virtio_blk_free_request(req); - } + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + while (s->rq) { + req = s->rq; + s->rq = req->next; - aio_context_release(ctx); + /* No other threads can access req->vq here */ + virtqueue_detach_element(req->vq, &req->elem, 0); + + virtio_blk_free_request(req); + } + } - assert(!s->dataplane_started); blk_set_enable_write_cache(s->blk, s->original_wce); } @@ -1268,10 +1247,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) uint64_t capacity; int64_t length; int blk_size = conf->logical_block_size; - AioContext *ctx; - - ctx = blk_get_aio_context(s->blk); - aio_context_acquire(ctx); blk_get_geometry(s->blk, &capacity); memset(&blkcfg, 0, sizeof(blkcfg)); @@ -1295,7 +1270,6 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config) * per track (cylinder). */ length = blk_getlength(s->blk); - aio_context_release(ctx); if (length > 0 && length / conf->heads / conf->secs % blk_size) { blkcfg.geometry.sectors = conf->secs & ~s->sector_mask; } else { @@ -1362,9 +1336,7 @@ static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config) memcpy(&blkcfg, config, s->config_size); - aio_context_acquire(blk_get_aio_context(s->blk)); blk_set_enable_write_cache(s->blk, blkcfg.wce != 0); - aio_context_release(blk_get_aio_context(s->blk)); } static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features, @@ -1432,29 +1404,31 @@ static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status) * s->blk would erroneously be placed in writethrough mode. 
*/ if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) { - aio_context_acquire(blk_get_aio_context(s->blk)); blk_set_enable_write_cache(s->blk, virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_WCE)); - aio_context_release(blk_get_aio_context(s->blk)); } } static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f) { VirtIOBlock *s = VIRTIO_BLK(vdev); - VirtIOBlockReq *req = s->rq; - while (req) { - qemu_put_sbyte(f, 1); + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + VirtIOBlockReq *req = s->rq; - if (s->conf.num_queues > 1) { - qemu_put_be32(f, virtio_get_queue_index(req->vq)); - } + while (req) { + qemu_put_sbyte(f, 1); + + if (s->conf.num_queues > 1) { + qemu_put_be32(f, virtio_get_queue_index(req->vq)); + } - qemu_put_virtqueue_element(vdev, f, &req->elem); - req = req->next; + qemu_put_virtqueue_element(vdev, f, &req->elem); + req = req->next; + } } + qemu_put_sbyte(f, 0); } @@ -1480,13 +1454,78 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f, req = qemu_get_virtqueue_element(vdev, f, sizeof(VirtIOBlockReq)); virtio_blk_init_request(s, virtio_get_queue(vdev, vq_idx), req); - req->next = s->rq; - s->rq = req; + + WITH_QEMU_LOCK_GUARD(&s->rq_lock) { + req->next = s->rq; + s->rq = req; + } } return 0; } +static bool +validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list, + uint16_t num_queues, Error **errp) +{ + g_autofree unsigned long *vqs = bitmap_new(num_queues); + g_autoptr(GHashTable) iothreads = + g_hash_table_new(g_str_hash, g_str_equal); + + for (IOThreadVirtQueueMappingList *node = list; node; node = node->next) { + const char *name = node->value->iothread; + uint16List *vq; + + if (!iothread_by_id(name)) { + error_setg(errp, "IOThread \"%s\" object does not exist", name); + return false; + } + + if (!g_hash_table_add(iothreads, (gpointer)name)) { + error_setg(errp, + "duplicate IOThread name \"%s\" in iothread-vq-mapping", + name); + return false; + } + + if (node != list) { + if (!!node->value->vqs != !!list->value->vqs) { + error_setg(errp, "either all items in iothread-vq-mapping " + "must have vqs or none of them must have it"); + return false; + } + } + + for (vq = node->value->vqs; vq; vq = vq->next) { + if (vq->value >= num_queues) { + error_setg(errp, "vq index %u for IOThread \"%s\" must be " + "less than num_queues %u in iothread-vq-mapping", + vq->value, name, num_queues); + return false; + } + + if (test_and_set_bit(vq->value, vqs)) { + error_setg(errp, "cannot assign vq %u to IOThread \"%s\" " + "because it is already assigned", vq->value, name); + return false; + } + } + } + + if (list->value->vqs) { + for (uint16_t i = 0; i < num_queues; i++) { + if (!test_bit(i, vqs)) { + error_setg(errp, + "missing vq %u IOThread assignment in iothread-vq-mapping", + i); + return false; + } + } + } + + return true; +} + static void virtio_resize_cb(void *opaque) { VirtIODevice *vdev = opaque; @@ -1500,7 +1539,7 @@ static void virtio_blk_resize(void *opaque) VirtIODevice *vdev = VIRTIO_DEVICE(opaque); /* - * virtio_notify_config() needs to acquire the global mutex, + * virtio_notify_config() needs to acquire the BQL, * so it can't be called from an iothread. Instead, schedule * it to be run in the main context BH. 
*/ @@ -1511,34 +1550,24 @@ static void virtio_blk_resize(void *opaque) static void virtio_blk_drained_begin(void *opaque) { VirtIOBlock *s = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); if (!s->dataplane || !s->dataplane_started) { return; } - for (uint16_t i = 0; i < s->conf.num_queues; i++) { - VirtQueue *vq = virtio_get_queue(vdev, i); - virtio_queue_aio_detach_host_notifier(vq, ctx); - } + virtio_blk_data_plane_detach(s->dataplane); } /* Resume virtqueue ioeventfd processing after drain */ static void virtio_blk_drained_end(void *opaque) { VirtIOBlock *s = opaque; - VirtIODevice *vdev = VIRTIO_DEVICE(opaque); - AioContext *ctx = blk_get_aio_context(s->conf.conf.blk); if (!s->dataplane || !s->dataplane_started) { return; } - for (uint16_t i = 0; i < s->conf.num_queues; i++) { - VirtQueue *vq = virtio_get_queue(vdev, i); - virtio_queue_aio_attach_host_notifier(vq, ctx); - } + virtio_blk_data_plane_attach(s->dataplane); } static const BlockDevOps virtio_block_ops = { @@ -1624,10 +1653,25 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp) return; } + if (conf->iothread_vq_mapping_list) { + if (conf->iothread) { + error_setg(errp, "iothread and iothread-vq-mapping properties " + "cannot be set at the same time"); + return; + } + + if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list, + conf->num_queues, errp)) { + return; + } + } + s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params, s->host_features); virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size); + qemu_mutex_init(&s->rq_lock); + s->blk = conf->conf.blk; s->rq = NULL; s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1; @@ -1679,6 +1723,7 @@ static void virtio_blk_device_unrealize(DeviceState *dev) virtio_del_queue(vdev, i); } qemu_coroutine_dec_pool_size(conf->num_queues * conf->queue_size / 2); + qemu_mutex_destroy(&s->rq_lock); blk_ram_registrar_destroy(&s->blk_ram_registrar); qemu_del_vm_change_state_handler(s->change); blockdev_mark_auto_del(s->blk); @@ -1698,7 +1743,7 @@ static const VMStateDescription vmstate_virtio_blk = { .name = "virtio-blk", .minimum_version_id = 2, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, @@ -1723,6 +1768,8 @@ static Property virtio_blk_properties[] = { DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true), DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD, IOThread *), + DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock, + conf.iothread_vq_mapping_list), DEFINE_PROP_BIT64("discard", VirtIOBlock, host_features, VIRTIO_BLK_F_DISCARD, true), DEFINE_PROP_BOOL("report-discard-granularity", VirtIOBlock, diff --git a/hw/char/bcm2835_aux.c b/hw/char/bcm2835_aux.c index 96410b1ff8662c21a74741a67e3545e0e5516cb9..83990e20f768011711c0c61ecfa282f5a2bb82eb 100644 --- a/hw/char/bcm2835_aux.c +++ b/hw/char/bcm2835_aux.c @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_bcm2835_aux = { .name = TYPE_BCM2835_AUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(read_fifo, BCM2835AuxState, BCM2835_AUX_RX_FIFO_LEN), VMSTATE_UINT8(read_pos, BCM2835AuxState), diff --git a/hw/char/cadence_uart.c b/hw/char/cadence_uart.c index a2ac062b1eeffa0525b423d729d36e0f971a6dee..db31d7cc85990b41bbd24c98f8fc63b15209c6a3 100644 --- a/hw/char/cadence_uart.c 
+++ b/hw/char/cadence_uart.c @@ -602,7 +602,7 @@ static const VMStateDescription vmstate_cadence_uart = { .minimum_version_id = 2, .pre_load = cadence_uart_pre_load, .post_load = cadence_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(r, CadenceUARTState, CADENCE_UART_R_MAX), VMSTATE_UINT8_ARRAY(rx_fifo, CadenceUARTState, CADENCE_UART_RX_FIFO_SIZE), diff --git a/hw/char/cmsdk-apb-uart.c b/hw/char/cmsdk-apb-uart.c index d466cd93de43057e5eb13d9f6cbe6e186594ac48..d07cca1bd42588210185ef6eacad91b8a4caf494 100644 --- a/hw/char/cmsdk-apb-uart.c +++ b/hw/char/cmsdk-apb-uart.c @@ -366,7 +366,7 @@ static const VMStateDescription cmsdk_apb_uart_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = cmsdk_apb_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, CMSDKAPBUART), VMSTATE_UINT32(ctrl, CMSDKAPBUART), VMSTATE_UINT32(intstatus, CMSDKAPBUART), diff --git a/hw/char/digic-uart.c b/hw/char/digic-uart.c index 51d4e7db52f2e0835d5175db7a345c06f8f9dde4..ef2d76272629045784a4ba56c6129cd953afb0ca 100644 --- a/hw/char/digic-uart.c +++ b/hw/char/digic-uart.c @@ -165,7 +165,7 @@ static const VMStateDescription vmstate_digic_uart = { .name = "digic-uart", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_rx, DigicUartState), VMSTATE_UINT32(reg_st, DigicUartState), VMSTATE_END_OF_LIST() diff --git a/hw/char/escc.c b/hw/char/escc.c index 48b30ee760a682b85d060c6d2cf9de569c4cf5e3..d450d70eda1ee195128da5123cdbfc397a46f7f1 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -766,7 +766,7 @@ static const VMStateDescription vmstate_escc_chn = { .name = "escc_chn", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vmstate_dummy, ESCCChannelState), VMSTATE_UINT32(reg, ESCCChannelState), VMSTATE_UINT32(rxint, ESCCChannelState), @@ -785,7 +785,7 @@ static const VMStateDescription vmstate_escc = { .name = "escc", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(chn, ESCCState, 2, 2, vmstate_escc_chn, ESCCChannelState), VMSTATE_END_OF_LIST() diff --git a/hw/char/exynos4210_uart.c b/hw/char/exynos4210_uart.c index 7b7c56b6ef47bd89468eb4e12a4f3b6fe7aed451..8cdd42e54fd539700e1f211e33f08c90259b69ca 100644 --- a/hw/char/exynos4210_uart.c +++ b/hw/char/exynos4210_uart.c @@ -628,7 +628,7 @@ static const VMStateDescription vmstate_exynos4210_uart_fifo = { .name = "exynos4210.uart.fifo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sp, Exynos4210UartFIFO), VMSTATE_UINT32(rp, Exynos4210UartFIFO), VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, size), @@ -641,7 +641,7 @@ static const VMStateDescription vmstate_exynos4210_uart = { .version_id = 1, .minimum_version_id = 1, .post_load = exynos4210_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(rx, Exynos4210UartState, 1, vmstate_exynos4210_uart_fifo, Exynos4210UartFIFO), VMSTATE_UINT32_ARRAY(reg, Exynos4210UartState, diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index 20b77885c180a986a0bde47ae2ce9e232110a45c..f8ff043c396a8822d9b302fab34eb5cab2c0233c 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -232,7 +232,7 @@ static const VMStateDescription 
vmstate_goldfish_tty = { .name = "goldfish_tty", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_len, GoldfishTTYState), VMSTATE_UINT64(data_ptr, GoldfishTTYState), VMSTATE_BOOL(int_enabled, GoldfishTTYState), diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c index 51708c083634adcd99f20eed8276a7d10db31d6b..63aae6dc2c7aa12d26b65a6b3f21076f06728e80 100644 --- a/hw/char/ibex_uart.c +++ b/hw/char/ibex_uart.c @@ -488,7 +488,7 @@ static const VMStateDescription vmstate_ibex_uart = { .version_id = 1, .minimum_version_id = 1, .post_load = ibex_uart_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(tx_fifo, IbexUartState, IBEX_UART_TX_FIFO_SIZE), VMSTATE_UINT32(tx_level, IbexUartState), diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index 377d1d97730f439e54ee9eed1c624ffbf2417367..1df862eb7ff2cf1159f9de36b90ae77cd6514a4f 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -43,7 +43,7 @@ static const VMStateDescription vmstate_imx_serial = { .name = TYPE_IMX_SERIAL, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(readbuff, IMXSerialState), VMSTATE_UINT32(usr1, IMXSerialState), VMSTATE_UINT32(usr2, IMXSerialState), diff --git a/hw/char/ipoctal232.c b/hw/char/ipoctal232.c index 3311e0872c2f5d0c8f4bb80a927842fe3c52a073..64be5226d4b1034b11b05525ac27d695609fe32e 100644 --- a/hw/char/ipoctal232.c +++ b/hw/char/ipoctal232.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_scc2698_channel = { .name = "scc2698_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(rx_enabled, SCC2698Channel), VMSTATE_UINT8_ARRAY(mr, SCC2698Channel, 2), VMSTATE_UINT8(mr_idx, SCC2698Channel), @@ -146,7 +146,7 @@ static const VMStateDescription vmstate_scc2698_block = { .name = "scc2698_block", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(imr, SCC2698Block), VMSTATE_UINT8(isr, SCC2698Block), VMSTATE_END_OF_LIST() @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_ipoctal = { .name = "ipoctal232", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IPACK_DEVICE(parent_obj, IPOctalState), VMSTATE_STRUCT_ARRAY(ch, IPOctalState, N_CHANNELS, 1, vmstate_scc2698_channel, SCC2698Channel), diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c index 22f3e78eb9e5ccfe606ac4c2c8b2a19f4ae4ec8f..e7908bbfb5d2dc036da1c96e70ceafc5338d95b2 100644 --- a/hw/char/mchp_pfsoc_mmuart.c +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -114,7 +114,7 @@ static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { .name = "mchp.pfsoc.uart", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, MchpPfSoCMMUartState, MCHP_PFSOC_MMUART_REG_COUNT), VMSTATE_END_OF_LIST() diff --git a/hw/char/nrf51_uart.c b/hw/char/nrf51_uart.c index dfe2276d7119e95316b187230b540bfd2d05e2f5..c2cd6bb5e71d660ae173bec5e196b4fa36f595e9 100644 --- a/hw/char/nrf51_uart.c +++ b/hw/char/nrf51_uart.c @@ -291,7 +291,7 @@ static int nrf51_uart_post_load(void *opaque, int version_id) static const VMStateDescription nrf51_uart_vmstate = { .name = "nrf51_soc.uart", .post_load = nrf51_uart_post_load, - .fields = (VMStateField[]) { + .fields 
= (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, NRF51UARTState, 0x56C), VMSTATE_UINT8_ARRAY(rx_fifo, NRF51UARTState, UART_FIFO_LENGTH), VMSTATE_UINT32(rx_fifo_pos, NRF51UARTState), diff --git a/hw/char/parallel.c b/hw/char/parallel.c index 147c900f0d611ba39ff392920e31987f12c24b42..bd488cd7f947562900f0c889b13612abf7a0952a 100644 --- a/hw/char/parallel.c +++ b/hw/char/parallel.c @@ -478,7 +478,7 @@ static const VMStateDescription vmstate_parallel_isa = { .name = "parallel_isa", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(state.dataw, ISAParallelState), VMSTATE_UINT8(state.datar, ISAParallelState), VMSTATE_UINT8(state.status, ISAParallelState), diff --git a/hw/char/pl011.c b/hw/char/pl011.c index 58edeb9ddb6b39ead1d47d7bb74a1f5f04f4fd18..855cb82d08de18750ece0a00c85f92a8c087e5df 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -398,7 +398,7 @@ static const VMStateDescription vmstate_pl011_clock = { .version_id = 1, .minimum_version_id = 1, .needed = pl011_clock_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clk, PL011State), VMSTATE_END_OF_LIST() } @@ -433,7 +433,7 @@ static const VMStateDescription vmstate_pl011 = { .version_id = 2, .minimum_version_id = 2, .post_load = pl011_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(readbuff, PL011State), VMSTATE_UINT32(flags, PL011State), VMSTATE_UINT32(lcr, PL011State), @@ -452,7 +452,7 @@ static const VMStateDescription vmstate_pl011 = { VMSTATE_INT32(read_trigger, PL011State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pl011_clock, NULL } diff --git a/hw/char/renesas_sci.c b/hw/char/renesas_sci.c index 1c634672905f41e0e274e75b509377213c80efe5..5cb733545c4554a6caf497e16a4f0b5fad6c5888 100644 --- a/hw/char/renesas_sci.c +++ b/hw/char/renesas_sci.c @@ -302,7 +302,7 @@ static const VMStateDescription vmstate_rsci = { .name = "renesas-sci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(trtime, RSCIState), VMSTATE_INT64(rx_next, RSCIState), VMSTATE_UINT8(smr, RSCIState), diff --git a/hw/char/sclpconsole-lm.c b/hw/char/sclpconsole-lm.c index b9e9b2d4535a2bb77318df5df45b704b6ff9f146..7719f438f687cb239861f98ce25389e8d810403f 100644 --- a/hw/char/sclpconsole-lm.c +++ b/hw/char/sclpconsole-lm.c @@ -292,7 +292,7 @@ static const VMStateDescription vmstate_sclplmconsole = { .name = "sclplmconsole", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event.event_pending, SCLPConsoleLM), VMSTATE_UINT32(write_errors, SCLPConsoleLM), VMSTATE_UINT32(length, SCLPConsoleLM), diff --git a/hw/char/sclpconsole.c b/hw/char/sclpconsole.c index c36b5722224934c16ea93bf2942fb435177f947b..5d630b04bb9654d4a275ab36e9e7c7d4255f1869 100644 --- a/hw/char/sclpconsole.c +++ b/hw/char/sclpconsole.c @@ -206,7 +206,7 @@ static const VMStateDescription vmstate_sclpconsole = { .name = "sclpconsole", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event.event_pending, SCLPConsole), VMSTATE_UINT8_ARRAY(iov, SCLPConsole, SIZE_BUFFER_VT220), VMSTATE_UINT32(iov_sclp, SCLPConsole), diff --git a/hw/char/serial-isa.c b/hw/char/serial-isa.c index 
141a6cb168494c72dca42a47677c825919b51e8e..1c793b20f7ec2119e38ded3d8fcebe96b526e993 100644 --- a/hw/char/serial-isa.c +++ b/hw/char/serial-isa.c @@ -106,7 +106,7 @@ static const VMStateDescription vmstate_isa_serial = { .name = "serial", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, ISASerialState, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() } diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index 5d65c534cb5bde91c3cf57a67f249657a9832e82..28b275709af887545d568bf816c0c1ad1058ad59 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -123,7 +123,7 @@ static const VMStateDescription vmstate_pci_multi_serial = { .name = "pci-serial-multi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIMultiSerialState), VMSTATE_STRUCT_ARRAY(state, PCIMultiSerialState, PCI_SERIAL_MAX_PORTS, 0, vmstate_serial, SerialState), diff --git a/hw/char/serial-pci.c b/hw/char/serial-pci.c index 087da3059a44fd94eddac4240fada6431280dcf9..f8a1a94d0c2a61afe283ba68f0433a91ca1867e0 100644 --- a/hw/char/serial-pci.c +++ b/hw/char/serial-pci.c @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_pci_serial = { .name = "pci-serial", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCISerialState), VMSTATE_STRUCT(state, PCISerialState, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() diff --git a/hw/char/serial.c b/hw/char/serial.c index a32eb25f581ad97206e287aad9fb1737a2b353d9..d8b2db508291cb5738a8115b7269118e8b5d03df 100644 --- a/hw/char/serial.c +++ b/hw/char/serial.c @@ -707,7 +707,7 @@ static const VMStateDescription vmstate_serial_thr_ipending = { .version_id = 1, .minimum_version_id = 1, .needed = serial_thr_ipending_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(thr_ipending, SerialState), VMSTATE_END_OF_LIST() } @@ -724,7 +724,7 @@ static const VMStateDescription vmstate_serial_tsr = { .version_id = 1, .minimum_version_id = 1, .needed = serial_tsr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tsr_retry, SerialState), VMSTATE_UINT8(thr, SerialState), VMSTATE_UINT8(tsr, SerialState), @@ -744,7 +744,7 @@ static const VMStateDescription vmstate_serial_recv_fifo = { .version_id = 1, .minimum_version_id = 1, .needed = serial_recv_fifo_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(recv_fifo, SerialState, 1, vmstate_fifo8, Fifo8), VMSTATE_END_OF_LIST() } @@ -761,7 +761,7 @@ static const VMStateDescription vmstate_serial_xmit_fifo = { .version_id = 1, .minimum_version_id = 1, .needed = serial_xmit_fifo_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(xmit_fifo, SerialState, 1, vmstate_fifo8, Fifo8), VMSTATE_END_OF_LIST() } @@ -778,7 +778,7 @@ static const VMStateDescription vmstate_serial_fifo_timeout_timer = { .version_id = 1, .minimum_version_id = 1, .needed = serial_fifo_timeout_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(fifo_timeout_timer, SerialState), VMSTATE_END_OF_LIST() } @@ -795,7 +795,7 @@ static const VMStateDescription vmstate_serial_timeout_ipending = { .version_id = 1, .minimum_version_id = 1, .needed = serial_timeout_ipending_needed, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_INT32(timeout_ipending, SerialState), VMSTATE_END_OF_LIST() } @@ -812,7 +812,7 @@ static const VMStateDescription vmstate_serial_poll = { .version_id = 1, .needed = serial_poll_needed, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(poll_msl, SerialState), VMSTATE_TIMER_PTR(modem_status_poll, SerialState), VMSTATE_END_OF_LIST() @@ -826,7 +826,7 @@ const VMStateDescription vmstate_serial = { .pre_save = serial_pre_save, .pre_load = serial_pre_load, .post_load = serial_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_V(divider, SerialState, 2), VMSTATE_UINT8(rbr, SerialState), VMSTATE_UINT8(ier, SerialState), @@ -839,7 +839,7 @@ const VMStateDescription vmstate_serial = { VMSTATE_UINT8_V(fcr_vmstate, SerialState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_serial_thr_ipending, &vmstate_serial_tsr, &vmstate_serial_recv_fifo, @@ -1056,7 +1056,7 @@ static const VMStateDescription vmstate_serial_mm = { .name = "serial", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(serial, SerialMM, 0, vmstate_serial, SerialState), VMSTATE_END_OF_LIST() } diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index f2684e57bccb4b0cb54a99d290283ea38cd51f2f..e8716c42523b2b26bca862b09d4375982f5e16d9 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -224,7 +224,7 @@ static const VMStateDescription vmstate_sifive_uart = { .name = TYPE_SIFIVE_UART, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(rx_fifo, SiFiveUARTState, SIFIVE_UART_RX_FIFO_SIZE), VMSTATE_UINT8(rx_fifo_len, SiFiveUARTState), diff --git a/hw/char/spapr_vty.c b/hw/char/spapr_vty.c index 91eae1a59881ab6d419727d7bc15c92d07dce07b..3e23d9cbab356cde5e974ee9e0514d3bb2dbf5e3 100644 --- a/hw/char/spapr_vty.c +++ b/hw/char/spapr_vty.c @@ -173,7 +173,7 @@ static const VMStateDescription vmstate_spapr_vty = { .name = "spapr_vty", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(sdev, SpaprVioVty), VMSTATE_UINT32(in, SpaprVioVty), diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index dd619f0731e8ef7f0f8e8ff647131a998918bc4b..016aba637405e93591611af16332eb0a1594a7a3 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -1148,7 +1148,7 @@ static const VMStateDescription vmstate_virtio_console = { .name = "virtio-console", .minimum_version_id = 3, .version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/core/clock-vmstate.c b/hw/core/clock-vmstate.c index 7eccb6d4eaa354cd782c3191a33344561e8248d6..e831fc596f8c553075d0067d26edc08458f8d837 100644 --- a/hw/core/clock-vmstate.c +++ b/hw/core/clock-vmstate.c @@ -41,7 +41,7 @@ const VMStateDescription vmstate_muldiv = { .version_id = 1, .minimum_version_id = 1, .needed = muldiv_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(multiplier, Clock), VMSTATE_UINT32(divider, Clock), VMSTATE_END_OF_LIST() @@ -53,11 +53,11 @@ const VMStateDescription vmstate_clock = { .version_id = 0, .minimum_version_id = 0, .pre_load = clock_pre_load, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(period, Clock), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_muldiv, NULL }, diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 82dae51a550b52b836fe9802784e6d50fb71c337..3ccfe882e2c3391aaa373687c5498eaffc670e88 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -70,14 +70,14 @@ CPUState *cpu_create(const char *typename) * BQL here if we need to. cpu_interrupt assumes it is held.*/ void cpu_reset_interrupt(CPUState *cpu, int mask) { - bool need_lock = !qemu_mutex_iothread_locked(); + bool need_lock = !bql_locked(); if (need_lock) { - qemu_mutex_lock_iothread(); + bql_lock(); } cpu->interrupt_request &= ~mask; if (need_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -154,10 +154,12 @@ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) assert(cc->class_by_name); assert(cpu_model); oc = cc->class_by_name(cpu_model); - if (oc == NULL || object_class_is_abstract(oc)) { - return NULL; + if (object_class_dynamic_cast(oc, typename) && + !object_class_is_abstract(oc)) { + return oc; } - return oc; + + return NULL; } static void cpu_common_parse_features(const char *typename, char *features, diff --git a/hw/core/machine.c b/hw/core/machine.c index 0c1739814124ca1045a383a1f3b9e791221a0bd6..fb5afdcae4cc1f9a289d427e161b1917df11a389 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -32,6 +32,9 @@ #include "hw/virtio/virtio-net.h" #include "audio/audio.h" +GlobalProperty hw_compat_8_2[] = {}; +const size_t hw_compat_8_2_len = G_N_ELEMENTS(hw_compat_8_2); + GlobalProperty hw_compat_8_1[] = { { TYPE_PCI_BRIDGE, "x-pci-express-writeable-slt-bug", "true" }, { "ramfb", "x-migrate", "off" }, @@ -1309,7 +1312,7 @@ static void validate_cpu_cluster_to_numa_boundary(MachineState *ms) const CPUArchId *cpus = possible_cpus->cpus; int i, j; - if (state->num_nodes <= 1 || possible_cpus->len <= 1) { + if (qtest_enabled() || state->num_nodes <= 1 || possible_cpus->len <= 1) { return; } @@ -1387,13 +1390,74 @@ out: return r; } +const char *machine_class_default_cpu_type(MachineClass *mc) +{ + if (mc->valid_cpu_types && !mc->valid_cpu_types[1]) { + /* Only a single CPU type allowed: use it as default. */ + return mc->valid_cpu_types[0]; + } + return mc->default_cpu_type; +} + +static bool is_cpu_type_supported(const MachineState *machine, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(machine); + ObjectClass *oc = object_class_by_name(machine->cpu_type); + CPUClass *cc; + int i; + + /* + * Check if the user specified CPU type is supported when the valid + * CPU types have been determined. Note that the user specified CPU + * type is provided through '-cpu' option. 
+ */ + if (mc->valid_cpu_types) { + assert(mc->valid_cpu_types[0] != NULL); + for (i = 0; mc->valid_cpu_types[i]; i++) { + if (object_class_dynamic_cast(oc, mc->valid_cpu_types[i])) { + break; + } + } + + /* The user specified CPU type isn't valid */ + if (!mc->valid_cpu_types[i]) { + g_autofree char *requested = cpu_model_from_type(machine->cpu_type); + error_setg(errp, "Invalid CPU model: %s", requested); + if (!mc->valid_cpu_types[1]) { + g_autofree char *model = cpu_model_from_type( + mc->valid_cpu_types[0]); + error_append_hint(errp, "The only valid type is: %s\n", model); + } else { + error_append_hint(errp, "The valid models are: "); + for (i = 0; mc->valid_cpu_types[i]; i++) { + g_autofree char *model = cpu_model_from_type( + mc->valid_cpu_types[i]); + error_append_hint(errp, "%s%s", + model, + mc->valid_cpu_types[i + 1] ? ", " : ""); + } + error_append_hint(errp, "\n"); + } + + return false; + } + } + + /* Check if CPU type is deprecated and warn if so */ + cc = CPU_CLASS(oc); + assert(cc != NULL); + if (cc->deprecation_note) { + warn_report("CPU model %s is deprecated -- %s", + machine->cpu_type, cc->deprecation_note); + } + + return true; +} void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp) { ERRP_GUARD(); MachineClass *machine_class = MACHINE_GET_CLASS(machine); - ObjectClass *oc = object_class_by_name(machine->cpu_type); - CPUClass *cc; /* This checkpoint is required by replay to separate prior clock reading from the other reads, because timer polling functions query @@ -1448,41 +1512,9 @@ void machine_run_board_init(MachineState *machine, const char *mem_path, Error * machine->ram = machine_consume_memdev(machine, machine->memdev); } - /* If the machine supports the valid_cpu_types check and the user - * specified a CPU with -cpu check here that the user CPU is supported. - */ - if (machine_class->valid_cpu_types && machine->cpu_type) { - int i; - - for (i = 0; machine_class->valid_cpu_types[i]; i++) { - if (object_class_dynamic_cast(oc, - machine_class->valid_cpu_types[i])) { - /* The user specified CPU is in the valid field, we are - * good to go. 
- */ - break; - } - } - - if (!machine_class->valid_cpu_types[i]) { - /* The user specified CPU is not valid */ - error_report("Invalid CPU type: %s", machine->cpu_type); - error_printf("The valid types are: %s", - machine_class->valid_cpu_types[0]); - for (i = 1; machine_class->valid_cpu_types[i]; i++) { - error_printf(", %s", machine_class->valid_cpu_types[i]); - } - error_printf("\n"); - - exit(1); - } - } - - /* Check if CPU type is deprecated and warn if so */ - cc = CPU_CLASS(oc); - if (cc && cc->deprecation_note) { - warn_report("CPU model %s is deprecated -- %s", machine->cpu_type, - cc->deprecation_note); + /* Check if the CPU type is supported */ + if (machine->cpu_type && !is_cpu_type_supported(machine, errp)) { + return; } if (machine->cgs) { diff --git a/hw/core/or-irq.c b/hw/core/or-irq.c index 1df4bc05a7cf729e158c27905eca32c7d94248f3..13907df0266b46c14d3b55b268dc458338926e63 100644 --- a/hw/core/or-irq.c +++ b/hw/core/or-irq.c @@ -94,7 +94,7 @@ static const VMStateDescription vmstate_or_irq_extras = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_extras_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT16_UNSAFE(levels, OrIRQState, num_lines, 0, vmstate_info_bool, bool), VMSTATE_END_OF_LIST(), @@ -105,11 +105,11 @@ static const VMStateDescription vmstate_or_irq = { .name = TYPE_OR_IRQ, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL_SUB_ARRAY(levels, OrIRQState, 0, OLD_MAX_OR_LINES), VMSTATE_END_OF_LIST(), }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_or_irq_extras, NULL }, diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index e03165febf149dcdb25b1a4bb5c39a0a8fcd5869..b1517592c6bd092a3175bcfdafa866ed0e3e426b 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -441,7 +441,7 @@ const VMStateDescription vmstate_ptimer = { .name = "ptimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(enabled, ptimer_state), VMSTATE_UINT64(limit, ptimer_state), VMSTATE_UINT64(delta, ptimer_state), diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index 1473ab3d5e956796fd445ad0d109a9619bfd93b8..1a396521d51f0d4b534838d1c1e595194a1e092a 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -18,6 +18,7 @@ #include "qapi/qapi-types-block.h" #include "qapi/qapi-types-machine.h" #include "qapi/qapi-types-migration.h" +#include "qapi/qapi-visit-virtio.h" #include "qapi/qmp/qerror.h" #include "qemu/ctype.h" #include "qemu/cutils.h" @@ -120,9 +121,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, "node"); } - aio_context_acquire(ctx); blk_replace_bs(blk, bs, errp); - aio_context_release(ctx); return; } @@ -148,10 +147,7 @@ static void set_drive_helper(Object *obj, Visitor *v, const char *name, 0, BLK_PERM_ALL); blk_created = true; - aio_context_acquire(ctx); ret = blk_insert_bs(blk, bs, errp); - aio_context_release(ctx); - if (ret < 0) { goto fail; } @@ -207,12 +203,8 @@ static void release_drive(Object *obj, const char *name, void *opaque) BlockBackend **ptr = object_field_prop_ptr(obj, prop); if (*ptr) { - AioContext *ctx = blk_get_aio_context(*ptr); - - aio_context_acquire(ctx); blockdev_auto_del(*ptr); blk_detach_dev(*ptr, dev); - aio_context_release(ctx); } } @@ -1169,3 +1161,48 @@ const PropertyInfo 
qdev_prop_cpus390entitlement = { .set = qdev_propinfo_set_enum, .set_default_value = qdev_propinfo_set_default_value_enum, }; + +/* --- IOThreadVirtQueueMappingList --- */ + +static void get_iothread_vq_mapping_list(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + visit_type_IOThreadVirtQueueMappingList(v, name, prop_ptr, errp); +} + +static void set_iothread_vq_mapping_list(Object *obj, Visitor *v, + const char *name, void *opaque, Error **errp) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + IOThreadVirtQueueMappingList *list; + + if (!visit_type_IOThreadVirtQueueMappingList(v, name, &list, errp)) { + return; + } + + qapi_free_IOThreadVirtQueueMappingList(*prop_ptr); + *prop_ptr = list; +} + +static void release_iothread_vq_mapping_list(Object *obj, + const char *name, void *opaque) +{ + IOThreadVirtQueueMappingList **prop_ptr = + object_field_prop_ptr(obj, opaque); + + qapi_free_IOThreadVirtQueueMappingList(*prop_ptr); + *prop_ptr = NULL; +} + +const PropertyInfo qdev_prop_iothread_vq_mapping_list = { + .name = "IOThreadVirtQueueMappingList", + .description = "IOThread virtqueue mapping list [{\"iothread\":\"\", " + "\"vqs\":[1,2,3,...]},...]", + .get = get_iothread_vq_mapping_list, + .set = set_iothread_vq_mapping_list, + .release = release_iothread_vq_mapping_list, +}; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index 840006e953c077c4f00bca8c90b1ede16fd3daf5..7d6fa726fdf2d4cf353165f554e88badafed9696 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -1076,16 +1076,18 @@ void device_class_set_props(DeviceClass *dc, Property *props) void qdev_alias_all_properties(DeviceState *target, Object *source) { ObjectClass *class; - Property *prop; + ObjectPropertyIterator iter; + ObjectProperty *prop; class = object_get_class(OBJECT(target)); - do { - DeviceClass *dc = DEVICE_CLASS(class); - for (prop = dc->props_; prop && prop->name; prop++) { - object_property_add_alias(source, prop->name, - OBJECT(target), prop->name); + object_class_property_iter_init(&iter, class); + while ((prop = object_property_iter_next(&iter))) { + if (object_property_find(source, prop->name)) { + continue; /* skip duplicate properties */ } - class = object_class_get_parent(class); - } while (class != object_class_by_name(TYPE_DEVICE)); + + object_property_add_alias(source, prop->name, + OBJECT(target), prop->name); + } } diff --git a/hw/cpu/cluster.c b/hw/cpu/cluster.c index e444b7c29d1bd02b3ba016319c14f52f5d55a3f2..61289a840d469342c28575ad3fe85be0818d6a8f 100644 --- a/hw/cpu/cluster.c +++ b/hw/cpu/cluster.c @@ -19,12 +19,11 @@ */ #include "qemu/osdep.h" + +#include "hw/core/cpu.h" #include "hw/cpu/cluster.h" #include "hw/qdev-properties.h" -#include "hw/core/cpu.h" #include "qapi/error.h" -#include "qemu/module.h" -#include "qemu/cutils.h" static Property cpu_cluster_properties[] = { DEFINE_PROP_UINT32("cluster-id", CPUClusterState, cluster_id, 0), diff --git a/hw/cpu/core.c b/hw/cpu/core.c index 9876075155743d8966a8383412fd8608470ac481..495a5c30ffe1000f7335e0a45c2e60813b683a5e 100644 --- a/hw/cpu/core.c +++ b/hw/cpu/core.c @@ -8,12 +8,11 @@ */ #include "qemu/osdep.h" + +#include "hw/boards.h" #include "hw/cpu/core.h" -#include "qapi/visitor.h" -#include "qemu/module.h" #include "qapi/error.h" -#include "sysemu/cpus.h" -#include "hw/boards.h" +#include "qapi/visitor.h" static void core_prop_get_core_id(Object *obj, Visitor 
*v, const char *name, void *opaque, Error **errp) diff --git a/hw/cxl/meson.build b/hw/cxl/meson.build index ea0aebf6e3c96e087fd7595a990485a0b88787b3..3e375f61a986baca605366017cd453e2975ab87f 100644 --- a/hw/cxl/meson.build +++ b/hw/cxl/meson.build @@ -11,5 +11,3 @@ system_ss.add(when: 'CONFIG_CXL', if_false: files( 'cxl-host-stubs.c', )) - -system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl-host-stubs.c')) diff --git a/hw/display/artist.c b/hw/display/artist.c index fde050c882b0cfc5f2e748fa3d8b923ab5488895..d9134532fb795592431f84a5d73353c4a2c154d1 100644 --- a/hw/display/artist.c +++ b/hw/display/artist.c @@ -1435,7 +1435,7 @@ static const VMStateDescription vmstate_artist = { .version_id = 2, .minimum_version_id = 2, .post_load = vmstate_artist_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, ARTISTState), VMSTATE_UINT16(width, ARTISTState), VMSTATE_UINT16(depth, ARTISTState), diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index a05277674f2c64634e1489fe895545d50e8ec2bb..e40ed2d2e18f57c417d78ff291118503602657cb 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -355,7 +355,7 @@ static const VMStateDescription vmstate_bcm2835_fb = { .name = TYPE_BCM2835_FB, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(lock, BCM2835FBState), VMSTATE_BOOL(invalidate, BCM2835FBState), VMSTATE_BOOL(pending, BCM2835FBState), diff --git a/hw/display/bochs-display.c b/hw/display/bochs-display.c index 9138e98c3b6d02b480dee5b0a2613865825808f3..3b1d922b6eae6bb860d8bdbdc4fdca02038d76b7 100644 --- a/hw/display/bochs-display.c +++ b/hw/display/bochs-display.c @@ -61,7 +61,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(BochsDisplayState, BOCHS_DISPLAY) static const VMStateDescription vmstate_bochs_display = { .name = "bochs-display", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, BochsDisplayState), VMSTATE_UINT16_ARRAY(vbe_regs, BochsDisplayState, VBE_DISPI_INDEX_NB), VMSTATE_BOOL(big_endian_fb, BochsDisplayState), diff --git a/hw/display/cg3.c b/hw/display/cg3.c index 2e9656ae1c39be1e25bdf63f664958a9685ee848..b271faaa484e4697ab3223b94a079be5a0aa3931 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -334,7 +334,7 @@ static const VMStateDescription vmstate_cg3 = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_cg3_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, CG3State), VMSTATE_UINT16(width, CG3State), VMSTATE_UINT16(depth, CG3State), diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c index b80f98b6c4c02c2cd122025df8b9b8df3dba8962..5dd5136a0cda05544cd2b6ad1421bc2a64744dbc 100644 --- a/hw/display/cirrus_vga.c +++ b/hw/display/cirrus_vga.c @@ -2739,7 +2739,7 @@ const VMStateDescription vmstate_cirrus_vga = { .version_id = 2, .minimum_version_id = 1, .post_load = cirrus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vga.latch, CirrusVGAState), VMSTATE_UINT8(vga.sr_index, CirrusVGAState), VMSTATE_BUFFER(vga.sr, CirrusVGAState), @@ -2777,7 +2777,7 @@ static const VMStateDescription vmstate_pci_cirrus_vga = { .name = "cirrus_vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCICirrusVGAState), VMSTATE_STRUCT(cirrus_vga, PCICirrusVGAState, 0, vmstate_cirrus_vga, CirrusVGAState), diff --git a/hw/display/dpcd.c 
b/hw/display/dpcd.c index 64463654a1a007d7476d78f83785b089a8eccdd9..aab1b1a2d7f4e606cad3c4176ad1a85c709fd817 100644 --- a/hw/display/dpcd.c +++ b/hw/display/dpcd.c @@ -135,7 +135,7 @@ static const VMStateDescription vmstate_dpcd = { .name = TYPE_DPCD, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY_V(dpcd_info, DPCDState, DPCD_READABLE_AREA, 0), VMSTATE_END_OF_LIST() } diff --git a/hw/display/exynos4210_fimd.c b/hw/display/exynos4210_fimd.c index 34a960a976534ac8b960302d42fcd8ce4e175f43..84687527d51f540d504ec621029d94e644874fdf 100644 --- a/hw/display/exynos4210_fimd.c +++ b/hw/display/exynos4210_fimd.c @@ -1865,7 +1865,7 @@ static const VMStateDescription exynos4210_fimd_window_vmstate = { .name = "exynos4210.fimd_window", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(wincon, Exynos4210fimdWindow), VMSTATE_UINT32_ARRAY(buf_start, Exynos4210fimdWindow, 3), VMSTATE_UINT32_ARRAY(buf_end, Exynos4210fimdWindow, 3), @@ -1895,7 +1895,7 @@ static const VMStateDescription exynos4210_fimd_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = exynos4210_fimd_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(vidcon, Exynos4210fimdState, 4), VMSTATE_UINT32_ARRAY(vidtcon, Exynos4210fimdState, 4), VMSTATE_UINT32(shadowcon, Exynos4210fimdState), diff --git a/hw/display/g364fb.c b/hw/display/g364fb.c index 2903cab82d8addf9e8c3808292189d6dfd9fe35b..e08ec3f8de4f026d2795de169898019a28391058 100644 --- a/hw/display/g364fb.c +++ b/hw/display/g364fb.c @@ -455,7 +455,7 @@ static const VMStateDescription vmstate_g364fb = { .version_id = 2, .minimum_version_id = 2, .post_load = g364fb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER_UNSAFE(color_palette, G364State, 0, 256 * 3), VMSTATE_BUFFER_UNSAFE(cursor_palette, G364State, 0, 9), VMSTATE_UINT16_ARRAY(cursor, G364State, 512), @@ -521,7 +521,7 @@ static const VMStateDescription vmstate_g364fb_sysbus = { .name = "g364fb-sysbus", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(g364, G364SysBusState, 2, vmstate_g364fb, G364State), VMSTATE_END_OF_LIST() } diff --git a/hw/display/i2c-ddc.c b/hw/display/i2c-ddc.c index 146489518c781cecaec4b5fbf3d39079fe8683b6..3f9d1e1f6fe4afbe5ece784cfeda5007c334f6ef 100644 --- a/hw/display/i2c-ddc.c +++ b/hw/display/i2c-ddc.c @@ -88,7 +88,7 @@ static void i2c_ddc_init(Object *obj) static const VMStateDescription vmstate_i2c_ddc = { .name = TYPE_I2CDDC, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(firstbyte, I2CDDCState), VMSTATE_UINT8(reg, I2CDDCState), VMSTATE_END_OF_LIST() diff --git a/hw/display/jazz_led.c b/hw/display/jazz_led.c index dd5f4696c4f65fdb879e56d5a4625efb27d7bf3b..534f15dcfd439653b1cf29ddc5d50aa72392f747 100644 --- a/hw/display/jazz_led.c +++ b/hw/display/jazz_led.c @@ -257,7 +257,7 @@ static const VMStateDescription vmstate_jazz_led = { .version_id = 0, .minimum_version_id = 0, .post_load = jazz_led_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(segments, LedState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/macfb.c b/hw/display/macfb.c index d61541ccb5d5d3d13c65539bef4f237f690fc540..418e99c8e18ec43106a412dcc384970452990dc6 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c 
@@ -627,7 +627,7 @@ static const VMStateDescription vmstate_macfb = { .version_id = 1, .minimum_version_id = 1, .post_load = macfb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, MacfbState), VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3), VMSTATE_UINT32(palette_current, MacfbState), @@ -770,7 +770,7 @@ static const VMStateDescription vmstate_macfb_sysbus = { .name = "macfb-sysbus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(macfb, MacfbSysBusState, 1, vmstate_macfb, MacfbState), VMSTATE_END_OF_LIST() } @@ -789,7 +789,7 @@ static const VMStateDescription vmstate_macfb_nubus = { .name = "macfb-nubus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(macfb, MacfbNubusState, 1, vmstate_macfb, MacfbState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/meson.build b/hw/display/meson.build index 344dfe3d8c26dfd4d856b57e7c740d67293809ad..f93a69f70f4f3d2577fa69de3349d98e2bc9501c 100644 --- a/hw/display/meson.build +++ b/hw/display/meson.build @@ -69,8 +69,11 @@ if config_all_devices.has_key('CONFIG_VIRTIO_GPU') virtio_gpu_ss = ss.source_set() virtio_gpu_ss.add(when: 'CONFIG_VIRTIO_GPU', if_true: [files('virtio-gpu-base.c', 'virtio-gpu.c'), pixman]) - virtio_gpu_ss.add(when: 'CONFIG_LINUX', if_true: files('virtio-gpu-udmabuf.c'), - if_false: files('virtio-gpu-udmabuf-stubs.c')) + if host_os == 'linux' + virtio_gpu_ss.add(files('virtio-gpu-udmabuf.c')) + else + virtio_gpu_ss.add(files('virtio-gpu-udmabuf-stubs.c')) + endif virtio_gpu_ss.add(when: 'CONFIG_VHOST_USER_GPU', if_true: files('vhost-user-gpu.c')) hw_display_modules += {'virtio-gpu': virtio_gpu_ss} @@ -140,5 +143,4 @@ endif system_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_lcdc.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('acpi-vga-stub.c')) modules += { 'hw-display': hw_display_modules } diff --git a/hw/display/pl110.c b/hw/display/pl110.c index 4bf15c1da51de42cf45fb30626515641ea91e16b..4b83db932264ff601429a27dd71c6357fcd0e78b 100644 --- a/hw/display/pl110.c +++ b/hw/display/pl110.c @@ -83,7 +83,7 @@ static const VMStateDescription vmstate_pl110 = { .version_id = 2, .minimum_version_id = 1, .post_load = vmstate_pl110_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(version, PL110State), VMSTATE_UINT32_ARRAY(timing, PL110State, 4), VMSTATE_UINT32(cr, PL110State), diff --git a/hw/display/pxa2xx_lcd.c b/hw/display/pxa2xx_lcd.c index eb83d882222e385dc2c8cb7bea5fe9ddcaa0a71a..a9d0d981a081c7b13623313251f7d71fe3f92bb5 100644 --- a/hw/display/pxa2xx_lcd.c +++ b/hw/display/pxa2xx_lcd.c @@ -1371,7 +1371,7 @@ static const VMStateDescription vmstate_dma_channel = { .name = "dma_channel", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(branch, struct DMAChannel), VMSTATE_UINT8(up, struct DMAChannel), VMSTATE_BUFFER(pbuffer, struct DMAChannel), @@ -1398,7 +1398,7 @@ static const VMStateDescription vmstate_pxa2xx_lcdc = { .version_id = 0, .minimum_version_id = 0, .post_load = pxa2xx_lcdc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(irqlevel, PXA2xxLCDState), VMSTATE_INT32(transp, PXA2xxLCDState), VMSTATE_UINT32_ARRAY(control, PXA2xxLCDState, 6), diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 
7bb00d68f572b36f04deff86e7c3e5c4c2f6c83a..7178dec85d9d80189b52ed3dfbc3e594868e3c56 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -2388,7 +2388,7 @@ static const VMStateDescription qxl_memslot = { .name = "qxl-memslot", .version_id = QXL_SAVE_VERSION, .minimum_version_id = QXL_SAVE_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(slot.mem_start, struct guest_slots), VMSTATE_UINT64(slot.mem_end, struct guest_slots), VMSTATE_UINT32(active, struct guest_slots), @@ -2400,7 +2400,7 @@ static const VMStateDescription qxl_surface = { .name = "qxl-surface", .version_id = QXL_SAVE_VERSION, .minimum_version_id = QXL_SAVE_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(width, QXLSurfaceCreate), VMSTATE_UINT32(height, QXLSurfaceCreate), VMSTATE_INT32(stride, QXLSurfaceCreate), @@ -2419,7 +2419,7 @@ static const VMStateDescription qxl_vmstate_monitors_config = { .version_id = 1, .minimum_version_id = 1, .needed = qxl_monitors_config_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(guest_monitors_config, PCIQXLDevice), VMSTATE_END_OF_LIST() }, @@ -2432,7 +2432,7 @@ static const VMStateDescription qxl_vmstate = { .pre_save = qxl_pre_save, .pre_load = qxl_pre_load, .post_load = qxl_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pci, PCIQXLDevice), VMSTATE_STRUCT(vga, PCIQXLDevice, 0, vmstate_vga_common, VGACommonState), VMSTATE_UINT32(shadow_rom.mode, PCIQXLDevice), @@ -2452,7 +2452,7 @@ static const VMStateDescription qxl_vmstate = { VMSTATE_UINT64(guest_cursor, PCIQXLDevice), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &qxl_vmstate_monitors_config, NULL } diff --git a/hw/display/qxl.h b/hw/display/qxl.h index fdac14edade7bd99bbda4f00e1e232aa0d6cfc9b..e0a85a5ca49fde248f7cfda0727efd7809019b1a 100644 --- a/hw/display/qxl.h +++ b/hw/display/qxl.h @@ -159,7 +159,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL) * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. 
* diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c index a96e7ebcd9fa57989475e25b75599990acbe884f..20eab34ff41112ea9ce38a41cd8477ca5cb47ac2 100644 --- a/hw/display/ramfb-standalone.c +++ b/hw/display/ramfb-standalone.c @@ -54,7 +54,7 @@ static const VMStateDescription ramfb_dev_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(state, RAMFBStandaloneState, ramfb_vmstate, RAMFBState), VMSTATE_END_OF_LIST() } diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c index 477ef7272ab71a279ad8ed8a6dc59e1f66b778f9..6086baf7a98612c29ca4ce8b8f52db1cd128dd14 100644 --- a/hw/display/ramfb.c +++ b/hw/display/ramfb.c @@ -129,7 +129,7 @@ const VMStateDescription ramfb_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = ramfb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER_UNSAFE(cfg, RAMFBState, 0, sizeof(RAMFBCfg)), VMSTATE_END_OF_LIST() } diff --git a/hw/display/sii9022.c b/hw/display/sii9022.c index 664fd4046d82dcd7ee5791a802099c50a80b3943..60c3f78549816ed3ef63c8ad09881165ff57a3c1 100644 --- a/hw/display/sii9022.c +++ b/hw/display/sii9022.c @@ -51,7 +51,7 @@ static const VMStateDescription vmstate_sii9022 = { .name = "sii9022", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, sii9022_state), VMSTATE_UINT8(ptr, sii9022_state), VMSTATE_BOOL(addr_byte, sii9022_state), diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 5b4e4509e1901daac918ee97b3b94d46d257cd5e..26dc8170d89ba6223961e965a9f46dd78a7df4b8 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -1940,7 +1940,7 @@ static const VMStateDescription vmstate_sm501_state = { .name = "sm501-state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(local_mem_size_index, SM501State), VMSTATE_UINT32(system_control, SM501State), VMSTATE_UINT32(misc_control, SM501State), @@ -2071,7 +2071,7 @@ static const VMStateDescription vmstate_sm501_sysbus = { .name = TYPE_SYSBUS_SM501, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SM501SysBusState, 1, vmstate_sm501_state, SM501State), VMSTATE_END_OF_LIST() @@ -2161,7 +2161,7 @@ static const VMStateDescription vmstate_sm501_pci = { .name = TYPE_PCI_SM501, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, SM501PCIState), VMSTATE_STRUCT(state, SM501PCIState, 1, vmstate_sm501_state, SM501State), diff --git a/hw/display/ssd0303.c b/hw/display/ssd0303.c index 32b32a3044e522ba33f5b57708d5de2b8ba285c8..e292cff44eaf3d2ff104fd50b1c24e5cdff532f2 100644 --- a/hw/display/ssd0303.c +++ b/hw/display/ssd0303.c @@ -281,7 +281,7 @@ static const VMStateDescription vmstate_ssd0303 = { .name = "ssd0303_oled", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(row, ssd0303_state), VMSTATE_INT32(col, ssd0303_state), VMSTATE_INT32(start_line, ssd0303_state), diff --git a/hw/display/ssd0323.c b/hw/display/ssd0323.c index 09b1bbed0a4b85cb297f88683c2ee8b10f888493..96cf0dc662bb369946f36610e908abf737b35828 100644 --- a/hw/display/ssd0323.c +++ b/hw/display/ssd0323.c @@ -324,7 +324,7 @@ static const VMStateDescription 
vmstate_ssd0323 = { .version_id = 2, .minimum_version_id = 2, .post_load = ssd0323_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(cmd_len, ssd0323_state), VMSTATE_INT32(cmd, ssd0323_state), VMSTATE_INT32_ARRAY(cmd_data, ssd0323_state, 8), diff --git a/hw/display/tcx.c b/hw/display/tcx.c index 1b27b64f6d14e1a878e25ef75c8e2f48a9bfa419..99507e763886091cffb18872898cf1cecaee9214 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -344,7 +344,7 @@ static const VMStateDescription vmstate_tcx = { .version_id = 4, .minimum_version_id = 4, .post_load = vmstate_tcx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(height, TCXState), VMSTATE_UINT16(width, TCXState), VMSTATE_UINT16(depth, TCXState), diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c index e4f45b4476d413e7a0a1d65827109ff886c4c9d7..2d8adce5da695efc9bd241f6ffd04bf6d6d9b84a 100644 --- a/hw/display/vga-pci.c +++ b/hw/display/vga-pci.c @@ -61,7 +61,7 @@ static const VMStateDescription vmstate_vga_pci = { .name = "vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIVGAState), VMSTATE_STRUCT(vga, PCIVGAState, 0, vmstate_vga_common, VGACommonState), VMSTATE_END_OF_LIST() diff --git a/hw/display/vga.c b/hw/display/vga.c index 37557c3442aa8b709e74930502ed3a573da3d222..886a4020e5dcdd33f37479ef0e784bc4a4343419 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -2106,7 +2106,7 @@ static const VMStateDescription vmstate_vga_endian = { .version_id = 1, .minimum_version_id = 1, .needed = vga_endian_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(big_endian_fb, VGACommonState), VMSTATE_END_OF_LIST() } @@ -2117,7 +2117,7 @@ const VMStateDescription vmstate_vga_common = { .version_id = 2, .minimum_version_id = 2, .post_load = vga_common_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(latch, VGACommonState), VMSTATE_UINT8(sr_index, VGACommonState), VMSTATE_PARTIAL_BUFFER(sr, VGACommonState, 8), @@ -2149,7 +2149,7 @@ const VMStateDescription vmstate_vga_common = { VMSTATE_UINT32(vbe_bank_mask, VGACommonState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vga_endian, NULL } diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c index 37af25621985db413dd6eadf171dbad51fffb818..4fc7ef8896c1e349a6680eba4128f45e9631d713 100644 --- a/hw/display/virtio-gpu-base.c +++ b/hw/display/virtio-gpu-base.c @@ -251,7 +251,11 @@ void virtio_gpu_base_device_unrealize(DeviceState *qdev) { VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + virtio_del_queue(vdev, 0); + virtio_del_queue(vdev, 1); + virtio_cleanup(vdev); migrate_del_blocker(&g->migration_blocker); } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index b016d3bac85c27ccbfb64241d6d9e5f319957401..f8a675eb3017ad474a99fcc5463fc0296d1b79ef 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1164,7 +1164,7 @@ static void virtio_gpu_cursor_bh(void *opaque) static const VMStateDescription vmstate_virtio_gpu_scanout = { .name = "virtio-gpu-one-scanout", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout), VMSTATE_UINT32(width, struct virtio_gpu_scanout), 
VMSTATE_UINT32(height, struct virtio_gpu_scanout), @@ -1182,7 +1182,7 @@ static const VMStateDescription vmstate_virtio_gpu_scanout = { static const VMStateDescription vmstate_virtio_gpu_scanouts = { .name = "virtio-gpu-scanouts", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, struct VirtIOGPU, NULL), @@ -1512,7 +1512,7 @@ void virtio_gpu_reset(VirtIODevice *vdev) g->reset_finished = false; qemu_bh_schedule(g->reset_bh); while (!g->reset_finished) { - qemu_cond_wait_iothread(&g->reset_cond); + qemu_cond_wait_bql(&g->reset_cond); } } else { virtio_gpu_reset_bh(g); @@ -1592,7 +1592,7 @@ static const VMStateDescription vmstate_virtio_gpu = { .name = "virtio-gpu", .minimum_version_id = VIRTIO_GPU_VM_VERSION, .version_id = VIRTIO_GPU_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE /* core */, { .name = "virtio-gpu", @@ -1605,7 +1605,7 @@ static const VMStateDescription vmstate_virtio_gpu = { } /* device */, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_gpu_blob_state, NULL }, diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index c8552ff760f15b33432dc3b31a7da7931d27f984..94d3353f540430bbd6e3ff4306c9c7d9e5ef7055 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_virtio_vga_base = { .name = "virtio-vga", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* no pci stuff here, saving the virtio device will handle that */ VMSTATE_STRUCT(vga, VirtIOVGABase, 0, vmstate_vga_common, VGACommonState), diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index 3f26bea1904b290d107bcb599278fc761ba85eae..1c0f9d9a991d5b5138544e12d4a9f96d109e0ab3 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -1210,7 +1210,7 @@ static const VMStateDescription vmstate_vmware_vga_internal = { .version_id = 0, .minimum_version_id = 0, .post_load = vmsvga_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(new_depth, struct vmsvga_state_s, NULL), VMSTATE_INT32(enable, struct vmsvga_state_s), VMSTATE_INT32(config, struct vmsvga_state_s), @@ -1235,7 +1235,7 @@ static const VMStateDescription vmstate_vmware_vga = { .name = "vmware_vga", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, struct pci_vmsvga_state_s), VMSTATE_STRUCT(chip, struct pci_vmsvga_state_s, 0, vmstate_vmware_vga_internal, struct vmsvga_state_s), diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index eee8f33a5844efc64f0160c8c1b908e2708b40f1..c42fc388dc75fa4065b800b49d6ce6d9a1252047 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -262,7 +262,7 @@ typedef enum DPVideoFmt DPVideoFmt; static const VMStateDescription vmstate_dp = { .name = TYPE_XLNX_DP, .version_id = 2, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(core_registers, XlnxDPState, DP_CORE_REG_ARRAY_SIZE), VMSTATE_UINT32_ARRAY(avbufm_registers, XlnxDPState, diff --git a/hw/dma/bcm2835_dma.c b/hw/dma/bcm2835_dma.c index 5e9306110dc2a92e5630ff0a90b3b37d2a3b69df..9bda45072b6653a328f5e7b6df262fbbeff00430 100644 --- a/hw/dma/bcm2835_dma.c +++ 
b/hw/dma/bcm2835_dma.c @@ -311,7 +311,7 @@ static const VMStateDescription vmstate_bcm2835_dma_chan = { .name = TYPE_BCM2835_DMA "-chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cs, BCM2835DMAChan), VMSTATE_UINT32(conblk_ad, BCM2835DMAChan), VMSTATE_UINT32(ti, BCM2835DMAChan), @@ -329,7 +329,7 @@ static const VMStateDescription vmstate_bcm2835_dma = { .name = TYPE_BCM2835_DMA, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(chan, BCM2835DMAState, BCM2835_DMA_NCHANS, 1, vmstate_bcm2835_dma_chan, BCM2835DMAChan), VMSTATE_UINT32(int_status, BCM2835DMAState), diff --git a/hw/dma/i82374.c b/hw/dma/i82374.c index 63734c22c9d9c9973535192469f3e6e8271157de..f6ddfc51c50bbe19daa083be6f84613464f1b305 100644 --- a/hw/dma/i82374.c +++ b/hw/dma/i82374.c @@ -58,7 +58,7 @@ static const VMStateDescription vmstate_i82374 = { .name = "i82374", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(commands, I82374State, 8), VMSTATE_END_OF_LIST() }, diff --git a/hw/dma/i8257.c b/hw/dma/i8257.c index de5f696919ca131fd9560bae404895f54b4936f2..de1d5b110c0e5664ab098c4a0b70e2723b18d4ae 100644 --- a/hw/dma/i8257.c +++ b/hw/dma/i8257.c @@ -517,7 +517,7 @@ static const VMStateDescription vmstate_i8257_regs = { .name = "dma_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(now, I8257Regs, 2), VMSTATE_UINT16_ARRAY(base, I8257Regs, 2), VMSTATE_UINT8(mode, I8257Regs), @@ -542,7 +542,7 @@ static const VMStateDescription vmstate_i8257 = { .version_id = 1, .minimum_version_id = 1, .post_load = i8257_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(command, I8257State), VMSTATE_UINT8(mask, I8257State), VMSTATE_UINT8(flip_flop, I8257State), diff --git a/hw/dma/pl080.c b/hw/dma/pl080.c index 2627307cc85aabab25d4bc49f756093349e86898..1e49c22e933426eda5bf4297faf65267f0747ea5 100644 --- a/hw/dma/pl080.c +++ b/hw/dma/pl080.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_pl080_channel = { .name = "pl080_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src, pl080_channel), VMSTATE_UINT32(dest, pl080_channel), VMSTATE_UINT32(lli, pl080_channel), @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_pl080 = { .name = "pl080", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tc_int, PL080State), VMSTATE_UINT8(tc_mask, PL080State), VMSTATE_UINT8(err_int, PL080State), diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index e7e67dd8b6a76af6c3a18736f7c8adce9ef3e36f..70a502d24529d8f0dde6e8d7844e03f37be4fe2f 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -139,7 +139,7 @@ static const VMStateDescription vmstate_pl330_chan = { .name = "pl330_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src, PL330Chan), VMSTATE_UINT32(dst, PL330Chan), VMSTATE_UINT32(pc, PL330Chan), @@ -170,7 +170,7 @@ static const VMStateDescription vmstate_pl330_fifo = { .name = "pl330_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, 
buf_size), VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size), VMSTATE_UINT32(head, PL330Fifo), @@ -194,7 +194,7 @@ static const VMStateDescription vmstate_pl330_queue_entry = { .name = "pl330_queue_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(addr, PL330QueueEntry), VMSTATE_UINT32(len, PL330QueueEntry), VMSTATE_UINT8(n, PL330QueueEntry), @@ -216,7 +216,7 @@ static const VMStateDescription vmstate_pl330_queue = { .name = "pl330_queue", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT32(queue, PL330Queue, queue_size, vmstate_pl330_queue_entry, PL330QueueEntry), @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_pl330 = { .name = "pl330", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(chan, PL330State, num_chnls, vmstate_pl330_chan, PL330Chan), diff --git a/hw/dma/pxa2xx_dma.c b/hw/dma/pxa2xx_dma.c index fa896f7edf776441c5434aa4702db6a4fa0a7315..9f62f0b633b5b06d03c270ba2e47d9ab91ead17e 100644 --- a/hw/dma/pxa2xx_dma.c +++ b/hw/dma/pxa2xx_dma.c @@ -529,7 +529,7 @@ static const VMStateDescription vmstate_pxa2xx_dma_chan = { .name = "pxa2xx_dma_chan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(descr, PXA2xxDMAChannel), VMSTATE_UINT32(src, PXA2xxDMAChannel), VMSTATE_UINT32(dest, PXA2xxDMAChannel), @@ -544,7 +544,7 @@ static const VMStateDescription vmstate_pxa2xx_dma = { .name = "pxa2xx_dma", .version_id = 1, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED_TEST(is_version_0, 4), VMSTATE_UINT32(stopintr, PXA2xxDMAState), VMSTATE_UINT32(eorintr, PXA2xxDMAState), diff --git a/hw/dma/rc4030.c b/hw/dma/rc4030.c index aa1d323a36b9b881c19e69ce3969e1a18eda25be..915284194fe01b8710c8b3fc8a5a63da7dad1471 100644 --- a/hw/dma/rc4030.c +++ b/hw/dma/rc4030.c @@ -568,7 +568,7 @@ static const VMStateDescription vmstate_rc4030 = { .name = "rc4030", .version_id = 3, .post_load = rc4030_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(config, rc4030State), VMSTATE_UINT32(invalid_address_register, rc4030State), VMSTATE_UINT32_2DARRAY(dma_regs, rc4030State, 8, 4), diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c index 0ef13c5e9a8cc2ac826d622da43b3fa072230ba4..8019641942774570ef58af95b9b33ac1ca3ba8bb 100644 --- a/hw/dma/sparc32_dma.c +++ b/hw/dma/sparc32_dma.c @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_sparc32_dma_device = { .name ="sparc32_dma", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dmaregs, DMADeviceState, DMA_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/dma/xlnx-zdma.c b/hw/dma/xlnx-zdma.c index 84c0083013e3442246d096ae5bf9dc379561f509..670c9568669430d76b3cca713026e49ecfe80a6e 100644 --- a/hw/dma/xlnx-zdma.c +++ b/hw/dma/xlnx-zdma.c @@ -801,7 +801,7 @@ static const VMStateDescription vmstate_zdma = { .name = TYPE_XLNX_ZDMA, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX), VMSTATE_UINT32(state, XlnxZDMA), VMSTATE_UINT32_ARRAY(dsc_src.words, 
XlnxZDMA, 4), diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c index f5ad1a0d22cd031d3633c9108c0716bad1347f6f..e901f68ff34f788a127c18660f29a6d463d1ccd1 100644 --- a/hw/dma/xlnx-zynq-devcfg.c +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -333,7 +333,7 @@ static const VMStateDescription vmstate_xlnx_zynq_devcfg_dma_cmd = { .name = "xlnx_zynq_devcfg_dma_cmd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(src_addr, XlnxZynqDevcfgDMACmd), VMSTATE_UINT32(dest_addr, XlnxZynqDevcfgDMACmd), VMSTATE_UINT32(src_len, XlnxZynqDevcfgDMACmd), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_xlnx_zynq_devcfg = { .name = "xlnx_zynq_devcfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(dma_cmd_fifo, XlnxZynqDevcfg, XLNX_ZYNQ_DEVCFG_DMA_CMD_FIFO_LEN, 0, vmstate_xlnx_zynq_devcfg_dma_cmd, diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c index bc1505aade73b2ec54249171cd51eb291c742ab7..ae307482f22d3335e3e82bc14496f94f0fe3a5ca 100644 --- a/hw/dma/xlnx_csu_dma.c +++ b/hw/dma/xlnx_csu_dma.c @@ -681,7 +681,7 @@ static const VMStateDescription vmstate_xlnx_csu_dma = { .name = TYPE_XLNX_CSU_DMA, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(src_timer, XlnxCSUDMA), VMSTATE_UINT16(width, XlnxCSUDMA), VMSTATE_BOOL(is_dst, XlnxCSUDMA), diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c index dd66be5265dfa368e260039f54434cc61833db41..1f5cd64ed10230b3fbaad97951104956a054d0e9 100644 --- a/hw/dma/xlnx_dpdma.c +++ b/hw/dma/xlnx_dpdma.c @@ -277,7 +277,7 @@ static inline bool xlnx_dpdma_desc_ignore_done_bit(DPDMADescriptor *desc) static const VMStateDescription vmstate_xlnx_dpdma = { .name = TYPE_XLNX_DPDMA, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(registers, XlnxDPDMAState, XLNX_DPDMA_REG_ARRAY_SIZE), VMSTATE_BOOL_ARRAY(operation_finished, XlnxDPDMAState, 6), diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index 1e267dd48203c23d3266124ef0e14aa1bd275c64..c1781e2ba36f41838bcb488d6d8e1b5a718d8bce 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -1067,7 +1067,7 @@ static const VMStateDescription vmstate_gpio_regs = { .name = TYPE_ASPEED_GPIO"/regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(data_value, GPIOSets), VMSTATE_UINT32(data_read, GPIOSets), VMSTATE_UINT32(direction, GPIOSets), @@ -1090,7 +1090,7 @@ static const VMStateDescription vmstate_aspeed_gpio = { .name = TYPE_ASPEED_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(sets, AspeedGPIOState, ASPEED_GPIO_MAX_NR_SETS, 1, vmstate_gpio_regs, GPIOSets), VMSTATE_UINT32_ARRAY(debounce_regs, AspeedGPIOState, diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c index c995bba1d9f9ac6dcd3f765c72d1cdd5777a0e8a..6bd50bb0b693383cfa79967474dfcb8c7eaa7ba7 100644 --- a/hw/gpio/bcm2835_gpio.c +++ b/hw/gpio/bcm2835_gpio.c @@ -284,7 +284,7 @@ static const VMStateDescription vmstate_bcm2835_gpio = { .name = "bcm2835_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(fsel, BCM2835GpioState, 54), VMSTATE_UINT32(lev0, BCM2835GpioState), VMSTATE_UINT32(lev1, 
BCM2835GpioState), diff --git a/hw/gpio/gpio_key.c b/hw/gpio/gpio_key.c index 74f61383562dae28288333f02f7b2562d10a0111..61bb5870589f995b6033d371ccde2bce3df945ec 100644 --- a/hw/gpio/gpio_key.c +++ b/hw/gpio/gpio_key.c @@ -45,7 +45,7 @@ static const VMStateDescription vmstate_gpio_key = { .name = "gpio-key", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, GPIOKEYState), VMSTATE_END_OF_LIST() } diff --git a/hw/gpio/imx_gpio.c b/hw/gpio/imx_gpio.c index c7f98b7bb1531a9b670228af12a078b1cc126b3a..e53b00d951d8aaa4d3952ea8ca142a606f03e629 100644 --- a/hw/gpio/imx_gpio.c +++ b/hw/gpio/imx_gpio.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_imx_gpio = { .name = TYPE_IMX_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dr, IMXGPIOState), VMSTATE_UINT32(gdir, IMXGPIOState), VMSTATE_UINT32(psr, IMXGPIOState), diff --git a/hw/gpio/max7310.c b/hw/gpio/max7310.c index 4470cfe9856e268047f1295944b64c3cff15af7f..86315714fbdca0ad194aa433da01cf205573bc90 100644 --- a/hw/gpio/max7310.c +++ b/hw/gpio/max7310.c @@ -155,7 +155,7 @@ static const VMStateDescription vmstate_max7310 = { .name = "max7310", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(i2c_command_byte, MAX7310State), VMSTATE_INT32(len, MAX7310State), VMSTATE_UINT8(level, MAX7310State), diff --git a/hw/gpio/mpc8xxx.c b/hw/gpio/mpc8xxx.c index cb42acb6da83e43e99989dd61dbd8c0d6c7440cb..0b3f9e516da758c6e5275106f50bf21d1e505a0a 100644 --- a/hw/gpio/mpc8xxx.c +++ b/hw/gpio/mpc8xxx.c @@ -48,7 +48,7 @@ static const VMStateDescription vmstate_mpc8xxx_gpio = { .name = "mpc8xxx_gpio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dir, MPC8XXXGPIOState), VMSTATE_UINT32(odr, MPC8XXXGPIOState), VMSTATE_UINT32(dat, MPC8XXXGPIOState), diff --git a/hw/gpio/npcm7xx_gpio.c b/hw/gpio/npcm7xx_gpio.c index 3376901ab13ec8053137ddbd9bc8fa686495deb1..6e70ac1f24b591c6417829a33ca2dfdf00c5caa4 100644 --- a/hw/gpio/npcm7xx_gpio.c +++ b/hw/gpio/npcm7xx_gpio.c @@ -377,7 +377,7 @@ static const VMStateDescription vmstate_npcm7xx_gpio = { .name = "npcm7xx-gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pin_level, NPCM7xxGPIOState), VMSTATE_UINT32(ext_level, NPCM7xxGPIOState), VMSTATE_UINT32(ext_driven, NPCM7xxGPIOState), diff --git a/hw/gpio/nrf51_gpio.c b/hw/gpio/nrf51_gpio.c index 08396c69a4bc99c6024e936c028f37db19606d12..ffc7dff79646c641afb2fe7b2a4fe5b8544f59d5 100644 --- a/hw/gpio/nrf51_gpio.c +++ b/hw/gpio/nrf51_gpio.c @@ -280,7 +280,7 @@ static const VMStateDescription vmstate_nrf51_gpio = { .name = TYPE_NRF51_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(out, NRF51GPIOState), VMSTATE_UINT32(in, NRF51GPIOState), VMSTATE_UINT32(in_mask, NRF51GPIOState), diff --git a/hw/gpio/pl061.c b/hw/gpio/pl061.c index 899be861cc5f94ba5cb08728c926d0dfcbb308b5..86f23836553e3a9ec7b94ab6bcc0e8c616edcabb 100644 --- a/hw/gpio/pl061.c +++ b/hw/gpio/pl061.c @@ -87,7 +87,7 @@ static const VMStateDescription vmstate_pl061 = { .name = "pl061", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(locked, PL061State), 
VMSTATE_UINT32(data, PL061State), VMSTATE_UINT32(old_out_data, PL061State), diff --git a/hw/gpio/sifive_gpio.c b/hw/gpio/sifive_gpio.c index 78bf29e996f923fcec37baaf636ec8d6ceecc91d..995a43c79588a6de48145d0fa06706e2dcf25811 100644 --- a/hw/gpio/sifive_gpio.c +++ b/hw/gpio/sifive_gpio.c @@ -326,7 +326,7 @@ static const VMStateDescription vmstate_sifive_gpio = { .name = TYPE_SIFIVE_GPIO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(value, SIFIVEGPIOState), VMSTATE_UINT32(input_en, SIFIVEGPIOState), VMSTATE_UINT32(output_en, SIFIVEGPIOState), diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c index 7cf52a50412a765eabb807f2f1a18ca43b7ef485..5884804c589c3d021c0b53f3c4ee1f7a422c457d 100644 --- a/hw/gpio/zaurus.c +++ b/hw/gpio/zaurus.c @@ -222,7 +222,7 @@ static const VMStateDescription vmstate_scoop_regs = { .version_id = 1, .minimum_version_id = 0, .post_load = scoop_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(status, ScoopInfo), VMSTATE_UINT16(power, ScoopInfo), VMSTATE_UINT32(gpio_level, ScoopInfo), diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index c8da7c18d535792de388788dbbf52c507eda6a52..9e611620cc76f1127651f7e63e253dcf921d9f35 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -36,8 +36,8 @@ #define MIN_SEABIOS_HPPA_VERSION 12 /* require at least this fw version */ -/* Power button address at &PAGE0->pad[4] */ -#define HPA_POWER_BUTTON (0x40 + 4 * sizeof(uint32_t)) +#define HPA_POWER_BUTTON (FIRMWARE_END - 0x10) +static hwaddr soft_power_reg; #define enable_lasi_lan() 0 @@ -45,7 +45,6 @@ static DeviceState *lasi_dev; static void hppa_powerdown_req(Notifier *n, void *opaque) { - hwaddr soft_power_reg = HPA_POWER_BUTTON; uint32_t val; val = ldl_be_phys(&address_space_memory, soft_power_reg); @@ -221,7 +220,7 @@ static FWCfgState *create_fw_cfg(MachineState *ms, PCIBus *pci_bus, fw_cfg_add_file(fw_cfg, "/etc/hppa/machine", g_memdup(mc->name, len), len); - val = cpu_to_le64(HPA_POWER_BUTTON); + val = cpu_to_le64(soft_power_reg); fw_cfg_add_file(fw_cfg, "/etc/hppa/power-button-addr", g_memdup(&val, sizeof(val)), sizeof(val)); @@ -276,6 +275,7 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) unsigned int smp_cpus = machine->smp.cpus; TranslateFn *translate; MemoryRegion *cpu_region; + uint64_t ram_max; /* Create CPUs. */ for (unsigned int i = 0; i < smp_cpus; i++) { @@ -288,10 +288,14 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) */ if (hppa_is_pa20(&cpu[0]->env)) { translate = translate_pa20; + ram_max = 0xf0000000; /* 3.75 GB (limited by 32-bit firmware) */ } else { translate = translate_pa10; + ram_max = 0xf0000000; /* 3.75 GB (32-bit CPU) */ } + soft_power_reg = translate(NULL, HPA_POWER_BUTTON); + for (unsigned int i = 0; i < smp_cpus; i++) { g_autofree char *name = g_strdup_printf("cpu%u-io-eir", i); @@ -311,9 +315,9 @@ static TranslateFn *machine_HP_common_init_cpus(MachineState *machine) cpu_region); /* Main memory region. */ - if (machine->ram_size > 3 * GiB) { - error_report("RAM size is currently restricted to 3GB"); - exit(EXIT_FAILURE); + if (machine->ram_size > ram_max) { + info_report("Max RAM size limited to %" PRIu64 " MB", ram_max / MiB); + machine->ram_size = ram_max; } memory_region_add_subregion_overlap(addr_space, 0, machine->ram, -1); @@ -343,8 +347,10 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, SysBusDevice *s; /* SCSI disk setup. 
*/ - dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a")); - lsi53c8xx_handle_legacy_cmdline(dev); + if (drive_get_max_bus(IF_SCSI) >= 0) { + dev = DEVICE(pci_create_simple(pci_bus, -1, "lsi53c895a")); + lsi53c8xx_handle_legacy_cmdline(dev); + } /* Graphics setup. */ if (machine->enable_graphics && vga_interface_type != VGA_NONE) { @@ -357,7 +363,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, } /* Network setup. */ - if (enable_lasi_lan()) { + if (nd_table[0].used && enable_lasi_lan()) { lasi_82596_init(addr_space, translate(NULL, LASI_LAN_HPA), qdev_get_gpio_in(lasi_dev, LASI_IRQ_LAN_HPA)); } @@ -382,7 +388,7 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, pci_set_word(&pci_dev->config[PCI_SUBSYSTEM_ID], 0x1227); /* Powerbar */ /* create a second serial PCI card when running Astro */ - if (!lasi_dev) { + if (serial_hd(1) && !lasi_dev) { pci_dev = pci_new(-1, "pci-serial-4x"); qdev_prop_set_chr(DEVICE(pci_dev), "chardev1", serial_hd(1)); qdev_prop_set_chr(DEVICE(pci_dev), "chardev2", serial_hd(2)); diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index c64eaa5a46a04433dfc33313bbd4fdda8c619868..c86d1895bae5d6c053984c77cbe0a19ea97861dc 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_gpadl = { .name = "vmbus/gpadl", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VMBusGpadl), VMSTATE_UINT32(child_relid, VMBusGpadl), VMSTATE_UINT32(num_gfns, VMBusGpadl), @@ -1489,7 +1489,7 @@ static const VMStateDescription vmstate_channel = { .version_id = 0, .minimum_version_id = 0, .post_load = channel_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VMBusChannel), VMSTATE_UINT16(subchan_idx, VMBusChannel), VMSTATE_UINT32(open_id, VMBusChannel), @@ -2380,7 +2380,7 @@ const VMStateDescription vmstate_vmbus_dev = { .name = TYPE_VMBUS_DEVICE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(instanceid.data, VMBusDevice, 16), VMSTATE_UINT16(num_channels, VMBusDevice), VMSTATE_STRUCT_VARRAY_POINTER_UINT16(channels, VMBusDevice, @@ -2549,7 +2549,7 @@ static const VMStateDescription vmstate_post_message_input = { .name = "vmbus/hyperv_post_message_input", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* * skip connection_id and message_type as they are validated before * queueing and ignored on dequeueing @@ -2572,7 +2572,7 @@ static const VMStateDescription vmstate_rx_queue = { .version_id = 0, .minimum_version_id = 0, .needed = vmbus_rx_queue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rx_queue_head, VMBus), VMSTATE_UINT8(rx_queue_size, VMBus), VMSTATE_STRUCT_ARRAY(rx_queue, VMBus, @@ -2589,7 +2589,7 @@ static const VMStateDescription vmstate_vmbus = { .minimum_version_id = 0, .pre_load = vmbus_pre_load, .post_load = vmbus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(state, VMBus), VMSTATE_UINT32(version, VMBus), VMSTATE_UINT32(target_vp, VMBus), @@ -2598,7 +2598,7 @@ static const VMStateDescription vmstate_vmbus = { vmstate_gpadl, VMBusGpadl, link), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_rx_queue, NULL 
} @@ -2643,7 +2643,7 @@ static const VMStateDescription vmstate_vmbus_bridge = { .name = TYPE_VMBUS_BRIDGE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(bus, VMBusBridge, vmstate_vmbus, VMBus), VMSTATE_END_OF_LIST() }, diff --git a/hw/i2c/allwinner-i2c.c b/hw/i2c/allwinner-i2c.c index 9e8efa1d63f23e980ea8b6b4f2188c8779a28919..8abcc39a5c26144bb4a87ccb1326976a84763335 100644 --- a/hw/i2c/allwinner-i2c.c +++ b/hw/i2c/allwinner-i2c.c @@ -415,7 +415,7 @@ static const VMStateDescription allwinner_i2c_vmstate = { .name = TYPE_AW_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, AWI2CState), VMSTATE_UINT8(xaddr, AWI2CState), VMSTATE_UINT8(data, AWI2CState), diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index 1037c22b2f79a3a70ec6d58bda9557db3bf83479..b43afd250de4dda41361d96e0aba1d6707fcb030 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -945,7 +945,7 @@ static const VMStateDescription aspeed_i2c_bus_vmstate = { .name = TYPE_ASPEED_I2C, .version_id = 5, .minimum_version_id = 5, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedI2CBus, ASPEED_I2C_NEW_NUM_REG), VMSTATE_END_OF_LIST() } @@ -955,7 +955,7 @@ static const VMStateDescription aspeed_i2c_vmstate = { .name = TYPE_ASPEED_I2C, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intr_status, AspeedI2CState), VMSTATE_STRUCT_ARRAY(busses, AspeedI2CState, ASPEED_I2C_NR_BUSSES, 1, aspeed_i2c_bus_vmstate, diff --git a/hw/i2c/core.c b/hw/i2c/core.c index 879a1d45cb1dbc98d58736b7cd2228472f645a06..4cf30b2c8638d39bb196ea729f4b28016662aac9 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -50,7 +50,7 @@ static const VMStateDescription vmstate_i2c_bus = { .version_id = 1, .minimum_version_id = 1, .pre_save = i2c_bus_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(saved_address, I2CBus), VMSTATE_END_OF_LIST() } @@ -359,7 +359,7 @@ const VMStateDescription vmstate_i2c_slave = { .version_id = 1, .minimum_version_id = 1, .post_load = i2c_slave_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(address, I2CSlave), VMSTATE_END_OF_LIST() } diff --git a/hw/i2c/exynos4210_i2c.c b/hw/i2c/exynos4210_i2c.c index b65a7d0222ef42ba8b535d0337d92c2a9439f626..9445424d5fd173a32d249d22208a423e37ba2f0d 100644 --- a/hw/i2c/exynos4210_i2c.c +++ b/hw/i2c/exynos4210_i2c.c @@ -273,7 +273,7 @@ static const VMStateDescription exynos4210_i2c_vmstate = { .name = "exynos4210.i2c", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(i2ccon, Exynos4210I2CState), VMSTATE_UINT8(i2cstat, Exynos4210I2CState), VMSTATE_UINT8(i2cds, Exynos4210I2CState), diff --git a/hw/i2c/imx_i2c.c b/hw/i2c/imx_i2c.c index 9792583fea754f185f010f11dbb0f81f249e2670..a25676f025491b18e8a8bac4af06d6fbc4b4a889 100644 --- a/hw/i2c/imx_i2c.c +++ b/hw/i2c/imx_i2c.c @@ -285,7 +285,7 @@ static const VMStateDescription imx_i2c_vmstate = { .name = TYPE_IMX_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(address, IMXI2CState), VMSTATE_UINT16(iadr, IMXI2CState), VMSTATE_UINT16(ifdr, IMXI2CState), diff --git a/hw/i2c/microbit_i2c.c b/hw/i2c/microbit_i2c.c index 
e92f9f84ea81596bd64695452818430ddaf77c87..24d36d15b092b421afaa5e19e22538caa286b679 100644 --- a/hw/i2c/microbit_i2c.c +++ b/hw/i2c/microbit_i2c.c @@ -80,7 +80,7 @@ static const VMStateDescription microbit_i2c_vmstate = { .name = TYPE_MICROBIT_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, MicrobitI2CState, MICROBIT_I2C_NREGS), VMSTATE_UINT32(read_idx, MicrobitI2CState), VMSTATE_END_OF_LIST() diff --git a/hw/i2c/mpc_i2c.c b/hw/i2c/mpc_i2c.c index 219c5484028cc028815ba985b0793346b091a5d4..cb051a520f7898bff3bf80a3beeef8cb3ffc3ea3 100644 --- a/hw/i2c/mpc_i2c.c +++ b/hw/i2c/mpc_i2c.c @@ -312,7 +312,7 @@ static const VMStateDescription mpc_i2c_vmstate = { .name = TYPE_MPC_I2C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(address, MPCI2CState), VMSTATE_UINT8(adr, MPCI2CState), VMSTATE_UINT8(fdr, MPCI2CState), diff --git a/hw/i2c/npcm7xx_smbus.c b/hw/i2c/npcm7xx_smbus.c index e7e0ba66fe725400998916984f54203c76c8ea53..0ea3083bb6ec9d5b53c38dd588abd61c49123b10 100644 --- a/hw/i2c/npcm7xx_smbus.c +++ b/hw/i2c/npcm7xx_smbus.c @@ -1046,7 +1046,7 @@ static const VMStateDescription vmstate_npcm7xx_smbus = { .name = "npcm7xx-smbus", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sda, NPCM7xxSMBusState), VMSTATE_UINT8(st, NPCM7xxSMBusState), VMSTATE_UINT8(cst, NPCM7xxSMBusState), diff --git a/hw/i2c/pm_smbus.c b/hw/i2c/pm_smbus.c index 78e7c229a85608632b1557aaf5beb28b8eb71ab2..3eed8110b95ad27b263f089bbb1dfc3b81a34007 100644 --- a/hw/i2c/pm_smbus.c +++ b/hw/i2c/pm_smbus.c @@ -455,7 +455,7 @@ const VMStateDescription pmsmb_vmstate = { .name = "pmsmb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(smb_stat, PMSMBus), VMSTATE_UINT8(smb_ctl, PMSMBus), VMSTATE_UINT8(smb_cmd, PMSMBus), diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index 1b978e588f183a3709ddffa2459414d2b98ded77..ba1d2fd71604cc72631e20de12e408b7aab30f38 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -1886,7 +1886,7 @@ const VMStateDescription vmstate_pmbus_device = { .name = TYPE_PMBUS_DEVICE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(smb, PMBusDevice), VMSTATE_UINT8(num_pages, PMBusDevice), VMSTATE_UINT8(code, PMBusDevice), diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c index 12c5741f3884a3ce17a9583f5661722e91efdf75..c42236bb1397ce1a63e906a4bee679646204de7a 100644 --- a/hw/i2c/smbus_eeprom.c +++ b/hw/i2c/smbus_eeprom.c @@ -100,7 +100,7 @@ static const VMStateDescription vmstate_smbus_eeprom = { .version_id = 1, .minimum_version_id = 1, .needed = smbus_eeprom_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(smbusdev, SMBusEEPROMDevice), VMSTATE_UINT8_ARRAY(data, SMBusEEPROMDevice, SMBUS_EEPROM_SIZE), VMSTATE_UINT8(offset, SMBusEEPROMDevice), diff --git a/hw/i2c/smbus_ich9.c b/hw/i2c/smbus_ich9.c index 18d40e93c10663751001dfd2c9195bbb2b6832d9..208f263ac5b2241487dc9cb5b97e81e1b61e8d29 100644 --- a/hw/i2c/smbus_ich9.c +++ b/hw/i2c/smbus_ich9.c @@ -50,7 +50,7 @@ static const VMStateDescription vmstate_ich9_smbus = { .name = "ich9_smb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_PCI_DEVICE(dev, ICH9SMBState), VMSTATE_BOOL_TEST(irq_enabled, ICH9SMBState, ich9_vmstate_need_smbus), VMSTATE_STRUCT_TEST(smb, ICH9SMBState, ich9_vmstate_need_smbus, 1, diff --git a/hw/i2c/smbus_slave.c b/hw/i2c/smbus_slave.c index 2ef2c7c5f69495464bc5965ae9b251db6425ad70..1300c9ec72edc93225dc00419f67594795cb31ef 100644 --- a/hw/i2c/smbus_slave.c +++ b/hw/i2c/smbus_slave.c @@ -215,7 +215,7 @@ const VMStateDescription vmstate_smbus_device = { .name = TYPE_SMBUS_DEVICE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(i2c, SMBusDevice), VMSTATE_INT32(mode, SMBusDevice), VMSTATE_INT32(data_len, SMBusDevice), diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 55850791df4148f5535eb06b76e09dabf75d84f1..a1846be6f761b1c40f63213df536d825a9c9a966 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -95,6 +95,7 @@ config Q35 imply E1000E_PCI_EXPRESS imply VMPORT imply VMMOUSE + imply IOMMUFD select PC_PCI select PC_ACPI select PCI_EXPRESS_Q35 diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 80db183b786afbe1fc44b04e45ac0eaa08f7f3d2..edc979379c03bedbd65bf374f3ba278884e3cfd4 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -2770,7 +2770,7 @@ static const VMStateDescription vmstate_acpi_build = { .name = "acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c index 2909a7393382c4c2f5d6f34278a8fee9eb8d2ee4..279da6b4aa2ffbea05accc9091c9765c09bc2b19 100644 --- a/hw/i386/acpi-microvm.c +++ b/hw/i386/acpi-microvm.c @@ -37,6 +37,7 @@ #include "hw/pci/pci.h" #include "hw/pci/pcie_host.h" #include "hw/usb/xhci.h" +#include "hw/virtio/virtio-acpi.h" #include "hw/virtio/virtio-mmio.h" #include "hw/input/i8042.h" @@ -77,19 +78,7 @@ static void acpi_dsdt_add_virtio(Aml *scope, uint32_t irq = mms->virtio_irq_base + index; hwaddr base = VIRTIO_MMIO_BASE + index * 512; hwaddr size = 512; - - Aml *dev = aml_device("VR%02u", (unsigned)index); - aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); - aml_append(dev, aml_name_decl("_UID", aml_int(index))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); - - Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(base, size, AML_READ_WRITE)); - aml_append(crs, - aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, - AML_EXCLUSIVE, &irq, 1)); - aml_append(dev, aml_name_decl("_CRS", crs)); - aml_append(scope, dev); + virtio_acpi_dsdt_add(scope, base, size, irq, index, 1); } } } diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 5085a6fee3f1d4543b5947c1bc99cbc06982239d..1a07faddb40a02d8c0a6f41d772cbdd58766386f 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1665,7 +1665,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) { bool use_iommu, pt; /* Whether we need to take the BQL on our own */ - bool take_bql = !qemu_mutex_iothread_locked(); + bool take_bql = !bql_locked(); assert(as); @@ -1683,7 +1683,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) * it. We'd better make sure we have had it already, or, take it. 
*/ if (take_bql) { - qemu_mutex_lock_iothread(); + bql_lock(); } /* Turn off first then on the other */ @@ -1738,7 +1738,7 @@ static bool vtd_switch_address_space(VTDAddressSpace *as) } if (take_bql) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return use_iommu; @@ -3289,7 +3289,7 @@ static const VMStateDescription vtd_vmstate = { .minimum_version_id = 1, .priority = MIG_PRI_IOMMU, .post_load = vtd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(root, IntelIOMMUState), VMSTATE_UINT64(intr_root, IntelIOMMUState), VMSTATE_UINT64(iq, IntelIOMMUState), diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c index e756b0aa43f6d5b07d71be13e2a6236632443290..40aa9a32c32c808f3c870fa17ae33f0016c54b0a 100644 --- a/hw/i386/kvm/clock.c +++ b/hw/i386/kvm/clock.c @@ -245,7 +245,7 @@ static const VMStateDescription kvmclock_reliable_get_clock = { .version_id = 1, .minimum_version_id = 1, .needed = kvmclock_clock_is_reliable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(clock_is_reliable, KVMClockState), VMSTATE_END_OF_LIST() } @@ -295,11 +295,11 @@ static const VMStateDescription kvmclock_vmsd = { .minimum_version_id = 1, .pre_load = kvmclock_pre_load, .pre_save = kvmclock_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(clock, KVMClockState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &kvmclock_reliable_get_clock, NULL } diff --git a/hw/i386/kvm/xen_evtchn.c b/hw/i386/kvm/xen_evtchn.c index 02b8cbf8dfffeb925668d476c3782316abbb1848..0171ef6d59fd0c385644d9a8071a5452f8694b8d 100644 --- a/hw/i386/kvm/xen_evtchn.c +++ b/hw/i386/kvm/xen_evtchn.c @@ -240,7 +240,7 @@ static const VMStateDescription xen_evtchn_port_vmstate = { .name = "xen_evtchn_port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vcpu, XenEvtchnPort), VMSTATE_UINT16(type, XenEvtchnPort), VMSTATE_UINT16(u.val, XenEvtchnPort), @@ -255,7 +255,7 @@ static const VMStateDescription xen_evtchn_vmstate = { .needed = xen_evtchn_is_needed, .pre_load = xen_evtchn_pre_load, .post_load = xen_evtchn_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(callback_param, XenEvtchnState), VMSTATE_UINT32(nr_ports, XenEvtchnState), VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1, @@ -425,7 +425,7 @@ void xen_evtchn_set_callback_level(int level) * effect immediately. That just leaves interdomain loopback as the case * which uses the BH. */ - if (!qemu_mutex_iothread_locked()) { + if (!bql_locked()) { qemu_bh_schedule(s->gsi_bh); return; } @@ -459,7 +459,7 @@ int xen_evtchn_set_callback_param(uint64_t param) * We need the BQL because set_callback_pci_intx() may call into PCI code, * and because we may need to manipulate the old and new GSI levels. 
*/ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); qemu_mutex_lock(&s->port_lock); switch (type) { @@ -1037,7 +1037,7 @@ static int close_port(XenEvtchnState *s, evtchn_port_t port, XenEvtchnPort *p = &s->port_table[port]; /* Because it *might* be a PIRQ port */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); switch (p->type) { case EVTCHNSTAT_closed: @@ -1104,7 +1104,7 @@ int xen_evtchn_soft_reset(void) return -ENOTSUP; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); qemu_mutex_lock(&s->port_lock); @@ -1127,7 +1127,7 @@ int xen_evtchn_reset_op(struct evtchn_reset *reset) return -ESRCH; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); return xen_evtchn_soft_reset(); } @@ -1145,7 +1145,7 @@ int xen_evtchn_close_op(struct evtchn_close *close) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); qemu_mutex_lock(&s->port_lock); ret = close_port(s, close->port, &flush_kvm_routes); @@ -1272,7 +1272,7 @@ int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (s->pirq[pirq->pirq].port) { return -EBUSY; @@ -1601,7 +1601,7 @@ bool xen_evtchn_set_gsi(int gsi, int level) XenEvtchnState *s = xen_evtchn_singleton; int pirq; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) { return false; @@ -1712,7 +1712,7 @@ void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector, return; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(addr, data); @@ -1749,7 +1749,7 @@ int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route, return 1; /* Not a PIRQ */ } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(address, data); if (!pirq || pirq >= s->nr_pirqs) { @@ -1796,7 +1796,7 @@ bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data) return false; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); pirq = msi_pirq_target(address, data); if (!pirq || pirq >= s->nr_pirqs) { @@ -1824,7 +1824,7 @@ int xen_physdev_map_pirq(struct physdev_map_pirq *map) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (map->domid != DOMID_SELF && map->domid != xen_domid) { @@ -1884,7 +1884,7 @@ int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); qemu_mutex_lock(&s->port_lock); if (!pirq_inuse(s, pirq)) { @@ -1924,7 +1924,7 @@ int xen_physdev_eoi_pirq(struct physdev_eoi *eoi) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (!pirq_inuse(s, pirq)) { @@ -1956,7 +1956,7 @@ int xen_physdev_query_pirq(struct physdev_irq_status_query *query) return -ENOTSUP; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->port_lock); if (!pirq_inuse(s, pirq)) { diff --git a/hw/i386/kvm/xen_gnttab.c b/hw/i386/kvm/xen_gnttab.c index 0a24f53f20ed752b75be483f09a7a199ab4dcc73..245e4b15db7bf40cebfedd7e05b07b47eff22014 100644 --- a/hw/i386/kvm/xen_gnttab.c +++ b/hw/i386/kvm/xen_gnttab.c @@ -127,7 +127,7 @@ static const VMStateDescription xen_gnttab_vmstate = { .minimum_version_id = 1, .needed = xen_gnttab_is_needed, .post_load = xen_gnttab_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(nr_frames, XenGnttabState), VMSTATE_VARRAY_UINT32(gnt_frame_gpas, XenGnttabState, nr_frames, 0, 
vmstate_info_uint64, uint64_t), @@ -176,7 +176,7 @@ int xen_gnttab_map_page(uint64_t idx, uint64_t gfn) return -EINVAL; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); QEMU_LOCK_GUARD(&s->gnt_lock); xen_overlay_do_map_page(&s->gnt_aliases[idx], gpa); diff --git a/hw/i386/kvm/xen_overlay.c b/hw/i386/kvm/xen_overlay.c index 39fda1b72c3f8ed8c807a65787795c494feb20e0..c68e78ac5ce2eded9ea56c0b3a788d6b2ea8b81b 100644 --- a/hw/i386/kvm/xen_overlay.c +++ b/hw/i386/kvm/xen_overlay.c @@ -139,7 +139,7 @@ static const VMStateDescription xen_overlay_vmstate = { .needed = xen_overlay_is_needed, .pre_save = xen_overlay_pre_save, .post_load = xen_overlay_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(shinfo_gpa, XenOverlayState), VMSTATE_BOOL(long_mode, XenOverlayState), VMSTATE_END_OF_LIST() @@ -194,7 +194,7 @@ int xen_overlay_map_shinfo_page(uint64_t gpa) return -ENOENT; } - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (s->shinfo_gpa) { /* If removing shinfo page, turn the kernel magic off first */ diff --git a/hw/i386/kvm/xen_xenstore.c b/hw/i386/kvm/xen_xenstore.c index 6e651960b3ab5eb0cd1fc59b91496f048d352f5f..1a9bc342b888dbe3b020788758166796906707d7 100644 --- a/hw/i386/kvm/xen_xenstore.c +++ b/hw/i386/kvm/xen_xenstore.c @@ -243,7 +243,7 @@ static const VMStateDescription xen_xenstore_vmstate = { .needed = xen_xenstore_is_needed, .pre_save = xen_xenstore_pre_save, .post_load = xen_xenstore_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(req_data, XenXenstoreState, sizeof_field(XenXenstoreState, req_data)), VMSTATE_UINT8_ARRAY(rsp_data, XenXenstoreState, @@ -1341,7 +1341,7 @@ static void fire_watch_cb(void *opaque, const char *path, const char *token) { XenXenstoreState *s = opaque; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* * If there's a response pending, we obviously can't scribble over diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c index 43f8a8f679e353adbf37e2cfa21e9c482d2817b0..f2b0aff4798b5330df2b50f63738f9de39e9d363 100644 --- a/hw/i386/kvmvapic.c +++ b/hw/i386/kvmvapic.c @@ -802,7 +802,7 @@ static const VMStateDescription vmstate_handlers = { .name = "kvmvapic-handlers", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(set_tpr, VAPICHandlers), VMSTATE_UINT32(set_tpr_eax, VAPICHandlers), VMSTATE_UINT32_ARRAY(get_tpr, VAPICHandlers, 8), @@ -815,7 +815,7 @@ static const VMStateDescription vmstate_guest_rom = { .name = "kvmvapic-guest-rom", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(8), /* signature */ VMSTATE_UINT32(vaddr, GuestROMState), VMSTATE_UINT32(fixup_start, GuestROMState), @@ -835,7 +835,7 @@ static const VMStateDescription vmstate_vapic = { .version_id = 1, .minimum_version_id = 1, .post_load = vapic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(rom_state, VAPICROMState, 0, vmstate_guest_rom, GuestROMState), VMSTATE_UINT32(state, VAPICROMState), diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 29b9964733ed118f330753c04146bebf4580cddf..496498df3a8f4733a836988bdb8951325b16c418 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -78,6 +78,9 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, +GlobalProperty pc_compat_8_2[] = {}; +const size_t 
pc_compat_8_2_len = G_N_ELEMENTS(pc_compat_8_2); + GlobalProperty pc_compat_8_1[] = {}; const size_t pc_compat_8_1_len = G_N_ELEMENTS(pc_compat_8_1); diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index eace8543358a85a0d1f1f75353bb0e601db3b0e0..042c13cdbc335ed2f9853e977e33d730b2ba1e07 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -545,13 +545,26 @@ static void pc_i440fx_machine_options(MachineClass *m) "Use a different south bridge than PIIX3"); } -static void pc_i440fx_8_2_machine_options(MachineClass *m) +static void pc_i440fx_9_0_machine_options(MachineClass *m) { pc_i440fx_machine_options(m); m->alias = "pc"; m->is_default = true; } +DEFINE_I440FX_MACHINE(v9_0, "pc-i440fx-9.0", NULL, + pc_i440fx_9_0_machine_options); + +static void pc_i440fx_8_2_machine_options(MachineClass *m) +{ + pc_i440fx_9_0_machine_options(m); + m->alias = NULL; + m->is_default = false; + + compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len); + compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len); +} + DEFINE_I440FX_MACHINE(v8_2, "pc-i440fx-8.2", NULL, pc_i440fx_8_2_machine_options); @@ -560,8 +573,6 @@ static void pc_i440fx_8_1_machine_options(MachineClass *m) PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_8_2_machine_options(m); - m->alias = NULL; - m->is_default = false; pcmc->broken_32bit_mem_addr_check = true; compat_props_add(m->compat_props, hw_compat_8_1, hw_compat_8_1_len); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 4f3e5412f6b8b42dffb55b4e6527a019fed35628..f43d5142b8e595dc3c1e8e68ea97762fd026e6d4 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -383,12 +383,23 @@ static void pc_q35_machine_options(MachineClass *m) machine_class_allow_dynamic_sysbus_dev(m, TYPE_VMBUS_BRIDGE); } -static void pc_q35_8_2_machine_options(MachineClass *m) +static void pc_q35_9_0_machine_options(MachineClass *m) { pc_q35_machine_options(m); m->alias = "q35"; } +DEFINE_Q35_MACHINE(v9_0, "pc-q35-9.0", NULL, + pc_q35_9_0_machine_options); + +static void pc_q35_8_2_machine_options(MachineClass *m) +{ + pc_q35_9_0_machine_options(m); + m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_8_2, hw_compat_8_2_len); + compat_props_add(m->compat_props, pc_compat_8_2, pc_compat_8_2_len); +} + DEFINE_Q35_MACHINE(v8_2, "pc-q35-8.2", NULL, pc_q35_8_2_machine_options); diff --git a/hw/i386/port92.c b/hw/i386/port92.c index e1379a4f98001ea5ce741a7ba10ff5c02a057ed7..1070bfbf36127c579775b2c8e9b0d7c9e7b6b594 100644 --- a/hw/i386/port92.c +++ b/hw/i386/port92.c @@ -54,7 +54,7 @@ static const VMStateDescription vmstate_port92_isa = { .name = "port92", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(outport, Port92State), VMSTATE_END_OF_LIST() } diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c index 26833eb233c5b6448831c76c31266babaa070265..16b1dfd90bb52a48a0a4ffcc76485d2baccf724e 100644 --- a/hw/i386/sgx-stub.c +++ b/hw/i386/sgx-stub.c @@ -34,5 +34,5 @@ void pc_machine_init_sgx_epc(PCMachineState *pcms) bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) { - g_assert_not_reached(); + return true; } diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index 91320afa2f6e24ee5b06be22304d4f088e796d1b..a8d014d09a8c5f9d6465c189cef964d595e76be6 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_vmmouse = { .version_id = 0, .minimum_version_id = 0, .post_load = vmmouse_post_load, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(queue_size, VMMouseState, NULL), VMSTATE_UINT32_ARRAY(queue, VMMouseState, VMMOUSE_QUEUE_SIZE), VMSTATE_UINT16(nb_queue, VMMouseState), diff --git a/hw/i386/xen/xen_platform.c b/hw/i386/xen/xen_platform.c index ef7d3fc05f01ac86819d5d5efe70247ceeb9aea0..708488af32d61c757abe4cd793b7846f6c0355b6 100644 --- a/hw/i386/xen/xen_platform.c +++ b/hw/i386/xen/xen_platform.c @@ -537,7 +537,7 @@ static const VMStateDescription vmstate_xen_platform = { .version_id = 4, .minimum_version_id = 4, .post_load = xen_platform_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIXenPlatformState), VMSTATE_UINT8(flags, PCIXenPlatformState), VMSTATE_END_OF_LIST() diff --git a/hw/i386/xen/xen_pvdevice.c b/hw/i386/xen/xen_pvdevice.c index e62e06622b09b98ae1f90d7b1153f0412d233b65..ed621531d81083b7c2ce95d1ef2cae91ad0476d1 100644 --- a/hw/i386/xen/xen_pvdevice.c +++ b/hw/i386/xen/xen_pvdevice.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_xen_pvdevice = { .name = "xen-pvdevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, XenPVDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/ide/ahci-allwinner.c b/hw/ide/ahci-allwinner.c index 227e747ba722d2934090193f7e11db2095a604da..b173121006fe8e39c54d3230376195c1e3b26d0b 100644 --- a/hw/ide/ahci-allwinner.c +++ b/hw/ide/ahci-allwinner.c @@ -97,7 +97,7 @@ static const VMStateDescription vmstate_allwinner_ahci = { .name = "allwinner-ahci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AllwinnerAHCIState, ALLWINNER_AHCI_MMIO_SIZE / 4), VMSTATE_END_OF_LIST() diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index afdc44b8e056dc4e6f999d699b47d65e896f951c..0eb83a6d46972f9fbec22f329f6893cd5ac49f06 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -1685,7 +1685,7 @@ void ahci_reset(AHCIState *s) static const VMStateDescription vmstate_ncq_tfs = { .name = "ncq state", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sector_count, NCQTransferState), VMSTATE_UINT64(lba, NCQTransferState), VMSTATE_UINT8(tag, NCQTransferState), @@ -1700,7 +1700,7 @@ static const VMStateDescription vmstate_ncq_tfs = { static const VMStateDescription vmstate_ahci_device = { .name = "ahci port", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(port, AHCIDevice), VMSTATE_IDE_DRIVE(port.ifs[0], AHCIDevice), VMSTATE_UINT32(port_state, AHCIDevice), @@ -1817,7 +1817,7 @@ const VMStateDescription vmstate_ahci = { .name = "ahci", .version_id = 1, .post_load = ahci_state_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_INT32(dev, AHCIState, ports, vmstate_ahci_device, AHCIDevice), VMSTATE_UINT32(control_regs.cap, AHCIState), @@ -1833,7 +1833,7 @@ const VMStateDescription vmstate_ahci = { static const VMStateDescription vmstate_sysbus_ahci = { .name = "sysbus-ahci", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_AHCI(ahci, SysbusAHCIState), VMSTATE_END_OF_LIST() }, diff --git a/hw/ide/core.c b/hw/ide/core.c index 8a0579bff4192e37bcd0c7d8993f85e555f2c66f..9c4a8129028073a809313d8c6d73c99b0114bd7b 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -2918,7 +2918,7 @@ static const VMStateDescription vmstate_ide_atapi_gesn_state = { 
.version_id = 1, .minimum_version_id = 1, .needed = ide_atapi_gesn_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(events.new_media, IDEState), VMSTATE_BOOL(events.eject_request, IDEState), VMSTATE_END_OF_LIST() @@ -2930,7 +2930,7 @@ static const VMStateDescription vmstate_ide_tray_state = { .version_id = 1, .minimum_version_id = 1, .needed = ide_tray_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(tray_open, IDEState), VMSTATE_BOOL(tray_locked, IDEState), VMSTATE_END_OF_LIST() @@ -2944,7 +2944,7 @@ static const VMStateDescription vmstate_ide_drive_pio_state = { .pre_save = ide_drive_pio_pre_save, .post_load = ide_drive_pio_post_load, .needed = ide_drive_pio_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(req_nb_sectors, IDEState), VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1, vmstate_info_uint8, uint8_t), @@ -2962,7 +2962,7 @@ const VMStateDescription vmstate_ide_drive = { .version_id = 3, .minimum_version_id = 0, .post_load = ide_drive_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(mult_sectors, IDEState), VMSTATE_INT32(identify_set, IDEState), VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set), @@ -2985,7 +2985,7 @@ const VMStateDescription vmstate_ide_drive = { VMSTATE_UINT8_V(cdrom_changed, IDEState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ide_drive_pio_state, &vmstate_ide_tray_state, &vmstate_ide_atapi_gesn_state, @@ -2998,7 +2998,7 @@ static const VMStateDescription vmstate_ide_error_status = { .version_id = 2, .minimum_version_id = 1, .needed = ide_error_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(error_status, IDEBus), VMSTATE_INT64_V(retry_sector_num, IDEBus, 2), VMSTATE_UINT32_V(retry_nsector, IDEBus, 2), @@ -3011,12 +3011,12 @@ const VMStateDescription vmstate_ide_bus = { .name = "ide_bus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd, IDEBus), VMSTATE_UINT8(unit, IDEBus), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ide_error_status, NULL } diff --git a/hw/ide/ich.c b/hw/ide/ich.c index d61faab5323269814918bff17d6b5857b4204ece..49f8eb8a7d4d40f733730f9962a9c0630888a70d 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -83,7 +83,7 @@ static const VMStateDescription vmstate_ich9_ahci = { .name = "ich9_ahci", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, AHCIPCIState), VMSTATE_AHCI(ahci, AHCIPCIState), VMSTATE_END_OF_LIST() diff --git a/hw/ide/isa.c b/hw/ide/isa.c index ea60c081160cc88f12313dd04f3436885ffc5891..cc865c83dc540787e0b56f70c129a23ed3ce33ac 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -58,7 +58,7 @@ static const VMStateDescription vmstate_ide_isa = { .name = "isa-ide", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, ISAIDEState), VMSTATE_IDE_DRIVES(bus.ifs, ISAIDEState), VMSTATE_END_OF_LIST() diff --git a/hw/ide/macio.c b/hw/ide/macio.c index dca1cc9efc1d435a098d28395e652b04561374ec..0d2c6ba910bd4ac298c039f007bf44c4007955f2 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -361,7 +361,7 
@@ static const VMStateDescription vmstate_pmac = { .name = "ide", .version_id = 5, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, MACIOIDEState), VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState), VMSTATE_BOOL(dma_active, MACIOIDEState), diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 981cfbd97fd36e2b4adcf80432adb288d3883b0d..a7f415f0fce389020dd2708acb2488d38ac361ca 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -336,7 +336,7 @@ static const VMStateDescription vmstate_microdrive = { .name = "microdrive", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(opt, MicroDriveState), VMSTATE_UINT8(stat, MicroDriveState), VMSTATE_UINT8(pins, MicroDriveState), diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index 3aeacab3bb21462b0162715fbf8a7f360a7c66e7..e8f41c0610356f5dc3e83a6528601fda314f1b01 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -110,7 +110,7 @@ static const VMStateDescription vmstate_ide_mmio = { .name = "mmio-ide", .version_id = 3, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_IDE_BUS(bus, MMIOIDEState), VMSTATE_IDE_DRIVES(bus.ifs, MMIOIDEState), VMSTATE_END_OF_LIST() diff --git a/hw/ide/pci.c b/hw/ide/pci.c index 810c6b6d980f33ca7f543cf6f7b0559b7f526a6a..ca85d8474c14e8afb7aa2a3355f463cbb47779d4 100644 --- a/hw/ide/pci.c +++ b/hw/ide/pci.c @@ -501,7 +501,7 @@ static const VMStateDescription vmstate_bmdma_current = { .version_id = 1, .minimum_version_id = 1, .needed = ide_bmdma_current_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cur_addr, BMDMAState), VMSTATE_UINT32(cur_prd_last, BMDMAState), VMSTATE_UINT32(cur_prd_addr, BMDMAState), @@ -515,7 +515,7 @@ static const VMStateDescription vmstate_bmdma_status = { .version_id = 1, .minimum_version_id = 1, .needed = ide_bmdma_status_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(status, BMDMAState), VMSTATE_END_OF_LIST() } @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_bmdma = { .version_id = 3, .minimum_version_id = 0, .pre_save = ide_bmdma_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd, BMDMAState), VMSTATE_UINT8(migration_compat_status, BMDMAState), VMSTATE_UINT32(addr, BMDMAState), @@ -535,7 +535,7 @@ static const VMStateDescription vmstate_bmdma = { VMSTATE_UINT8(migration_retry_unit, BMDMAState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_bmdma_current, &vmstate_bmdma_status, NULL @@ -562,7 +562,7 @@ const VMStateDescription vmstate_ide_pci = { .version_id = 3, .minimum_version_id = 0, .post_load = ide_pci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState), VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0, vmstate_bmdma, BMDMAState), diff --git a/hw/input/adb-kbd.c b/hw/input/adb-kbd.c index e21edf9acd4fdc94c604e5bb9c7dce3f901466c8..758fa6d2676596235eb8f45890997fbe3ab5fd67 100644 --- a/hw/input/adb-kbd.c +++ b/hw/input/adb-kbd.c @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_adb_kbd = { .name = "adb_kbd", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, KBDState, 0, vmstate_adb_device, ADBDevice), 
VMSTATE_BUFFER(data, KBDState), VMSTATE_INT32(rptr, KBDState), diff --git a/hw/input/adb-mouse.c b/hw/input/adb-mouse.c index e6b341f0280809caab87dc6d61316dd979df442a..144a0ccce7149654cae6b422f082f092c960f5f4 100644 --- a/hw/input/adb-mouse.c +++ b/hw/input/adb-mouse.c @@ -217,7 +217,7 @@ static const VMStateDescription vmstate_adb_mouse = { .name = "adb_mouse", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MouseState, 0, vmstate_adb_device, ADBDevice), VMSTATE_INT32(buttons_state, MouseState), diff --git a/hw/input/adb.c b/hw/input/adb.c index 8aed0da2cd579a204bccff40b6bb2566f017918d..0f3c73d6d000f84fc00b470474c6b502a41fc00b 100644 --- a/hw/input/adb.c +++ b/hw/input/adb.c @@ -221,7 +221,7 @@ static const VMStateDescription vmstate_adb_bus = { .name = "adb_bus", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(autopoll_timer, ADBBusState), VMSTATE_BOOL(autopoll_enabled, ADBBusState), VMSTATE_UINT8(autopoll_rate_ms, ADBBusState), @@ -279,7 +279,7 @@ const VMStateDescription vmstate_adb_device = { .name = "adb_device", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(devaddr, ADBDevice), VMSTATE_INT32(handler, ADBDevice), VMSTATE_END_OF_LIST() diff --git a/hw/input/ads7846.c b/hw/input/ads7846.c index 91116c6bdbde36a9ac40c4a0179b72f34ad8b64b..cde38922165c1a0ce5663f45b1e0857c1be7b3a2 100644 --- a/hw/input/ads7846.c +++ b/hw/input/ads7846.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_ads7846 = { .version_id = 1, .minimum_version_id = 1, .post_load = ads7856_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SSI_PERIPHERAL(ssidev, ADS7846State), VMSTATE_INT32_ARRAY(input, ADS7846State, 8), VMSTATE_INT32(noise, ADS7846State), diff --git a/hw/input/hid.c b/hw/input/hid.c index b8e85374cab19f44efb543b9d84a0991de9a04b7..76bedc18443bffd4780b2058252d50c640628aa5 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -581,7 +581,7 @@ static const VMStateDescription vmstate_hid_ptr_queue = { .name = "HIDPointerEventQueue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(xdx, HIDPointerEvent), VMSTATE_INT32(ydy, HIDPointerEvent), VMSTATE_INT32(dz, HIDPointerEvent), @@ -595,7 +595,7 @@ const VMStateDescription vmstate_hid_ptr_device = { .version_id = 1, .minimum_version_id = 1, .post_load = hid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(ptr.queue, HIDState, QUEUE_LENGTH, 0, vmstate_hid_ptr_queue, HIDPointerEvent), VMSTATE_UINT32(head, HIDState), @@ -611,7 +611,7 @@ const VMStateDescription vmstate_hid_keyboard_device = { .version_id = 1, .minimum_version_id = 1, .post_load = hid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(kbd.keycodes, HIDState, QUEUE_LENGTH), VMSTATE_UINT32(head, HIDState), VMSTATE_UINT32(n, HIDState), diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index 6075121b72ceb943ca6ad2f8de9bdb195974694e..d9f8c36778dd81110911c7f12d18eda36dfa7e80 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_lasips2_port = { .name = "lasips2-port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT8(control, LASIPS2Port), VMSTATE_UINT8(buf, LASIPS2Port), VMSTATE_BOOL(loopback_rbne, LASIPS2Port), @@ -51,7 +51,7 @@ static const VMStateDescription vmstate_lasips2 = { .name = "lasips2", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(int_status, LASIPS2State), VMSTATE_STRUCT(kbd_port.parent_obj, LASIPS2State, 1, vmstate_lasips2_port, LASIPS2Port), diff --git a/hw/input/lm832x.c b/hw/input/lm832x.c index 19a646d9bb4eea23fc3196dad9c5c178c62420b7..59e5567afd527a00a87c892b0d4b1488328d6b19 100644 --- a/hw/input/lm832x.c +++ b/hw/input/lm832x.c @@ -441,7 +441,7 @@ static const VMStateDescription vmstate_lm_kbd = { .version_id = 0, .minimum_version_id = 0, .post_load = lm_kbd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, LM823KbdState), VMSTATE_UINT8(i2c_dir, LM823KbdState), VMSTATE_UINT8(i2c_cycle, LM823KbdState), diff --git a/hw/input/pckbd.c b/hw/input/pckbd.c index b92b63bedca324fce039dc9a71e8cbf3ddd9690c..90a4d9eb406f7779a59bb8d20bd8c68c196ed82f 100644 --- a/hw/input/pckbd.c +++ b/hw/input/pckbd.c @@ -510,7 +510,7 @@ static const VMStateDescription vmstate_kbd_outport = { .minimum_version_id = 1, .post_load = kbd_outport_post_load, .needed = kbd_outport_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(outport, KBDState), VMSTATE_END_OF_LIST() } @@ -552,7 +552,7 @@ static const VMStateDescription vmstate_kbd_extended_state = { .post_load = kbd_extended_state_post_load, .pre_save = kbd_extended_state_pre_save, .needed = kbd_extended_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(migration_flags, KBDState), VMSTATE_UINT32(obsrc, KBDState), VMSTATE_UINT8(obdata, KBDState), @@ -619,14 +619,14 @@ static const VMStateDescription vmstate_kbd = { .pre_load = kbd_pre_load, .post_load = kbd_post_load, .pre_save = kbd_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(write_cmd, KBDState), VMSTATE_UINT8(status, KBDState), VMSTATE_UINT8(mode, KBDState), VMSTATE_UINT8(pending_tmp, KBDState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_kbd_outport, &vmstate_kbd_extended_state, NULL @@ -745,7 +745,7 @@ static const VMStateDescription vmstate_kbd_mmio = { .name = "pckbd-mmio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(kbd, MMIOKBDState, 0, vmstate_kbd, KBDState), VMSTATE_END_OF_LIST() } @@ -786,7 +786,7 @@ static const VMStateDescription vmstate_kbd_isa = { .name = "pckbd", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(kbd, ISAKBDState, 0, vmstate_kbd, KBDState), VMSTATE_END_OF_LIST() } diff --git a/hw/input/pl050.c b/hw/input/pl050.c index ec5e19285e3b524d27c8dae29b91d7cfe94c00b6..6519e260ed522a964ddee59f2c554cbe26953397 100644 --- a/hw/input/pl050.c +++ b/hw/input/pl050.c @@ -30,7 +30,7 @@ static const VMStateDescription vmstate_pl050 = { .name = "pl050", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, PL050State), VMSTATE_UINT32(clk, PL050State), VMSTATE_UINT32(last, PL050State), diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 
c8fd23cf3600f415fe68e9259ed57ebaa09a8863..00b695a0b9758f1b36286b44972173f68c9ea2dc 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -1093,7 +1093,7 @@ static const VMStateDescription vmstate_ps2_common = { .name = "PS2 Common State", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(write_cmd, PS2State), VMSTATE_INT32(queue.rptr, PS2State), VMSTATE_INT32(queue.wptr, PS2State), @@ -1124,7 +1124,7 @@ static const VMStateDescription vmstate_ps2_keyboard_ledstate = { .minimum_version_id = 2, .post_load = ps2_kbd_ledstate_post_load, .needed = ps2_keyboard_ledstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(ledstate, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1141,7 +1141,7 @@ static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = { .version_id = 1, .minimum_version_id = 1, .needed = ps2_keyboard_need_high_bit_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(need_high_bit, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1158,7 +1158,7 @@ static bool ps2_keyboard_cqueue_needed(void *opaque) static const VMStateDescription vmstate_ps2_keyboard_cqueue = { .name = "ps2kbd/command_reply_queue", .needed = ps2_keyboard_cqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(parent_obj.queue.cwptr, PS2KbdState), VMSTATE_END_OF_LIST() } @@ -1183,7 +1183,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { .version_id = 3, .minimum_version_id = 2, .post_load = ps2_kbd_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, PS2KbdState, 0, vmstate_ps2_common, PS2State), VMSTATE_INT32(scan_enabled, PS2KbdState), @@ -1191,7 +1191,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { VMSTATE_INT32_V(scancode_set, PS2KbdState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_ps2_keyboard_ledstate, &vmstate_ps2_keyboard_need_high_bit, &vmstate_ps2_keyboard_cqueue, @@ -1214,7 +1214,7 @@ static const VMStateDescription vmstate_ps2_mouse = { .version_id = 2, .minimum_version_id = 2, .post_load = ps2_mouse_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, PS2MouseState, 0, vmstate_ps2_common, PS2State), VMSTATE_UINT8(mouse_status, PS2MouseState), diff --git a/hw/input/pxa2xx_keypad.c b/hw/input/pxa2xx_keypad.c index 3dd03e8c9f479330165223c037557ac3a9e58deb..3858648d9f6340dd191affbff124a0e4b215a74e 100644 --- a/hw/input/pxa2xx_keypad.c +++ b/hw/input/pxa2xx_keypad.c @@ -288,7 +288,7 @@ static const VMStateDescription vmstate_pxa2xx_keypad = { .name = "pxa2xx_keypad", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(kpc, PXA2xxKeyPadState), VMSTATE_UINT32(kpdk, PXA2xxKeyPadState), VMSTATE_UINT32(kprec, PXA2xxKeyPadState), diff --git a/hw/input/stellaris_gamepad.c b/hw/input/stellaris_gamepad.c index 9dfa620e29a131e21b085ea6637490f090155ff3..17ee42b9fce4d9e2eee2b48a62a2c95e5e73c867 100644 --- a/hw/input/stellaris_gamepad.c +++ b/hw/input/stellaris_gamepad.c @@ -35,7 +35,7 @@ static const VMStateDescription vmstate_stellaris_gamepad = { .name = "stellaris_gamepad", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(pressed, StellarisGamepad, 
num_buttons, 0, vmstate_info_uint8, uint8_t), VMSTATE_END_OF_LIST() diff --git a/hw/input/tsc2005.c b/hw/input/tsc2005.c index db2b80e35f5ca06e5683c39219122aa16d15e305..941f163d36466b2f4a375d6d4ab60abd4af28858 100644 --- a/hw/input/tsc2005.c +++ b/hw/input/tsc2005.c @@ -454,7 +454,7 @@ static const VMStateDescription vmstate_tsc2005 = { .version_id = 2, .minimum_version_id = 2, .post_load = tsc2005_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_BOOL(pressure, TSC2005State), VMSTATE_BOOL(irq, TSC2005State), VMSTATE_BOOL(command, TSC2005State), diff --git a/hw/input/tsc210x.c b/hw/input/tsc210x.c index 950506fb38226be79cca9309a394f37f94cf6dd7..c4e32c7a42f1f3efa7e69087473662b69b3097d4 100644 --- a/hw/input/tsc210x.c +++ b/hw/input/tsc210x.c @@ -1017,7 +1017,7 @@ static int tsc210x_post_load(void *opaque, int version_id) return 0; } -static VMStateField vmstatefields_tsc210x[] = { +static const VMStateField vmstatefields_tsc210x[] = { VMSTATE_BOOL(enabled, TSC210xState), VMSTATE_BOOL(host_mode, TSC210xState), VMSTATE_BOOL(irq, TSC210xState), diff --git a/hw/input/virtio-input.c b/hw/input/virtio-input.c index 5b5398b3cacfc9c4c5da5f171b455a90c9fd78c5..3bcdae41b2f035ecd4f851c4ed2f16f6aefdd7e3 100644 --- a/hw/input/virtio-input.c +++ b/hw/input/virtio-input.c @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_virtio_input = { .name = "virtio-input", .minimum_version_id = VIRTIO_INPUT_VM_VERSION, .version_id = VIRTIO_INPUT_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/intc/allwinner-a10-pic.c b/hw/intc/allwinner-a10-pic.c index d0bf8d545bae5ddb185a2a4d1b34977d143fa017..cea559c39ddf51fc6435e52a0bdbd8c5de87cfa4 100644 --- a/hw/intc/allwinner-a10-pic.c +++ b/hw/intc/allwinner-a10-pic.c @@ -142,7 +142,7 @@ static const VMStateDescription vmstate_aw_a10_pic = { .name = "a10.pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vector, AwA10PICState), VMSTATE_UINT32(base_addr, AwA10PICState), VMSTATE_UINT32(protect, AwA10PICState), diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index bccb4241c2d49ecf7f771a8ce20f007a24c72b28..6c100b48d6b2436db559f26a21abd7fd3ca0f633 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_apic_common_sipi = { .version_id = 1, .minimum_version_id = 1, .needed = apic_common_sipi_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(sipi_vector, APICCommonState), VMSTATE_INT32(wait_for_sipi, APICCommonState), VMSTATE_END_OF_LIST() @@ -363,7 +363,7 @@ static const VMStateDescription vmstate_apic_common = { .pre_load = apic_pre_load, .pre_save = apic_dispatch_pre_save, .post_load = apic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(apicbase, APICCommonState), VMSTATE_UINT8(id, APICCommonState), VMSTATE_UINT8(arb_id, APICCommonState), @@ -386,7 +386,7 @@ static const VMStateDescription vmstate_apic_common = { APICCommonState), /* open-coded timer state */ VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_apic_common_sipi, NULL } diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c index 7c28504ace845d212408d736ee17411b1188fff9..94c173cb071aa44df684c26cabc78984bf8104fc 100644 --- 
a/hw/intc/arm_gic_common.c +++ b/hw/intc/arm_gic_common.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_gic_irq_state = { .name = "arm_gic_irq_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(enabled, gic_irq_state), VMSTATE_UINT8(pending, gic_irq_state), VMSTATE_UINT8(active, gic_irq_state), @@ -79,7 +79,7 @@ static const VMStateDescription vmstate_gic_virt_state = { .version_id = 1, .minimum_version_id = 1, .needed = gic_virt_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Virtual interface */ VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU), VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU), @@ -104,7 +104,7 @@ static const VMStateDescription vmstate_gic = { .minimum_version_id = 12, .pre_save = gic_pre_save, .post_load = gic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctlr, GICState), VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU), VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1, @@ -122,7 +122,7 @@ static const VMStateDescription vmstate_gic = { VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gic_virt_state, NULL } diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 2ebf880eada850e7880aa478ec0b44bafa5fe9ef..cb55c726810c417afab92b2b098164e9b8500c36 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -105,7 +105,7 @@ static const VMStateDescription vmstate_gicv3_cpu_virt = { .version_id = 1, .minimum_version_id = 1, .needed = virt_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4), VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState), VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX), @@ -139,7 +139,7 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { .version_id = 1, .minimum_version_id = 1, .needed = icc_sre_el1_reg_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(icc_sre_el1, GICv3CPUState), VMSTATE_END_OF_LIST() } @@ -157,7 +157,7 @@ const VMStateDescription vmstate_gicv3_gicv4 = { .version_id = 1, .minimum_version_id = 1, .needed = gicv4_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState), VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState), VMSTATE_END_OF_LIST() @@ -169,7 +169,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { .version_id = 1, .minimum_version_id = 1, .pre_load = vmstate_gicv3_cpu_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, GICv3CPUState), VMSTATE_UINT32(gicr_ctlr, GICv3CPUState), VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2), @@ -192,7 +192,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gicv3_cpu_virt, &vmstate_gicv3_cpu_sre_el1, &vmstate_gicv3_gicv4, @@ -232,7 +232,7 @@ const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = { .version_id = 1, .minimum_version_id = 1, .needed = needed_always, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State), VMSTATE_END_OF_LIST() } @@ -246,7 +246,7 @@ static const VMStateDescription vmstate_gicv3 = { .pre_save = gicv3_pre_save, .post_load = gicv3_post_load, .priority = MIG_PRI_GICV3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gicd_ctlr, GICv3State), VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2), VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE), @@ -264,7 +264,7 @@ static const VMStateDescription vmstate_gicv3 = { vmstate_gicv3_cpu, GICv3CPUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_gicv3_gicd_no_migration_shift_bug, NULL } diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index ab1a00508e6f178c6059c8644492a4b455febd87..e1a60d8c15b0b83c4ca95d2a8d2fbba08113609a 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -934,7 +934,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs) ARMCPU *cpu = ARM_CPU(cs->cpu); CPUARMState *env = &cpu->env; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); trace_gicv3_cpuif_update(gicv3_redist_affid(cs), cs->hppi.irq, cs->hppi.grp, cs->hppi.prio); @@ -1434,16 +1434,25 @@ static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri, idx = icv_find_active(cs, irq); if (idx < 0) { - /* No valid list register corresponding to EOI ID */ - icv_increment_eoicount(cs); + /* + * No valid list register corresponding to EOI ID; if this is a vLPI + * not in the list regs then do nothing; otherwise increment EOI count + */ + if (irq < GICV3_LPI_INTID_START) { + icv_increment_eoicount(cs); + } } else { uint64_t lr = cs->ich_lr_el2[idx]; int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp); if (thisgrp == grp && lr_gprio == dropprio) { - if (!icv_eoi_split(env, cs)) { - /* Priority drop and deactivate not split: deactivate irq now */ + if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) { + /* + * Priority drop and deactivate not split: deactivate irq now. + * LPIs always get their active state cleared immediately + * because no separate deactivate is expected. 
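Because the removed and added lines interleave here, the shape of the EOI handling in icv_eoir_write() after this change may be easier to follow in condensed form. This is only a restatement of the patched code for readability, with the derivation of lr, thisgrp and lr_gprio from the matched list register elided:

    if (idx < 0) {
        /* No matching list register: only non-LPI INTIDs bump EOIcount */
        if (irq < GICV3_LPI_INTID_START) {
            icv_increment_eoicount(cs);
        }
    } else if (thisgrp == grp && lr_gprio == dropprio) {
        if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
            /*
             * Deactivate now: either priority drop and deactivation are
             * not split, or this is a vLPI, which has no separate
             * deactivate phase.
             */
            icv_deactivate_irq(cs, idx);
        }
    }
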
+ */ icv_deactivate_irq(cs, idx); } } @@ -2675,6 +2684,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_AP0R0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x480, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2682,6 +2692,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_AP1R0_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4a0, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2689,6 +2700,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_HCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 0, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4c0, .access = PL2_RW, .readfn = ich_hcr_read, .writefn = ich_hcr_write, @@ -2720,6 +2732,7 @@ static const ARMCPRegInfo gicv3_cpuif_hcr_reginfo[] = { { .name = "ICH_VMCR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 11, .opc2 = 7, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4c8, .access = PL2_RW, .readfn = ich_vmcr_read, .writefn = ich_vmcr_write, @@ -2730,6 +2743,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { { .name = "ICH_AP0R1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x488, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2737,6 +2751,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr1_reginfo[] = { { .name = "ICH_AP1R1_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4a8, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2747,6 +2762,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP0R2_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x490, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2754,6 +2770,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP0R3_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 8, .opc2 = 3, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x498, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2761,6 +2778,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP1R2_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4b0, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2768,6 +2786,7 @@ static const ARMCPRegInfo gicv3_cpuif_ich_apxr23_reginfo[] = { { .name = "ICH_AP1R3_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 9, .opc2 = 3, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x4b8, .access = PL2_RW, .readfn = ich_ap_read, .writefn = ich_ap_write, @@ -2889,6 +2908,7 @@ void gicv3_init_cpuif(GICv3State *s) .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 12 + (j >> 3), .opc2 = j & 7, .type = ARM_CP_IO | ARM_CP_NO_RAW, + .nv2_redirect_offset = 0x400 + 8 * j, .access = PL2_RW, .readfn = ich_lr_read, .writefn = ich_lr_write, 
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c index fddd6d490c2c9178b860c4fa279382f6768114b3..331d6b93cc12a45345286606d2efeb6253208de6 100644 --- a/hw/intc/arm_gicv3_its_common.c +++ b/hw/intc/arm_gicv3_its_common.c @@ -54,7 +54,7 @@ static const VMStateDescription vmstate_its = { .pre_save = gicv3_its_pre_save, .post_load = gicv3_its_post_load, .priority = MIG_PRI_GICV3_ITS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctlr, GICv3ITSState), VMSTATE_UINT32(iidr, GICv3ITSState), VMSTATE_UINT64(cbaser, GICv3ITSState), diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index f7df602cfffbcad268cd819d233fdd221572915e..3befc960db2e2c8ecb6849f6d4ecdec429e7d50b 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/intc/arm_gicv3_its_common.h" #include "hw/qdev-properties.h" #include "sysemu/runstate.h" diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 942be7bd112904abac8a1a1253694dd213aefd31..404a445138a8217bfa3a8b38c6e4be250fb25f40 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -2498,7 +2498,7 @@ static const VMStateDescription vmstate_VecInfo = { .name = "armv7m_nvic_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(prio, VecInfo), VMSTATE_UINT8(enabled, VecInfo), VMSTATE_UINT8(pending, VecInfo), @@ -2543,7 +2543,7 @@ static const VMStateDescription vmstate_nvic_security = { .minimum_version_id = 1, .needed = nvic_security_needed, .post_load = &nvic_security_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1, vmstate_VecInfo, VecInfo), VMSTATE_UINT32(prigroup[M_REG_S], NVICState), @@ -2557,13 +2557,13 @@ static const VMStateDescription vmstate_nvic = { .version_id = 4, .minimum_version_id = 4, .post_load = &nvic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1, vmstate_VecInfo, VecInfo), VMSTATE_UINT32(prigroup[M_REG_NS], NVICState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_nvic_security, NULL } @@ -2572,6 +2572,11 @@ static const VMStateDescription vmstate_nvic = { static Property props_nvic[] = { /* Number of external IRQ lines (so excluding the 16 internal exceptions) */ DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64), + /* + * Number of the maximum priority bits that can be used. 0 means + * to use a reasonable default. + */ + DEFINE_PROP_UINT8("num-prio-bits", NVICState, num_prio_bits, 0), DEFINE_PROP_END_OF_LIST() }; @@ -2685,7 +2690,23 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp) /* include space for internal exception vectors */ s->num_irq += NVIC_FIRST_IRQ; - s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + if (s->num_prio_bits == 0) { + /* + * If left unspecified, use 2 bits by default on Cortex-M0/M0+/M1 + * and 8 bits otherwise. + */ + s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2; + } else { + uint8_t min_prio_bits = + arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 
3 : 2; + if (s->num_prio_bits < min_prio_bits || s->num_prio_bits > 8) { + error_setg(errp, + "num-prio-bits %d is outside " + "NVIC acceptable range [%d-8]", + s->num_prio_bits, min_prio_bits); + return; + } + } /* * This device provides a single memory region which covers the diff --git a/hw/intc/aspeed_vic.c b/hw/intc/aspeed_vic.c index 5ba06c52628e2731d1bf9ec10406dd2449dcdedb..ba1d953c2cf96912f0eadf28dd30898e0f142616 100644 --- a/hw/intc/aspeed_vic.c +++ b/hw/intc/aspeed_vic.c @@ -326,7 +326,7 @@ static const VMStateDescription vmstate_aspeed_vic = { .name = "aspeed.new-vic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(level, AspeedVICState), VMSTATE_UINT64(raw, AspeedVICState), VMSTATE_UINT64(select, AspeedVICState), diff --git a/hw/intc/bcm2835_ic.c b/hw/intc/bcm2835_ic.c index 4513fad16f2e5c4821f760d34fc37f327a2e8599..2c2e2b1822c64c6c599caf77266dca98b540d9d6 100644 --- a/hw/intc/bcm2835_ic.c +++ b/hw/intc/bcm2835_ic.c @@ -208,7 +208,7 @@ static const VMStateDescription vmstate_bcm2835_ic = { .name = TYPE_BCM2835_IC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(gpu_irq_level, BCM2835ICState), VMSTATE_UINT64(gpu_irq_enable, BCM2835ICState), VMSTATE_UINT8(arm_irq_level, BCM2835ICState), diff --git a/hw/intc/bcm2836_control.c b/hw/intc/bcm2836_control.c index b0589df1881dbf6144603a64cb246e1f1618f79a..81faf032b0e387af48089fe17ec3c9cb4ea66490 100644 --- a/hw/intc/bcm2836_control.c +++ b/hw/intc/bcm2836_control.c @@ -369,7 +369,7 @@ static const VMStateDescription vmstate_bcm2836_control = { .name = TYPE_BCM2836_CONTROL, .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mailboxes, BCM2836ControlState, BCM2836_NCORES * BCM2836_MBPERCORE), VMSTATE_UINT8(route_gpu_irq, BCM2836ControlState), diff --git a/hw/intc/exynos4210_combiner.c b/hw/intc/exynos4210_combiner.c index 4ba448fdb196420783c7c372ec8c7d6d64bf9fbb..f0d310a0ebc68612c61260f53bd67dd175588c86 100644 --- a/hw/intc/exynos4210_combiner.c +++ b/hw/intc/exynos4210_combiner.c @@ -54,7 +54,7 @@ static const VMStateDescription vmstate_exynos4210_combiner_group_state = { .name = "exynos4210.combiner.groupstate", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(src_mask, CombinerGroupState), VMSTATE_UINT8(src_pending, CombinerGroupState), VMSTATE_END_OF_LIST() @@ -65,7 +65,7 @@ static const VMStateDescription vmstate_exynos4210_combiner = { .name = "exynos4210.combiner", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(group, Exynos4210CombinerState, IIC_NGRP, 0, vmstate_exynos4210_combiner_group_state, CombinerGroupState), VMSTATE_UINT32_ARRAY(reg_set, Exynos4210CombinerState, diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index dfd53275f692435ddfd9627fbb777cdb953c5aaa..d662dfeb99d24e33814395669af3ba33e7d96923 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -161,7 +161,7 @@ static const VMStateDescription vmstate_goldfish_pic = { .name = "goldfish_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pending, GoldfishPICState), VMSTATE_UINT32(enabled, GoldfishPICState), VMSTATE_END_OF_LIST() diff --git a/hw/intc/heathrow_pic.c 
b/hw/intc/heathrow_pic.c index 13048a2735402f47122447f87205a38e89b2730e..c2946ba1ad5b90c90a33f5b025abae01ff6088c7 100644 --- a/hw/intc/heathrow_pic.c +++ b/hw/intc/heathrow_pic.c @@ -141,7 +141,7 @@ static const VMStateDescription vmstate_heathrow_pic_one = { .name = "heathrow_pic_one", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(events, HeathrowPICState), VMSTATE_UINT32(mask, HeathrowPICState), VMSTATE_UINT32(levels, HeathrowPICState), @@ -154,7 +154,7 @@ static const VMStateDescription vmstate_heathrow = { .name = "heathrow_pic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(pics, HeathrowState, 2, 1, vmstate_heathrow_pic_one, HeathrowPICState), VMSTATE_END_OF_LIST() diff --git a/hw/intc/i8259_common.c b/hw/intc/i8259_common.c index c931dc2d07c8c5dc92b738ee6f6c9083eca9662f..ee0041115c3306b37cd2d8c6a32b6e340ee6fd74 100644 --- a/hw/intc/i8259_common.c +++ b/hw/intc/i8259_common.c @@ -156,7 +156,7 @@ static const VMStateDescription vmstate_pic_ltim = { .version_id = 1, .minimum_version_id = 1, .needed = ltim_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ltim, PICCommonState), VMSTATE_END_OF_LIST() } @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_pic_common = { .minimum_version_id = 1, .pre_save = pic_dispatch_pre_save, .post_load = pic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(last_irr, PICCommonState), VMSTATE_UINT8(irr, PICCommonState), VMSTATE_UINT8(imr, PICCommonState), @@ -187,7 +187,7 @@ static const VMStateDescription vmstate_pic_common = { VMSTATE_UINT8(elcr, PICCommonState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pic_ltim, NULL } diff --git a/hw/intc/imx_avic.c b/hw/intc/imx_avic.c index 63fc602a1a9a0fc2879d502c90d213ff45ec8cac..aedc708bed4bda78fcd24d80dcad4aa3b9bc45a8 100644 --- a/hw/intc/imx_avic.c +++ b/hw/intc/imx_avic.c @@ -38,7 +38,7 @@ static const VMStateDescription vmstate_imx_avic = { .name = TYPE_IMX_AVIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(pending, IMXAVICState), VMSTATE_UINT64(enabled, IMXAVICState), VMSTATE_UINT64(is_fiq, IMXAVICState), diff --git a/hw/intc/imx_gpcv2.c b/hw/intc/imx_gpcv2.c index 237d5f97eba3ba0043fdf62770a575c81970e87b..af45e5194c4bf86c7d53b54a0ba3c8ddebed893f 100644 --- a/hw/intc/imx_gpcv2.c +++ b/hw/intc/imx_gpcv2.c @@ -96,7 +96,7 @@ static const VMStateDescription vmstate_imx_gpcv2 = { .name = TYPE_IMX_GPCV2, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMXGPCv2State, GPC_NUM), VMSTATE_END_OF_LIST() }, diff --git a/hw/intc/ioapic_common.c b/hw/intc/ioapic_common.c index b05f436dac23466c2ac9d09433310c28c8c26af3..cb9bf6214608a8493d9eae340a0a1e38b76f25f9 100644 --- a/hw/intc/ioapic_common.c +++ b/hw/intc/ioapic_common.c @@ -182,7 +182,7 @@ static const VMStateDescription vmstate_ioapic_common = { .minimum_version_id = 1, .pre_save = ioapic_dispatch_pre_save, .post_load = ioapic_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(id, IOAPICCommonState), VMSTATE_UINT8(ioregsel, IOAPICCommonState), VMSTATE_UNUSED_V(2, 8), /* to account for 
qemu-kvm's v2 format */ diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 24fb3af8cc31fceda1169b9e26f549ac6b299a46..bdfa3b481e7bbe05031671289b0187362f26bf63 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/log.h" +#include "qapi/error.h" #include "hw/irq.h" #include "hw/sysbus.h" #include "hw/loongarch/virt.h" @@ -32,23 +33,23 @@ static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) if (((s->enable[irq_index]) & irq_mask) == 0) { return; } - s->coreisr[cpu][irq_index] |= irq_mask; - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); - set_bit(irq, s->sw_isr[cpu][ipnum]); + s->cpu[cpu].coreisr[irq_index] |= irq_mask; + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); + set_bit(irq, s->cpu[cpu].sw_isr[ipnum]); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } else { - s->coreisr[cpu][irq_index] &= ~irq_mask; - clear_bit(irq, s->sw_isr[cpu][ipnum]); - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); + s->cpu[cpu].coreisr[irq_index] &= ~irq_mask; + clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]); + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } - qemu_set_irq(s->parent_irq[cpu][ipnum], level); + qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level); } static void extioi_setirq(void *opaque, int irq, int level) @@ -96,7 +97,7 @@ static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - *data = s->coreisr[cpu][index]; + *data = s->cpu[cpu].coreisr[index]; break; case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1: index = (offset - EXTIOI_COREMAP_START) >> 2; @@ -129,12 +130,66 @@ static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ } } +static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, + uint64_t val, bool notify) +{ + int i, cpu; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + + for (i = 0; i < 4; i++) { + cpu = val & 0xff; + cpu = ctz32(cpu); + cpu = (cpu >= 4) ? 0 : cpu; + val = val >> 8; + + if (s->sw_coremap[irq + i] == cpu) { + continue; + } + + if (notify && test_bit(irq, (unsigned long *)s->isr)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } +} + +static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index, + uint64_t val) +{ + int i; + uint8_t ipnum; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + for (i = 0; i < 4; i++) { + ipnum = val & 0xff; + ipnum = ctz32(ipnum); + ipnum = (ipnum >= 4) ? 
0 : ipnum; + s->sw_ipmap[index * 4 + i] = ipnum; + val = val >> 8; + } +} + static MemTxResult extioi_writew(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); - int i, cpu, index, old_data, irq; + int cpu, index, old_data, irq; uint32_t offset; trace_loongarch_extioi_writew(addr, val); @@ -152,20 +207,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, */ index = (offset - EXTIOI_IPMAP_START) >> 2; s->ipmap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - for (i = 0; i < 4; i++) { - uint8_t ipnum; - ipnum = val & 0xff; - ipnum = ctz32(ipnum); - ipnum = (ipnum >= 4) ? 0 : ipnum; - s->sw_ipmap[index * 4 + i] = ipnum; - val = val >> 8; - } - + extioi_update_sw_ipmap(s, index, val); break; case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1: index = (offset - EXTIOI_ENABLE_START) >> 2; @@ -189,8 +231,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - old_data = s->coreisr[cpu][index]; - s->coreisr[cpu][index] = old_data & ~val; + old_data = s->cpu[cpu].coreisr[index]; + s->cpu[cpu].coreisr[index] = old_data & ~val; /* write 1 to clear interrupt */ old_data &= val; irq = ctz32(old_data); @@ -204,33 +246,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, irq = offset - EXTIOI_COREMAP_START; index = irq / 4; s->coremap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - - for (i = 0; i < 4; i++) { - cpu = val & 0xff; - cpu = ctz32(cpu); - cpu = (cpu >= 4) ? 
0 : cpu; - val = val >> 8; - - if (s->sw_coremap[irq + i] == cpu) { - continue; - } - - if (test_bit(irq, (unsigned long *)s->isr)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq + i, 0); - s->sw_coremap[irq + i] = cpu; - extioi_update_irq(s, irq + i, 1); - } else { - s->sw_coremap[irq + i] = cpu; - } - } + + extioi_update_sw_coremap(s, irq, val, true); break; default: break; @@ -248,65 +265,112 @@ static const MemoryRegionOps extioi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static const VMStateDescription vmstate_loongarch_extioi = { - .name = TYPE_LOONGARCH_EXTIOI, +static void loongarch_extioi_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i, pin; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } + + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + sysbus_init_mmio(sbd, &s->extioi_system_mem); + s->cpu = g_new0(ExtIOICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1); + } + } +} + +static void loongarch_extioi_finalize(Object *obj) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); + + g_free(s->cpu); +} + +static int vmstate_extioi_post_load(void *opaque, int version_id) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + int i, start_irq; + + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { + start_irq = i * 4; + extioi_update_sw_coremap(s, start_irq, s->coremap[i], false); + } + + for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) { + extioi_update_sw_ipmap(s, i, s->ipmap[i]); + } + + return 0; +} + +static const VMStateDescription vmstate_extioi_core = { + .name = "extioi-core", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongarch_extioi = { + .name = TYPE_LOONGARCH_EXTIOI, + .version_id = 2, + .minimum_version_id = 2, + .post_load = vmstate_extioi_post_load, + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS, - EXTIOI_IRQS_GROUP_COUNT), VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, EXTIOI_IRQS_NODETYPE_COUNT / 2), VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4), VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4), - VMSTATE_UINT8_ARRAY(sw_ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE), - VMSTATE_UINT8_ARRAY(sw_coremap, LoongArchExtIOI, EXTIOI_IRQS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu, + vmstate_extioi_core, ExtIOICore), VMSTATE_END_OF_LIST() } }; -static void loongarch_extioi_instance_init(Object *obj) -{ - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); - int i, cpu, pin; - - for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(dev, &s->irq[i]); - } - - 
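
The new extioi vmstate drops sw_ipmap/sw_coremap from the wire format and instead recomputes them in vmstate_extioi_post_load() from the migrated ipmap/coremap registers. A minimal self-contained sketch of that idea, with invented names (Ctl, lowest_set_bit) and a single byte of routing state per IRQ, might look like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_IRQS 8

typedef struct {
    uint8_t coremap[NR_IRQS];    /* architected register state (migrated) */
    uint8_t sw_coremap[NR_IRQS]; /* derived cache (rebuilt after load) */
} Ctl;

static int lowest_set_bit(uint8_t m)
{
    for (int i = 0; i < 8; i++) {
        if (m & (1u << i)) {
            return i;
        }
    }
    return 0;                    /* no bit set: fall back to core 0 */
}

static int ctl_post_load(Ctl *s)
{
    /* recompute the software routing cache from the migrated registers */
    for (int i = 0; i < NR_IRQS; i++) {
        s->sw_coremap[i] = lowest_set_bit(s->coremap[i]);
    }
    return 0;
}

int main(void)
{
    Ctl s = { .coremap = { 0x01, 0x02, 0x04, 0x08, 0x01, 0x02, 0x04, 0x08 } };

    memset(s.sw_coremap, 0xff, sizeof(s.sw_coremap)); /* garbage, as after load */
    ctl_post_load(&s);                                /* as if after migration */
    for (int i = 0; i < NR_IRQS; i++) {
        printf("irq %d -> core %d\n", i, s.sw_coremap[i]);
    }
    return 0;
}
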
qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); - - for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) { - memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, - s, "extioi_iocsr", 0x900); - sysbus_init_mmio(dev, &s->extioi_iocsr_mem[cpu]); - for (pin = 0; pin < LS3A_INTC_IP; pin++) { - qdev_init_gpio_out(DEVICE(obj), &s->parent_irq[cpu][pin], 1); - } - } - memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, - s, "extioi_system_mem", 0x900); - sysbus_init_mmio(dev, &s->extioi_system_mem); -} +static Property extioi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; static void loongarch_extioi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_extioi_realize; + device_class_set_props(dc, extioi_properties); dc->vmsd = &vmstate_loongarch_extioi; } static const TypeInfo loongarch_extioi_info = { .name = TYPE_LOONGARCH_EXTIOI, .parent = TYPE_SYS_BUS_DEVICE, - .instance_init = loongarch_extioi_instance_init, .instance_size = sizeof(struct LoongArchExtIOI), .class_init = loongarch_extioi_class_init, + .instance_finalize = loongarch_extioi_finalize, }; static void loongarch_extioi_register_types(void) diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index 67858b521c6a4ce68c1bc26b7ac6fba9e083db06..a184112b09239bee8099b224f81346970c8f8e41 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -9,6 +9,7 @@ #include "hw/sysbus.h" #include "hw/intc/loongarch_ipi.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/log.h" #include "exec/address-spaces.h" @@ -17,14 +18,16 @@ #include "target/loongarch/internals.h" #include "trace.h" -static void loongarch_ipi_writel(void *, hwaddr, uint64_t, unsigned); - -static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) +static MemTxResult loongarch_ipi_readl(void *opaque, hwaddr addr, + uint64_t *data, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + IPICore *s; + LoongArchIPI *ipi = opaque; uint64_t ret = 0; int index = 0; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; switch (addr) { case CORE_STATUS_OFF: @@ -49,10 +52,12 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) } trace_loongarch_ipi_read(size, (uint64_t)addr, ret); - return ret; + *data = ret; + return MEMTX_OK; } -static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) +static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr, + MemTxAttrs attrs) { int i, mask = 0, data = 0; @@ -61,8 +66,8 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) * if the mask is 0, we need not to do anything. 
*/ if ((val >> 27) & 0xf) { - data = address_space_ldl(&env->address_space_iocsr, addr, - MEMTXATTRS_UNSPECIFIED, NULL); + data = address_space_ldl(env->address_space_iocsr, addr, + attrs, NULL); for (i = 0; i < 4; i++) { /* get mask for byte writing */ if (val & (0x1 << (27 + i))) { @@ -73,8 +78,8 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) data &= mask; data |= (val >> 32) & ~mask; - address_space_stl(&env->address_space_iocsr, addr, - data, MEMTXATTRS_UNSPECIFIED, NULL); + address_space_stl(env->address_space_iocsr, addr, + data, attrs, NULL); } static int archid_cmp(const void *a, const void *b) @@ -103,80 +108,72 @@ static CPUState *ipi_getcpu(int arch_id) CPUArchId *archid; archid = find_cpu_by_archid(machine, arch_id); - return CPU(archid->cpu); -} - -static void ipi_send(uint64_t val) -{ - uint32_t cpuid; - uint8_t vector; - CPUState *cs; - LoongArchCPU *cpu; - LoongArchIPI *s; - - cpuid = extract32(val, 16, 10); - if (cpuid >= LOONGARCH_MAX_CPUS) { - trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); - return; + if (archid) { + return CPU(archid->cpu); } - /* IPI status vector */ - vector = extract8(val, 0, 5); - - cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - s = LOONGARCH_IPI(cpu->env.ipistate); - loongarch_ipi_writel(&s->ipi_core, CORE_SET_OFF, BIT(vector), 4); + return NULL; } -static void mail_send(uint64_t val) +static MemTxResult mail_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = 0x1020 + (val & 0x1c); cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c); + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void any_send(uint64_t val) +static MemTxResult any_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = val & 0xffff; cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = val & 0xffff; + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + LoongArchIPI *ipi = opaque; + IPICore *s; int index = 0; + uint32_t cpuid; + uint8_t vector; + CPUState *cs; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; trace_loongarch_ipi_write(size, (uint64_t)addr, val); switch (addr) { @@ -203,17 +200,34 @@ static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, s->buf[index] = val; break; case IOCSR_IPI_SEND: - ipi_send(val); + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + 
trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); + return MEMTX_DECODE_ERROR; + } + + /* IPI status vector */ + vector = extract8(val, 0, 5); + cs = ipi_getcpu(cpuid); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + attrs.requester_id = cs->cpu_index; + loongarch_ipi_writel(ipi, CORE_SET_OFF, BIT(vector), 4, attrs); break; default: qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); break; } + + return MEMTX_OK; } static const MemoryRegionOps loongarch_ipi_ops = { - .read = loongarch_ipi_readl, - .write = loongarch_ipi_writel, + .read_with_attrs = loongarch_ipi_readl, + .write_with_attrs = loongarch_ipi_writel, .impl.min_access_size = 4, .impl.max_access_size = 4, .valid.min_access_size = 4, @@ -222,24 +236,28 @@ static const MemoryRegionOps loongarch_ipi_ops = { }; /* mail send and any send only support writeq */ -static void loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { + MemTxResult ret = MEMTX_OK; + addr &= 0xfff; switch (addr) { case MAIL_SEND_OFFSET: - mail_send(val); + ret = mail_send(val, attrs); break; case ANY_SEND_OFFSET: - any_send(val); + ret = any_send(val, attrs); break; default: break; } + + return ret; } static const MemoryRegionOps loongarch_ipi64_ops = { - .write = loongarch_ipi_writeq, + .write_with_attrs = loongarch_ipi_writeq, .impl.min_access_size = 8, .impl.max_access_size = 8, .valid.min_access_size = 8, @@ -247,30 +265,46 @@ static const MemoryRegionOps loongarch_ipi64_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void loongarch_ipi_init(Object *obj) +static void loongarch_ipi_realize(DeviceState *dev, Error **errp) { - LoongArchIPI *s = LOONGARCH_IPI(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + LoongArchIPI *s = LOONGARCH_IPI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } - memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops, - &s->ipi_core, "loongarch_ipi_iocsr", 0x48); + memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), &loongarch_ipi_ops, + s, "loongarch_ipi_iocsr", 0x48); /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */ s->ipi_iocsr_mem.disable_reentrancy_guard = true; sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); - memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops, - &s->ipi_core, "loongarch_ipi64_iocsr", 0x118); + memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev), + &loongarch_ipi64_ops, + s, "loongarch_ipi64_iocsr", 0x118); sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); - qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1); + + s->cpu = g_new0(IPICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + qdev_init_gpio_out(dev, &s->cpu[i].irq, 1); + } } static const VMStateDescription vmstate_ipi_core = { .name = "ipi-single", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, IPICore), VMSTATE_UINT32(en, IPICore), VMSTATE_UINT32(set, IPICore), @@ -282,27 +316,42 @@ static const VMStateDescription vmstate_ipi_core = { static const VMStateDescription vmstate_loongarch_ipi = { .name = TYPE_LOONGARCH_IPI, - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - 
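
Switching the IPI region to read_with_attrs/write_with_attrs lets one shared device pick the per-CPU slice from MemTxAttrs.requester_id instead of instantiating a separate region per core. A rough standalone sketch of that dispatch, with invented Attrs/Core/IPI types and made-up register offsets:

#include <stdint.h>
#include <stdio.h>

#define NUM_CPU 4

typedef struct {
    uint32_t requester_id;       /* which CPU issued the access */
} Attrs;

typedef struct {
    uint32_t status;
    uint32_t buf[2];
} Core;

typedef struct {
    Core cpu[NUM_CPU];
} IPI;

static void ipi_write(IPI *s, uint32_t addr, uint32_t val, Attrs attrs)
{
    /* select the per-CPU state from the transaction attributes */
    Core *c = &s->cpu[attrs.requester_id];

    switch (addr) {
    case 0x00:
        c->status = val;
        break;
    case 0x20:
    case 0x24:
        c->buf[(addr - 0x20) / 4] = val;
        break;
    default:
        break;
    }
}

int main(void)
{
    IPI s = { 0 };

    ipi_write(&s, 0x00, 0x1, (Attrs){ .requester_id = 2 });
    printf("cpu2 status = %u\n", (unsigned)s.cpu[2].status);
    return 0;
}
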
VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, vmstate_ipi_core, IPICore), + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchIPI, num_cpu, + vmstate_ipi_core, IPICore), VMSTATE_END_OF_LIST() } }; +static Property ipi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchIPI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + static void loongarch_ipi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_ipi_realize; + device_class_set_props(dc, ipi_properties); dc->vmsd = &vmstate_loongarch_ipi; } +static void loongarch_ipi_finalize(Object *obj) +{ + LoongArchIPI *s = LOONGARCH_IPI(obj); + + g_free(s->cpu); +} + static const TypeInfo loongarch_ipi_info = { .name = TYPE_LOONGARCH_IPI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(LoongArchIPI), - .instance_init = loongarch_ipi_init, .class_init = loongarch_ipi_class_init, + .instance_finalize = loongarch_ipi_finalize, }; static void loongarch_ipi_register_types(void) diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index 6aa4cadfa4afd936d61a96e600b31947a62bb9b9..2d5e65abfff3a44460e67480f25f146f34b23ed5 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -420,7 +420,7 @@ static const VMStateDescription vmstate_loongarch_pch_pic = { .name = TYPE_LOONGARCH_PCH_PIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(int_mask, LoongArchPCHPIC), VMSTATE_UINT64(htmsi_en, LoongArchPCHPIC), VMSTATE_UINT64(intedge, LoongArchPCHPIC), diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index e09705eeaf1164778cca64a8155c6ea5d33d55bc..4b11fb9f7266e24effee86caa2a5d3cd37d55cdd 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -80,7 +80,7 @@ static const VMStateDescription vmstate_m68k_irqc = { .name = "m68k-irqc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ipr, M68KIRQCState), VMSTATE_END_OF_LIST() } diff --git a/hw/intc/nios2_vic.c b/hw/intc/nios2_vic.c index cf63212a88698560a4267f41a00bd775e851502b..7e2d9d63276ed4816289cefa7b1ed924a3d5a492 100644 --- a/hw/intc/nios2_vic.c +++ b/hw/intc/nios2_vic.c @@ -275,7 +275,7 @@ static const VMStateDescription nios2_vic_vmstate = { .name = "nios2-vic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(int_config, Nios2VIC, 32), VMSTATE_UINT32(vic_config, Nios2VIC), VMSTATE_UINT32(int_raw_status, Nios2VIC), diff --git a/hw/intc/ompic.c b/hw/intc/ompic.c index 1f10314807d88d4b415e94348192ea582d3a8880..99032ea7f737315c3f8b6e37b0586def4566c1f0 100644 --- a/hw/intc/ompic.c +++ b/hw/intc/ompic.c @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_or1k_ompic_cpu = { .name = "or1k_ompic_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, OR1KOMPICCPUState), VMSTATE_UINT32(control, OR1KOMPICCPUState), VMSTATE_END_OF_LIST() @@ -148,7 +148,7 @@ static const VMStateDescription vmstate_or1k_ompic = { .name = TYPE_OR1K_OMPIC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(cpus, OR1KOMPICState, OMPIC_MAX_CPUS, 1, vmstate_or1k_ompic_cpu, OR1KOMPICCPUState), VMSTATE_UINT32(num_cpus, OR1KOMPICState), diff --git 
a/hw/intc/openpic.c b/hw/intc/openpic.c index a6f91d4bcdf7d154ef16507a6da8b1d18dd6504a..9792a112240bac3c5cddda80d0ab0438a6d1fa59 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -1391,7 +1391,7 @@ static const VMStateDescription vmstate_openpic_irq_queue = { .name = "openpic_irq_queue", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(queue, IRQQueue, 0, queue_size), VMSTATE_INT32(next, IRQQueue), VMSTATE_INT32(priority, IRQQueue), @@ -1403,7 +1403,7 @@ static const VMStateDescription vmstate_openpic_irqdest = { .name = "openpic_irqdest", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(ctpr, IRQDest), VMSTATE_STRUCT(raised, IRQDest, 0, vmstate_openpic_irq_queue, IRQQueue), @@ -1418,7 +1418,7 @@ static const VMStateDescription vmstate_openpic_irqsource = { .name = "openpic_irqsource", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ivpr, IRQSource), VMSTATE_UINT32(idr, IRQSource), VMSTATE_UINT32(destmask, IRQSource), @@ -1432,7 +1432,7 @@ static const VMStateDescription vmstate_openpic_timer = { .name = "openpic_timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tccr, OpenPICTimer), VMSTATE_UINT32(tbcr, OpenPICTimer), VMSTATE_END_OF_LIST() @@ -1443,7 +1443,7 @@ static const VMStateDescription vmstate_openpic_msi = { .name = "openpic_msi", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(msir, OpenPICMSI), VMSTATE_END_OF_LIST() } @@ -1468,7 +1468,7 @@ static const VMStateDescription vmstate_openpic = { .version_id = 3, .minimum_version_id = 3, .post_load = openpic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gcr, OpenPICState), VMSTATE_UINT32(vir, OpenPICState), VMSTATE_UINT32(pir, OpenPICState), diff --git a/hw/intc/pl190.c b/hw/intc/pl190.c index cd88443601e781c79caf976caa4e977d116b35e6..d79e5d8076f68358b606de109b37fdf714fef481 100644 --- a/hw/intc/pl190.c +++ b/hw/intc/pl190.c @@ -258,7 +258,7 @@ static const VMStateDescription vmstate_pl190 = { .name = "pl190", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, PL190State), VMSTATE_UINT32(soft_level, PL190State), VMSTATE_UINT32(irq_enable, PL190State), diff --git a/hw/intc/ppc-uic.c b/hw/intc/ppc-uic.c index dcf5de5d43ca1b1a7bd3c962048a99960c78fbd2..9a67f7f65111e778c684705850fb6acb8f8e3b98 100644 --- a/hw/intc/ppc-uic.c +++ b/hw/intc/ppc-uic.c @@ -269,7 +269,7 @@ static const VMStateDescription ppc_uic_vmstate = { .name = "ppc-uic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(level, PPCUIC), VMSTATE_UINT32(uicsr, PPCUIC), VMSTATE_UINT32(uicer, PPCUIC), diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index ab1a0b4b3ad71ee92b5a0bb8c349d2a6b39b79fd..e9f0536b1c621783565bc59560aac75cfbf0cd06 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -321,7 +321,7 @@ static const VMStateDescription vmstate_riscv_mtimer = { .name = "riscv_mtimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(timecmp, RISCVAclintMTimerState, num_harts, 0, 
vmstate_info_uint64, uint64_t), diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index c677b5cfbb54d3fdd2480063ba0f5edc0c67d379..e98e258deb2beb94301ac9ee7cd8c5b2e9dd586f 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -878,7 +878,7 @@ static const VMStateDescription vmstate_riscv_aplic = { .name = "riscv_aplic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(domaincfg, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), VMSTATE_UINT32(mmsicfgaddrH, RISCVAPLICState), diff --git a/hw/intc/riscv_imsic.c b/hw/intc/riscv_imsic.c index b31d07980c86e3dc680187fd99a0b8a7fe0f2541..b90f0d731df7dfd4be16da1019a9e3229b20c77b 100644 --- a/hw/intc/riscv_imsic.c +++ b/hw/intc/riscv_imsic.c @@ -386,7 +386,7 @@ static const VMStateDescription vmstate_riscv_imsic = { .name = "riscv_imsic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(eidelivery, RISCVIMSICState, num_pages, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/intc/rx_icu.c b/hw/intc/rx_icu.c index e5c01807b9afa19b1384b9478d4618c8b878f176..b2d4338f61298a99657f06972706b102f096595e 100644 --- a/hw/intc/rx_icu.c +++ b/hw/intc/rx_icu.c @@ -345,7 +345,7 @@ static const VMStateDescription vmstate_rxicu = { .name = "rx-icu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(ir, RXICUState, NR_IRQS), VMSTATE_UINT8_ARRAY(dtcer, RXICUState, NR_IRQS), VMSTATE_UINT8_ARRAY(ier, RXICUState, NR_IRQS / 8), diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index 74e02858d43e3596d669e0d1a26b2bc8e4c90f3c..f4a848460b804fd0ea248bfcd1cdbb29b1b3b3fe 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -106,7 +106,7 @@ static int qemu_s390_clear_io_flic(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FlicIO *cur, *next; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & FLIC_PENDING_IO)) { return 0; } @@ -223,7 +223,7 @@ uint32_t qemu_s390_flic_dequeue_service(QEMUS390FLICState *flic) { uint32_t tmp; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_SERVICE); tmp = flic->service_param; flic->service_param = 0; @@ -238,7 +238,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) QEMUS390FlicIO *io; uint8_t isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!(flic->pending & CR6_TO_PENDING_IO(cr6))) { return NULL; } @@ -262,7 +262,7 @@ QEMUS390FlicIO *qemu_s390_flic_dequeue_io(QEMUS390FLICState *flic, uint64_t cr6) void qemu_s390_flic_dequeue_crw_mchk(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(flic->pending & FLIC_PENDING_MCHK_CR); flic->pending &= ~FLIC_PENDING_MCHK_CR; } @@ -271,7 +271,7 @@ static void qemu_s390_inject_service(S390FLICState *fs, uint32_t parm) { QEMUS390FLICState *flic = s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* multiplexing is good enough for sclp - kvm does it internally as well */ flic->service_param |= parm; flic->pending |= FLIC_PENDING_SERVICE; @@ -287,7 +287,7 @@ static void qemu_s390_inject_io(S390FLICState *fs, uint16_t subchannel_id, QEMUS390FLICState *flic = s390_get_qemu_flic(fs); QEMUS390FlicIO *io; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); io 
= g_new0(QEMUS390FlicIO, 1); io->id = subchannel_id; io->nr = subchannel_nr; @@ -304,7 +304,7 @@ static void qemu_s390_inject_crw_mchk(S390FLICState *fs) { QEMUS390FLICState *flic = s390_get_qemu_flic(fs); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->pending |= FLIC_PENDING_MCHK_CR; qemu_s390_flic_notify(FLIC_PENDING_MCHK_CR); @@ -330,7 +330,7 @@ bool qemu_s390_flic_has_crw_mchk(QEMUS390FLICState *flic) bool qemu_s390_flic_has_any(QEMUS390FLICState *flic) { - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !!flic->pending; } @@ -340,7 +340,7 @@ static void qemu_s390_flic_reset(DeviceState *dev) QEMUS390FlicIO *cur, *next; int isc; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); flic->simm = 0; flic->nimm = 0; flic->pending = 0; @@ -366,7 +366,7 @@ static const VMStateDescription qemu_s390_flic_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = ais_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(simm, QEMUS390FLICState), VMSTATE_UINT8(nimm, QEMUS390FLICState), VMSTATE_END_OF_LIST() @@ -465,7 +465,7 @@ const VMStateDescription vmstate_adapter_info_so = { .version_id = 1, .minimum_version_id = 1, .needed = adapter_info_so_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(summary_offset, AdapterInfo), VMSTATE_END_OF_LIST() } @@ -475,7 +475,7 @@ const VMStateDescription vmstate_adapter_info = { .name = "s390_adapter_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ind_offset, AdapterInfo), /* * We do not have to migrate neither the id nor the addresses. @@ -484,7 +484,7 @@ const VMStateDescription vmstate_adapter_info = { */ VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_adapter_info_so, NULL } @@ -495,7 +495,7 @@ const VMStateDescription vmstate_adapter_routes = { .name = "s390_adapter_routes", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(adapter, AdapterRoutes, 1, vmstate_adapter_info, AdapterInfo), VMSTATE_END_OF_LIST() diff --git a/hw/intc/s390_flic_kvm.c b/hw/intc/s390_flic_kvm.c index 28364b22d65c7d54026586ee0aabe9747fe00b8d..4d5cbb2a2fb64d15549ae06ca777cfbd42f70444 100644 --- a/hw/intc/s390_flic_kvm.c +++ b/hw/intc/s390_flic_kvm.c @@ -525,7 +525,7 @@ static const VMStateDescription kvm_s390_flic_ais_tmp = { .name = "s390-flic-ais-tmp", .pre_save = kvm_flic_ais_pre_save, .post_load = kvm_flic_ais_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(simm, KVMS390FLICStateMigTmp), VMSTATE_UINT8(nimm, KVMS390FLICStateMigTmp), VMSTATE_END_OF_LIST() @@ -537,7 +537,7 @@ static const VMStateDescription kvm_s390_flic_vmstate_ais = { .version_id = 1, .minimum_version_id = 1, .needed = ais_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(KVMS390FLICState, KVMS390FLICStateMigTmp, kvm_s390_flic_ais_tmp), VMSTATE_END_OF_LIST() @@ -550,7 +550,7 @@ static const VMStateDescription kvm_s390_flic_vmstate = { .name = "s390-flic", .version_id = FLIC_SAVEVM_VERSION, .minimum_version_id = FLIC_SAVEVM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "irqs", .info = &(const VMStateInfo) { @@ -562,7 +562,7 @@ static const VMStateDescription kvm_s390_flic_vmstate = { }, 
VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &kvm_s390_flic_vmstate_ais, NULL } diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index 5522ede2cf856f78be9107d2eee7e993960a76eb..e559f118052ae80639cde322166c2045f0738367 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -406,7 +406,7 @@ static const VMStateDescription vmstate_sifive_plic = { .name = "riscv_sifive_plic", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(source_priority, SiFivePLICState, num_sources, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/intc/slavio_intctl.c b/hw/intc/slavio_intctl.c index f7e59ba6432e4dd50ecc45cfa16f3b8a459fc262..36b4a12f6061fb7a27ad2384dcb494888f40d1b5 100644 --- a/hw/intc/slavio_intctl.c +++ b/hw/intc/slavio_intctl.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_intctl_cpu = { .name ="slavio_intctl_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intreg_pending, SLAVIO_CPUINTCTLState), VMSTATE_END_OF_LIST() } @@ -364,7 +364,7 @@ static const VMStateDescription vmstate_intctl = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_intctl_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(slaves, SLAVIO_INTCTLState, MAX_CPUS, 1, vmstate_intctl_cpu, SLAVIO_CPUINTCTLState), VMSTATE_UINT32(intregm_pending, SLAVIO_INTCTLState), diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 199c261b07244bab181230c9e44c2fc690d92ff3..d7e56bfb20e61192c99243e57bc0f428eae6010b 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -522,7 +522,7 @@ static const VMStateDescription vmstate_spapr_xive_end = { .name = TYPE_SPAPR_XIVE "/end", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(w0, XiveEND), VMSTATE_UINT32(w1, XiveEND), VMSTATE_UINT32(w2, XiveEND), @@ -539,7 +539,7 @@ static const VMStateDescription vmstate_spapr_xive_eas = { .name = TYPE_SPAPR_XIVE "/eas", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT64(w, XiveEAS), VMSTATE_END_OF_LIST() }, @@ -577,7 +577,7 @@ static const VMStateDescription vmstate_spapr_xive = { .minimum_version_id = 1, .pre_save = vmstate_spapr_xive_pre_save, .post_load = NULL, /* handled at the machine level */ - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs, vmstate_spapr_xive_eas, XiveEAS), diff --git a/hw/intc/xics.c b/hw/intc/xics.c index c77e986136e5b39e797a42660b86ad2999e2ece9..8b257872270532591f85e06abb18099193cd0bab 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -273,7 +273,7 @@ static const VMStateDescription vmstate_icp_server = { .minimum_version_id = 1, .pre_save = icp_pre_save, .post_load = icp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32(xirr, ICPState), VMSTATE_UINT8(pending_priority, ICPState), @@ -665,7 +665,7 @@ static const VMStateDescription vmstate_ics_irq = { .name = "ics/irq", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(server, ICSIRQState), VMSTATE_UINT8(priority, 
ICSIRQState), VMSTATE_UINT8(saved_priority, ICSIRQState), @@ -681,7 +681,7 @@ static const VMStateDescription vmstate_ics = { .minimum_version_id = 1, .pre_save = ics_pre_save, .post_load = ics_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32_EQUAL(nr_irqs, ICSState, NULL), diff --git a/hw/intc/xive.c b/hw/intc/xive.c index a3585593d8f495f9699eebfb050f83f67c1bd389..057b308ae927c6a42e0404edc8ba1cca4eb121c6 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -798,7 +798,7 @@ static const VMStateDescription vmstate_xive_tctx = { .minimum_version_id = 1, .pre_save = vmstate_xive_tctx_pre_save, .post_load = vmstate_xive_tctx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(regs, XiveTCTX), VMSTATE_END_OF_LIST() }, @@ -1271,7 +1271,7 @@ static const VMStateDescription vmstate_xive_source = { .name = TYPE_XIVE_SOURCE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(nr_irqs, XiveSource, NULL), VMSTATE_VBUFFER_UINT32(status, XiveSource, 1, NULL, nr_irqs), VMSTATE_END_OF_LIST() diff --git a/hw/intc/xlnx-pmu-iomod-intc.c b/hw/intc/xlnx-pmu-iomod-intc.c index acaa1c3e6f7bb3112900720a6fb51d984c4ce1d5..12bd1a3fff3dcc3eae25cd9a51d3e1198e006ca6 100644 --- a/hw/intc/xlnx-pmu-iomod-intc.c +++ b/hw/intc/xlnx-pmu-iomod-intc.c @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_xlnx_pmu_io_intc = { .name = TYPE_XLNX_PMU_IO_INTC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxPMUIOIntc, XLNXPMUIOINTC_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/intc/xlnx-zynqmp-ipi.c b/hw/intc/xlnx-zynqmp-ipi.c index adc1179014134b9c9c54daf3e09ad9f1c9c8a5aa..509ee799cc2de04473ed45639aebe14705290a18 100644 --- a/hw/intc/xlnx-zynqmp-ipi.c +++ b/hw/intc/xlnx-zynqmp-ipi.c @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_zynqmp_pmu_ipi = { .name = TYPE_XLNX_ZYNQMP_IPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPIPI, R_XLNX_ZYNQMP_IPI_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c index ae20f36da68c61228afb27ed750645188ea4de41..c39dbb481f206129cd65453f665f5b9ae63ebd2b 100644 --- a/hw/ipack/ipack.c +++ b/hw/ipack/ipack.c @@ -93,7 +93,7 @@ const VMStateDescription vmstate_ipack_device = { .name = "ipack_device", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(slot, IPackDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c index 6b3edbf01765fa6332200af591ee1099a1ec6eec..88eef4b8308d71bdcdee63704d58a01b3f46cd2d 100644 --- a/hw/ipack/tpci200.c +++ b/hw/ipack/tpci200.c @@ -619,7 +619,7 @@ static const VMStateDescription vmstate_tpci200 = { .name = "tpci200", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, TPCI200State), VMSTATE_BOOL_ARRAY(big_endian, TPCI200State, 3), VMSTATE_UINT8_ARRAY(ctrl, TPCI200State, N_MODULES), diff --git a/hw/ipmi/ipmi_bmc_extern.c b/hw/ipmi/ipmi_bmc_extern.c index 2117dad35a93b7386fcb7489b3b851d500fe1549..29c5af3cc36316173245faa15adb6a269384ce2b 100644 --- a/hw/ipmi/ipmi_bmc_extern.c +++ b/hw/ipmi/ipmi_bmc_extern.c @@ -479,7 +479,7 @@ static const VMStateDescription 
vmstate_ipmi_bmc_extern = { .version_id = 1, .minimum_version_id = 1, .post_load = ipmi_bmc_extern_post_migrate, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(send_reset, IPMIBmcExtern), VMSTATE_BOOL(waiting_rsp, IPMIBmcExtern), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/ipmi_bmc_sim.c b/hw/ipmi/ipmi_bmc_sim.c index 905e091094bb5201e6ca1c4df78620167bb76ca7..33c839c65aa1257f06af176a13c96816a22e1b67 100644 --- a/hw/ipmi/ipmi_bmc_sim.c +++ b/hw/ipmi/ipmi_bmc_sim.c @@ -2103,7 +2103,7 @@ static const VMStateDescription vmstate_ipmi_sim = { .name = TYPE_IPMI_BMC_SIMULATOR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(bmc_global_enables, IPMIBmcSim), VMSTATE_UINT8(msg_flags, IPMIBmcSim), VMSTATE_BOOL(watchdog_initialized, IPMIBmcSim), diff --git a/hw/ipmi/ipmi_bt.c b/hw/ipmi/ipmi_bt.c index 22f94fb98dabc25c3b582e34c1b158551ab22885..583fc64730c616b6ca7e3a9c6dead0ec7281d089 100644 --- a/hw/ipmi/ipmi_bt.c +++ b/hw/ipmi/ipmi_bt.c @@ -396,7 +396,7 @@ const VMStateDescription vmstate_IPMIBT = { .version_id = 1, .minimum_version_id = 1, .post_load = ipmi_bt_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(obf_irq_set, IPMIBT), VMSTATE_BOOL(atn_irq_set, IPMIBT), VMSTATE_BOOL(irqs_enabled, IPMIBT), diff --git a/hw/ipmi/ipmi_kcs.c b/hw/ipmi/ipmi_kcs.c index a77612946a5f4635c4998f9d45a2ac8a123780a4..c15977cab4c415668c82a932b27e3281074d6d73 100644 --- a/hw/ipmi/ipmi_kcs.c +++ b/hw/ipmi/ipmi_kcs.c @@ -379,7 +379,7 @@ const VMStateDescription vmstate_IPMIKCS = { .version_id = 2, .minimum_version_id = 1, .post_load = ipmi_kcs_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(obf_irq_set, IPMIKCS), VMSTATE_BOOL(atn_irq_set, IPMIKCS), VMSTATE_UNUSED_TEST(vmstate_kcs_before_version2, 1), /* Was use_irq */ diff --git a/hw/ipmi/isa_ipmi_bt.c b/hw/ipmi/isa_ipmi_bt.c index aec064d3cd79fecd9ff79c277dc9cd5305064aeb..7b36d5149450def4763a33f7aa40a06c2718d0d1 100644 --- a/hw/ipmi/isa_ipmi_bt.c +++ b/hw/ipmi/isa_ipmi_bt.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_ISAIPMIBTDevice = { * because it used VMSTATE_VBUFFER_UINT32, but it did not transfer * the buffer length, so random things would happen. 
*/ - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(bt, ISAIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), VMSTATE_END_OF_LIST() } diff --git a/hw/ipmi/isa_ipmi_kcs.c b/hw/ipmi/isa_ipmi_kcs.c index b5dcb646166afb00543c8b01bbffe8199e7bf139..f52b32e590b9a35838ab25577755b9ddb50aa861 100644 --- a/hw/ipmi/isa_ipmi_kcs.c +++ b/hw/ipmi/isa_ipmi_kcs.c @@ -76,7 +76,7 @@ static const VMStateDescription vmstate_ISAIPMIKCSDevice = { .name = TYPE_IPMI_INTERFACE, .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VSTRUCT_TEST(kcs, ISAIPMIKCSDevice, vmstate_kcs_before_version2, 0, vmstate_IPMIKCS, IPMIKCS, 1), VMSTATE_VSTRUCT_V(kcs, ISAIPMIKCSDevice, 2, vmstate_IPMIKCS, diff --git a/hw/ipmi/pci_ipmi_bt.c b/hw/ipmi/pci_ipmi_bt.c index 633931b82571322960666a1105d166df1380c296..afeea6f30316a5fd048a33bbeb706be4c9298815 100644 --- a/hw/ipmi/pci_ipmi_bt.c +++ b/hw/ipmi/pci_ipmi_bt.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_PCIIPMIBTDevice = { .name = TYPE_IPMI_INTERFACE_PREFIX "pci-bt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIIPMIBTDevice), VMSTATE_STRUCT(bt, PCIIPMIBTDevice, 1, vmstate_IPMIBT, IPMIBT), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/pci_ipmi_kcs.c b/hw/ipmi/pci_ipmi_kcs.c index 1a581413c2659b02699904aca956b01d8b86ecbd..05ba97ec58f408692f3c05608fd3a16756cd55f9 100644 --- a/hw/ipmi/pci_ipmi_kcs.c +++ b/hw/ipmi/pci_ipmi_kcs.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_PCIIPMIKCSDevice = { .name = TYPE_IPMI_INTERFACE_PREFIX "pci-kcs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIIPMIKCSDevice), VMSTATE_STRUCT(kcs, PCIIPMIKCSDevice, 1, vmstate_IPMIKCS, IPMIKCS), VMSTATE_END_OF_LIST() diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c index d0991ab7f939b93eb7219bfe83ed385e8a5d41c0..56865df7dbd7bba37c466d4bca75fd53139a1b0d 100644 --- a/hw/ipmi/smbus_ipmi.c +++ b/hw/ipmi/smbus_ipmi.c @@ -299,7 +299,7 @@ static const VMStateDescription vmstate_smbus_ipmi = { .name = TYPE_SMBUS_IPMI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SMBUS_DEVICE(parent, SMBusIPMIDevice), VMSTATE_UINT8(waiting_rsp, SMBusIPMIDevice), VMSTATE_UINT32(outlen, SMBusIPMIDevice), diff --git a/hw/isa/apm.c b/hw/isa/apm.c index dfe9020d30b768202f48d0eed2b97bfdffae6d12..e34edb864cdaf0c6647a174e017c34a1c6b3c1fe 100644 --- a/hw/isa/apm.c +++ b/hw/isa/apm.c @@ -68,7 +68,7 @@ const VMStateDescription vmstate_apm = { .name = "APM State", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(apmc, APMState), VMSTATE_UINT8(apms, APMState), VMSTATE_END_OF_LIST() diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index 203b92c26450f19aff6d11d972a71e02b4e53b20..cbaa152a899a8049ac7519362ad87de21ac2450c 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -40,7 +40,7 @@ static const VMStateDescription vmstate_i82378 = { .name = "pci-i82378", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, I82378State), VMSTATE_END_OF_LIST() }, diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 23eba64f226036f356b3a1dc5e986cc07890592e..3924eec483025a8e770eae5d97f4118af1026891 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ 
-768,7 +768,7 @@ static const VMStateDescription vmstate_ich9_rst_cnt = { .version_id = 1, .minimum_version_id = 1, .needed = ich9_rst_cnt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rst_cnt, ICH9LPCState), VMSTATE_END_OF_LIST() } @@ -788,7 +788,7 @@ static const VMStateDescription vmstate_ich9_smi_feat = { .version_id = 1, .minimum_version_id = 1, .needed = ich9_smi_feat_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(smi_guest_features_le, ICH9LPCState, sizeof(uint64_t)), VMSTATE_UINT8(smi_features_ok, ICH9LPCState), @@ -802,7 +802,7 @@ static const VMStateDescription vmstate_ich9_lpc = { .version_id = 1, .minimum_version_id = 1, .post_load = ich9_lpc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(d, ICH9LPCState), VMSTATE_STRUCT(apm, ICH9LPCState, 0, vmstate_apm, APMState), VMSTATE_STRUCT(pm, ICH9LPCState, 0, vmstate_ich9_pm, ICH9LPCPMRegs), @@ -810,7 +810,7 @@ static const VMStateDescription vmstate_ich9_lpc = { VMSTATE_UINT32(sci_level, ICH9LPCState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ich9_rst_cnt, &vmstate_ich9_smi_feat, NULL diff --git a/hw/isa/pc87312.c b/hw/isa/pc87312.c index 8d7b8d3db2beb609874f1f4f1881ee00d18d8c33..ee23f3e164df505ffc1fe386cea55ad5c52421bd 100644 --- a/hw/isa/pc87312.c +++ b/hw/isa/pc87312.c @@ -319,7 +319,7 @@ static const VMStateDescription vmstate_pc87312 = { .version_id = 1, .minimum_version_id = 1, .post_load = pc87312_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(read_id_step, PC87312State), VMSTATE_UINT8(selected_index, PC87312State), VMSTATE_UINT8_ARRAY(regs, PC87312State, 3), diff --git a/hw/isa/piix.c b/hw/isa/piix.c index 04ebed5b52603e4bf2777b5f30cad0145bee0009..344bf32e547f45d758d368feb051d6bf06a03957 100644 --- a/hw/isa/piix.c +++ b/hw/isa/piix.c @@ -230,7 +230,7 @@ static const VMStateDescription vmstate_piix3_rcr = { .version_id = 1, .minimum_version_id = 1, .needed = piix3_rcr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rcr, PIIXState), VMSTATE_END_OF_LIST() } @@ -242,13 +242,13 @@ static const VMStateDescription vmstate_piix3 = { .minimum_version_id = 2, .post_load = piix_post_load, .pre_save = piix3_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PIIXState), VMSTATE_INT32_ARRAY_V(pci_irq_levels_vmstate, PIIXState, PIIX_NUM_PIRQS, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_piix3_rcr, NULL } @@ -259,7 +259,7 @@ static const VMStateDescription vmstate_piix4 = { .version_id = 3, .minimum_version_id = 2, .post_load = piix4_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PIIXState), VMSTATE_UINT8_V(rcr, PIIXState, 3), VMSTATE_END_OF_LIST() diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index 9c2333a277d9ea253b50e1fc09b00c481913b03e..d3e0f6d01fb6354866743b6b5c43483749613a81 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -82,7 +82,7 @@ static const VMStateDescription vmstate_acpi = { .version_id = 1, .minimum_version_id = 1, .post_load = vmstate_acpi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, ViaPMState), 
VMSTATE_UINT16(ar.pm1.evt.sts, ViaPMState), VMSTATE_UINT16(ar.pm1.evt.en, ViaPMState), @@ -563,7 +563,7 @@ static const VMStateDescription vmstate_via = { .name = "via-isa", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, ViaISAState), VMSTATE_END_OF_LIST() } diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c index ae292fc5432670aed9b823f421428c859d9929f3..730bc4a748c487b766a1d3cccdf5a4e407a072ca 100644 --- a/hw/loongarch/acpi-build.c +++ b/hw/loongarch/acpi-build.c @@ -564,7 +564,7 @@ static const VMStateDescription vmstate_acpi_build = { .name = "acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index 4b7dc67a2d79a136438afa789d98916878d8a9e1..c9a680e61ac6af20dc0964b4b4500b90463cee67 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -535,9 +535,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams) CPUState *cpu_state; int cpu, pin, i, start, num; - extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); - /* * The connection of interrupts: * +-----+ +---------+ +-------+ @@ -559,41 +556,42 @@ static void loongarch_irq_init(LoongArchMachineState *lams) * | UARTs | | Devices | | Devices | * +--------+ +---------+ +---------+ */ + + /* Create IPI device */ + ipi = qdev_new(TYPE_LOONGARCH_IPI); + qdev_prop_set_uint32(ipi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + + /* IPI iocsr memory region */ + memory_region_add_subregion(&lams->system_iocsr, SMP_IPI_MAILBOX, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); + memory_region_add_subregion(&lams->system_iocsr, MAIL_SEND_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpu_state = qemu_get_cpu(cpu); cpudev = DEVICE(cpu_state); lacpu = LOONGARCH_CPU(cpu_state); env = &(lacpu->env); - - ipi = qdev_new(TYPE_LOONGARCH_IPI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + env->address_space_iocsr = &lams->as_iocsr; /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI)); - /* IPI iocsr memory region */ - memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 0)); - memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 1)); - /* - * extioi iocsr memory region - * only one extioi is added on loongarch virt machine - * external device interrupt can only be routed to cpu 0-3 - */ - if (cpu < EXTIOI_CPUS) - memory_region_add_subregion(&env->system_iocsr, APIC_BASE, - sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), - cpu)); + qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); env->ipistate = ipi; } + /* Create EXTIOI device */ + extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); + qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); + memory_region_add_subregion(&lams->system_iocsr, APIC_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); + /* * connect ext irq to the cpu irq * cpu_pin[9:2] <= intc_pin[7:0] */ - for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) { + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpudev = 
DEVICE(qemu_get_cpu(cpu)); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_connect_gpio_out(extioi, (cpu * 8 + pin), @@ -733,6 +731,43 @@ static void loongarch_direct_kernel_boot(LoongArchMachineState *lams, } } +static void loongarch_qemu_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) +{ + switch (addr) { + case VERSION_REG: + return 0x11ULL; + case FEATURE_REG: + return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | + 1ULL << IOCSRF_CSRIPI; + case VENDOR_REG: + return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ + case CPUNAME_REG: + return 0x303030354133ULL; /* "3A5000" */ + case MISC_FUNC_REG: + return 1ULL << IOCSRM_EXTIOI_EN; + } + return 0ULL; +} + +static const MemoryRegionOps loongarch_qemu_ops = { + .read = loongarch_qemu_read, + .write = loongarch_qemu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + static void loongarch_init(MachineState *machine) { LoongArchCPU *lacpu; @@ -761,8 +796,17 @@ static void loongarch_init(MachineState *machine) exit(1); } create_fdt(lams); - /* Init CPUs */ + /* Create IOCSR space */ + memory_region_init_io(&lams->system_iocsr, OBJECT(machine), NULL, + machine, "iocsr", UINT64_MAX); + address_space_init(&lams->as_iocsr, &lams->system_iocsr, "IOCSR"); + memory_region_init_io(&lams->iocsr_mem, OBJECT(machine), + &loongarch_qemu_ops, + machine, "iocsr_misc", 0x428); + memory_region_add_subregion(&lams->system_iocsr, 0, &lams->iocsr_mem); + + /* Init CPUs */ possible_cpus = mc->possible_cpu_arch_ids(machine); for (i = 0; i < possible_cpus->len; i++) { cpu = cpu_create(machine->cpu_type); diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c index a46a23538d9e64f1aee1ad50d87ab9e272f01b1f..183fd3cc08502df594eacd4c81b28b484121044f 100644 --- a/hw/m68k/mcf5206.c +++ b/hw/m68k/mcf5206.c @@ -148,15 +148,11 @@ static void m5206_timer_write(m5206_timer_state *s, uint32_t addr, uint32_t val) m5206_timer_update(s); } -static m5206_timer_state *m5206_timer_init(qemu_irq irq) +static void m5206_timer_init(m5206_timer_state *s, qemu_irq irq) { - m5206_timer_state *s; - - s = g_new0(m5206_timer_state, 1); s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_LEGACY); s->irq = irq; m5206_timer_reset(s); - return s; } /* System Integration Module. 
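
The identification values returned by loongarch_qemu_read() above are plain ASCII packed little-endian into 64-bit registers. A small standalone check (not part of the patch) that decodes the VENDOR_REG and CPUNAME_REG constants back into strings:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void decode(uint64_t reg, char buf[9])
{
    /* byte 0 of the little-endian register is the first character */
    for (int i = 0; i < 8; i++) {
        buf[i] = (reg >> (i * 8)) & 0xff;
    }
    buf[8] = '\0';
}

int main(void)
{
    char buf[9];

    decode(0x6e6f73676e6f6f4cULL, buf);
    printf("vendor : %s\n", buf);        /* "Loongson" */

    decode(0x303030354133ULL, buf);
    printf("cpuname: %s\n", buf);        /* "3A5000" */
    return 0;
}
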
*/ @@ -167,7 +163,7 @@ typedef struct { M68kCPU *cpu; MemoryRegion iomem; qemu_irq *pic; - m5206_timer_state *timer[2]; + m5206_timer_state timer[2]; DeviceState *uart[2]; uint8_t scr; uint8_t icr[14]; @@ -293,9 +289,9 @@ static uint64_t m5206_mbar_read(m5206_mbar_state *s, uint16_t offset, unsigned size) { if (offset >= 0x100 && offset < 0x120) { - return m5206_timer_read(s->timer[0], offset - 0x100); + return m5206_timer_read(&s->timer[0], offset - 0x100); } else if (offset >= 0x120 && offset < 0x140) { - return m5206_timer_read(s->timer[1], offset - 0x120); + return m5206_timer_read(&s->timer[1], offset - 0x120); } else if (offset >= 0x140 && offset < 0x160) { return mcf_uart_read(s->uart[0], offset - 0x140, size); } else if (offset >= 0x180 && offset < 0x1a0) { @@ -333,10 +329,10 @@ static void m5206_mbar_write(m5206_mbar_state *s, uint16_t offset, uint64_t value, unsigned size) { if (offset >= 0x100 && offset < 0x120) { - m5206_timer_write(s->timer[0], offset - 0x100, value); + m5206_timer_write(&s->timer[0], offset - 0x100, value); return; } else if (offset >= 0x120 && offset < 0x140) { - m5206_timer_write(s->timer[1], offset - 0x120, value); + m5206_timer_write(&s->timer[1], offset - 0x120, value); return; } else if (offset >= 0x140 && offset < 0x160) { mcf_uart_write(s->uart[0], offset - 0x140, value, size); @@ -598,8 +594,8 @@ static void mcf5206_mbar_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); s->pic = qemu_allocate_irqs(m5206_mbar_set_irq, s, 14); - s->timer[0] = m5206_timer_init(s->pic[9]); - s->timer[1] = m5206_timer_init(s->pic[10]); + m5206_timer_init(&s->timer[0], s->pic[9]); + m5206_timer_init(&s->timer[1], s->pic[10]); s->uart[0] = mcf_uart_create(s->pic[12], serial_hd(0)); s->uart[1] = mcf_uart_create(s->pic[13], serial_hd(1)); } diff --git a/hw/m68k/next-cube.c b/hw/m68k/next-cube.c index fabd861941c3582cce8573b27feef3856bb6cc1c..9f6f90d68b40646317594b763423ed7c0d33bb08 100644 --- a/hw/m68k/next-cube.c +++ b/hw/m68k/next-cube.c @@ -62,6 +62,7 @@ typedef struct next_dma { } next_dma; typedef struct NextRtc { + int8_t phase; uint8_t ram[32]; uint8_t command; uint8_t value; @@ -73,6 +74,12 @@ typedef struct NextRtc { struct NeXTState { MachineState parent; + MemoryRegion rom; + MemoryRegion rom2; + MemoryRegion dmamem; + MemoryRegion bmapm1; + MemoryRegion bmapm2; + next_dma dma[10]; }; @@ -90,8 +97,10 @@ struct NeXTPC { uint32_t scr1; uint32_t scr2; + uint32_t old_scr2; uint32_t int_mask; uint32_t int_status; + uint32_t led; uint8_t scsi_csr_1; uint8_t scsi_csr_2; @@ -121,49 +130,46 @@ static const uint8_t rtc_ram2[32] = { #define SCR2_RTDATA 0x4 #define SCR2_TOBCD(x) (((x / 10) << 4) + (x % 10)) -static void nextscr2_write(NeXTPC *s, uint32_t val, int size) +static void next_scr2_led_update(NeXTPC *s) { - static int led; - static int phase; - static uint8_t old_scr2; - uint8_t scr2_2; - NextRtc *rtc = &s->rtc; - - if (size == 4) { - scr2_2 = (val >> 8) & 0xFF; - } else { - scr2_2 = val & 0xFF; - } - - if (val & 0x1) { + if (s->scr2 & 0x1) { DPRINTF("fault!\n"); - led++; - if (led == 10) { + s->led++; + if (s->led == 10) { DPRINTF("LED flashing, possible fault!\n"); - led = 0; + s->led = 0; } } +} + +static void next_scr2_rtc_update(NeXTPC *s) +{ + uint8_t old_scr2, scr2_2; + NextRtc *rtc = &s->rtc; + + old_scr2 = extract32(s->old_scr2, 8, 8); + scr2_2 = extract32(s->scr2, 8, 8); if (scr2_2 & 0x1) { - /* DPRINTF("RTC %x phase %i\n", scr2_2, phase); */ - if (phase == -1) { - phase = 0; + /* DPRINTF("RTC %x phase %i\n", scr2_2, 
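
The next-cube rework moves state that previously lived in function-local statics (phase, led, old_scr2) into the device structures. A toy illustration, with invented Rtc/PC types, of why per-instance fields behave differently from statics when more than one instance exists:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    int8_t phase;       /* was: static int phase; */
    uint8_t command;
} Rtc;

typedef struct {
    uint32_t scr2;
    uint32_t old_scr2;  /* was: static uint8_t old_scr2; */
    uint32_t led;       /* was: static int led; */
    Rtc rtc;
} PC;

static void scr2_write(PC *s, uint32_t val)
{
    s->scr2 = val;
    if (s->scr2 & 1) {
        s->led++;       /* per-instance counter, not shared between devices */
    }
    s->old_scr2 = s->scr2;
}

int main(void)
{
    PC a = { .rtc = { .phase = -1 } };
    PC b = { .rtc = { .phase = -1 } };

    scr2_write(&a, 1);
    scr2_write(&a, 1);
    scr2_write(&b, 0);
    printf("a.led=%u b.led=%u\n", (unsigned)a.led, (unsigned)b.led);
    return 0;
}
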
rtc->phase); */ + if (rtc->phase == -1) { + rtc->phase = 0; } /* If we are in going down clock... do something */ if (((old_scr2 & SCR2_RTCLK) != (scr2_2 & SCR2_RTCLK)) && ((scr2_2 & SCR2_RTCLK) == 0)) { - if (phase < 8) { + if (rtc->phase < 8) { rtc->command = (rtc->command << 1) | ((scr2_2 & SCR2_RTDATA) ? 1 : 0); } - if (phase >= 8 && phase < 16) { + if (rtc->phase >= 8 && rtc->phase < 16) { rtc->value = (rtc->value << 1) | ((scr2_2 & SCR2_RTDATA) ? 1 : 0); /* if we read RAM register, output RT_DATA bit */ if (rtc->command <= 0x1F) { scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->ram[rtc->command] & (0x80 >> (phase - 8))) { + if (rtc->ram[rtc->command] & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } @@ -174,7 +180,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) if (rtc->command == 0x30) { scr2_2 = scr2_2 & (~SCR2_RTDATA); /* for now status = 0x98 (new rtc + FTU) */ - if (rtc->status & (0x80 >> (phase - 8))) { + if (rtc->status & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } @@ -184,7 +190,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) /* read the status 0x31 */ if (rtc->command == 0x31) { scr2_2 = scr2_2 & (~SCR2_RTDATA); - if (rtc->control & (0x80 >> (phase - 8))) { + if (rtc->control & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } rtc->retval = (rtc->retval << 1) | @@ -220,7 +226,7 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } - if (ret & (0x80 >> (phase - 8))) { + if (ret & (0x80 >> (rtc->phase - 8))) { scr2_2 |= SCR2_RTDATA; } rtc->retval = (rtc->retval << 1) | @@ -229,8 +235,8 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } - phase++; - if (phase == 16) { + rtc->phase++; + if (rtc->phase == 16) { if (rtc->command >= 0x80 && rtc->command <= 0x9F) { rtc->ram[rtc->command - 0x80] = rtc->value; } @@ -246,233 +252,172 @@ static void nextscr2_write(NeXTPC *s, uint32_t val, int size) } } else { /* else end or abort */ - phase = -1; + rtc->phase = -1; rtc->command = 0; rtc->value = 0; } - s->scr2 = val & 0xFFFF00FF; - s->scr2 |= scr2_2 << 8; - old_scr2 = scr2_2; -} -static uint32_t mmio_readb(NeXTPC *s, hwaddr addr) -{ - switch (addr) { - case 0xc000: - return (s->scr1 >> 24) & 0xFF; - case 0xc001: - return (s->scr1 >> 16) & 0xFF; - case 0xc002: - return (s->scr1 >> 8) & 0xFF; - case 0xc003: - return (s->scr1 >> 0) & 0xFF; - - case 0xd000: - return (s->scr2 >> 24) & 0xFF; - case 0xd001: - return (s->scr2 >> 16) & 0xFF; - case 0xd002: - return (s->scr2 >> 8) & 0xFF; - case 0xd003: - return (s->scr2 >> 0) & 0xFF; - case 0x14020: - DPRINTF("MMIO Read 0x4020\n"); - return 0x7f; - - default: - DPRINTF("MMIO Read B @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } + s->scr2 = deposit32(s->scr2, 8, 8, scr2_2); } -static uint32_t mmio_readw(NeXTPC *s, hwaddr addr) +static uint64_t next_mmio_read(void *opaque, hwaddr addr, unsigned size) { - switch (addr) { - default: - DPRINTF("MMIO Read W @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } -} + NeXTPC *s = NEXT_PC(opaque); + uint64_t val; -static uint32_t mmio_readl(NeXTPC *s, hwaddr addr) -{ switch (addr) { case 0x7000: /* DPRINTF("Read INT status: %x\n", s->int_status); */ - return s->int_status; + val = s->int_status; + break; case 0x7800: DPRINTF("MMIO Read INT mask: %x\n", s->int_mask); - return s->int_mask; - - case 0xc000: - return s->scr1; + val = s->int_mask; + break; - case 0xd000: - return s->scr2; + case 0xc000 ... 
0xc003: + val = extract32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + size << 3); + break; - default: - DPRINTF("MMIO Read L @ %"HWADDR_PRIx"\n", addr); - return 0x0; - } -} + case 0xd000 ... 0xd003: + val = extract32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + size << 3); + break; -static void mmio_writeb(NeXTPC *s, hwaddr addr, uint32_t val) -{ - switch (addr) { - case 0xd003: - nextscr2_write(s, val, 1); + case 0x14020: + val = 0x7f; break; + default: - DPRINTF("MMIO Write B @ %x with %x\n", (unsigned int)addr, val); + val = 0; + DPRINTF("MMIO Read @ 0x%"HWADDR_PRIx" size %d\n", addr, size); + break; } + return val; } -static void mmio_writew(NeXTPC *s, hwaddr addr, uint32_t val) +static void next_mmio_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { - DPRINTF("MMIO Write W\n"); -} + NeXTPC *s = NEXT_PC(opaque); -static void mmio_writel(NeXTPC *s, hwaddr addr, uint32_t val) -{ switch (addr) { case 0x7000: - DPRINTF("INT Status old: %x new: %x\n", s->int_status, val); + DPRINTF("INT Status old: %x new: %x\n", s->int_status, + (unsigned int)val); s->int_status = val; break; + case 0x7800: - DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, val); + DPRINTF("INT Mask old: %x new: %x\n", s->int_mask, (unsigned int)val); s->int_mask = val; break; - case 0xc000: - DPRINTF("SCR1 Write: %x\n", val); - break; - case 0xd000: - nextscr2_write(s, val, 4); - break; - - default: - DPRINTF("MMIO Write l @ %x with %x\n", (unsigned int)addr, val); - } -} -static uint64_t mmio_readfn(void *opaque, hwaddr addr, unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - return mmio_readb(s, addr); - case 2: - return mmio_readw(s, addr); - case 4: - return mmio_readl(s, addr); - default: - g_assert_not_reached(); - } -} - -static void mmio_writefn(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - mmio_writeb(s, addr, value); - break; - case 2: - mmio_writew(s, addr, value); + case 0xc000 ... 0xc003: + DPRINTF("SCR1 Write: %x\n", (unsigned int)val); + s->scr1 = deposit32(s->scr1, (4 - (addr - 0xc000) - size) << 3, + size << 3, val); break; - case 4: - mmio_writel(s, addr, value); + + case 0xd000 ... 
0xd003: + s->scr2 = deposit32(s->scr2, (4 - (addr - 0xd000) - size) << 3, + size << 3, val); + next_scr2_led_update(s); + next_scr2_rtc_update(s); + s->old_scr2 = s->scr2; break; + default: - g_assert_not_reached(); + DPRINTF("MMIO Write @ 0x%"HWADDR_PRIx " with 0x%x size %u\n", addr, + (unsigned int)val, size); } } -static const MemoryRegionOps mmio_ops = { - .read = mmio_readfn, - .write = mmio_writefn, +static const MemoryRegionOps next_mmio_ops = { + .read = next_mmio_read, + .write = next_mmio_write, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; -static uint32_t scr_readb(NeXTPC *s, hwaddr addr) +#define SCSICSR_ENABLE 0x01 +#define SCSICSR_RESET 0x02 /* reset scsi dma */ +#define SCSICSR_FIFOFL 0x04 +#define SCSICSR_DMADIR 0x08 /* if set, scsi to mem */ +#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ +#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ + +static uint64_t next_scr_readfn(void *opaque, hwaddr addr, unsigned size) { + NeXTPC *s = NEXT_PC(opaque); + uint64_t val; + switch (addr) { case 0x14108: DPRINTF("FD read @ %x\n", (unsigned int)addr); - return 0x40 | 0x04 | 0x2 | 0x1; + val = 0x40 | 0x04 | 0x2 | 0x1; + break; + case 0x14020: DPRINTF("SCSI 4020 STATUS READ %X\n", s->scsi_csr_1); - return s->scsi_csr_1; + val = s->scsi_csr_1; + break; case 0x14021: DPRINTF("SCSI 4021 STATUS READ %X\n", s->scsi_csr_2); - return 0x40; + val = 0x40; + break; /* * These 4 registers are the hardware timer, not sure which register - * is the latch instead of data, but no problems so far + * is the latch instead of data, but no problems so far. + * + * Hack: We need to have the LSB change consistently to make it work */ - case 0x1a000: - return 0xff & (clock() >> 24); - case 0x1a001: - return 0xff & (clock() >> 16); - case 0x1a002: - return 0xff & (clock() >> 8); - case 0x1a003: - /* Hack: We need to have this change consistently to make it work */ - return 0xFF & clock(); + case 0x1a000 ... 
0x1a003: + val = extract32(clock(), (4 - (addr - 0x1a000) - size) << 3, + size << 3); + break; + + /* For now return dummy byte to allow the Ethernet test to timeout */ + case 0x6000: + val = 0xff; + break; default: - DPRINTF("BMAP Read B @ %x\n", (unsigned int)addr); - return 0; + DPRINTF("BMAP Read @ 0x%x size %u\n", (unsigned int)addr, size); + val = 0; + break; } -} -static uint32_t scr_readw(NeXTPC *s, hwaddr addr) -{ - DPRINTF("BMAP Read W @ %x\n", (unsigned int)addr); - return 0; + return val; } -static uint32_t scr_readl(NeXTPC *s, hwaddr addr) +static void next_scr_writefn(void *opaque, hwaddr addr, uint64_t val, + unsigned size) { - DPRINTF("BMAP Read L @ %x\n", (unsigned int)addr); - return 0; -} - -#define SCSICSR_ENABLE 0x01 -#define SCSICSR_RESET 0x02 /* reset scsi dma */ -#define SCSICSR_FIFOFL 0x04 -#define SCSICSR_DMADIR 0x08 /* if set, scsi to mem */ -#define SCSICSR_CPUDMA 0x10 /* if set, dma enabled */ -#define SCSICSR_INTMASK 0x20 /* if set, interrupt enabled */ + NeXTPC *s = NEXT_PC(opaque); -static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) -{ switch (addr) { case 0x14108: DPRINTF("FDCSR Write: %x\n", value); - - if (value == 0x0) { + if (val == 0x0) { /* qemu_irq_raise(s->fd_irq[0]); */ } break; + case 0x14020: /* SCSI Control Register */ - if (value & SCSICSR_FIFOFL) { + if (val & SCSICSR_FIFOFL) { DPRINTF("SCSICSR FIFO Flush\n"); /* will have to add another irq to the esp if this is needed */ /* esp_puflush_fifo(esp_g); */ - qemu_irq_pulse(s->scsi_dma); } - if (value & SCSICSR_ENABLE) { + if (val & SCSICSR_ENABLE) { DPRINTF("SCSICSR Enable\n"); /* * qemu_irq_raise(s->scsi_dma); @@ -486,17 +431,17 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) * s->scsi_csr_1 &= ~SCSICSR_ENABLE; */ - if (value & SCSICSR_RESET) { + if (val & SCSICSR_RESET) { DPRINTF("SCSICSR Reset\n"); /* I think this should set DMADIR. 
CPUDMA and INTMASK to 0 */ qemu_irq_raise(s->scsi_reset); s->scsi_csr_1 &= ~(SCSICSR_INTMASK | 0x80 | 0x1); qemu_irq_lower(s->scsi_reset); } - if (value & SCSICSR_DMADIR) { + if (val & SCSICSR_DMADIR) { DPRINTF("SCSICSR DMAdir\n"); } - if (value & SCSICSR_CPUDMA) { + if (val & SCSICSR_CPUDMA) { DPRINTF("SCSICSR CPUDMA\n"); /* qemu_irq_raise(s->scsi_dma); */ s->int_status |= 0x4000000; @@ -505,11 +450,11 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) s->int_status &= ~(0x4000000); /* qemu_irq_lower(s->scsi_dma); */ } - if (value & SCSICSR_INTMASK) { + if (val & SCSICSR_INTMASK) { DPRINTF("SCSICSR INTMASK\n"); /* * int_mask &= ~0x1000; - * s->scsi_csr_1 |= value; + * s->scsi_csr_1 |= val; * s->scsi_csr_1 &= ~SCSICSR_INTMASK; * if (s->scsi_queued) { * s->scsi_queued = 0; @@ -519,72 +464,28 @@ static void scr_writeb(NeXTPC *s, hwaddr addr, uint32_t value) } else { /* int_mask |= 0x1000; */ } - if (value & 0x80) { + if (val & 0x80) { /* int_mask |= 0x1000; */ /* s->scsi_csr_1 |= 0x80; */ } - DPRINTF("SCSICSR Write: %x\n", value); - /* s->scsi_csr_1 = value; */ - return; + DPRINTF("SCSICSR Write: %x\n", val); + /* s->scsi_csr_1 = val; */ + break; + /* Hardware timer latch - not implemented yet */ case 0x1a000: default: - DPRINTF("BMAP Write B @ %x with %x\n", (unsigned int)addr, value); - } -} - -static void scr_writew(NeXTPC *s, hwaddr addr, uint32_t value) -{ - DPRINTF("BMAP Write W @ %x with %x\n", (unsigned int)addr, value); -} - -static void scr_writel(NeXTPC *s, hwaddr addr, uint32_t value) -{ - DPRINTF("BMAP Write L @ %x with %x\n", (unsigned int)addr, value); -} - -static uint64_t scr_readfn(void *opaque, hwaddr addr, unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - return scr_readb(s, addr); - case 2: - return scr_readw(s, addr); - case 4: - return scr_readl(s, addr); - default: - g_assert_not_reached(); - } -} - -static void scr_writefn(void *opaque, hwaddr addr, uint64_t value, - unsigned size) -{ - NeXTPC *s = NEXT_PC(opaque); - - switch (size) { - case 1: - scr_writeb(s, addr, value); - break; - case 2: - scr_writew(s, addr, value); - break; - case 4: - scr_writel(s, addr, value); - break; - default: - g_assert_not_reached(); + DPRINTF("BMAP Write @ 0x%x with 0x%x size %u\n", (unsigned int)addr, + val, size); } } -static const MemoryRegionOps scr_ops = { - .read = scr_readfn, - .write = scr_writefn, +static const MemoryRegionOps next_scr_ops = { + .read = next_scr_readfn, + .write = next_scr_writefn, .valid.min_access_size = 1, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; #define NEXTDMA_SCSI(x) (0x10 + x) @@ -599,59 +500,63 @@ static const MemoryRegionOps scr_ops = { #define NEXTDMA_NEXT_INIT 0x4200 #define NEXTDMA_SIZE 0x4204 -static void dma_writel(void *opaque, hwaddr addr, uint64_t value, - unsigned int size) +static void next_dma_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) { NeXTState *next_state = NEXT_MACHINE(opaque); switch (addr) { case NEXTDMA_ENRX(NEXTDMA_CSR): - if (value & DMA_DEV2M) { + if (val & DMA_DEV2M) { next_state->dma[NEXTDMA_ENRX].csr |= DMA_DEV2M; } - if (value & DMA_SETENABLE) { + if (val & DMA_SETENABLE) { /* DPRINTF("SCSI DMA ENABLE\n"); */ next_state->dma[NEXTDMA_ENRX].csr |= DMA_ENABLE; } - if (value & DMA_SETSUPDATE) { + if (val & DMA_SETSUPDATE) { next_state->dma[NEXTDMA_ENRX].csr |= DMA_SUPDATE; } - if (value & DMA_CLRCOMPLETE) { + if (val & DMA_CLRCOMPLETE) { next_state->dma[NEXTDMA_ENRX].csr &= ~DMA_COMPLETE; } - if 
(value & DMA_RESET) { + if (val & DMA_RESET) { next_state->dma[NEXTDMA_ENRX].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | DMA_ENABLE | DMA_DEV2M); } /* DPRINTF("RXCSR \tWrite: %x\n",value); */ break; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): - next_state->dma[NEXTDMA_ENRX].next_initbuf = value; + next_state->dma[NEXTDMA_ENRX].next_initbuf = val; break; + case NEXTDMA_ENRX(NEXTDMA_NEXT): - next_state->dma[NEXTDMA_ENRX].next = value; + next_state->dma[NEXTDMA_ENRX].next = val; break; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): - next_state->dma[NEXTDMA_ENRX].limit = value; + next_state->dma[NEXTDMA_ENRX].limit = val; break; + case NEXTDMA_SCSI(NEXTDMA_CSR): - if (value & DMA_DEV2M) { + if (val & DMA_DEV2M) { next_state->dma[NEXTDMA_SCSI].csr |= DMA_DEV2M; } - if (value & DMA_SETENABLE) { + if (val & DMA_SETENABLE) { /* DPRINTF("SCSI DMA ENABLE\n"); */ next_state->dma[NEXTDMA_SCSI].csr |= DMA_ENABLE; } - if (value & DMA_SETSUPDATE) { + if (val & DMA_SETSUPDATE) { next_state->dma[NEXTDMA_SCSI].csr |= DMA_SUPDATE; } - if (value & DMA_CLRCOMPLETE) { + if (val & DMA_CLRCOMPLETE) { next_state->dma[NEXTDMA_SCSI].csr &= ~DMA_COMPLETE; } - if (value & DMA_RESET) { + if (val & DMA_RESET) { next_state->dma[NEXTDMA_SCSI].csr &= ~(DMA_COMPLETE | DMA_SUPDATE | DMA_ENABLE | DMA_DEV2M); /* DPRINTF("SCSI DMA RESET\n"); */ @@ -660,23 +565,23 @@ static void dma_writel(void *opaque, hwaddr addr, uint64_t value, break; case NEXTDMA_SCSI(NEXTDMA_NEXT): - next_state->dma[NEXTDMA_SCSI].next = value; + next_state->dma[NEXTDMA_SCSI].next = val; break; case NEXTDMA_SCSI(NEXTDMA_LIMIT): - next_state->dma[NEXTDMA_SCSI].limit = value; + next_state->dma[NEXTDMA_SCSI].limit = val; break; case NEXTDMA_SCSI(NEXTDMA_START): - next_state->dma[NEXTDMA_SCSI].start = value; + next_state->dma[NEXTDMA_SCSI].start = val; break; case NEXTDMA_SCSI(NEXTDMA_STOP): - next_state->dma[NEXTDMA_SCSI].stop = value; + next_state->dma[NEXTDMA_SCSI].stop = val; break; case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): - next_state->dma[NEXTDMA_SCSI].next_initbuf = value; + next_state->dma[NEXTDMA_SCSI].next_initbuf = val; break; default: @@ -684,52 +589,73 @@ static void dma_writel(void *opaque, hwaddr addr, uint64_t value, } } -static uint64_t dma_readl(void *opaque, hwaddr addr, unsigned int size) +static uint64_t next_dma_read(void *opaque, hwaddr addr, unsigned int size) { NeXTState *next_state = NEXT_MACHINE(opaque); + uint64_t val; switch (addr) { case NEXTDMA_SCSI(NEXTDMA_CSR): DPRINTF("SCSI DMA CSR READ\n"); - return next_state->dma[NEXTDMA_SCSI].csr; + val = next_state->dma[NEXTDMA_SCSI].csr; + break; + case NEXTDMA_ENRX(NEXTDMA_CSR): - return next_state->dma[NEXTDMA_ENRX].csr; + val = next_state->dma[NEXTDMA_ENRX].csr; + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT_INIT): - return next_state->dma[NEXTDMA_ENRX].next_initbuf; + val = next_state->dma[NEXTDMA_ENRX].next_initbuf; + break; + case NEXTDMA_ENRX(NEXTDMA_NEXT): - return next_state->dma[NEXTDMA_ENRX].next; + val = next_state->dma[NEXTDMA_ENRX].next; + break; + case NEXTDMA_ENRX(NEXTDMA_LIMIT): - return next_state->dma[NEXTDMA_ENRX].limit; + val = next_state->dma[NEXTDMA_ENRX].limit; + break; case NEXTDMA_SCSI(NEXTDMA_NEXT): - return next_state->dma[NEXTDMA_SCSI].next; + val = next_state->dma[NEXTDMA_SCSI].next; + break; + case NEXTDMA_SCSI(NEXTDMA_NEXT_INIT): - return next_state->dma[NEXTDMA_SCSI].next_initbuf; + val = next_state->dma[NEXTDMA_SCSI].next_initbuf; + break; + case NEXTDMA_SCSI(NEXTDMA_LIMIT): - return next_state->dma[NEXTDMA_SCSI].limit; + val = next_state->dma[NEXTDMA_SCSI].limit; + break; + case 
NEXTDMA_SCSI(NEXTDMA_START): - return next_state->dma[NEXTDMA_SCSI].start; + val = next_state->dma[NEXTDMA_SCSI].start; + break; + case NEXTDMA_SCSI(NEXTDMA_STOP): - return next_state->dma[NEXTDMA_SCSI].stop; + val = next_state->dma[NEXTDMA_SCSI].stop; + break; default: DPRINTF("DMA read @ %x\n", (unsigned int)addr); - return 0; + val = 0; } /* * once the csr's are done, subtract 0x3FEC from the addr, and that will * normalize the upper registers */ + + return val; } -static const MemoryRegionOps dma_ops = { - .read = dma_readl, - .write = dma_writel, +static const MemoryRegionOps next_dma_ops = { + .read = next_dma_read, + .write = next_dma_write, .impl.min_access_size = 4, .valid.min_access_size = 4, .valid.max_access_size = 4, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_BIG_ENDIAN, }; static void next_irq(void *opaque, int number, int level) @@ -959,6 +885,7 @@ static void next_pc_reset(DeviceState *dev) /* 0x0000XX00 << vital bits */ s->scr1 = 0x00011102; s->scr2 = 0x00ff0c80; + s->old_scr2 = s->scr2; s->rtc.status = 0x90; @@ -973,9 +900,9 @@ static void next_pc_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, next_irq, NEXT_NUM_IRQS); - memory_region_init_io(&s->mmiomem, OBJECT(s), &mmio_ops, s, - "next.mmio", 0xD0000); - memory_region_init_io(&s->scrmem, OBJECT(s), &scr_ops, s, + memory_region_init_io(&s->mmiomem, OBJECT(s), &next_mmio_ops, s, + "next.mmio", 0xd0000); + memory_region_init_io(&s->scrmem, OBJECT(s), &next_scr_ops, s, "next.scr", 0x20000); sysbus_init_mmio(sbd, &s->mmiomem); sysbus_init_mmio(sbd, &s->scrmem); @@ -994,9 +921,10 @@ static Property next_pc_properties[] = { static const VMStateDescription next_rtc_vmstate = { .name = "next-rtc", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_INT8(phase, NextRtc), VMSTATE_UINT8_ARRAY(ram, NextRtc, 32), VMSTATE_UINT8(command, NextRtc), VMSTATE_UINT8(value, NextRtc), @@ -1009,13 +937,15 @@ static const VMStateDescription next_rtc_vmstate = { static const VMStateDescription next_pc_vmstate = { .name = "next-pc", - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { VMSTATE_UINT32(scr1, NeXTPC), VMSTATE_UINT32(scr2, NeXTPC), + VMSTATE_UINT32(old_scr2, NeXTPC), VMSTATE_UINT32(int_mask, NeXTPC), VMSTATE_UINT32(int_status, NeXTPC), + VMSTATE_UINT32(led, NeXTPC), VMSTATE_UINT8(scsi_csr_1, NeXTPC), VMSTATE_UINT8(scsi_csr_2, NeXTPC), VMSTATE_STRUCT(rtc, NeXTPC, 0, next_rtc_vmstate, NextRtc), @@ -1043,13 +973,9 @@ static const TypeInfo next_pc_info = { static void next_cube_init(MachineState *machine) { + NeXTState *m = NEXT_MACHINE(machine); M68kCPU *cpu; CPUM68KState *env; - MemoryRegion *rom = g_new(MemoryRegion, 1); - MemoryRegion *rom2 = g_new(MemoryRegion, 1); - MemoryRegion *dmamem = g_new(MemoryRegion, 1); - MemoryRegion *bmapm1 = g_new(MemoryRegion, 1); - MemoryRegion *bmapm2 = g_new(MemoryRegion, 1); MemoryRegion *sysmem = get_system_memory(); const char *bios_name = machine->firmware ?: ROM_FILE; DeviceState *pcdev; @@ -1084,21 +1010,23 @@ static void next_cube_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(pcdev), 1, 0x02100000); /* BMAP memory */ - memory_region_init_ram_flags_nomigrate(bmapm1, NULL, "next.bmapmem", 64, - RAM_SHARED, &error_fatal); - memory_region_add_subregion(sysmem, 0x020c0000, bmapm1); + 
memory_region_init_ram_flags_nomigrate(&m->bmapm1, NULL, "next.bmapmem", + 64, RAM_SHARED, &error_fatal); + memory_region_add_subregion(sysmem, 0x020c0000, &m->bmapm1); /* The Rev_2.5_v66.bin firmware accesses it at 0x820c0020, too */ - memory_region_init_alias(bmapm2, NULL, "next.bmapmem2", bmapm1, 0x0, 64); - memory_region_add_subregion(sysmem, 0x820c0000, bmapm2); + memory_region_init_alias(&m->bmapm2, NULL, "next.bmapmem2", &m->bmapm1, + 0x0, 64); + memory_region_add_subregion(sysmem, 0x820c0000, &m->bmapm2); /* KBD */ sysbus_create_simple(TYPE_NEXTKBD, 0x0200e000, NULL); /* Load ROM here */ - memory_region_init_rom(rom, NULL, "next.rom", 0x20000, &error_fatal); - memory_region_add_subregion(sysmem, 0x01000000, rom); - memory_region_init_alias(rom2, NULL, "next.rom2", rom, 0x0, 0x20000); - memory_region_add_subregion(sysmem, 0x0, rom2); + memory_region_init_rom(&m->rom, NULL, "next.rom", 0x20000, &error_fatal); + memory_region_add_subregion(sysmem, 0x01000000, &m->rom); + memory_region_init_alias(&m->rom2, NULL, "next.rom2", &m->rom, 0x0, + 0x20000); + memory_region_add_subregion(sysmem, 0x0, &m->rom2); if (load_image_targphys(bios_name, 0x01000000, 0x20000) < 8) { if (!qtest_enabled()) { error_report("Failed to load firmware '%s'.", bios_name); @@ -1125,8 +1053,9 @@ static void next_cube_init(MachineState *machine) next_scsi_init(pcdev, cpu); /* DMA */ - memory_region_init_io(dmamem, NULL, &dma_ops, machine, "next.dma", 0x5000); - memory_region_add_subregion(sysmem, 0x02000000, dmamem); + memory_region_init_io(&m->dmamem, NULL, &next_dma_ops, machine, + "next.dma", 0x5000); + memory_region_add_subregion(sysmem, 0x02000000, &m->dmamem); } static void next_machine_class_init(ObjectClass *oc, void *data) diff --git a/hw/m68k/q800-glue.c b/hw/m68k/q800-glue.c index f413b1599a2c7494f22a21667be490fc9af21f3c..b5a7713863fb6058b43032a117d0bae16206740e 100644 --- a/hw/m68k/q800-glue.c +++ b/hw/m68k/q800-glue.c @@ -189,7 +189,7 @@ static const VMStateDescription vmstate_glue = { .name = "q800-glue", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(ipr, GLUEState), VMSTATE_UINT8(auxmode, GLUEState), VMSTATE_TIMER_PTR(nmi_release, GLUEState), diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index 83d1571d02fa62f470c08d9e415cf194f983f23e..b80a3b6d5fe641c8e74bda8238ac36ccd74bd23b 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -253,7 +253,6 @@ static void q800_machine_init(MachineState *machine) int bios_size; ram_addr_t initrd_base; int32_t initrd_size; - MemoryRegion *dp8393x_prom = g_new(MemoryRegion, 1); uint8_t *prom; int i, checksum; MacFbMode *macfb_mode; @@ -406,13 +405,13 @@ static void q800_machine_init(MachineState *machine) sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(DEVICE(&m->glue), GLUE_IRQ_IN_SONIC)); - memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", + memory_region_init_rom(&m->dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); memory_region_add_subregion(get_system_memory(), SONIC_PROM_BASE, - dp8393x_prom); + &m->dp8393x_prom); /* Add MAC address with valid checksum to PROM */ - prom = memory_region_get_ram_ptr(dp8393x_prom); + prom = memory_region_get_ram_ptr(&m->dp8393x_prom); checksum = 0; for (i = 0; i < 6; i++) { prom[i] = revbit8(nd_table[0].macaddr.a[i]); diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 2e49e262ee0e90e1d2c40a623be087153accae71..e2792ef46d93016f2b536a3047371c58524d8561 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -346,10 +346,17 @@ 
type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); +static void virt_machine_9_0_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE(9, 0, true) + static void virt_machine_8_2_options(MachineClass *mc) { + virt_machine_9_0_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_VIRT_MACHINE(8, 2, true) +DEFINE_VIRT_MACHINE(8, 2, false) static void virt_machine_8_1_options(MachineClass *mc) { diff --git a/hw/mem/meson.build b/hw/mem/meson.build index ec26ef5544376910e4a482d7529a365f319b3bb7..faee1fe93604a44944937b5ae13480c96cc955e8 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -5,7 +5,6 @@ mem_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_mc.c')) mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) mem_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_true: files('cxl_type3.c')) system_ss.add(when: 'CONFIG_CXL_MEM_DEVICE', if_false: files('cxl_type3_stubs.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('cxl_type3_stubs.c')) system_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) diff --git a/hw/mips/cps.c b/hw/mips/cps.c index b6612c1762ec1940509c50c3899ed8cf5cd2169b..4f12e23ab5be30b128fa68f2ab61dc7526bcac89 100644 --- a/hw/mips/cps.c +++ b/hw/mips/cps.c @@ -78,10 +78,9 @@ static void mips_cps_realize(DeviceState *dev, Error **errp) CPUMIPSState *env = &cpu->env; /* All VPs are halted on reset. Leave powering up to CPC. */ - if (!object_property_set_bool(OBJECT(cpu), "start-powered-off", true, - errp)) { - return; - } + object_property_set_bool(OBJECT(cpu), "start-powered-off", true, + &error_abort); + /* All cores use the same clock tree */ qdev_connect_clock_in(DEVICE(cpu), "clk-in", s->clock); diff --git a/hw/mips/jazz.c b/hw/mips/jazz.c index d33a76ad4d1d3453eb4ebea66683ed900f06d5f6..0d2348aa5aae81b165f581a66f8a7b52f296dc72 100644 --- a/hw/mips/jazz.c +++ b/hw/mips/jazz.c @@ -36,7 +36,6 @@ #include "hw/boards.h" #include "net/net.h" #include "hw/scsi/esp.h" -#include "hw/mips/bios.h" #include "hw/loader.h" #include "hw/rtc/mc146818rtc.h" #include "hw/timer/i8254.h" @@ -53,12 +52,19 @@ #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" #endif /* CONFIG_TCG */ +#include "cpu.h" enum jazz_model_e { JAZZ_MAGNUM, JAZZ_PICA61, }; +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif + static void main_cpu_reset(void *opaque) { MIPSCPU *cpu = opaque; @@ -147,6 +153,8 @@ static void mips_jazz_init_net(NICInfo *nd, IOMMUMemoryRegion *rc4030_dma_mr, prom[7] = 0xff - checksum; } +#define BIOS_SIZE (4 * MiB) + #define MAGNUM_BIOS_SIZE_MAX 0x7e000 #define MAGNUM_BIOS_SIZE \ (BIOS_SIZE < MAGNUM_BIOS_SIZE_MAX ? 
BIOS_SIZE : MAGNUM_BIOS_SIZE_MAX) diff --git a/hw/mips/malta.c b/hw/mips/malta.c index 049de46a9e1039fdc33550142e5e8fdf19224aef..d22bb1edefc8e3dc3eadf85aaf18e14462285504 100644 --- a/hw/mips/malta.c +++ b/hw/mips/malta.c @@ -40,7 +40,6 @@ #include "hw/pci/pci.h" #include "hw/pci/pci_bus.h" #include "qemu/log.h" -#include "hw/mips/bios.h" #include "hw/ide/pci.h" #include "hw/irq.h" #include "hw/loader.h" @@ -59,6 +58,7 @@ #include "hw/qdev-clock.h" #include "target/mips/internal.h" #include "trace.h" +#include "cpu.h" #define ENVP_PADDR 0x2000 #define ENVP_VADDR cpu_mips_phys_to_kseg0(NULL, ENVP_PADDR) @@ -71,6 +71,7 @@ #define RESET_ADDRESS 0x1fc00000ULL #define FLASH_SIZE 0x400000 +#define BIOS_SIZE (4 * MiB) #define PIIX4_PCI_DEVFN PCI_DEVFN(10, 0) @@ -91,6 +92,12 @@ typedef struct { bool display_inited; } MaltaFPGAState; +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif + #define TYPE_MIPS_MALTA "mips-malta" OBJECT_DECLARE_SIMPLE_TYPE(MaltaState, MIPS_MALTA) diff --git a/hw/mips/meson.build b/hw/mips/meson.build index 900613fc087f2814afa2240911aedb45b6d53b2e..f06d88f34307cc2425e5d9b4c275555f16306833 100644 --- a/hw/mips/meson.build +++ b/hw/mips/meson.build @@ -5,7 +5,7 @@ mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loon mips_ss.add(when: 'CONFIG_MALTA', if_true: files('malta.c')) mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel mips_ss.add(when: 'CONFIG_JAZZ', if_true: files('jazz.c')) mips_ss.add(when: 'CONFIG_MIPSSIM', if_true: files('mipssim.c')) mips_ss.add(when: 'CONFIG_FULOONG', if_true: files('fuloong2e.c')) diff --git a/hw/mips/mips_int.c b/hw/mips/mips_int.c index 6c32e466a3beed9583e019d1b75d4c13a51e52be..eef2fd2cd11bd0e75a6e876f0d6202243714afb3 100644 --- a/hw/mips/mips_int.c +++ b/hw/mips/mips_int.c @@ -36,7 +36,7 @@ static void cpu_mips_irq_request(void *opaque, int irq, int level) return; } - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (level) { env->CP0_Cause |= 1 << (irq + CP0Ca_IP); diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c index 4f743f37eb537c4e83231c9921d5c1d3e1279f14..01e323904d9b97b73249f071fbb8357f653d04d6 100644 --- a/hw/mips/mipssim.c +++ b/hw/mips/mipssim.c @@ -35,7 +35,6 @@ #include "net/net.h" #include "sysemu/sysemu.h" #include "hw/boards.h" -#include "hw/mips/bios.h" #include "hw/loader.h" #include "elf.h" #include "hw/sysbus.h" @@ -43,6 +42,15 @@ #include "qemu/error-report.h" #include "sysemu/qtest.h" #include "sysemu/reset.h" +#include "cpu.h" + +#define BIOS_SIZE (4 * MiB) + +#if TARGET_BIG_ENDIAN +#define BIOS_FILENAME "mips_bios.bin" +#else +#define BIOS_FILENAME "mipsel_bios.bin" +#endif static struct _loaderparams { int ram_size; diff --git a/hw/misc/a9scu.c b/hw/misc/a9scu.c index a375ebc9878deca095e2dc6f88304a1c2d8732bb..04225dfb78d689f7cb6ea2c4e8786b93a08e2e4b 100644 --- a/hw/misc/a9scu.c +++ b/hw/misc/a9scu.c @@ -116,7 +116,7 @@ static const VMStateDescription vmstate_a9_scu = { .name = "a9-scu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9SCUState), VMSTATE_UINT32(status, A9SCUState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/allwinner-a10-ccm.c b/hw/misc/allwinner-a10-ccm.c index 68146ee34015b6e4d16685145b3a7580f221cc8b..575b0189524a8ba23c4132a2b81c1448a37438f1 100644 --- a/hw/misc/allwinner-a10-ccm.c +++ b/hw/misc/allwinner-a10-ccm.c @@ -193,7 +193,7 @@ 
static const VMStateDescription allwinner_a10_ccm_vmstate = { .name = "allwinner-a10-ccm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwA10ClockCtlState, AW_A10_CCM_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-a10-dramc.c b/hw/misc/allwinner-a10-dramc.c index e118b0c2fd458c00052bb20276fd9d58e37afdca..a7c58fa6d060d427d1cf9e3012169aaf795f3ac4 100644 --- a/hw/misc/allwinner-a10-dramc.c +++ b/hw/misc/allwinner-a10-dramc.c @@ -147,7 +147,7 @@ static const VMStateDescription allwinner_a10_dramc_vmstate = { .name = "allwinner-a10-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwA10DramControllerState, AW_A10_DRAMC_REGS_NUM), VMSTATE_END_OF_LIST() diff --git a/hw/misc/allwinner-cpucfg.c b/hw/misc/allwinner-cpucfg.c index bbd33a7dac84e20e62b3424ad7c5e570a6ead217..31b97809695f605c3129600f523dcdda634ecffb 100644 --- a/hw/misc/allwinner-cpucfg.c +++ b/hw/misc/allwinner-cpucfg.c @@ -250,7 +250,7 @@ static const VMStateDescription allwinner_cpucfg_vmstate = { .name = "allwinner-cpucfg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(gen_ctrl, AwCpuCfgState), VMSTATE_UINT32(super_standby, AwCpuCfgState), VMSTATE_UINT32(entry_addr, AwCpuCfgState), diff --git a/hw/misc/allwinner-h3-ccu.c b/hw/misc/allwinner-h3-ccu.c index 18d1074545843ea23e584dddb2aa562336c7a041..cfc68522d333d3500e227ea8665ac55f1d860107 100644 --- a/hw/misc/allwinner-h3-ccu.c +++ b/hw/misc/allwinner-h3-ccu.c @@ -212,7 +212,7 @@ static const VMStateDescription allwinner_h3_ccu_vmstate = { .name = "allwinner-h3-ccu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwH3ClockCtlState, AW_H3_CCU_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-h3-dramc.c b/hw/misc/allwinner-h3-dramc.c index 1d37cf422cd21bac801dbb1a47c5448915db9d1e..e168ffe62330b2a532a141153ab0efe43e92265f 100644 --- a/hw/misc/allwinner-h3-dramc.c +++ b/hw/misc/allwinner-h3-dramc.c @@ -324,7 +324,7 @@ static const VMStateDescription allwinner_h3_dramc_vmstate = { .name = "allwinner-h3-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dramcom, AwH3DramCtlState, AW_H3_DRAMCOM_REGS_NUM), VMSTATE_UINT32_ARRAY(dramctl, AwH3DramCtlState, AW_H3_DRAMCTL_REGS_NUM), VMSTATE_UINT32_ARRAY(dramphy, AwH3DramCtlState, AW_H3_DRAMPHY_REGS_NUM), diff --git a/hw/misc/allwinner-h3-sysctrl.c b/hw/misc/allwinner-h3-sysctrl.c index 1d07efa880d1ac20add7f34f12979a5a70e08068..2d29be83e3ace577cab80ed704f04bf8e1a76977 100644 --- a/hw/misc/allwinner-h3-sysctrl.c +++ b/hw/misc/allwinner-h3-sysctrl.c @@ -110,7 +110,7 @@ static const VMStateDescription allwinner_h3_sysctrl_vmstate = { .name = "allwinner-h3-sysctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwH3SysCtrlState, AW_H3_SYSCTRL_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-r40-ccu.c b/hw/misc/allwinner-r40-ccu.c index d82fee12db6ba76e8504cc76d17ec5b5c84742e7..33baf4429ddf58d1d793a752dabdd7b48a044a6f 100644 --- a/hw/misc/allwinner-r40-ccu.c +++ b/hw/misc/allwinner-r40-ccu.c @@ -179,7 +179,7 @@ static const VMStateDescription allwinner_r40_ccu_vmstate = { .name 
= "allwinner-r40-ccu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwR40ClockCtlState, AW_R40_CCU_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/allwinner-r40-dramc.c b/hw/misc/allwinner-r40-dramc.c index 3d81ddb2e187f049f46905d9300bd0a87c8e3b78..75b0bef4fd916a7d446cc1858d8b5e6f83b693d7 100644 --- a/hw/misc/allwinner-r40-dramc.c +++ b/hw/misc/allwinner-r40-dramc.c @@ -474,7 +474,7 @@ static const VMStateDescription allwinner_r40_dramc_vmstate = { .name = "allwinner-r40-dramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(dramcom, AwR40DramCtlState, AW_R40_DRAMCOM_REGS_NUM), VMSTATE_UINT32_ARRAY(dramctl, AwR40DramCtlState, diff --git a/hw/misc/allwinner-sid.c b/hw/misc/allwinner-sid.c index 6d61f55b1d494fa599cc928610bd25c517fd4812..e5cd431743b86697c3051f3d27219d7f61a8e3b6 100644 --- a/hw/misc/allwinner-sid.c +++ b/hw/misc/allwinner-sid.c @@ -136,7 +136,7 @@ static const VMStateDescription allwinner_sid_vmstate = { .name = "allwinner-sid", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, AwSidState), VMSTATE_UINT32(rdkey, AwSidState), VMSTATE_UINT8_ARRAY_V(identifier.data, AwSidState, sizeof(QemuUUID), 1), diff --git a/hw/misc/allwinner-sramc.c b/hw/misc/allwinner-sramc.c index d76c24d081f3f2288d942c0a45af36a729eceab7..cf10ca8ffe872ad9eb5e3ef87207d70b55350edb 100644 --- a/hw/misc/allwinner-sramc.c +++ b/hw/misc/allwinner-sramc.c @@ -116,7 +116,7 @@ static const VMStateDescription allwinner_sramc_vmstate = { .name = "allwinner-sramc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sram_ver, AwSRAMCState), VMSTATE_UINT32(sram_soft_entry_reg0, AwSRAMCState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/arm_l2x0.c b/hw/misc/arm_l2x0.c index 75c3eb8982f7731d544a74bec9b8bda42bf706ff..b14d0a267670bc210f8befac884a4a21689c425d 100644 --- a/hw/misc/arm_l2x0.c +++ b/hw/misc/arm_l2x0.c @@ -49,7 +49,7 @@ static const VMStateDescription vmstate_l2x0 = { .name = "l2x0", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, L2x0State), VMSTATE_UINT32(aux_ctrl, L2x0State), VMSTATE_UINT32(data_ctrl, L2x0State), diff --git a/hw/misc/arm_sysctl.c b/hw/misc/arm_sysctl.c index 3e4f4b052443da258e138ef8028e914515f11048..5108f3eda921bdffc1aded2543e499de48d33400 100644 --- a/hw/misc/arm_sysctl.c +++ b/hw/misc/arm_sysctl.c @@ -57,7 +57,7 @@ static const VMStateDescription vmstate_arm_sysctl = { .name = "realview_sysctl", .version_id = 4, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(leds, arm_sysctl_state), VMSTATE_UINT16(lockval, arm_sysctl_state), VMSTATE_UINT32(cfgdata1, arm_sysctl_state), diff --git a/hw/misc/armsse-cpu-pwrctrl.c b/hw/misc/armsse-cpu-pwrctrl.c index 42fc38879f2e53b1978e3dc35df7393e3ad76247..bfc51d175cbc4c1776f0e5082b774e36bc5365cd 100644 --- a/hw/misc/armsse-cpu-pwrctrl.c +++ b/hw/misc/armsse-cpu-pwrctrl.c @@ -109,7 +109,7 @@ static const VMStateDescription pwrctrl_vmstate = { .name = "armsse-cpu-pwrctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cpupwrcfg, ARMSSECPUPwrCtrl), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/armsse-mhu.c 
b/hw/misc/armsse-mhu.c index 0be7f0fc874dc4246403ea97de071f3cb1a2478b..55625b2cca83e61eaecafe3083722a639bef35ae 100644 --- a/hw/misc/armsse-mhu.c +++ b/hw/misc/armsse-mhu.c @@ -157,7 +157,7 @@ static const VMStateDescription armsse_mhu_vmstate = { .name = "armsse-mhu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cpu0intr, ARMSSEMHU), VMSTATE_UINT32(cpu1intr, ARMSSEMHU), VMSTATE_END_OF_LIST() diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index b07506ec04efe3dfe3faef16a5fc23e58dd40802..c06c04ddc66993c4a2aadc5460dc84f0e96972ee 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -433,7 +433,7 @@ static const VMStateDescription vmstate_aspeed_hace = { .name = TYPE_ASPEED_HACE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS), VMSTATE_UINT32(total_req_len, AspeedHACEState), VMSTATE_UINT32(iov_count, AspeedHACEState), diff --git a/hw/misc/aspeed_i3c.c b/hw/misc/aspeed_i3c.c index d1ff61767167f4a6e17bde04785607f7fac3efef..827c9e522d3f63e0ebd93850aefd8e090b55ea39 100644 --- a/hw/misc/aspeed_i3c.c +++ b/hw/misc/aspeed_i3c.c @@ -168,7 +168,7 @@ static const VMStateDescription aspeed_i3c_device_vmstate = { .name = TYPE_ASPEED_I3C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32_ARRAY(regs, AspeedI3CDevice, ASPEED_I3C_DEVICE_NR_REGS), VMSTATE_END_OF_LIST(), } @@ -349,7 +349,7 @@ static const VMStateDescription vmstate_aspeed_i3c = { .name = TYPE_ASPEED_I3C, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedI3CState, ASPEED_I3C_NR_REGS), VMSTATE_STRUCT_ARRAY(devices, AspeedI3CState, ASPEED_I3C_NR_DEVICES, 1, aspeed_i3c_device_vmstate, AspeedI3CDevice), diff --git a/hw/misc/aspeed_lpc.c b/hw/misc/aspeed_lpc.c index 2dddb27c35d0dd5204e5fe76dae12df770aec404..193f0dea5915832796d2da81be51c8b0a7532372 100644 --- a/hw/misc/aspeed_lpc.c +++ b/hw/misc/aspeed_lpc.c @@ -447,7 +447,7 @@ static const VMStateDescription vmstate_aspeed_lpc = { .name = TYPE_ASPEED_LPC, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedLPCState, ASPEED_LPC_NR_REGS), VMSTATE_UINT32(subdevice_irqs_pending, AspeedLPCState), VMSTATE_END_OF_LIST(), diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c index c6f328e3be2371fa381bb1c4d1f2f1292bb471cb..8bb1f90e4e74644d2055a51d1eb62ed14a57216d 100644 --- a/hw/misc/aspeed_sbc.c +++ b/hw/misc/aspeed_sbc.c @@ -130,7 +130,7 @@ static const VMStateDescription vmstate_aspeed_sbc = { .name = TYPE_ASPEED_SBC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSBCState, ASPEED_SBC_NR_REGS), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index 83353649064aecd5297fc4dee8212b1087373647..1ac04b6cb0207ad2db2df4da946c46dde695deff 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -531,7 +531,7 @@ static const VMStateDescription vmstate_aspeed_scu = { .name = "aspeed.scu", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSCUState, ASPEED_AST2600_SCU_NR_REGS), VMSTATE_END_OF_LIST() } diff --git 
a/hw/misc/aspeed_sdmc.c b/hw/misc/aspeed_sdmc.c index abb27279339371532012c98e05c0610268e39173..64cd1a81dcd2cd04f87ef1bb03e7ac8264a5974d 100644 --- a/hw/misc/aspeed_sdmc.c +++ b/hw/misc/aspeed_sdmc.c @@ -243,7 +243,7 @@ static const VMStateDescription vmstate_aspeed_sdmc = { .name = "aspeed.sdmc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSDMCState, ASPEED_SDMC_NR_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/aspeed_xdma.c b/hw/misc/aspeed_xdma.c index 1c21577c98c9abbe042b4fcb7489d31a4ec56605..76ab8467ddba5498d4404e3ca9b02cc943af29ec 100644 --- a/hw/misc/aspeed_xdma.c +++ b/hw/misc/aspeed_xdma.c @@ -144,7 +144,7 @@ static void aspeed_xdma_reset(DeviceState *dev) static const VMStateDescription aspeed_xdma_vmstate = { .name = TYPE_ASPEED_XDMA, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/misc/axp2xx.c b/hw/misc/axp2xx.c index 41538c1cd780429d01948734e2de6750fcbb7cbc..af646878cd2e8ad588739e1ac2185842335ed3bc 100644 --- a/hw/misc/axp2xx.c +++ b/hw/misc/axp2xx.c @@ -217,7 +217,7 @@ static int axp2xx_tx(I2CSlave *i2c, uint8_t data) static const VMStateDescription vmstate_axp2xx = { .name = TYPE_AXP2XX, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(regs, AXP2xxI2CState, NR_REGS), VMSTATE_UINT8(ptr, AXP2xxI2CState), VMSTATE_UINT8(count, AXP2xxI2CState), diff --git a/hw/misc/bcm2835_cprman.c b/hw/misc/bcm2835_cprman.c index 75e6c574d46dc87076a205cceb00c5bdd76618f1..91c8f7bd170fdcb68b42d847eab9e62940bd7b06 100644 --- a/hw/misc/bcm2835_cprman.c +++ b/hw/misc/bcm2835_cprman.c @@ -125,7 +125,7 @@ static const VMStateDescription pll_vmstate = { .name = TYPE_CPRMAN_PLL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(xosc_in, CprmanPllState), VMSTATE_END_OF_LIST() } @@ -229,7 +229,7 @@ static const VMStateDescription pll_channel_vmstate = { .name = TYPE_CPRMAN_PLL_CHANNEL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(pll_in, CprmanPllChannelState), VMSTATE_END_OF_LIST() } @@ -349,7 +349,7 @@ static const VMStateDescription clock_mux_vmstate = { .name = TYPE_CPRMAN_CLOCK_MUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_CLOCK(srcs, CprmanClockMuxState, CPRMAN_NUM_CLOCK_MUX_SRC), VMSTATE_END_OF_LIST() @@ -404,7 +404,7 @@ static const VMStateDescription dsi0hsck_mux_vmstate = { .name = TYPE_CPRMAN_DSI0HSCK_MUX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(plla_in, CprmanDsi0HsckMuxState), VMSTATE_CLOCK(plld_in, CprmanDsi0HsckMuxState), VMSTATE_END_OF_LIST() @@ -772,7 +772,7 @@ static const VMStateDescription cprman_vmstate = { .name = TYPE_BCM2835_CPRMAN, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, BCM2835CprmanState, CPRMAN_NUM_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/bcm2835_mbox.c b/hw/misc/bcm2835_mbox.c index 1e4e061bc1f77370f73dccd4e5a2b698c77f37f0..67bfc3bd71967201684ad330b378ffc8036eba02 100644 --- a/hw/misc/bcm2835_mbox.c +++ b/hw/misc/bcm2835_mbox.c @@ -257,7 +257,7 @@ static 
const VMStateDescription vmstate_bcm2835_mbox_box = { .name = TYPE_BCM2835_MBOX "_box", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, BCM2835Mbox, MBOX_SIZE), VMSTATE_UINT32(count, BCM2835Mbox), VMSTATE_UINT32(status, BCM2835Mbox), @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_bcm2835_mbox = { .name = TYPE_BCM2835_MBOX, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL_ARRAY(available, BCM2835MboxState, MBOX_CHAN_COUNT), VMSTATE_STRUCT_ARRAY(mbox, BCM2835MboxState, 2, 1, vmstate_bcm2835_mbox_box, BCM2835Mbox), diff --git a/hw/misc/bcm2835_mphi.c b/hw/misc/bcm2835_mphi.c index 0428e10ba5cc4fc3b9dcbf86a0a151d09db9330a..f1eeda2786291640d5fa116d98a65db7ebbad773 100644 --- a/hw/misc/bcm2835_mphi.c +++ b/hw/misc/bcm2835_mphi.c @@ -156,7 +156,7 @@ const VMStateDescription vmstate_mphi_state = { .name = "mphi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(outdda, BCM2835MphiState), VMSTATE_UINT32(outddb, BCM2835MphiState), VMSTATE_UINT32(ctrl, BCM2835MphiState), diff --git a/hw/misc/bcm2835_powermgt.c b/hw/misc/bcm2835_powermgt.c index 976f3d34e5e1ce474c31bf8fd7c329a118ac7411..1649da866899b075460f9fd5eb529422b40e53fb 100644 --- a/hw/misc/bcm2835_powermgt.c +++ b/hw/misc/bcm2835_powermgt.c @@ -109,7 +109,7 @@ static const VMStateDescription vmstate_bcm2835_powermgt = { .name = TYPE_BCM2835_POWERMGT, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rstc, BCM2835PowerMgtState), VMSTATE_UINT32(rsts, BCM2835PowerMgtState), VMSTATE_UINT32(wdog, BCM2835PowerMgtState), diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index ff55a4e2cd2f94aeedf65cba047cc7caac3bb5ae..5c48f8d7438897375254362b9c5612a90fd48f59 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -384,7 +384,7 @@ static const VMStateDescription vmstate_bcm2835_property = { .name = TYPE_BCM2835_PROPERTY, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_MACADDR(macaddr, BCM2835PropertyState), VMSTATE_UINT32(addr, BCM2835PropertyState), VMSTATE_BOOL(pending, BCM2835PropertyState), diff --git a/hw/misc/bcm2835_rng.c b/hw/misc/bcm2835_rng.c index b3c80cf1867ec28afbe7484aac93d5f0df240299..10e741b11d156b5d0ae1a670ce76577a6a14c9dc 100644 --- a/hw/misc/bcm2835_rng.c +++ b/hw/misc/bcm2835_rng.c @@ -99,7 +99,7 @@ static const VMStateDescription vmstate_bcm2835_rng = { .name = TYPE_BCM2835_RNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rng_ctrl, BCM2835RngState), VMSTATE_UINT32(rng_status, BCM2835RngState), VMSTATE_END_OF_LIST() diff --git a/hw/misc/bcm2835_thermal.c b/hw/misc/bcm2835_thermal.c index c6f3b1ad605992d229eb6094d27a7945fc8329ea..ee7816b8a5d4dd6d83d3df053f1887cf0a0a27f9 100644 --- a/hw/misc/bcm2835_thermal.c +++ b/hw/misc/bcm2835_thermal.c @@ -105,7 +105,7 @@ static const VMStateDescription bcm2835_thermal_vmstate = { .name = "bcm2835_thermal", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctl, Bcm2835ThermalState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/djmemc.c b/hw/misc/djmemc.c index 
fd02640838b8fcecb2c21b8609065d95355ff1de..9b69656c3a87d75f4e891bb59f141b9e956b1897 100644 --- a/hw/misc/djmemc.c +++ b/hw/misc/djmemc.c @@ -107,7 +107,7 @@ static const VMStateDescription vmstate_djmemc = { .name = "djMEMC", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, DJMEMCState, DJMEMC_NUM_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/eccmemctl.c b/hw/misc/eccmemctl.c index c65806e3d9a987a68e5a2a8d124e84ee5b2e188e..5a14a48999175239eb11361c2f6013cc09fc1122 100644 --- a/hw/misc/eccmemctl.c +++ b/hw/misc/eccmemctl.c @@ -272,7 +272,7 @@ static const VMStateDescription vmstate_ecc = { .name ="ECC", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ECCState, ECC_NREGS), VMSTATE_BUFFER(diag, ECCState), VMSTATE_UINT32(version, ECCState), diff --git a/hw/misc/edu.c b/hw/misc/edu.c index a1f8bc77e7701eb90abab3faef46164503e39560..2a976ca2b151839248126c5c6eb3833832c5bf22 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -115,7 +115,7 @@ static void edu_check_range(uint64_t addr, uint64_t size1, uint64_t start, uint64_t end2 = start + size2; if (within(addr, start, end2) && - end1 > addr && within(end1, start, end2)) { + end1 > addr && end1 <= end2) { return; } @@ -355,9 +355,9 @@ static void *edu_fact_thread(void *opaque) smp_mb__after_rmw(); if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) { - qemu_mutex_lock_iothread(); + bql_lock(); edu_raise_irq(edu, FACT_IRQ); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } diff --git a/hw/misc/exynos4210_clk.c b/hw/misc/exynos4210_clk.c index 58cec282f755e84244ea7f85c9fe8194f2c6848b..4566a426faacef00bc538bef648ff6e409b1b953 100644 --- a/hw/misc/exynos4210_clk.c +++ b/hw/misc/exynos4210_clk.c @@ -135,7 +135,7 @@ static const VMStateDescription exynos4210_clk_vmstate = { .name = TYPE_EXYNOS4210_CLK, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, Exynos4210ClkState, EXYNOS4210_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/exynos4210_pmu.c b/hw/misc/exynos4210_pmu.c index e24139c630b83543116dab41b81b1d95a163a31e..7e28e790d7cc2f8415c66627725d85e448502378 100644 --- a/hw/misc/exynos4210_pmu.c +++ b/hw/misc/exynos4210_pmu.c @@ -492,7 +492,7 @@ static const VMStateDescription exynos4210_pmu_vmstate = { .name = "exynos4210.pmu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, Exynos4210PmuState, PMU_NUM_OF_REGISTERS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/exynos4210_rng.c b/hw/misc/exynos4210_rng.c index 9214ec14cc0b3b27365a703dbc7a040c3bff10bb..0756bd320591dd5931a3f7a6dfc1b10f5aba04b3 100644 --- a/hw/misc/exynos4210_rng.c +++ b/hw/misc/exynos4210_rng.c @@ -243,7 +243,7 @@ static const VMStateDescription exynos4210_rng_vmstate = { .name = TYPE_EXYNOS4210_RNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(randr_value, Exynos4210RngState, EXYNOS4210_RNG_PRNG_NUM), VMSTATE_UINT32(seed_set, Exynos4210RngState), diff --git a/hw/misc/imx25_ccm.c b/hw/misc/imx25_ccm.c index ff996e2f2caff5fdeb0d0f19d298be0cb1d3b814..d888966014d5082c1de7ceae76d38e7287bc8005 100644 --- a/hw/misc/imx25_ccm.c +++ b/hw/misc/imx25_ccm.c @@ -101,7 +101,7 @@ static const VMStateDescription vmstate_imx25_ccm = { .name = TYPE_IMX25_CCM, 
.version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, IMX25CCMState, IMX25_CCM_MAX_REG), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/imx31_ccm.c b/hw/misc/imx31_ccm.c index ad30a4b2c0ccc15c8e6db50a6f030aa74dddcf9d..a9059bb1f7244950b8872d8f66517ce69c6a1183 100644 --- a/hw/misc/imx31_ccm.c +++ b/hw/misc/imx31_ccm.c @@ -98,7 +98,7 @@ static const VMStateDescription vmstate_imx31_ccm = { .name = TYPE_IMX31_CCM, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, IMX31CCMState, IMX31_CCM_MAX_REG), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/imx6_ccm.c b/hw/misc/imx6_ccm.c index 85af466c2beba49506afe0829c1fcacdb7323b75..56489d8b57b64336bc091c9cf2dc4b11414e7f18 100644 --- a/hw/misc/imx6_ccm.c +++ b/hw/misc/imx6_ccm.c @@ -235,7 +235,7 @@ static const VMStateDescription vmstate_imx6_ccm = { .name = TYPE_IMX6_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX6CCMState, CCM_MAX), VMSTATE_UINT32_ARRAY(analog, IMX6CCMState, CCM_ANALOG_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx6_src.c b/hw/misc/imx6_src.c index a9c64d06ebcfe10d5b8dfb3a1bdd85a493f391f2..0c6003559f57f0ea22d4fe3cc3d5619a5576e183 100644 --- a/hw/misc/imx6_src.c +++ b/hw/misc/imx6_src.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_imx6_src = { .name = TYPE_IMX6_SRC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMX6SRCState, SRC_MAX), VMSTATE_END_OF_LIST() }, @@ -131,7 +131,7 @@ static void imx6_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX6SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_SCR] = deposit32(s->regs[SRC_SCR], ri->reset_bit, 1, 0); DPRINTF("reg[%s] <= 0x%" PRIx32 "\n", diff --git a/hw/misc/imx6ul_ccm.c b/hw/misc/imx6ul_ccm.c index e01bb68ac7296a2426348a6550a053485d76bdd2..bbc0be9921157814ffc7bad32137502bcaea6f34 100644 --- a/hw/misc/imx6ul_ccm.c +++ b/hw/misc/imx6ul_ccm.c @@ -285,7 +285,7 @@ static const VMStateDescription vmstate_imx6ul_ccm = { .name = TYPE_IMX6UL_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX6ULCCMState, CCM_MAX), VMSTATE_UINT32_ARRAY(analog, IMX6ULCCMState, CCM_ANALOG_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx7_ccm.c b/hw/misc/imx7_ccm.c index 7539f7fb457047e7a11a3142289d8db10191befa..88354f020ee45c155a4bbf66cb74a29c2f334475 100644 --- a/hw/misc/imx7_ccm.c +++ b/hw/misc/imx7_ccm.c @@ -214,7 +214,7 @@ static const VMStateDescription vmstate_imx7_ccm = { .name = TYPE_IMX7_CCM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(ccm, IMX7CCMState, CCM_MAX), VMSTATE_END_OF_LIST() }, @@ -286,7 +286,7 @@ static const VMStateDescription vmstate_imx7_analog = { .name = TYPE_IMX7_ANALOG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(analog, IMX7AnalogState, ANALOG_MAX), VMSTATE_UINT32_ARRAY(pmu, IMX7AnalogState, PMU_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/misc/imx7_snvs.c b/hw/misc/imx7_snvs.c index 
a245f96cd4e2784308213b23fa4d8b4785644c48..edb2df215a67c8cbb4496223dec467be59ff54bb 100644 --- a/hw/misc/imx7_snvs.c +++ b/hw/misc/imx7_snvs.c @@ -13,28 +13,100 @@ */ #include "qemu/osdep.h" +#include "qemu/bitops.h" +#include "qemu/timer.h" +#include "migration/vmstate.h" #include "hw/misc/imx7_snvs.h" +#include "qemu/cutils.h" #include "qemu/module.h" +#include "sysemu/sysemu.h" +#include "sysemu/rtc.h" #include "sysemu/runstate.h" #include "trace.h" +#define RTC_FREQ 32768ULL + +static const VMStateDescription vmstate_imx7_snvs = { + .name = TYPE_IMX7_SNVS, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(tick_offset, IMX7SNVSState), + VMSTATE_UINT64(lpcr, IMX7SNVSState), + VMSTATE_END_OF_LIST() + } +}; + +static uint64_t imx7_snvs_get_count(IMX7SNVSState *s) +{ + uint64_t ticks = muldiv64(qemu_clock_get_ns(rtc_clock), RTC_FREQ, + NANOSECONDS_PER_SECOND); + return s->tick_offset + ticks; +} + static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size) { - trace_imx7_snvs_read(offset, 0); + IMX7SNVSState *s = IMX7_SNVS(opaque); + uint64_t ret = 0; + + switch (offset) { + case SNVS_LPSRTCMR: + ret = extract64(imx7_snvs_get_count(s), 32, 15); + break; + case SNVS_LPSRTCLR: + ret = extract64(imx7_snvs_get_count(s), 0, 32); + break; + case SNVS_LPCR: + ret = s->lpcr; + break; + } - return 0; + trace_imx7_snvs_read(offset, ret, size); + + return ret; +} + +static void imx7_snvs_reset(DeviceState *dev) +{ + IMX7SNVSState *s = IMX7_SNVS(dev); + + s->lpcr = 0; } static void imx7_snvs_write(void *opaque, hwaddr offset, uint64_t v, unsigned size) { - const uint32_t value = v; - const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + trace_imx7_snvs_write(offset, v, size); + + IMX7SNVSState *s = IMX7_SNVS(opaque); - trace_imx7_snvs_write(offset, value); + uint64_t new_value = 0, snvs_count = 0; - if (offset == SNVS_LPCR && ((value & mask) == mask)) { - qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) { + snvs_count = imx7_snvs_get_count(s); + } + + switch (offset) { + case SNVS_LPSRTCMR: + new_value = deposit64(snvs_count, 32, 32, v); + break; + case SNVS_LPSRTCLR: + new_value = deposit64(snvs_count, 0, 32, v); + break; + case SNVS_LPCR: { + s->lpcr = v; + + const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN; + + if ((v & mask) == mask) { + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + } + break; + } + } + + if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) { + s->tick_offset += new_value - snvs_count; } } @@ -59,17 +131,24 @@ static void imx7_snvs_init(Object *obj) { SysBusDevice *sd = SYS_BUS_DEVICE(obj); IMX7SNVSState *s = IMX7_SNVS(obj); + struct tm tm; memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s, TYPE_IMX7_SNVS, 0x1000); sysbus_init_mmio(sd, &s->mmio); + + qemu_get_timedate(&tm, 0); + s->tick_offset = mktimegm(&tm) - + qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; } static void imx7_snvs_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->reset = imx7_snvs_reset; + dc->vmsd = &vmstate_imx7_snvs; dc->desc = "i.MX7 Secure Non-Volatile Storage Module"; } diff --git a/hw/misc/imx7_src.c b/hw/misc/imx7_src.c index 983251e86f7268683388dd0eb104a5321771e92c..b3725ff6e72958bdeb53035abd02bb6730fa4614 100644 --- a/hw/misc/imx7_src.c +++ b/hw/misc/imx7_src.c @@ -84,7 +84,7 @@ static const VMStateDescription vmstate_imx7_src = { .name = TYPE_IMX7_SRC, .version_id = 1, .minimum_version_id = 
1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMX7SRCState, SRC_MAX), VMSTATE_END_OF_LIST() }, @@ -136,7 +136,7 @@ static void imx7_clear_reset_bit(CPUState *cpu, run_on_cpu_data data) struct SRCSCRResetInfo *ri = data.host_ptr; IMX7SRCState *s = ri->s; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); s->regs[SRC_A7RCR0] = deposit32(s->regs[SRC_A7RCR0], ri->reset_bit, 1, 0); diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index 082c6980ad541db2fec2d72f3db048e9439aa124..ab7775e09523bc074244072f38309de2c86ae4b5 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -245,7 +245,7 @@ static const VMStateDescription vmstate_imx_rngc = { .name = RNGC_NAME, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(op_self_test, IMXRNGCState), VMSTATE_UINT8(op_seed, IMXRNGCState), VMSTATE_UINT8(mask, IMXRNGCState), diff --git a/hw/misc/iosb.c b/hw/misc/iosb.c index e7e9dcca476c70ad6d288dea0807d1c13b7339a3..e20305e801331a35e9f4aa44aa188c0e771701c3 100644 --- a/hw/misc/iosb.c +++ b/hw/misc/iosb.c @@ -105,7 +105,7 @@ static const VMStateDescription vmstate_iosb = { .name = "IOSB", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IOSBState, IOSB_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c index b5a9e30a2c69afdb0b635936d52ca36ed0b3b59e..f9c45f60bf36cae2d0db20e84f7e1a506623cf18 100644 --- a/hw/misc/iotkit-secctl.c +++ b/hw/misc/iotkit-secctl.c @@ -753,7 +753,7 @@ static const VMStateDescription iotkit_secctl_ppc_vmstate = { .name = "iotkit-secctl-ppc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ns, IoTKitSecCtlPPC), VMSTATE_UINT32(sp, IoTKitSecCtlPPC), VMSTATE_UINT32(nsp, IoTKitSecCtlPPC), @@ -765,7 +765,7 @@ static const VMStateDescription iotkit_secctl_mpcintstatus_vmstate = { .name = "iotkit-secctl-mpcintstatus", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mpcintstatus, IoTKitSecCtl), VMSTATE_END_OF_LIST() } @@ -781,7 +781,7 @@ static const VMStateDescription iotkit_secctl_msc_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = needed_always, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secmscintstat, IoTKitSecCtl), VMSTATE_UINT32(secmscinten, IoTKitSecCtl), VMSTATE_UINT32(nsmscexp, IoTKitSecCtl), @@ -793,7 +793,7 @@ static const VMStateDescription iotkit_secctl_vmstate = { .name = "iotkit-secctl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secppcintstat, IoTKitSecCtl), VMSTATE_UINT32(secppcinten, IoTKitSecCtl), VMSTATE_UINT32(secrespcfg, IoTKitSecCtl), @@ -807,7 +807,7 @@ static const VMStateDescription iotkit_secctl_vmstate = { iotkit_secctl_ppc_vmstate, IoTKitSecCtlPPC), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &iotkit_secctl_mpcintstatus_vmstate, &iotkit_secctl_msc_vmstate, NULL diff --git a/hw/misc/iotkit-sysctl.c b/hw/misc/iotkit-sysctl.c index e664215ee67881841a98b1da04eed75d5388edaa..45393e84ba41b79e67779cc43ecbe228c6869f5c 100644 --- a/hw/misc/iotkit-sysctl.c +++ b/hw/misc/iotkit-sysctl.c @@ -777,7 +777,7 @@ static const 
VMStateDescription iotkit_sysctl_sse300_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sse300_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pwrctrl, IoTKitSysCtl), VMSTATE_UINT32(pdcm_pd_cpu0_sense, IoTKitSysCtl), VMSTATE_UINT32(pdcm_pd_vmr0_sense, IoTKitSysCtl), @@ -798,7 +798,7 @@ static const VMStateDescription iotkit_sysctl_sse200_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sse200_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(scsecctrl, IoTKitSysCtl), VMSTATE_UINT32(fclk_div, IoTKitSysCtl), VMSTATE_UINT32(sysclk_div, IoTKitSysCtl), @@ -818,7 +818,7 @@ static const VMStateDescription iotkit_sysctl_vmstate = { .name = "iotkit-sysctl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(secure_debug, IoTKitSysCtl), VMSTATE_UINT32(reset_syndrome, IoTKitSysCtl), VMSTATE_UINT32(reset_mask, IoTKitSysCtl), @@ -828,7 +828,7 @@ static const VMStateDescription iotkit_sysctl_vmstate = { VMSTATE_UINT32(wicctrl, IoTKitSysCtl), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &iotkit_sysctl_sse200_vmstate, &iotkit_sysctl_sse300_vmstate, NULL diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index 044788802982426261b7e2e5750ad12d12d6d35b..a2fd0bc36544c4b50866dff84a29c438c11dd4ff 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -476,7 +476,6 @@ static void setup_interrupt(IVShmemState *s, int vector, Error **errp) static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) { - Error *local_err = NULL; struct stat buf; size_t size; @@ -496,10 +495,9 @@ static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) size = buf.st_size; /* mmap the region and map into the BAR2 */ - memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), "ivshmem.bar2", - size, RAM_SHARED, fd, 0, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_from_fd(&s->server_bar2, OBJECT(s), + "ivshmem.bar2", size, RAM_SHARED, + fd, 0, errp)) { return; } @@ -1015,7 +1013,7 @@ static const VMStateDescription ivshmem_plain_vmsd = { .minimum_version_id = 0, .pre_load = ivshmem_pre_load, .post_load = ivshmem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), VMSTATE_UINT32(intrstatus, IVShmemState), VMSTATE_UINT32(intrmask, IVShmemState), @@ -1069,7 +1067,7 @@ static const VMStateDescription ivshmem_doorbell_vmsd = { .minimum_version_id = 0, .pre_load = ivshmem_pre_load, .post_load = ivshmem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, IVShmemState), VMSTATE_MSIX(parent_obj, IVShmemState), VMSTATE_UINT32(intrstatus, IVShmemState), diff --git a/hw/misc/lasi.c b/hw/misc/lasi.c index ff9dc893ae6582b4a5c82e115892dead3369eae5..003f5b5ed86b2ce58ab10c9915ba8170d1e94136 100644 --- a/hw/misc/lasi.c +++ b/hw/misc/lasi.c @@ -196,7 +196,7 @@ static const VMStateDescription vmstate_lasi = { .name = "Lasi", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irr, LasiState), VMSTATE_UINT32(imr, LasiState), VMSTATE_UINT32(ipr, LasiState), diff --git a/hw/misc/led.c b/hw/misc/led.c index 42bb43a39a2b2ceb5d849e778c3fd29703df66ea..d9998ab8954588f4fa6803aa86ea03a298fd0786 100644 --- a/hw/misc/led.c 
+++ b/hw/misc/led.c @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_led = { .name = TYPE_LED, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(intensity_percent, LEDState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index b6206ef73ca74bbb6fd761a41eb3870625880d2d..db6142b5f4137bb03eb07cb242991fc439879e45 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -1292,7 +1292,7 @@ static const VMStateDescription vmstate_q800_via1 = { .version_id = 0, .minimum_version_id = 0, .post_load = via1_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA1State, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, MOS6522Q800VIA1State), @@ -1411,7 +1411,7 @@ static const VMStateDescription vmstate_q800_via2 = { .name = "q800-via2", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, MOS6522Q800VIA2State, 0, vmstate_mos6522, MOS6522State), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index 6336dcb194828e9670a417a4427c448cd161d802..41934e2cf8e2548c5da93039759768bd97d622ee 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -487,7 +487,7 @@ static const VMStateDescription vmstate_cuda = { .name = "cuda", .version_id = 6, .minimum_version_id = 6, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mos6522_cuda.parent_obj, CUDAState, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, CUDAState), diff --git a/hw/misc/macio/gpio.c b/hw/misc/macio/gpio.c index 4deb3304719e8dac94597bdc210a579b89bfb96f..549563747dccf9cb5574d1af4c5e10eef0a67608 100644 --- a/hw/misc/macio/gpio.c +++ b/hw/misc/macio/gpio.c @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_macio_gpio = { .name = "macio_gpio", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(gpio_levels, MacIOGPIOState, 8), VMSTATE_UINT8_ARRAY(gpio_regs, MacIOGPIOState, 36), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index 80a789f32b8d8819aa890048677a91ad1094c02f..2a528ea08cafe9f2f9c61b7182cc06bdf7246eb7 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -807,7 +807,7 @@ static const VMStateDescription vmstate_dbdma_io = { .name = "dbdma_io", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(addr, struct DBDMA_io), VMSTATE_INT32(len, struct DBDMA_io), VMSTATE_INT32(is_last, struct DBDMA_io), @@ -821,7 +821,7 @@ static const VMStateDescription vmstate_dbdma_cmd = { .name = "dbdma_cmd", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(req_count, dbdma_cmd), VMSTATE_UINT16(command, dbdma_cmd), VMSTATE_UINT32(phy_addr, dbdma_cmd), @@ -836,7 +836,7 @@ static const VMStateDescription vmstate_dbdma_channel = { .name = "dbdma_channel", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, struct DBDMA_channel, DBDMA_REGS), VMSTATE_STRUCT(io, struct DBDMA_channel, 0, vmstate_dbdma_io, DBDMA_io), VMSTATE_STRUCT(current, struct DBDMA_channel, 0, vmstate_dbdma_cmd, @@ -849,7 +849,7 @@ static const VMStateDescription vmstate_dbdma = { .name = 
"dbdma", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(channels, DBDMAState, DBDMA_CHANNELS, 1, vmstate_dbdma_channel, DBDMA_channel), VMSTATE_END_OF_LIST() diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index 265c0bbd8dbc4a2b4fb8b6f92a0fa804768d9095..c9f22f85154fd1e0425872c0bcf11c32595aee8b 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -376,7 +376,7 @@ static const VMStateDescription vmstate_macio_oldworld = { .name = "macio-oldworld", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent, OldWorldMacIOState), VMSTATE_END_OF_LIST() } @@ -396,7 +396,7 @@ static const VMStateDescription vmstate_macio_newworld = { .name = "macio-newworld", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent, NewWorldMacIOState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 58316d187135625b6e3c18f9b3358cf586e914e2..e9a90da88f2649fac955229cdf8d250849d41d81 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -668,7 +668,7 @@ static const VMStateDescription vmstate_pmu_adb = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_adb_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(adb_reply_size, PMUState), VMSTATE_BUFFER(adb_reply, PMUState), VMSTATE_END_OF_LIST() @@ -679,7 +679,7 @@ static const VMStateDescription vmstate_pmu = { .name = "pmu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mos6522_pmu.parent_obj, PMUState, 0, vmstate_mos6522, MOS6522State), VMSTATE_UINT8(last_b, PMUState), @@ -698,7 +698,7 @@ static const VMStateDescription vmstate_pmu = { VMSTATE_INT64(one_sec_target, PMUState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmu_adb, NULL } diff --git a/hw/misc/mips_cmgcr.c b/hw/misc/mips_cmgcr.c index 66eb11662c75b52a239d1ec7cc28c7dc6c93619b..2703040f45984fbea301cdc0d379b93e308d72e0 100644 --- a/hw/misc/mips_cmgcr.c +++ b/hw/misc/mips_cmgcr.c @@ -205,7 +205,7 @@ static const VMStateDescription vmstate_mips_gcr = { .name = "mips-gcr", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(cpc_base, MIPSGCRState), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/mips_cpc.c b/hw/misc/mips_cpc.c index 4a94c87054644c247f9fd4ea74724356a7d532fb..1e8fd2e6996bb6c6a0b48f51e3f49d794e0eac0a 100644 --- a/hw/misc/mips_cpc.c +++ b/hw/misc/mips_cpc.c @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_mips_cpc = { .name = "mips-cpc", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vp_running, MIPSCPCState), VMSTATE_END_OF_LIST() }, diff --git a/hw/misc/mips_itu.c b/hw/misc/mips_itu.c index 5a83ccc4e8bd3233c1f26566fe095307788509bf..37aea0e7376840944969b8b9e3316ac8d98eb303 100644 --- a/hw/misc/mips_itu.c +++ b/hw/misc/mips_itu.c @@ -22,9 +22,10 @@ #include "qemu/log.h" #include "qemu/module.h" #include "qapi/error.h" -#include "exec/exec-all.h" +#include "hw/core/cpu.h" #include "hw/misc/mips_itu.h" #include "hw/qdev-properties.h" +#include "target/mips/cpu.h" #define ITC_TAG_ADDRSPACE_SZ 
(ITC_ADDRESSMAP_NUM * 8) /* Initialize as 4kB area to fit all 32 cells with default 128B grain. diff --git a/hw/misc/mos6522.c b/hw/misc/mos6522.c index d6ba47bde97de5bc8b0c69579a49117c23523eea..e3fe87c20caab81bff61fdd5216159ebc027c41e 100644 --- a/hw/misc/mos6522.c +++ b/hw/misc/mos6522.c @@ -611,7 +611,7 @@ static const VMStateDescription vmstate_mos6522_timer = { .name = "mos6522_timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(latch, MOS6522Timer), VMSTATE_UINT16(counter_value, MOS6522Timer), VMSTATE_INT64(load_time, MOS6522Timer), @@ -625,7 +625,7 @@ const VMStateDescription vmstate_mos6522 = { .name = "mos6522", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(a, MOS6522State), VMSTATE_UINT8(b, MOS6522State), VMSTATE_UINT8(dira, MOS6522State), diff --git a/hw/misc/mps2-fpgaio.c b/hw/misc/mps2-fpgaio.c index 07b8cbdad28fabb36243a7b041347ad17459fc13..aa1bb83e72111de53be423a23fdc2e8e7c326dd1 100644 --- a/hw/misc/mps2-fpgaio.c +++ b/hw/misc/mps2-fpgaio.c @@ -305,7 +305,7 @@ static const VMStateDescription mps2_fpgaio_vmstate = { .name = "mps2-fpgaio", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(led0, MPS2FPGAIO), VMSTATE_UINT32(prescale, MPS2FPGAIO), VMSTATE_UINT32(misc, MPS2FPGAIO), diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c index fe5034db1404a0a759cf7f3dfe5742a4dd4d779d..6cfb5ff1086a21515dd5c11831eca2f8d54a8939 100644 --- a/hw/misc/mps2-scc.c +++ b/hw/misc/mps2-scc.c @@ -340,7 +340,7 @@ static const VMStateDescription mps2_scc_vmstate = { .name = "mps2-scc", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cfg0, MPS2SCC), VMSTATE_UINT32(cfg1, MPS2SCC), VMSTATE_UINT32(cfg2, MPS2SCC), diff --git a/hw/misc/msf2-sysreg.c b/hw/misc/msf2-sysreg.c index 2dce55c3649993501d06d8ad32c4e310327909c3..f54382a816cd119e3f436e55c2fc395186ccc73c 100644 --- a/hw/misc/msf2-sysreg.c +++ b/hw/misc/msf2-sysreg.c @@ -112,7 +112,7 @@ static const VMStateDescription vmstate_msf2_sysreg = { .name = TYPE_MSF2_SYSREG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, MSF2SysregState, MSF2_SYSREG_MMIO_SIZE / 4), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/mst_fpga.c b/hw/misc/mst_fpga.c index 7692825867d54c2ff30a2904e96413f01a49978e..2d7bfa5ad9ee9da83f22bf28c46868b33d301260 100644 --- a/hw/misc/mst_fpga.c +++ b/hw/misc/mst_fpga.c @@ -227,7 +227,7 @@ static const VMStateDescription vmstate_mst_fpga_regs = { .version_id = 0, .minimum_version_id = 0, .post_load = mst_fpga_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(prev_level, mst_irq_state), VMSTATE_UINT32(leddat1, mst_irq_state), VMSTATE_UINT32(leddat2, mst_irq_state), diff --git a/hw/misc/npcm7xx_clk.c b/hw/misc/npcm7xx_clk.c index bc2b879feb5d1f9871ecabdf289a6427da830e8f..ac1622c38aa2b097cbf53e82fbaf8ba12eba0a06 100644 --- a/hw/misc/npcm7xx_clk.c +++ b/hw/misc/npcm7xx_clk.c @@ -976,7 +976,7 @@ static const VMStateDescription vmstate_npcm7xx_clk_pll = { .name = "npcm7xx-clock-pll", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxClockPLLState), VMSTATE_END_OF_LIST(), }, @@ -986,7 +986,7 @@ static const 
VMStateDescription vmstate_npcm7xx_clk_sel = { .name = "npcm7xx-clock-sel", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(clock_in, NPCM7xxClockSELState, NPCM7XX_CLK_SEL_MAX_INPUT, 0, vmstate_clock, Clock), VMSTATE_END_OF_LIST(), @@ -997,7 +997,7 @@ static const VMStateDescription vmstate_npcm7xx_clk_divider = { .name = "npcm7xx-clock-divider", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxClockDividerState), VMSTATE_END_OF_LIST(), }, @@ -1008,7 +1008,7 @@ static const VMStateDescription vmstate_npcm7xx_clk = { .version_id = 1, .minimum_version_id = 1, .post_load = npcm7xx_clk_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxCLKState, NPCM7XX_CLK_NR_REGS), VMSTATE_INT64(ref_ns, NPCM7xxCLKState), VMSTATE_CLOCK(clkref, NPCM7xxCLKState), diff --git a/hw/misc/npcm7xx_gcr.c b/hw/misc/npcm7xx_gcr.c index eace9e1967ac886b01590c2e052344a74a909d70..9252f9d14881fe4cc20447230c94b3d81bcbc047 100644 --- a/hw/misc/npcm7xx_gcr.c +++ b/hw/misc/npcm7xx_gcr.c @@ -227,7 +227,7 @@ static const VMStateDescription vmstate_npcm7xx_gcr = { .name = "npcm7xx-gcr", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxGCRState, NPCM7XX_GCR_NR_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/misc/npcm7xx_mft.c b/hw/misc/npcm7xx_mft.c index a30583a1b05bef6617419b0df56b7658ab4ead41..9a848584e18ad57fa623f97d7f4ecd01b0b15dd9 100644 --- a/hw/misc/npcm7xx_mft.c +++ b/hw/misc/npcm7xx_mft.c @@ -503,7 +503,7 @@ static const VMStateDescription vmstate_npcm7xx_mft = { .name = "npcm7xx-mft-module", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock_in, NPCM7xxMFTState), VMSTATE_CLOCK(clock_1, NPCM7xxMFTState), VMSTATE_CLOCK(clock_2, NPCM7xxMFTState), diff --git a/hw/misc/npcm7xx_pwm.c b/hw/misc/npcm7xx_pwm.c index 2be5bd25c6c1750bdf16543800020d8105d2ef1d..fca2dd2e5af9fddd0ab2e93912637e72dab12c8a 100644 --- a/hw/misc/npcm7xx_pwm.c +++ b/hw/misc/npcm7xx_pwm.c @@ -511,7 +511,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm = { .name = "npcm7xx-pwm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(running, NPCM7xxPWM), VMSTATE_BOOL(inverted, NPCM7xxPWM), VMSTATE_UINT8(index, NPCM7xxPWM), @@ -529,7 +529,7 @@ static const VMStateDescription vmstate_npcm7xx_pwm_module = { .name = "npcm7xx-pwm-module", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clock, NPCM7xxPWMState), VMSTATE_STRUCT_ARRAY(pwm, NPCM7xxPWMState, NPCM7XX_PWM_PER_MODULE, 0, vmstate_npcm7xx_pwm, diff --git a/hw/misc/npcm7xx_rng.c b/hw/misc/npcm7xx_rng.c index b01df7cdb257d4059a9b082bb71f6543deff64f7..7f7e5eca6269562695791e47abb79803238d542c 100644 --- a/hw/misc/npcm7xx_rng.c +++ b/hw/misc/npcm7xx_rng.c @@ -150,7 +150,7 @@ static const VMStateDescription vmstate_npcm7xx_rng = { .name = "npcm7xx-rng", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rngcs, NPCM7xxRNGState), VMSTATE_UINT8(rngd, NPCM7xxRNGState), VMSTATE_UINT8(rngmode, NPCM7xxRNGState), diff --git a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c index 
fc86e1b697905d2ae2fb6c5c69a54181a53b06f0..2d76c457182a60f406cc87ff1ff4c2142f423ebe 100644 --- a/hw/misc/nrf51_rng.c +++ b/hw/misc/nrf51_rng.c @@ -231,7 +231,7 @@ static const VMStateDescription vmstate_rng = { .name = "nrf51_soc.rng", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(active, NRF51RNGState), VMSTATE_UINT32(event_valrdy, NRF51RNGState), VMSTATE_UINT32(shortcut_stop_on_valrdy, NRF51RNGState), diff --git a/hw/misc/pca9552.c b/hw/misc/pca9552.c index fff19e369a397b238f4c35ab868422d1bacae700..72b653463f62f4ea9b42e7d31c1d5ebe9ece0851 100644 --- a/hw/misc/pca9552.c +++ b/hw/misc/pca9552.c @@ -328,7 +328,7 @@ static const VMStateDescription pca9552_vmstate = { .name = "PCA9552", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, PCA955xState), VMSTATE_UINT8(pointer, PCA955xState), VMSTATE_UINT8_ARRAY(regs, PCA955xState, PCA955X_NR_REGS), diff --git a/hw/misc/pvpanic-pci.c b/hw/misc/pvpanic-pci.c index fbcaa50731b380537b93345aa9641095be763677..c01e4ce8646a751de3a67a81ee0de8fd90fd9825 100644 --- a/hw/misc/pvpanic-pci.c +++ b/hw/misc/pvpanic-pci.c @@ -37,7 +37,7 @@ static const VMStateDescription vmstate_pvpanic_pci = { .name = "pvpanic-pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PVPanicPCIState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/slavio_misc.c b/hw/misc/slavio_misc.c index e8eb71570a89d56c74252d6e9d84b15fc602a6b8..94369e4cc88e25a69eb99a433ba2637b6e487129 100644 --- a/hw/misc/slavio_misc.c +++ b/hw/misc/slavio_misc.c @@ -408,7 +408,7 @@ static const VMStateDescription vmstate_misc = { .name ="slavio_misc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dummy, MiscState), VMSTATE_UINT8(config, MiscState), VMSTATE_UINT8(aux1, MiscState), diff --git a/hw/misc/stm32f4xx_exti.c b/hw/misc/stm32f4xx_exti.c index 02e7810046126196a5c39d3c5b544a52a7ce19a1..7bd3afcd7ccc4a9cfed3447ec8b62508808f977c 100644 --- a/hw/misc/stm32f4xx_exti.c +++ b/hw/misc/stm32f4xx_exti.c @@ -153,7 +153,7 @@ static const VMStateDescription vmstate_stm32f4xx_exti = { .name = TYPE_STM32F4XX_EXTI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(exti_imr, STM32F4xxExtiState), VMSTATE_UINT32(exti_emr, STM32F4xxExtiState), VMSTATE_UINT32(exti_rtsr, STM32F4xxExtiState), diff --git a/hw/misc/stm32f4xx_syscfg.c b/hw/misc/stm32f4xx_syscfg.c index f960e4ea1eebcc71b834b95a89eda4da4c1773fb..854fce6a952f3ffddb6d8f6ac3ac244fe8205dd9 100644 --- a/hw/misc/stm32f4xx_syscfg.c +++ b/hw/misc/stm32f4xx_syscfg.c @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_stm32f4xx_syscfg = { .name = TYPE_STM32F4XX_SYSCFG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(syscfg_memrmp, STM32F4xxSyscfgState), VMSTATE_UINT32(syscfg_pmc, STM32F4xxSyscfgState), VMSTATE_UINT32_ARRAY(syscfg_exticr, STM32F4xxSyscfgState, diff --git a/hw/misc/trace-events b/hw/misc/trace-events index 05ff692441be991ba0ca9ba15c10da054f997f5e..85725506bff809ea88ec37db8e7475f977fed1a3 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -116,8 +116,8 @@ imx7_gpr_read(uint64_t offset) "addr 0x%08" PRIx64 imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 
0x%08" PRIx64 # imx7_snvs.c -imx7_snvs_read(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 -imx7_snvs_write(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32 +imx7_snvs_read(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS read: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" +imx7_snvs_write(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS write: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u" # mos6522.c mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d" diff --git a/hw/misc/tz-mpc.c b/hw/misc/tz-mpc.c index 30481e1c9094de33beb148abff58ecd747e32a5f..92b994919bef1a7186369bb6dec5967282171e76 100644 --- a/hw/misc/tz-mpc.c +++ b/hw/misc/tz-mpc.c @@ -574,7 +574,7 @@ static const VMStateDescription tz_mpc_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = tz_mpc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, TZMPC), VMSTATE_UINT32(blk_idx, TZMPC), VMSTATE_UINT32(int_stat, TZMPC), diff --git a/hw/misc/tz-msc.c b/hw/misc/tz-msc.c index acbe94400ba10a2c8e4efa881c06e6005255596a..de5a3126ccac8b8bb4c7e8a50bdb38c4b7216aa7 100644 --- a/hw/misc/tz-msc.c +++ b/hw/misc/tz-msc.c @@ -269,7 +269,7 @@ static const VMStateDescription tz_msc_vmstate = { .name = "tz-msc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(cfg_nonsec, TZMSC), VMSTATE_BOOL(cfg_sec_resp, TZMSC), VMSTATE_BOOL(irq_clear, TZMSC), diff --git a/hw/misc/tz-ppc.c b/hw/misc/tz-ppc.c index 36495c68e766a25cea2ccf1ca0bbb0ed351d62dd..645077872094b78db0761cc7a8b731180b7f4a44 100644 --- a/hw/misc/tz-ppc.c +++ b/hw/misc/tz-ppc.c @@ -290,7 +290,7 @@ static const VMStateDescription tz_ppc_vmstate = { .name = "tz-ppc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL_ARRAY(cfg_nonsec, TZPPC, 16), VMSTATE_BOOL_ARRAY(cfg_ap, TZPPC, 16), VMSTATE_BOOL(cfg_sec_resp, TZPPC), diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index e75d1e7e17b334f8c37c75d67bfb88c2b8c16890..1a6c744bac24697cc7de170781eb6c1cbd8351f3 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -108,7 +108,7 @@ static const VMStateDescription vmstate_virt_ctrl = { .name = "virt-ctrl", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_enabled, VirtCtrlState), VMSTATE_END_OF_LIST() } diff --git a/hw/misc/vmcoreinfo.c b/hw/misc/vmcoreinfo.c index a9d718fc236d90637317927d2bec7c48905dfb63..833773ade52c3dc824e7dce28c4c5865ca6da9b7 100644 --- a/hw/misc/vmcoreinfo.c +++ b/hw/misc/vmcoreinfo.c @@ -73,7 +73,7 @@ static const VMStateDescription vmstate_vmcoreinfo = { .name = "vmcoreinfo", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(has_vmcoreinfo, VMCoreInfoState), VMSTATE_UINT16(vmcoreinfo.host_format, VMCoreInfoState), VMSTATE_UINT16(vmcoreinfo.guest_format, VMCoreInfoState), diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c index 8e8ec0715abaab4c32427fd4740a315091561858..a6ab287b0190bedcaa424ac576cd21cd10ab8304 100644 --- a/hw/misc/xlnx-versal-cframe-reg.c +++ b/hw/misc/xlnx-versal-cframe-reg.c @@ -697,7 +697,7 @@ static const VMStateDescription vmstate_cframe = { .name = "cframe", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) 
{ VMSTATE_UINT32_ARRAY(data, XlnxCFrame, FRAME_NUM_WORDS), VMSTATE_END_OF_LIST() } @@ -707,7 +707,7 @@ static const VMStateDescription vmstate_cframe_reg = { .name = TYPE_XLNX_VERSAL_CFRAME_REG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameReg, 4), VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFrameReg, CFRAME_REG_R_MAX), VMSTATE_BOOL(rowon, XlnxVersalCFrameReg), @@ -765,7 +765,7 @@ static const VMStateDescription vmstate_cframe_bcast_reg = { .name = TYPE_XLNX_VERSAL_CFRAME_BCAST_REG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFrameBcastReg, 4), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-cfu.c b/hw/misc/xlnx-versal-cfu.c index 8e588ac1d83fe9d98bd6327febbff688f5f17734..6bb82e51c1578da39f220c06a8e524314e6486a9 100644 --- a/hw/misc/xlnx-versal-cfu.c +++ b/hw/misc/xlnx-versal-cfu.c @@ -463,7 +463,7 @@ static const VMStateDescription vmstate_cfu_apb = { .name = TYPE_XLNX_VERSAL_CFU_APB, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUAPB, 4), VMSTATE_UINT32_ARRAY(regs, XlnxVersalCFUAPB, R_MAX), VMSTATE_UINT8(fdri_row_addr, XlnxVersalCFUAPB), @@ -475,7 +475,7 @@ static const VMStateDescription vmstate_cfu_fdro = { .name = TYPE_XLNX_VERSAL_CFU_FDRO, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(fdro_data, XlnxVersalCFUFDRO), VMSTATE_END_OF_LIST(), } @@ -485,7 +485,7 @@ static const VMStateDescription vmstate_cfu_sfr = { .name = TYPE_XLNX_VERSAL_CFU_SFR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(wfifo, XlnxVersalCFUSFR, 4), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c index 767106b7a30796435e9be86d0ad78701c89ef660..ac6889fcf26e71ffcb9b0c032478ed721843331e 100644 --- a/hw/misc/xlnx-versal-crl.c +++ b/hw/misc/xlnx-versal-crl.c @@ -387,7 +387,7 @@ static const VMStateDescription vmstate_crl = { .name = TYPE_XLNX_VERSAL_CRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalCRL, CRL_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-versal-pmc-iou-slcr.c b/hw/misc/xlnx-versal-pmc-iou-slcr.c index 07b7ebc217320d826eb173130f95548f7f81099a..60e13a78ab88b0610c5a74e50f72620aedebf0e8 100644 --- a/hw/misc/xlnx-versal-pmc-iou-slcr.c +++ b/hw/misc/xlnx-versal-pmc-iou-slcr.c @@ -1412,7 +1412,7 @@ static const VMStateDescription vmstate_pmc_iou_slcr = { .name = TYPE_XILINX_VERSAL_PMC_IOU_SLCR, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalPmcIouSlcr, XILINX_VERSAL_PMC_IOU_SLCR_R_MAX), VMSTATE_END_OF_LIST(), diff --git a/hw/misc/xlnx-versal-trng.c b/hw/misc/xlnx-versal-trng.c index 4d41c262c484984a36f328e9dd24fe0dcc45cc1d..b8111b8b66264e4a12f304484201c3e4ea83622f 100644 --- a/hw/misc/xlnx-versal-trng.c +++ b/hw/misc/xlnx-versal-trng.c @@ -674,7 +674,7 @@ static const VMStateDescription vmstate_trng = { .name = TYPE_XLNX_VERSAL_TRNG, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(rand_count, 
XlnxVersalTRng), VMSTATE_UINT64(rand_reseed, XlnxVersalTRng), VMSTATE_UINT64(forced_prng_count, XlnxVersalTRng), diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c index e5b719a0ed49f4255e90a3dbaf75e34530ace6a9..a5f78c190ebd10de78525b50d976285846c7071a 100644 --- a/hw/misc/xlnx-versal-xramc.c +++ b/hw/misc/xlnx-versal-xramc.c @@ -212,7 +212,7 @@ static const VMStateDescription vmstate_xram_ctrl = { .name = TYPE_XLNX_XRAM_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxXramCtrl, XRAM_CTRL_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c index 3d2be95e6db244a7d98ad96e382cedb13f7fef58..1d441b41dfeffc0cddb81d747b3ecd1d3a7d1925 100644 --- a/hw/misc/xlnx-zynqmp-apu-ctrl.c +++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_zynqmp_apu = { .name = TYPE_XLNX_ZYNQMP_APU_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPAPUCtrl, APU_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c index 57bc8cf49aeaec018111636884081e37eaae6c9c..a83efb44e3185b8b163f1d973107479da7941fa9 100644 --- a/hw/misc/xlnx-zynqmp-crf.c +++ b/hw/misc/xlnx-zynqmp-crf.c @@ -233,7 +233,7 @@ static const VMStateDescription vmstate_crf = { .name = TYPE_XLNX_ZYNQMP_CRF, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPCRF, CRF_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/misc/zynq_slcr.c b/hw/misc/zynq_slcr.c index 41f38a98e98aa1a33205bc8c8771d50a5f7fd4b5..d2ac2e77f266152501913c7ebc39627218d769ab 100644 --- a/hw/misc/zynq_slcr.c +++ b/hw/misc/zynq_slcr.c @@ -603,7 +603,7 @@ static const VMStateDescription vmstate_zynq_slcr = { .name = "zynq_slcr", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, ZynqSLCRState, ZYNQ_SLCR_NUM_REGS), VMSTATE_CLOCK_V(ps_clk, ZynqSLCRState, 3), VMSTATE_END_OF_LIST() diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index cc350d40e59fad00e26fe3a8b58d3f1352645c79..108ae9c853545ac8c49f843e3e4ac619e7befe59 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -851,7 +851,7 @@ static const VMStateDescription vmstate_aw_emac = { .version_id = 1, .minimum_version_id = 1, .post_load = allwinner_sun8i_emac_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(mii_phy_addr, AwSun8iEmacState), VMSTATE_UINT32(mii_cmd, AwSun8iEmacState), VMSTATE_UINT32(mii_data, AwSun8iEmacState), diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c index e10965de14005aa7ab5e38b3cd728a3efb78b415..989839784a9175710d492dbd3c47e16c374cdafd 100644 --- a/hw/net/allwinner_emac.c +++ b/hw/net/allwinner_emac.c @@ -472,7 +472,7 @@ static const VMStateDescription vmstate_mii = { .name = "rtl8201cp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(bmcr, RTL8201CPState), VMSTATE_UINT16(bmsr, RTL8201CPState), VMSTATE_UINT16(anar, RTL8201CPState), @@ -495,7 +495,7 @@ static const VMStateDescription vmstate_aw_emac = { .version_id = 1, .minimum_version_id = 1, .post_load = aw_emac_post_load, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(mii, AwEmacState, 1, vmstate_mii, RTL8201CPState), VMSTATE_UINT32(ctl, AwEmacState), VMSTATE_UINT32(tx_mode, AwEmacState), diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 296bba238ec963640c0bdc10d18f03d50e39f9a5..ec7bf562e5763b028059384209185ac171c5c7c6 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -199,8 +199,8 @@ REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */ FIELD(PHYMNTNC, PHY_ADDR, 23, 5) FIELD(PHYMNTNC, OP, 28, 2) FIELD(PHYMNTNC, ST, 30, 2) -#define MDIO_OP_READ 0x3 -#define MDIO_OP_WRITE 0x2 +#define MDIO_OP_READ 0x2 +#define MDIO_OP_WRITE 0x1 REG32(RXPAUSE, 0x38) /* RX Pause Time reg */ REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */ @@ -1771,7 +1771,7 @@ static const VMStateDescription vmstate_cadence_gem = { .name = "cadence_gem", .version_id = 4, .minimum_version_id = 4, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG), VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32), VMSTATE_UINT8(phy_loop, CadenceGEMState), diff --git a/hw/net/can/can_kvaser_pci.c b/hw/net/can/can_kvaser_pci.c index 2cd90cef1e1545a54ef0c9ee0e4bfaa6e8c07c95..bf41e6b261262c5909016db40717061ffd00b962 100644 --- a/hw/net/can/can_kvaser_pci.c +++ b/hw/net/can/can_kvaser_pci.c @@ -266,7 +266,7 @@ static const VMStateDescription vmstate_kvaser_pci = { .name = "kvaser_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, KvaserPCIState), /* Load this before sja_state. */ VMSTATE_UINT32(s5920_intcsr, KvaserPCIState), diff --git a/hw/net/can/can_mioe3680_pci.c b/hw/net/can/can_mioe3680_pci.c index b9918773b3fec41005c1b4a5e06bd1f1fe6aceb1..308b17e0c00f330894b5351af9359c0b22df6ee6 100644 --- a/hw/net/can/can_mioe3680_pci.c +++ b/hw/net/can/can_mioe3680_pci.c @@ -203,7 +203,7 @@ static const VMStateDescription vmstate_mioe3680_pci = { .name = "mioe3680_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Mioe3680PCIState), VMSTATE_STRUCT(sja_state[0], Mioe3680PCIState, 0, vmstate_can_sja, CanSJA1000State), diff --git a/hw/net/can/can_pcm3680_pci.c b/hw/net/can/can_pcm3680_pci.c index 8ef3e4659cc09000971b4a55b2a249bbb3d85f21..e4c8d93b984976594463a3e54d99942b86df328b 100644 --- a/hw/net/can/can_pcm3680_pci.c +++ b/hw/net/can/can_pcm3680_pci.c @@ -204,7 +204,7 @@ static const VMStateDescription vmstate_pcm3680i_pci = { .name = "pcm3680i_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, Pcm3680iPCIState), VMSTATE_STRUCT(sja_state[0], Pcm3680iPCIState, 0, vmstate_can_sja, CanSJA1000State), diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 73201f91395af81dea302332c7122ff4c3b3d84c..6694d7bfd84abf2a4209ce7e24c9d53bb931e5f5 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -108,7 +108,7 @@ void can_sja_single_filter(struct qemu_can_filter *filter, } filter->can_mask = (uint32_t)amr[0] << 3; - filter->can_mask |= (uint32_t)amr[1] << 5; + filter->can_mask |= (uint32_t)amr[1] >> 5; filter->can_mask = ~filter->can_mask & QEMU_CAN_SFF_MASK; if (!(amr[1] & 0x10)) { filter->can_mask |= QEMU_CAN_RTR_FLAG; @@ -929,7 +929,7 @@ const VMStateDescription vmstate_qemu_can_filter = { .name = "qemu_can_filter", .version_id = 1, .minimum_version_id = 1, - .fields 
= (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(can_id, qemu_can_filter), VMSTATE_UINT32(can_mask, qemu_can_filter), VMSTATE_END_OF_LIST() @@ -953,7 +953,7 @@ const VMStateDescription vmstate_can_sja = { .version_id = 1, .minimum_version_id = 1, .post_load = can_sja_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(mode, CanSJA1000State), VMSTATE_UINT8(status_pel, CanSJA1000State), diff --git a/hw/net/can/ctucan_core.c b/hw/net/can/ctucan_core.c index f2c3b6a706195ebc737d99d3772e41c5389f3cbb..812b83e93e1ab908ac5646c50fcae1f720afa9b6 100644 --- a/hw/net/can/ctucan_core.c +++ b/hw/net/can/ctucan_core.c @@ -617,7 +617,7 @@ const VMStateDescription vmstate_qemu_ctucan_tx_buffer = { .name = "qemu_ctucan_tx_buffer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data, CtuCanCoreMsgBuffer, CTUCAN_CORE_MSG_MAX_LEN), VMSTATE_END_OF_LIST() } @@ -636,7 +636,7 @@ const VMStateDescription vmstate_ctucan = { .version_id = 1, .minimum_version_id = 1, .post_load = ctucan_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mode_settings.u32, CtuCanCoreState), VMSTATE_UINT32(status.u32, CtuCanCoreState), VMSTATE_UINT32(int_stat.u32, CtuCanCoreState), diff --git a/hw/net/can/ctucan_pci.c b/hw/net/can/ctucan_pci.c index ea079e2af5629ee2ce0b96128df965ca2e4df389..d8f7344ddc62d1b685ad474af26b7820942b35cf 100644 --- a/hw/net/can/ctucan_pci.c +++ b/hw/net/can/ctucan_pci.c @@ -215,7 +215,7 @@ static const VMStateDescription vmstate_ctucan_pci = { .name = "ctucan_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, CtuCanPCIState), VMSTATE_STRUCT(ctucan_state[0], CtuCanPCIState, 0, vmstate_ctucan, CtuCanCoreState), diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c index 5b8ce0a285ef48ad137173b270e967e0fbfa5d35..47a14cfe633b16cdb013e121ec09900e9de7af6a 100644 --- a/hw/net/can/xlnx-versal-canfd.c +++ b/hw/net/can/xlnx-versal-canfd.c @@ -2060,7 +2060,7 @@ static const VMStateDescription vmstate_canfd = { .name = TYPE_XILINX_CANFD, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalCANFDState, XLNX_VERSAL_CANFD_R_MAX), VMSTATE_PTIMER(canfd_timer, XlnxVersalCANFDState), diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c index f60e480c3ab2399926e3215c59530446813289bf..ca0ce4e8bbfaa5847849424063add5a5b7f18aab 100644 --- a/hw/net/can/xlnx-zynqmp-can.c +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -1159,7 +1159,7 @@ static const VMStateDescription vmstate_can = { .name = TYPE_XLNX_ZYNQMP_CAN, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(rx_fifo, XlnxZynqMPCANState), VMSTATE_FIFO32(tx_fifo, XlnxZynqMPCANState), VMSTATE_FIFO32(txhpb_fifo, XlnxZynqMPCANState), diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c index b16b18b3c3cbaf6c1f902fd311db179587deb67d..bf0652da1b49f970f6bb11d4dbe2d4a73978337b 100644 --- a/hw/net/dp8393x.c +++ b/hw/net/dp8393x.c @@ -924,7 +924,7 @@ static const VMStateDescription vmstate_dp8393x = { .name = "dp8393x", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT16_2DARRAY(cam, dp8393xState, 16, 3), VMSTATE_UINT16_ARRAY(regs, dp8393xState, 
SONIC_REG_COUNT), VMSTATE_END_OF_LIST() diff --git a/hw/net/e1000.c b/hw/net/e1000.c index 8ffe1077f1961b8293ed15423774b23f700f970c..43f3a4a7011bc6e74af0826ebbab01e383e9461a 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -1437,7 +1437,7 @@ static const VMStateDescription vmstate_e1000_mit_state = { .name = "e1000/mit_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mac_reg[RDTR], E1000State), VMSTATE_UINT32(mac_reg[RADV], E1000State), VMSTATE_UINT32(mac_reg[TADV], E1000State), @@ -1452,7 +1452,7 @@ static const VMStateDescription vmstate_e1000_full_mac_state = { .version_id = 1, .minimum_version_id = 1, .needed = e1000_full_mac_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(mac_reg, E1000State, 0x8000), VMSTATE_END_OF_LIST() } @@ -1464,7 +1464,7 @@ static const VMStateDescription vmstate_e1000_tx_tso_state = { .minimum_version_id = 1, .needed = e1000_tso_state_needed, .post_load = e1000_tx_tso_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tx.tso_props.ipcss, E1000State), VMSTATE_UINT8(tx.tso_props.ipcso, E1000State), VMSTATE_UINT16(tx.tso_props.ipcse, E1000State), @@ -1486,7 +1486,7 @@ static const VMStateDescription vmstate_e1000 = { .minimum_version_id = 1, .pre_save = e1000_pre_save, .post_load = e1000_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, E1000State), VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */ VMSTATE_UNUSED(4), /* Was mmio_base. */ @@ -1558,7 +1558,7 @@ static const VMStateDescription vmstate_e1000 = { E1000_VLAN_FILTER_TBL_SIZE), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_e1000_mit_state, &vmstate_e1000_full_mac_state, &vmstate_e1000_tx_tso_state, diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index e41a6c10385db0665b07f5e18d604ac8b853b0da..7c6f6029518cbe481196da0a7e6ec6a0b037af0a 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -564,7 +564,7 @@ static const VMStateDescription e1000e_vmstate_tx = { .name = "e1000e-tx", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sum_needed, struct e1000e_tx), VMSTATE_UINT8(props.ipcss, struct e1000e_tx), VMSTATE_UINT8(props.ipcso, struct e1000e_tx), @@ -588,7 +588,7 @@ static const VMStateDescription e1000e_vmstate_intr_timer = { .name = "e1000e-intr-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, E1000IntrDelayTimer), VMSTATE_BOOL(running, E1000IntrDelayTimer), VMSTATE_END_OF_LIST() @@ -609,7 +609,7 @@ static const VMStateDescription e1000e_vmstate = { .minimum_version_id = 1, .pre_save = e1000e_pre_save, .post_load = e1000e_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, E1000EState), VMSTATE_MSIX(parent_obj, E1000EState), diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index 69e1c4bb8918fa506cf9f420edb257a0f5424127..d9a70c4544d071b5f91d95ea94b4fe051e372873 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -1772,7 +1772,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) static const VMStateDescription vmstate_eepro100 = { .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, EEPRO100State), VMSTATE_UNUSED(32), VMSTATE_BUFFER(mult, EEPRO100State), diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index 78e594afa4e739f3accbbf1ecd419ed98e539373..74b6c3d9a75b79109a34d4328e078b495eb4fafa 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -1119,7 +1119,7 @@ static const VMStateDescription vmstate_ftgmac100 = { .name = TYPE_FTGMAC100, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_state, FTGMAC100State), VMSTATE_UINT32(isr, FTGMAC100State), VMSTATE_UINT32(ier, FTGMAC100State), @@ -1304,7 +1304,7 @@ static const VMStateDescription vmstate_aspeed_mii = { .name = TYPE_ASPEED_MII, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(phycr, FTGMAC100State), VMSTATE_UINT32(phydata, FTGMAC100State), VMSTATE_END_OF_LIST() diff --git a/hw/net/i82596.c b/hw/net/i82596.c index a907f0df8c442f8335db667b7553f404ac625bd0..6cc8292a65ad2894995ca9e249d26f9ed0cd2161 100644 --- a/hw/net/i82596.c +++ b/hw/net/i82596.c @@ -713,7 +713,7 @@ const VMStateDescription vmstate_i82596 = { .name = "i82596", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(lnkst, I82596State), VMSTATE_TIMER_PTR(flush_queue_timer, I82596State), VMSTATE_END_OF_LIST() diff --git a/hw/net/igb.c b/hw/net/igb.c index 8089acfea4148c64e98986758f51143b7cb5f106..0b5c31a58bba2835b04fc9d0180e63f7cf6d9cca 100644 --- a/hw/net/igb.c +++ b/hw/net/igb.c @@ -520,7 +520,7 @@ static const VMStateDescription igb_vmstate_tx_ctx = { .name = "igb-tx-ctx", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vlan_macip_lens, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(seqnum_seed, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(type_tucmd_mlhl, struct e1000_adv_tx_context_desc), @@ -533,7 +533,7 @@ static const VMStateDescription igb_vmstate_tx = { .name = "igb-tx", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(ctx, struct igb_tx, 2, 0, igb_vmstate_tx_ctx, struct e1000_adv_tx_context_desc), VMSTATE_UINT32(first_cmd_type_len, struct igb_tx), @@ -548,7 +548,7 @@ static const VMStateDescription igb_vmstate_intr_timer = { .name = "igb-intr-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, IGBIntrDelayTimer), VMSTATE_BOOL(running, IGBIntrDelayTimer), VMSTATE_END_OF_LIST() @@ -569,7 +569,7 @@ static const VMStateDescription igb_vmstate = { .minimum_version_id = 1, .pre_save = igb_pre_save, .post_load = igb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, IGBState), VMSTATE_MSIX(parent_obj, IGBState), diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index 6881e3e4f05f7648d97add6997455491fdef3c87..cee84af7bab5e20ef78673ac8903afe94763e9a5 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -195,7 +195,7 @@ static const VMStateDescription vmstate_imx_eth_txdescs = { .version_id = 1, .minimum_version_id = 1, .needed = imx_eth_is_multi_tx_ring, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tx_descriptor[1], IMXFECState), VMSTATE_UINT32(tx_descriptor[2], IMXFECState), VMSTATE_END_OF_LIST() @@ -206,7 
+206,7 @@ static const VMStateDescription vmstate_imx_eth = { .name = TYPE_IMX_FEC, .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX), VMSTATE_UINT32(rx_descriptor, IMXFECState), VMSTATE_UINT32(tx_descriptor[0], IMXFECState), @@ -217,7 +217,7 @@ static const VMStateDescription vmstate_imx_eth = { VMSTATE_UINT32(phy_int_mask, IMXFECState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_imx_eth_txdescs, NULL }, diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index cf7b8c897a07f4c2593f534a7d75fbd34a5f0e09..598dd79e179933eadef23d9969987184ad6f5bea 100644 --- a/hw/net/lan9118.c +++ b/hw/net/lan9118.c @@ -173,7 +173,7 @@ static const VMStateDescription vmstate_lan9118_packet = { .name = "lan9118_packet", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, LAN9118Packet), VMSTATE_UINT32(cmd_a, LAN9118Packet), VMSTATE_UINT32(cmd_b, LAN9118Packet), @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_lan9118 = { .name = "lan9118", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, lan9118_state), VMSTATE_UINT32(irq_cfg, lan9118_state), VMSTATE_UINT32(int_sts, lan9118_state), diff --git a/hw/net/lance.c b/hw/net/lance.c index 4c5f01baad9c713438e39a52f48d30620e8bd7cd..e1ed24c2cea2f796c58dde72070ef2dbdc64364f 100644 --- a/hw/net/lance.c +++ b/hw/net/lance.c @@ -94,7 +94,7 @@ static const VMStateDescription vmstate_lance = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SysBusPCNetState, 0, vmstate_pcnet, PCNetState), VMSTATE_END_OF_LIST() } diff --git a/hw/net/lasi_i82596.c b/hw/net/lasi_i82596.c index e37f7fabe952d2e687d52d6eac0365d4e2528ad4..6a3147fe2dc99acad41c982c84f83cca6d592b1e 100644 --- a/hw/net/lasi_i82596.c +++ b/hw/net/lasi_i82596.c @@ -99,7 +99,7 @@ static const VMStateDescription vmstate_lasi_82596 = { .name = "i82596", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(state, SysBusI82596State, 0, vmstate_i82596, I82596State), VMSTATE_END_OF_LIST() diff --git a/hw/net/meson.build b/hw/net/meson.build index f64651c467e11d39d317dd2765877e41c67f815e..9afceb0619105cc56eaeb0d8a77b67731f6d5c1d 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -50,7 +50,6 @@ specific_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('virtio-net.c')) if have_vhost_net system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost_net.c'), if_false: files('vhost_net-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost_net-stub.c')) else system_ss.add(files('vhost_net-stub.c')) endif @@ -69,7 +68,6 @@ system_ss.add(when: 'CONFIG_ROCKER', if_true: files( 'rocker/rocker_of_dpa.c', 'rocker/rocker_world.c', ), if_false: files('rocker/qmp-norocker.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('rocker/qmp-norocker.c')) system_ss.add(files('rocker/rocker-hmp-cmds.c')) subdir('can') diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c index 8e925de867c2aba1fba9bc6b23cca9247ea55a6b..df5101aed739aa9e3d8714c8f2b42566bb03be30 100644 --- a/hw/net/mipsnet.c +++ b/hw/net/mipsnet.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_mipsnet = { .name = 
"mipsnet", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(busy, MIPSnetState), VMSTATE_UINT32(rx_count, MIPSnetState), VMSTATE_UINT32(rx_read, MIPSnetState), diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 145a5e46ab446c166ad05abc1e7e756708235d52..c1fc10de2ab17bd0b5cdbf200abcfa5b1ab88113 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -557,7 +557,7 @@ static const VMStateDescription vmstate_msf2_emac = { .name = TYPE_MSS_EMAC, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(mac_addr, MSF2EmacState, ETH_ALEN), VMSTATE_UINT32(rx_desc, MSF2EmacState), VMSTATE_UINT16_ARRAY(phy_regs, MSF2EmacState, PHY_MAX_REGS), diff --git a/hw/net/mv88w8618_eth.c b/hw/net/mv88w8618_eth.c index 2185f1131a94a8c7f4bad94f1310f1b6e019166c..96c65f4d46222a08fb7b2824ad4ee139b1d50e30 100644 --- a/hw/net/mv88w8618_eth.c +++ b/hw/net/mv88w8618_eth.c @@ -358,7 +358,7 @@ static const VMStateDescription mv88w8618_eth_vmsd = { .name = "mv88w8618_eth", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(smir, mv88w8618_eth_state), VMSTATE_UINT32(icr, mv88w8618_eth_state), VMSTATE_UINT32(imr, mv88w8618_eth_state), diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c index a79f7fad1f1c4c383795420444caa6ce49443ffe..26980e087eea778050e8f1249741b6e75c9b7ced 100644 --- a/hw/net/ne2000-isa.c +++ b/hw/net/ne2000-isa.c @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_isa_ne2000 = { .name = "ne2000", .version_id = 2, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ne2000, ISANE2000State, 0, vmstate_ne2000, NE2000State), VMSTATE_END_OF_LIST() } diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c index fee93c6ec0d9fdde87b90020383fdddd7fbc1b1e..74773069c6902bbbf33ba54901501ac2d8320683 100644 --- a/hw/net/ne2000-pci.c +++ b/hw/net/ne2000-pci.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_pci_ne2000 = { .name = "ne2000", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCINE2000State), VMSTATE_STRUCT(ne2000, PCINE2000State, 0, vmstate_ne2000, NE2000State), VMSTATE_END_OF_LIST() diff --git a/hw/net/ne2000.c b/hw/net/ne2000.c index d79c884d50a3a726f1d19dc38403e44749d1403b..b482c5f3af7f0ec433dd52ec964ca556cd7d8389 100644 --- a/hw/net/ne2000.c +++ b/hw/net/ne2000.c @@ -606,7 +606,7 @@ const VMStateDescription vmstate_ne2000 = { .version_id = 2, .minimum_version_id = 0, .post_load = ne2000_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_V(rxcr, NE2000State, 2), VMSTATE_UINT8(cmd, NE2000State), VMSTATE_UINT32(start, NE2000State), diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 1d4e8f59f35ec38f888d9407a2ca1780e987037d..d1583b6f9b3c0b831ffd4bcc7c91e7dd56f623a3 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_npcm7xx_emc = { .name = TYPE_NPCM7XX_EMC, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(emc_num, NPCM7xxEMCState), VMSTATE_UINT32_ARRAY(regs, NPCM7xxEMCState, NPCM7XX_NUM_EMC_REGS), VMSTATE_BOOL(tx_active, NPCM7xxEMCState), diff --git a/hw/net/pcnet-pci.c b/hw/net/pcnet-pci.c index 
96a302c141abbc100a27601de53edcf7be514007..fe1a845b2b0fefbb2dc19795b7e38ff021f77bd7 100644 --- a/hw/net/pcnet-pci.c +++ b/hw/net/pcnet-pci.c @@ -147,7 +147,7 @@ static const VMStateDescription vmstate_pci_pcnet = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIPCNetState), VMSTATE_STRUCT(state, PCIPCNetState, 0, vmstate_pcnet, PCNetState), VMSTATE_END_OF_LIST() diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index a7e123e60dbab97ebfafabb05fbbb99576af9cc9..494eab84791953740fe8fff5c5bf608a9c2010e5 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -1682,7 +1682,7 @@ const VMStateDescription vmstate_pcnet = { .name = "pcnet", .version_id = 3, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(rap, PCNetState), VMSTATE_INT32(isr, PCNetState), VMSTATE_INT32(lnkst, PCNetState), diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 4af8c66266b4c1ff6410dd1f6c7517974ee50b59..897c86ec41e536cb68b29298588e43a42a055e46 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -3150,7 +3150,7 @@ static const VMStateDescription vmstate_rtl8139_hotplug_ready ={ .version_id = 1, .minimum_version_id = 1, .needed = rtl8139_hotplug_ready_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() } }; @@ -3173,7 +3173,7 @@ static const VMStateDescription vmstate_rtl8139 = { .minimum_version_id = 3, .post_load = rtl8139_post_load, .pre_save = rtl8139_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, RTL8139State), VMSTATE_PARTIAL_BUFFER(phys, RTL8139State, 6), VMSTATE_BUFFER(mult, RTL8139State), @@ -3257,7 +3257,7 @@ static const VMStateDescription vmstate_rtl8139 = { VMSTATE_UINT32_V(cplus_enabled, RTL8139State, 4), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_rtl8139_hotplug_ready, NULL } diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index 876a78456ac141e699f08fd3c3590147dfe6fb4a..49b7c2610270e7f658bd311c9aa1464576217387 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_smc91c111 = { .name = "smc91c111", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(tcr, smc91c111_state), VMSTATE_UINT16(rcr, smc91c111_state), VMSTATE_UINT16(cr, smc91c111_state), diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c index 475d5f3a348d0c6f35d6d36f12be7e3caa06f9fd..ecb30b7c76bc4f89d5bb0cdac5c1dbe18a27172a 100644 --- a/hw/net/spapr_llan.c +++ b/hw/net/spapr_llan.c @@ -800,7 +800,7 @@ static const VMStateDescription vmstate_rx_buffer_pool = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_vlan_rx_buffer_pools_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(bufsize, RxBufPool), VMSTATE_INT32(count, RxBufPool), VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS), @@ -813,7 +813,7 @@ static const VMStateDescription vmstate_rx_pools = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_vlan_rx_buffer_pools_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan, RX_MAX_POOLS, 1, vmstate_rx_buffer_pool, RxBufPool), @@ -825,7 +825,7 @@ static const VMStateDescription vmstate_spapr_llan = { .name = 
"spapr_llan", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan), /* LLAN state */ VMSTATE_BOOL(isopen, SpaprVioVlan), @@ -837,7 +837,7 @@ static const VMStateDescription vmstate_spapr_llan = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_rx_pools, NULL } diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c index 6768a6912f02836a97ce0a17134f73ddeb74947b..db95766e2943304d9be9a6f720f169f3fc55991e 100644 --- a/hw/net/stellaris_enet.c +++ b/hw/net/stellaris_enet.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_rx_frame = { .name = "stellaris_enet/rx_frame", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data, StellarisEnetRxFrame, 2048), VMSTATE_UINT32(len, StellarisEnetRxFrame), VMSTATE_END_OF_LIST() @@ -133,7 +133,7 @@ static const VMStateDescription vmstate_stellaris_enet = { .version_id = 2, .minimum_version_id = 2, .post_load = stellaris_enet_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ris, stellaris_enet_state), VMSTATE_UINT32(im, stellaris_enet_state), VMSTATE_UINT32(rctl, stellaris_enet_state), diff --git a/hw/net/sungem.c b/hw/net/sungem.c index 013cfc273610d9f5015c33134c2fe74d9fd7b607..dd1b4a134469face0b34806aaec36a9308d446b5 100644 --- a/hw/net/sungem.c +++ b/hw/net/sungem.c @@ -1434,7 +1434,7 @@ static const VMStateDescription vmstate_sungem = { .name = "sungem", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pdev, SunGEMState), VMSTATE_MACADDR(conf.macaddr, SunGEMState), VMSTATE_UINT32(phy_addr, SunGEMState), diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index ddc83a64bd184534ac7035c808c96217eac29ba4..ae8452e5f9f5a18ea64171c72c2444e5650c5abb 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -925,7 +925,7 @@ static const VMStateDescription vmstate_hme = { .name = "sunhme", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, SunHMEState), VMSTATE_MACADDR(conf.macaddr, SunHMEState), VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)), diff --git a/hw/net/tulip.c b/hw/net/tulip.c index 962086aae4a562ca640e6f01b37dd5b6c275a695..6d4fb06dad17631d2cc54515343a614982cffcf5 100644 --- a/hw/net/tulip.c +++ b/hw/net/tulip.c @@ -48,7 +48,7 @@ struct TULIPState { static const VMStateDescription vmstate_pci_tulip = { .name = "tulip", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, TULIPState), VMSTATE_UINT32_ARRAY(csr, TULIPState, 16), VMSTATE_UINT32(old_csr9, TULIPState), diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index 80c56f0cfcf1675a1d1815489d0cfb94b525736e..7a2846fa1c717ab00112febf186fd5bb56f77fd5 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -3113,7 +3113,7 @@ static int virtio_net_post_load_virtio(VirtIODevice *vdev) /* tx_waiting field of a VirtIONetQueue */ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { .name = "virtio-net-queue-tx_waiting", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tx_waiting, VirtIONetQueue), VMSTATE_END_OF_LIST() }, @@ -3191,7 +3191,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = { 
.name = "virtio-net-tx_waiting", .pre_load = virtio_net_tx_waiting_pre_load, .pre_save = virtio_net_tx_waiting_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, curr_queue_pairs_1, vmstate_virtio_net_queue_tx_waiting, @@ -3228,7 +3228,7 @@ static const VMStateDescription vmstate_virtio_net_has_ufo = { .name = "virtio-net-ufo", .post_load = virtio_net_ufo_post_load, .pre_save = virtio_net_ufo_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp), VMSTATE_END_OF_LIST() }, @@ -3262,7 +3262,7 @@ static const VMStateDescription vmstate_virtio_net_has_vnet = { .name = "virtio-net-vnet", .post_load = virtio_net_vnet_post_load, .pre_save = virtio_net_vnet_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp), VMSTATE_END_OF_LIST() }, @@ -3278,7 +3278,7 @@ static const VMStateDescription vmstate_virtio_net_rss = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_net_rss_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(rss_data.enabled, VirtIONet), VMSTATE_BOOL(rss_data.redirect, VirtIONet), VMSTATE_BOOL(rss_data.populate_hash, VirtIONet), @@ -3299,7 +3299,7 @@ static const VMStateDescription vmstate_virtio_net_device = { .version_id = VIRTIO_NET_VM_VERSION, .minimum_version_id = VIRTIO_NET_VM_VERSION, .post_load = virtio_net_post_load_device, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN), VMSTATE_STRUCT_POINTER(vqs, VirtIONet, vmstate_virtio_net_queue_tx_waiting, @@ -3343,8 +3343,8 @@ static const VMStateDescription vmstate_virtio_net_device = { VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, has_ctrl_guest_offloads), VMSTATE_END_OF_LIST() - }, - .subsections = (const VMStateDescription * []) { + }, + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_net_rss, NULL } @@ -3871,7 +3871,7 @@ static const VMStateDescription vmstate_virtio_net = { .name = "virtio-net", .minimum_version_id = VIRTIO_NET_VM_VERSION, .version_id = VIRTIO_NET_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index 6fb4102d033015eab4094ec4aef5025334acaeec..707487c636662cdf5b8009d6bbd674feaf4a7072 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -2307,7 +2307,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = { .minimum_version_id = 1, .pre_load = vmxnet3_mcast_list_pre_load, .needed = vmxnet3_mc_list_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, mcast_list_buff_size), VMSTATE_END_OF_LIST() @@ -2317,7 +2317,7 @@ static const VMStateDescription vmxstate_vmxnet3_mcast_list = { static const VMStateDescription vmstate_vmxnet3_ring = { .name = "vmxnet3-ring", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(pa, Vmxnet3Ring), VMSTATE_UINT32(size, Vmxnet3Ring), VMSTATE_UINT32(cell_size, Vmxnet3Ring), @@ -2330,7 +2330,7 @@ static const VMStateDescription vmstate_vmxnet3_ring = { static const VMStateDescription vmstate_vmxnet3_tx_stats = { .name = "vmxnet3-tx-stats", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT64(TSOPktsTxOK, struct UPT1_TxStats), VMSTATE_UINT64(TSOBytesTxOK, struct UPT1_TxStats), VMSTATE_UINT64(ucastPktsTxOK, struct UPT1_TxStats), @@ -2348,7 +2348,7 @@ static const VMStateDescription vmstate_vmxnet3_tx_stats = { static const VMStateDescription vmstate_vmxnet3_txq_descr = { .name = "vmxnet3-txq-descr", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tx_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, Vmxnet3Ring), VMSTATE_STRUCT(comp_ring, Vmxnet3TxqDescr, 0, vmstate_vmxnet3_ring, @@ -2364,7 +2364,7 @@ static const VMStateDescription vmstate_vmxnet3_txq_descr = { static const VMStateDescription vmstate_vmxnet3_rx_stats = { .name = "vmxnet3-rx-stats", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(LROPktsRxOK, struct UPT1_RxStats), VMSTATE_UINT64(LROBytesRxOK, struct UPT1_RxStats), VMSTATE_UINT64(ucastPktsRxOK, struct UPT1_RxStats), @@ -2382,7 +2382,7 @@ static const VMStateDescription vmstate_vmxnet3_rx_stats = { static const VMStateDescription vmstate_vmxnet3_rxq_descr = { .name = "vmxnet3-rxq-descr", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(rx_ring, Vmxnet3RxqDescr, VMXNET3_RX_RINGS_PER_QUEUE, 0, vmstate_vmxnet3_ring, Vmxnet3Ring), @@ -2418,7 +2418,7 @@ static int vmxnet3_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_vmxnet3_int_state = { .name = "vmxnet3-int-state", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(is_masked, Vmxnet3IntState), VMSTATE_BOOL(is_pending, Vmxnet3IntState), VMSTATE_BOOL(is_asserted, Vmxnet3IntState), @@ -2432,7 +2432,7 @@ static const VMStateDescription vmstate_vmxnet3 = { .minimum_version_id = 1, .pre_save = vmxnet3_pre_save, .post_load = vmxnet3_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, VMXNET3State), VMSTATE_MSIX(parent_obj, VMXNET3State), VMSTATE_BOOL(rx_packets_compound, VMXNET3State), @@ -2468,7 +2468,7 @@ static const VMStateDescription vmstate_vmxnet3 = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmxstate_vmxnet3_mcast_list, NULL } diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 1e2b3baeb1a5143b21885d35d0fbf33a83a2cdf2..453fdb98198302cccd52f11f2a590666b6e9f48c 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -133,7 +133,7 @@ static bool net_tx_packets(struct XenNetDev *netdev) void *page; void *tmpbuf = NULL; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); for (;;) { rc = netdev->tx_ring.req_cons; @@ -260,7 +260,7 @@ static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size RING_IDX rc, rp; void *page; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_backend_get_state(&netdev->xendev) != XenbusStateConnected) { return -1; @@ -354,7 +354,7 @@ static bool xen_netdev_connect(XenDevice *xendev, Error **errp) XenNetDev *netdev = XEN_NET_DEVICE(xendev); unsigned int port, rx_copy; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (xen_device_frontend_scanf(xendev, "tx-ring-ref", "%u", &netdev->tx_ring_ref) != 1) { @@ -425,7 +425,7 @@ static void xen_netdev_disconnect(XenDevice *xendev, Error **errp) trace_xen_netdev_disconnect(netdev->dev); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); netdev->tx_ring.sring = NULL; 
netdev->rx_ring.sring = NULL; diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c index 1f4f277d840f826fb7b6c345e2cecc6aace6c82f..ffe3fc8dbefe2c5d469ef7a3cef6bebf7b7d8ac4 100644 --- a/hw/net/xgmac.c +++ b/hw/net/xgmac.c @@ -159,7 +159,7 @@ static const VMStateDescription vmstate_rxtx_stats = { .name = "xgmac_stats", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(rx_bytes, RxTxStats), VMSTATE_UINT64(tx_bytes, RxTxStats), VMSTATE_UINT64(rx, RxTxStats), @@ -173,7 +173,7 @@ static const VMStateDescription vmstate_xgmac = { .name = "xgmac", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(stats, XgmacState, 0, vmstate_rxtx_stats, RxTxStats), VMSTATE_UINT32_ARRAY(regs, XgmacState, R_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/nvram/ds1225y.c b/hw/nvram/ds1225y.c index 3660a47c51ca6cb177d9e9293beb431bdd53c0e9..6d510dcc6860cffcb8be57d32ac858c3e1e75e8c 100644 --- a/hw/nvram/ds1225y.c +++ b/hw/nvram/ds1225y.c @@ -102,7 +102,7 @@ static const VMStateDescription vmstate_nvram = { .version_id = 0, .minimum_version_id = 0, .post_load = nvram_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(contents, NvRamState, chip_size, 0, vmstate_info_uint8, uint8_t), VMSTATE_END_OF_LIST() diff --git a/hw/nvram/eeprom93xx.c b/hw/nvram/eeprom93xx.c index 57d63638d77a53a784102ccc2991164a4ffc8a6e..a8fd60a8fb5a918dc08c60144ee771889ee7ac10 100644 --- a/hw/nvram/eeprom93xx.c +++ b/hw/nvram/eeprom93xx.c @@ -131,7 +131,7 @@ static const VMStateDescription vmstate_eeprom = { .name = "eeprom", .version_id = EEPROM_VERSION, .minimum_version_id = OLD_EEPROM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tick, eeprom_t), VMSTATE_UINT8(address, eeprom_t), VMSTATE_UINT8(command, eeprom_t), diff --git a/hw/nvram/fw_cfg-acpi.c b/hw/nvram/fw_cfg-acpi.c new file mode 100644 index 0000000000000000000000000000000000000000..4e48baeaa0154b2ba8e0f621d4f23aa4d1fa6d1d --- /dev/null +++ b/hw/nvram/fw_cfg-acpi.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Add fw_cfg device in DSDT + * + */ + +#include "hw/nvram/fw_cfg_acpi.h" +#include "hw/acpi/aml-build.h" + +void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap) +{ + Aml *dev = aml_device("FWCF"); + aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, + fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 4e4524673a39add1b830723d4e2e6d10a86ab582..e85493d5139844dd8285ee5e26f3453f1070284b 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -656,7 +656,7 @@ static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_fw_cfg_dma = { .name = "fw_cfg/dma", .needed = fw_cfg_dma_enabled, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dma_addr, FWCfgState), VMSTATE_END_OF_LIST() }, @@ -668,7 +668,7 @@ static const VMStateDescription vmstate_fw_cfg_acpi_mr = { .minimum_version_id = 1, .needed = fw_cfg_acpi_mr_restore, .post_load = 
fw_cfg_acpi_mr_restore_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(table_mr_size, FWCfgState), VMSTATE_UINT64(linker_mr_size, FWCfgState), VMSTATE_UINT64(rsdp_mr_size, FWCfgState), @@ -680,13 +680,13 @@ static const VMStateDescription vmstate_fw_cfg = { .name = "fw_cfg", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(cur_entry, FWCfgState), VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1), VMSTATE_UINT32_V(cur_offset, FWCfgState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fw_cfg_dma, &vmstate_fw_cfg_acpi_mr, NULL, diff --git a/hw/nvram/mac_nvram.c b/hw/nvram/mac_nvram.c index 810e84f07e4fca6a2d93917b6bf37dbe74611902..5f9d16fb3e35b44b37a8acfe36ef3184416e040c 100644 --- a/hw/nvram/mac_nvram.c +++ b/hw/nvram/mac_nvram.c @@ -79,7 +79,7 @@ static const VMStateDescription vmstate_macio_nvram = { .name = "macio_nvram", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, size), VMSTATE_END_OF_LIST() } diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index 75e415b1a01b62e3cae6396e2201f651ef31f1ce..4996c72456faf95450966de26e85604187ddd59c 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -17,3 +17,4 @@ system_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) +specific_ss.add(when: 'CONFIG_ACPI', if_true: files('fw_cfg-acpi.c')) diff --git a/hw/nvram/npcm7xx_otp.c b/hw/nvram/npcm7xx_otp.c index c61f2fc1aa2a88ed12d989c8cf65340e43746fc1..f00ebfa931ed6139f2de0bffafa682f0773195ff 100644 --- a/hw/nvram/npcm7xx_otp.c +++ b/hw/nvram/npcm7xx_otp.c @@ -384,7 +384,7 @@ static const VMStateDescription vmstate_npcm7xx_otp = { .name = "npcm7xx-otp", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, NPCM7xxOTPState, NPCM7XX_OTP_NR_REGS), VMSTATE_UINT8_ARRAY(array, NPCM7xxOTPState, NPCM7XX_OTP_ARRAY_BYTES), VMSTATE_END_OF_LIST(), diff --git a/hw/nvram/nrf51_nvm.c b/hw/nvram/nrf51_nvm.c index 7f1db8c4239ec718d6505574e466190b830e09f3..73564f7e6eadf1c05b2fcb43307622ea5470b6b5 100644 --- a/hw/nvram/nrf51_nvm.c +++ b/hw/nvram/nrf51_nvm.c @@ -336,12 +336,9 @@ static void nrf51_nvm_init(Object *obj) static void nrf51_nvm_realize(DeviceState *dev, Error **errp) { NRF51NVMState *s = NRF51_NVM(dev); - Error *err = NULL; - memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s, - "nrf51_soc.flash", s->flash_size, &err); - if (err) { - error_propagate(errp, err); + if (!memory_region_init_rom_device(&s->flash, OBJECT(dev), &flash_ops, s, + "nrf51_soc.flash", s->flash_size, errp)) { return; } @@ -366,7 +363,7 @@ static const VMStateDescription vmstate_nvm = { .name = "nrf51_soc.nvm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(uicr_content, NRF51NVMState, NRF51_UICR_FIXTURE_SIZE), VMSTATE_UINT32(config, NRF51NVMState), diff --git a/hw/nvram/spapr_nvram.c b/hw/nvram/spapr_nvram.c index 2d72f304422a4f3e30aee6db7adc4f1c876ac221..bfd8aa367e1e1a38aa22d66f56fc3a1e707c988b 100644 --- a/hw/nvram/spapr_nvram.c +++ 
b/hw/nvram/spapr_nvram.c @@ -245,7 +245,7 @@ static const VMStateDescription vmstate_spapr_nvram = { .minimum_version_id = 1, .pre_load = spapr_nvram_pre_load, .post_load = spapr_nvram_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(size, SpaprNvram), VMSTATE_VBUFFER_ALLOC_UINT32(buf, SpaprNvram, 1, NULL, size), VMSTATE_END_OF_LIST() diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c index e18e7770e1e83a5025bae3a8fb282b9741f220b3..0a71a005c693dfd6b8ba1564f10fc6e3035fcbd5 100644 --- a/hw/nvram/xlnx-bbram.c +++ b/hw/nvram/xlnx-bbram.c @@ -508,7 +508,7 @@ static const VMStateDescription vmstate_bbram_ctrl = { .name = TYPE_XLNX_BBRAM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c index 2480af35e1b85701ac99775e32e117602edf49ad..e4b9e11a3ddfb271ffd5fa5ced840e26d4bad1bf 100644 --- a/hw/nvram/xlnx-versal-efuse-ctrl.c +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -737,7 +737,7 @@ static const VMStateDescription vmstate_efuse_ctrl = { .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c index 3db5f98ec1a423066a6c9e7c199d1f0f33649fd6..ec98456e5d191fd6d9889087ced3fd5be01c00b8 100644 --- a/hw/nvram/xlnx-zynqmp-efuse.c +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -821,7 +821,7 @@ static const VMStateDescription vmstate_efuse = { .name = TYPE_XLNX_ZYNQMP_EFUSE, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPEFuse, R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c index 10163b391b261ff349b0d2b01c454f97e220d12c..835986c4dbe6ed8aec5db8dd31f62aa5a56c1e39 100644 --- a/hw/openrisc/cputimer.c +++ b/hw/openrisc/cputimer.c @@ -145,7 +145,7 @@ static const VMStateDescription vmstate_or1k_timer = { .name = "or1k_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ttcr, OR1KTimerState), VMSTATE_UINT64(last_clk, OR1KTimerState), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 1ce4e7bebae7ef6719d9ec932e132ba04b69fe73..784507c826b18b6b4b795b2ac0e0d2a1ad3ed26e 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -117,7 +117,7 @@ static const VMStateDescription vmstate_rp_dev = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/i82801b11.c b/hw/pci-bridge/i82801b11.c index 0e83cd11b2c0c4d310250898827cb2fc920b41bb..c140919cbc5987dccf03841579547e024b08fe45 100644 --- a/hw/pci-bridge/i82801b11.c +++ b/hw/pci-bridge/i82801b11.c @@ -81,7 +81,7 @@ err_bridge: static const VMStateDescription i82801b11_bridge_dev_vmstate = { .name = "i82801b11_bridge", .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + 
.fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-bridge/ioh3420.c b/hw/pci-bridge/ioh3420.c index f1e16135a32aadcb8b441ed4d8bce6380a7bbe83..be752a4bda5bd253036ebba9172ff79a3a3de452 100644 --- a/hw/pci-bridge/ioh3420.c +++ b/hw/pci-bridge/ioh3420.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_ioh3420 = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index 6d5ad9f37b226c143d51ff60d87f76de59c78801..f2a60434ddadc70c023ad5f4a2e7b20ea638d1f3 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -13,5 +13,3 @@ pci_ss.add(when: 'CONFIG_CXL', if_true: files('cxl_root_port.c', 'cxl_upstream.c pci_ss.add(when: 'CONFIG_SIMBA', if_true: files('simba.c')) system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) - -system_ss.add(when: 'CONFIG_ALL', if_true: files('pci_expander_bridge_stubs.c')) diff --git a/hw/pci-bridge/pci_bridge_dev.c b/hw/pci-bridge/pci_bridge_dev.c index 4b2696ea7ff3bb93cbe333ab4fdeae36bac49b6d..089f91efed42d567121fcbf90456074b8e17efef 100644 --- a/hw/pci-bridge/pci_bridge_dev.c +++ b/hw/pci-bridge/pci_bridge_dev.c @@ -199,7 +199,7 @@ static bool pci_device_shpc_present(void *opaque, int version_id) static const VMStateDescription pci_bridge_dev_vmstate = { .name = "pci_bridge", .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), SHPC_VMSTATE(shpc, PCIDevice, pci_device_shpc_present), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/pcie_pci_bridge.c b/hw/pci-bridge/pcie_pci_bridge.c index 2301b2ca0b088078f9fc6e0b79dbea5f6903e363..7646ac23975fd5353e752ca80d265efa901015f6 100644 --- a/hw/pci-bridge/pcie_pci_bridge.c +++ b/hw/pci-bridge/pcie_pci_bridge.c @@ -132,7 +132,7 @@ static Property pcie_pci_bridge_dev_properties[] = { static const VMStateDescription pcie_pci_bridge_dev_vmstate = { .name = TYPE_PCIE_PCI_BRIDGE_DEV, .priority = MIG_PRI_PCI_BUS, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), SHPC_VMSTATE(shpc, PCIDevice, NULL), VMSTATE_END_OF_LIST() diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 38a2361fa2afde418dd44473318ecf5bf5ba6f71..907d5105b019224cf2ce5426ef56c3cb01255ff3 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -146,7 +146,7 @@ static const VMStateDescription vmstate_xio3130_downstream = { .version_id = 1, .minimum_version_id = 1, .post_load = pcie_cap_slot_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj.parent_obj, PCIESlot), VMSTATE_STRUCT(parent_obj.parent_obj.parent_obj.exp.aer_log, PCIESlot, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-bridge/xio3130_upstream.c b/hw/pci-bridge/xio3130_upstream.c index a48bfe3bc54f36aa6358271480a64dafb632c60e..2a6cff6e033f6255515f4ef2bcbc38376669008e 100644 --- a/hw/pci-bridge/xio3130_upstream.c +++ b/hw/pci-bridge/xio3130_upstream.c @@ -115,7 +115,7 @@ static const VMStateDescription vmstate_xio3130_upstream = { .priority = MIG_PRI_PCI_BUS, .version_id = 1, 
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj.parent_obj, PCIEPort), VMSTATE_STRUCT(parent_obj.parent_obj.exp.aer_log, PCIEPort, 0, vmstate_pcie_aer_log, PCIEAERLog), diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c index 7d68ccee7ebaca852633df149aa35884c411e97d..37d271118ca224f27aeb006f9b326be08ecbe730 100644 --- a/hw/pci-host/astro.c +++ b/hw/pci-host/astro.c @@ -166,6 +166,8 @@ static MemTxResult elroy_chip_write_with_attrs(void *opaque, hwaddr addr, trace_elroy_write(addr, size, val); switch ((addr >> 3) << 3) { + case 0x000: /* PCI_ID & PCI_COMMAND_STATUS_REG */ + break; case 0x080: put_val_in_int64(&s->arb_mask, addr, size, val); break; @@ -175,6 +177,9 @@ static MemTxResult elroy_chip_write_with_attrs(void *opaque, hwaddr addr, case 0x200 ... 0x250 - 1: /* LMMIO, GMMIO, WLMMIO, WGMMIO, ... */ put_val_in_arrary(s->mmio_base, 0x200, addr, size, val); break; + case 0x300: /* ibase */ + case 0x308: /* imask */ + break; case 0x0680: put_val_in_int64(&s->error_config, addr, size, val); break; @@ -459,7 +464,7 @@ static const VMStateDescription vmstate_elroy = { .name = "Elroy", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(hpa, ElroyState), VMSTATE_UINT32(pci_bus_num, ElroyState), VMSTATE_UINT64(config_address, ElroyState), @@ -538,6 +543,9 @@ static MemTxResult astro_chip_read_with_attrs(void *opaque, hwaddr addr, case 0x0030: /* HP-UX 10.20 and 11.11 reads it. No idea. */ val = -1; break; + case 0x0078: /* NetBSD reads 0x78 ? */ + val = -1; + break; case 0x0300 ... 0x03d8: /* LMMIO_DIRECT0_BASE... */ index = (addr - 0x300) / 8; val = s->ioc_ranges[index]; @@ -624,31 +632,43 @@ static MemTxResult astro_chip_write_with_attrs(void *opaque, hwaddr addr, case 0x10220: case 0x10230: /* HP-UX 11.11 reads it. No idea. */ break; - case 0x22108: /* IOC STATUS_CONTROL */ - put_val_in_int64(&s->ioc_status_ctrl, addr, size, val); - break; case 0x20200 ... 0x20240 - 1: /* IOC Rope0_Control ... */ put_val_in_arrary(s->ioc_rope_control, 0x20200, addr, size, val); break; case 0x20040: /* IOC Rope config */ + case 0x22040: put_val_in_int64(&s->ioc_rope_config, addr, size, val); break; case 0x20300: + case 0x22300: put_val_in_int64(&s->tlb_ibase, addr, size, val); break; case 0x20308: + case 0x22308: put_val_in_int64(&s->tlb_imask, addr, size, val); break; case 0x20310: + case 0x22310: put_val_in_int64(&s->tlb_pcom, addr, size, val); /* TODO: flush iommu */ break; case 0x20318: + case 0x22318: put_val_in_int64(&s->tlb_tcnfg, addr, size, val); break; case 0x20320: + case 0x22320: put_val_in_int64(&s->tlb_pdir_base, addr, size, val); break; + case 0x22000: /* func_id */ + break; + case 0x22008: /* func_class */ + break; + case 0x22050: /* rope_debug */ + break; + case 0x22108: /* IOC STATUS_CONTROL */ + put_val_in_int64(&s->ioc_status_ctrl, addr, size, val); + break; /* * empty placeholders for non-existent elroys, e.g. 
* func_class, pci config & data @@ -691,7 +711,7 @@ static const VMStateDescription vmstate_astro = { .name = "Astro", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ioc_ctrl, AstroState), VMSTATE_UINT64(ioc_status_ctrl, AstroState), VMSTATE_UINT64_ARRAY(ioc_ranges, AstroState, (0x03d8 - 0x300) / 8), diff --git a/hw/pci-host/bonito.c b/hw/pci-host/bonito.c index bab661f3ce1c7bebefb2ebd94e44ea63a3246d3a..1f0c4353484a118264e495df3a6ed6c843871e4f 100644 --- a/hw/pci-host/bonito.c +++ b/hw/pci-host/bonito.c @@ -619,7 +619,7 @@ static const VMStateDescription vmstate_bonito = { .name = "Bonito", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, PCIBonitoState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index f477f97847d753341f341281ed2abed2ce242574..dd9e389c07bc606d10c520415efe320cdfe0f954 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -529,7 +529,7 @@ static const VMStateDescription vmstate_designware_pcie_msi_bank = { .name = "designware-pcie-msi-bank", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(enable, DesignwarePCIEMSIBank), VMSTATE_UINT32(mask, DesignwarePCIEMSIBank), VMSTATE_UINT32(status, DesignwarePCIEMSIBank), @@ -541,7 +541,7 @@ static const VMStateDescription vmstate_designware_pcie_msi = { .name = "designware-pcie-msi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, DesignwarePCIEMSI), VMSTATE_STRUCT_ARRAY(intr, DesignwarePCIEMSI, @@ -557,7 +557,7 @@ static const VMStateDescription vmstate_designware_pcie_viewport = { .name = "designware-pcie-viewport", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, DesignwarePCIEViewport), VMSTATE_UINT64(target, DesignwarePCIEViewport), VMSTATE_UINT32(limit, DesignwarePCIEViewport), @@ -570,7 +570,7 @@ static const VMStateDescription vmstate_designware_pcie_root = { .name = "designware-pcie-root", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIBridge), VMSTATE_UINT32(atu_viewport, DesignwarePCIERoot), VMSTATE_STRUCT_2DARRAY(viewports, @@ -718,7 +718,7 @@ static const VMStateDescription vmstate_designware_pcie_host = { .name = "designware-pcie-host", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(root, DesignwarePCIEHost, 1, diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c index 5b0947a16c9ec4abf8668bc4563b1cc181f154f3..d992c4bb69d8df55825cd3420476bd74cef2e0d4 100644 --- a/hw/pci-host/dino.c +++ b/hw/pci-host/dino.c @@ -287,7 +287,7 @@ static const VMStateDescription vmstate_dino = { .name = "Dino", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(iar0, DinoState), VMSTATE_UINT32(iar1, DinoState), VMSTATE_UINT32(imr, DinoState), diff --git a/hw/pci-host/gpex-acpi.c b/hw/pci-host/gpex-acpi.c index 1092dc3b7082bc8cedbcb1d697b729ead355fd54..f69413ea2c3568c1d26f50f2e2c86c7b44953e42 100644 --- a/hw/pci-host/gpex-acpi.c +++ b/hw/pci-host/gpex-acpi.c @@ -281,3 +281,16 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg) 
crs_range_set_free(&crs_range_set); } + +void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq) +{ + bool ambig; + Object *obj = object_resolve_path_type("", TYPE_GPEX_HOST, &ambig); + + if (!obj || ambig) { + return; + } + + GPEX_HOST(obj)->gpex_cfg.irq = irq; + acpi_dsdt_add_gpex(scope, &GPEX_HOST(obj)->gpex_cfg); +} diff --git a/hw/pci-host/gpex.c b/hw/pci-host/gpex.c index a6752fac5e8e363889ec313ea5151877c19e7588..e9cf455bf5231c2984aac18e01434bd4ee40055c 100644 --- a/hw/pci-host/gpex.c +++ b/hw/pci-host/gpex.c @@ -154,6 +154,18 @@ static Property gpex_host_properties[] = { */ DEFINE_PROP_BOOL("allow-unmapped-accesses", GPEXHost, allow_unmapped_accesses, true), + DEFINE_PROP_UINT64(PCI_HOST_ECAM_BASE, GPEXHost, gpex_cfg.ecam.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_ECAM_SIZE, GPEXHost, gpex_cfg.ecam.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_PIO_BASE, GPEXHost, gpex_cfg.pio.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_PIO_SIZE, GPEXHost, gpex_cfg.pio.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_BELOW_4G_MMIO_BASE, GPEXHost, + gpex_cfg.mmio32.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_BELOW_4G_MMIO_SIZE, GPEXHost, + gpex_cfg.mmio32.size, 0), + DEFINE_PROP_UINT64(PCI_HOST_ABOVE_4G_MMIO_BASE, GPEXHost, + gpex_cfg.mmio64.base, 0), + DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MMIO_SIZE, GPEXHost, + gpex_cfg.mmio64.size, 0), DEFINE_PROP_END_OF_LIST(), }; @@ -195,7 +207,7 @@ static const VMStateDescription vmstate_gpex_root = { .name = "gpex_root", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, GPEXRootState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/gt64120.c b/hw/pci-host/gt64120.c index 143bf053d71dd090a4613bb3c66376aaad5c5159..e02efc9e2ea79361c2f65faf94b6a750f09bdcc6 100644 --- a/hw/pci-host/gt64120.c +++ b/hw/pci-host/gt64120.c @@ -431,7 +431,7 @@ static const VMStateDescription vmstate_gt64120 = { .version_id = 1, .minimum_version_id = 1, .post_load = gt64120_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, GT64120State, GT_REGS), VMSTATE_END_OF_LIST() } diff --git a/hw/pci-host/i440fx.c b/hw/pci-host/i440fx.c index 653cc3f149508b657e972d4f073dab6a6f13f10d..4f0a0438d773d31b13fd5d99e4819dfd1108c8a0 100644 --- a/hw/pci-host/i440fx.c +++ b/hw/pci-host/i440fx.c @@ -125,7 +125,7 @@ static const VMStateDescription vmstate_i440fx = { .version_id = 3, .minimum_version_id = 3, .post_load = i440fx_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCII440FXState), /* Used to be smm_enabled, which was basically always zero because * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. 
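The .fields and .subsections hunks above and below all apply one mechanical pattern: the compound-literal arrays become const-qualified so the migration descriptors can live in read-only data. As an illustration only (FooState and its members are hypothetical, not a device touched by these hunks, and the usual QEMU vmstate headers are assumed):

#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct FooState {
    uint32_t reg;
    bool extra;
} FooState;

/* Optional subsection, included to show the second half of the pattern. */
static const VMStateDescription vmstate_foo_extra = {
    .name = "foo/extra",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {            /* was: (VMStateField[]) */
        VMSTATE_BOOL(extra, FooState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {            /* was: (VMStateField[]) */
        VMSTATE_UINT32(reg, FooState),
        VMSTATE_END_OF_LIST()
    },
    /* was: (const VMStateDescription * []) */
    .subsections = (const VMStateDescription * const []) {
        &vmstate_foo_extra,
        NULL
    }
};

The handful of VMStateInfo definitions (msix, pci config, shpc) only gain a const on the variable itself; presumably the matching member declarations in migration/vmstate.h were const-ified in a change outside this excerpt.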
diff --git a/hw/pci-host/ppce500.c b/hw/pci-host/ppce500.c index 453a4e6ed3b02ed448c3fad71bd45eb73a5dbbb3..fa0d67b34245e63bfc581c900cbff07a06e50df3 100644 --- a/hw/pci-host/ppce500.c +++ b/hw/pci-host/ppce500.c @@ -379,7 +379,7 @@ static const VMStateDescription vmstate_pci_outbound = { .name = "pci_outbound", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(potar, struct pci_outbound), VMSTATE_UINT32(potear, struct pci_outbound), VMSTATE_UINT32(powbar, struct pci_outbound), @@ -392,7 +392,7 @@ static const VMStateDescription vmstate_pci_inbound = { .name = "pci_inbound", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(pitar, struct pci_inbound), VMSTATE_UINT32(piwbar, struct pci_inbound), VMSTATE_UINT32(piwbear, struct pci_inbound), @@ -405,7 +405,7 @@ static const VMStateDescription vmstate_ppce500_pci = { .name = "ppce500_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(pob, PPCE500PCIState, PPCE500_PCI_NR_POBS, 1, vmstate_pci_outbound, struct pci_outbound), VMSTATE_STRUCT_ARRAY(pib, PPCE500PCIState, PPCE500_PCI_NR_PIBS, 1, diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c index 08534bc7cc09c631db485d8e2da24eb1c99454bf..0d7d4e3f0860d2b8c5d010bd9f96c4cb75ee0461 100644 --- a/hw/pci-host/q35.c +++ b/hw/pci-host/q35.c @@ -520,7 +520,7 @@ static const VMStateDescription vmstate_mch = { .version_id = 1, .minimum_version_id = 1, .post_load = mch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MCHPCIState), /* Used to be smm_enabled, which was basically always zero because * SeaBIOS hardly uses SMM. SMRAM is now handled by CPU code. 
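A second recurring cleanup in these hunks (nrf51_nvm above, raven and rs6000_mc below) leans on memory_region_init_ram() and its siblings reporting failure through *errp and returning false, so a realize function can pass its errp straight through instead of going via a local Error and error_propagate(). A minimal sketch, with BarState and its members purely illustrative:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "hw/qdev-core.h"

typedef struct BarState {
    DeviceState parent_obj;
    MemoryRegion ram;
    uint64_t ram_base;
    uint64_t ram_size;
} BarState;

static void bar_realize(DeviceState *dev, Error **errp)
{
    BarState *s = (BarState *)dev;   /* a real device would use its QOM cast macro */

    /*
     * The init helper fills *errp and returns false on failure, so the
     * old local_err/error_propagate() sequence collapses into a single
     * early return.
     */
    if (!memory_region_init_ram(&s->ram, OBJECT(dev), "bar.ram",
                                s->ram_size, errp)) {
        return;
    }

    memory_region_add_subregion(get_system_memory(), s->ram_base, &s->ram);
}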
diff --git a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 86c3a4908712988637484967e88155afb1413742..c7a0a2878abbe66c26ca724ccb734009d3016839 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -345,8 +345,10 @@ static void raven_realize(PCIDevice *d, Error **errp) d->config[PCI_LATENCY_TIMER] = 0x10; d->config[PCI_CAPABILITY_LIST] = 0x00; - memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", BIOS_SIZE, - &error_fatal); + if (!memory_region_init_rom_nomigrate(&s->bios, OBJECT(s), "bios", + BIOS_SIZE, errp)) { + return; + } memory_region_add_subregion(get_system_memory(), (uint32_t)(-BIOS_SIZE), &s->bios); if (s->bios_name) { @@ -383,7 +385,7 @@ static const VMStateDescription vmstate_raven = { .name = "raven", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, RavenPCIState), VMSTATE_END_OF_LIST() }, diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 60d4e7cd9235979d3f15eba29514bd0f7fb23500..0e65deb3f9726197bba5744c79613d60f7a8cd17 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -147,7 +147,7 @@ static const VMStateDescription pci_vpb_vmstate = { .version_id = 1, .minimum_version_id = 1, .post_load = pci_vpb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(imap, PCIVPBState, 3), VMSTATE_UINT32_ARRAY(smap, PCIVPBState, 3), VMSTATE_UINT32(selfid, PCIVPBState), diff --git a/hw/pci/meson.build b/hw/pci/meson.build index b1855452f5b8471a3032672e6be566ece0280e63..b9c34b2acfea41e42b65d708abb3e81942327b6a 100644 --- a/hw/pci/meson.build +++ b/hw/pci/meson.build @@ -20,4 +20,3 @@ system_ss.add(when: 'CONFIG_PCI_EXPRESS', if_true: files('pcie_port.c', 'pcie_ho system_ss.add_all(when: 'CONFIG_PCI', if_true: pci_ss) system_ss.add(when: 'CONFIG_PCI', if_false: files('pci-stub.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('pci-stub.c')) diff --git a/hw/pci/msix.c b/hw/pci/msix.c index cd817f4ca8ec78f61fd21deddcda9a475fdaee1c..487e49834ee936357342732361875948c50df006 100644 --- a/hw/pci/msix.c +++ b/hw/pci/msix.c @@ -685,7 +685,7 @@ static int get_msix_state(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_msix = { +static const VMStateInfo vmstate_info_msix = { .name = "msix state", .get = get_msix_state, .put = put_msix_state, @@ -693,7 +693,7 @@ static VMStateInfo vmstate_info_msix = { const VMStateDescription vmstate_msix = { .name = "msix", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "msix", .version_id = 0, diff --git a/hw/pci/pci.c b/hw/pci/pci.c index c49417abb2df4dba40dcc52e61ee94c36300d328..76080af580d7a0499bbce24bb65f3f3816dc31f6 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_pcibus = { .name = "PCIBUS", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(nirq, PCIBus, NULL), VMSTATE_VARRAY_INT32(irq_count, PCIBus, nirq, 0, vmstate_info_int32, @@ -673,7 +673,7 @@ static int put_pci_config_device(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_pci_config = { +static const VMStateInfo vmstate_info_pci_config = { .name = "pci config", .get = get_pci_config_device, .put = put_pci_config_device, @@ -714,7 +714,7 @@ static int put_pci_irq_state(QEMUFile *f, void *pv, size_t size, return 0; } -static VMStateInfo vmstate_info_pci_irq_state = { +static const VMStateInfo 
vmstate_info_pci_irq_state = { .name = "pci irq state", .get = get_pci_irq_state, .put = put_pci_irq_state, @@ -734,7 +734,7 @@ const VMStateDescription vmstate_pci_device = { .name = "PCIDevice", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_POSITIVE_LE(version_id, PCIDevice), VMSTATE_BUFFER_UNSAFE_INFO_TEST(config, PCIDevice, migrate_is_not_pcie, diff --git a/hw/pci/pci_host.c b/hw/pci/pci_host.c index a18aa0a8d4c1d1c086d723b83c0d688603bd3253..dfe6fe618401e2b80a2ac69931d7d821bac0b6e0 100644 --- a/hw/pci/pci_host.c +++ b/hw/pci/pci_host.c @@ -234,7 +234,7 @@ const VMStateDescription vmstate_pcihost = { .needed = pci_host_needed, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(config_reg, PCIHostState), VMSTATE_END_OF_LIST() } diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index b68c7ecb49c79e400728ccc5c4c4ccd822d767e0..2c85a78fcde461f237d481357f8cab792bc5ff30 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -797,7 +797,7 @@ static const VMStateDescription vmstate_pcie_aer_err = { .name = "PCIE_AER_ERROR", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, PCIEAERErr), VMSTATE_UINT16(source_id, PCIEAERErr), VMSTATE_UINT16(flags, PCIEAERErr), @@ -818,7 +818,7 @@ const VMStateDescription vmstate_pcie_aer_log = { .name = "PCIE_AER_ERROR_LOG", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(log_num, PCIEAERLog), VMSTATE_UINT16_EQUAL(log_max, PCIEAERLog, NULL), VMSTATE_VALIDATE("log_num <= log_max", pcie_aer_state_log_num_valid), diff --git a/hw/pci/shpc.c b/hw/pci/shpc.c index df7f3701119437897d593fe4b4c815f9a42891c0..d2a5eea69e21adb4c1936bde28779b56d4d3b8eb 100644 --- a/hw/pci/shpc.c +++ b/hw/pci/shpc.c @@ -736,7 +736,7 @@ static int shpc_load(QEMUFile *f, void *pv, size_t size, return 0; } -VMStateInfo shpc_vmstate_info = { +const VMStateInfo shpc_vmstate_info = { .name = "shpc", .get = shpc_load, .put = shpc_save, diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 56f0475a8e2e46e433d9df02460aec65e26d77a5..44263a58c4d3202afbe8bb44c67701bded855a57 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -3,11 +3,11 @@ config PSERIES imply PCI_DEVICES imply TEST_DEVICES imply VIRTIO_VGA + imply VFIO_PCI if LINUX # needed by spapr_pci_vfio.c select NVDIMM select DIMM select PCI select SPAPR_VSCSI - select VFIO if LINUX # needed by spapr_pci_vfio.c select XICS select XIVE select MSI_NONBROKEN diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 384226296bfba2ffdcfe740296780ac65b8f854f..566f1200ddee8ac076790afc240bb5770a0fd5e9 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -955,7 +955,7 @@ void ppce500_init(MachineState *machine) * when implementing non-kernel boot. 
*/ object_property_set_bool(OBJECT(cs), "start-powered-off", i != 0, - &error_fatal); + &error_abort); qdev_realize_and_unref(DEVICE(cs), NULL, &error_fatal); if (!firstenv) { diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index ea44856d43b03771c35ae55fb5f92266e9f62a04..eba3406e7f3919b38731b7961c2e55ac2995bf90 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -34,9 +34,11 @@ ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_TCG'], if_true: files( 'spapr_softmmu.c', )) ppc_ss.add(when: 'CONFIG_SPAPR_RNG', if_true: files('spapr_rng.c')) -ppc_ss.add(when: ['CONFIG_PSERIES', 'CONFIG_LINUX'], if_true: files( - 'spapr_pci_vfio.c', -)) +if host_os == 'linux' + ppc_ss.add(when: 'CONFIG_PSERIES', if_true: files( + 'spapr_pci_vfio.c', + )) +endif # IBM PowerNV ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 3203a4a72898c9eec0f801c5e7e1cd3fff4922c5..d84f3f977d99336fb8d952f863bd0fb45d481fb8 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -515,7 +515,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (FIELD_EX64(env->msr, MSR, PR)) { qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index be167710a3561fba9742676b330fe98471032000..fadb8f523911f9bd1c4192b6154d7ef89189cf94 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -47,7 +47,7 @@ void ppc_set_irq(PowerPCCPU *cpu, int irq, int level) unsigned int old_pending; /* We may already have the BQL if coming from the reset path */ - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); old_pending = env->pending_interrupts; @@ -314,7 +314,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); switch ((val >> 28) & 0x3) { case 0x0: @@ -334,7 +334,7 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* PowerPC 40x internal IRQ controller */ @@ -1066,7 +1066,7 @@ const VMStateDescription vmstate_ppc_timebase = { .version_id = 1, .minimum_version_id = 1, .pre_save = timebase_pre_save, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT64(guest_timebase, PPCTimebase), VMSTATE_INT64(time_of_the_day_ns, PPCTimebase), VMSTATE_END_OF_LIST() diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c index 66521190086ec48c2068ce1c9dcee1f0daf55638..0a07aab5d1518fcaac5e77a4fe081310e8e47902 100644 --- a/hw/ppc/ppc4xx_pci.c +++ b/hw/ppc/ppc4xx_pci.c @@ -276,7 +276,7 @@ static const VMStateDescription vmstate_pci_master_map = { .name = "pci_master_map", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(la, struct PCIMasterMap), VMSTATE_UINT32(ma, struct PCIMasterMap), VMSTATE_UINT32(pcila, struct PCIMasterMap), @@ -289,7 +289,7 @@ static const VMStateDescription vmstate_pci_target_map = { .name = "pci_target_map", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ms, struct PCITargetMap), VMSTATE_UINT32(la, struct PCITargetMap), VMSTATE_END_OF_LIST() @@ -300,7 +300,7 @@ static const VMStateDescription vmstate_ppc4xx_pci = { .name = "ppc4xx_pci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const 
VMStateField[]) { VMSTATE_STRUCT_ARRAY(pmm, PPC4xxPCIState, PPC4xx_PCI_NR_PMMS, 1, vmstate_pci_master_map, struct PCIMasterMap), diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index c96cefb13d567d26b23341a60e90aef4f264830a..4d3a251ed823a3a9facb706f212c18f88eb566e3 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -277,7 +277,7 @@ static const VMStateDescription vmstate_prep_systemio = { .name = "prep_systemio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(sreset, PrepSystemIoState), VMSTATE_UINT8(system_control, PrepSystemIoState), VMSTATE_UINT8(iomap_type, PrepSystemIoState), diff --git a/hw/ppc/rs6000_mc.c b/hw/ppc/rs6000_mc.c index c0bc212e92400d37ef267f59abb2a59cf612f3ca..e6ec4b4c4062ed341e81a8802a7f0a05c22e8f8a 100644 --- a/hw/ppc/rs6000_mc.c +++ b/hw/ppc/rs6000_mc.c @@ -143,7 +143,6 @@ static void rs6000mc_realize(DeviceState *dev, Error **errp) RS6000MCState *s = RS6000MC(dev); int socket = 0; unsigned int ram_size = s->ram_size / MiB; - Error *local_err = NULL; while (socket < 6) { if (ram_size >= 64) { @@ -165,10 +164,8 @@ static void rs6000mc_realize(DeviceState *dev, Error **errp) if (s->simm_size[socket]) { char name[] = "simm.?"; name[5] = socket + '0'; - memory_region_init_ram(&s->simm[socket], OBJECT(dev), name, - s->simm_size[socket] * MiB, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram(&s->simm[socket], OBJECT(dev), name, + s->simm_size[socket] * MiB, errp)) { return; } memory_region_add_subregion_overlap(get_system_memory(), 0, @@ -202,7 +199,7 @@ static const VMStateDescription vmstate_rs6000mc = { .name = "rs6000-mc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(port0820_index, RS6000MCState), VMSTATE_END_OF_LIST() }, diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index df09aa9d6a007cd2adf8e3c0ccf678919f2a2e96..e8dabc8614191fcddab2ef0e888a2323a792d98e 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -152,7 +152,7 @@ static const VMStateDescription pre_2_10_vmstate_dummy_icp = { .version_id = 1, .minimum_version_id = 1, .needed = pre_2_10_vmstate_dummy_icp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(4), /* uint32_t xirr */ VMSTATE_UNUSED(1), /* uint8_t pending_priority */ VMSTATE_UNUSED(1), /* uint8_t mfrr */ @@ -1304,7 +1304,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, CPUPPCState *env = &cpu->env; /* The TCG path should also be holding the BQL at this point */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); g_assert(!vhyp_cpu_in_nested(cpu)); @@ -1919,7 +1919,7 @@ static const VMStateDescription vmstate_spapr_event_entry = { .name = "spapr_event_log_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(summary, SpaprEventLogEntry), VMSTATE_UINT32(extended_length, SpaprEventLogEntry), VMSTATE_VBUFFER_ALLOC_UINT32(extended_log, SpaprEventLogEntry, 0, @@ -1933,7 +1933,7 @@ static const VMStateDescription vmstate_spapr_pending_events = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_pending_events_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_QTAILQ_V(pending_events, SpaprMachineState, 1, vmstate_spapr_event_entry, SpaprEventLogEntry, next), VMSTATE_END_OF_LIST() @@ -1989,7 +1989,7 @@ static const VMStateDescription 
vmstate_spapr_ov5_cas = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_ov5_cas_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER_V(ov5_cas, SpaprMachineState, 1, vmstate_spapr_ovec, SpaprOptionVector), VMSTATE_END_OF_LIST() @@ -2008,7 +2008,7 @@ static const VMStateDescription vmstate_spapr_patb_entry = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_patb_entry_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(patb_entry, SpaprMachineState), VMSTATE_END_OF_LIST() }, @@ -2026,7 +2026,7 @@ static const VMStateDescription vmstate_spapr_irq_map = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_irq_map_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(irq_map, SpaprMachineState, 0, irq_map_nr), VMSTATE_END_OF_LIST() }, @@ -2056,7 +2056,7 @@ static const VMStateDescription vmstate_spapr_dtb = { .minimum_version_id = 1, .needed = spapr_dtb_needed, .pre_load = spapr_dtb_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(fdt_initial_size, SpaprMachineState), VMSTATE_UINT32(fdt_size, SpaprMachineState), VMSTATE_VBUFFER_ALLOC_UINT32(fdt_blob, SpaprMachineState, 0, NULL, @@ -2094,7 +2094,7 @@ static const VMStateDescription vmstate_spapr_fwnmi = { .minimum_version_id = 1, .needed = spapr_fwnmi_needed, .pre_save = spapr_fwnmi_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(fwnmi_system_reset_addr, SpaprMachineState), VMSTATE_UINT64(fwnmi_machine_check_addr, SpaprMachineState), VMSTATE_INT32(fwnmi_machine_check_interlock, SpaprMachineState), @@ -2109,7 +2109,7 @@ static const VMStateDescription vmstate_spapr = { .pre_load = spapr_pre_load, .post_load = spapr_post_load, .pre_save = spapr_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* used to be @next_irq */ VMSTATE_UNUSED_BUFFER(version_before_3, 0, 4), @@ -2119,7 +2119,7 @@ static const VMStateDescription vmstate_spapr = { VMSTATE_PPC_TIMEBASE_V(tb, SpaprMachineState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_ov5_cas, &vmstate_spapr_patb_entry, &vmstate_spapr_pending_events, @@ -4785,15 +4785,26 @@ static void spapr_machine_latest_class_options(MachineClass *mc) } \ type_init(spapr_machine_register_##suffix) +/* + * pseries-9.0 + */ +static void spapr_machine_9_0_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE(9_0, "9.0", true); + /* * pseries-8.2 */ static void spapr_machine_8_2_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_SPAPR_MACHINE(8_2, "8.2", true); +DEFINE_SPAPR_MACHINE(8_2, "8.2", false); /* * pseries-8.1 diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index 5a0755d34fbe53ea15f0dd45ac7e8273cc043674..e889244e52ad57a4722705f1f85182943473989d 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -904,7 +904,7 @@ const VMStateDescription vmstate_spapr_cap_##sname = { \ .version_id = 1, \ .minimum_version_id = 1, \ .needed = spapr_cap_##sname##_needed, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT8(mig.caps[cap], \ SpaprMachineState), \ 
VMSTATE_END_OF_LIST() \ diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 91fae56573ee5e312202f2f1c33dc18e22e4fa6b..5aa1ed474ad6c44e34bc297c69c4aa67f9af4cce 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -127,7 +127,7 @@ static const VMStateDescription vmstate_spapr_cpu_slb_shadow = { .version_id = 1, .minimum_version_id = 1, .needed = slb_shadow_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(slb_shadow_addr, SpaprCpuState), VMSTATE_UINT64(slb_shadow_size, SpaprCpuState), VMSTATE_END_OF_LIST() @@ -146,7 +146,7 @@ static const VMStateDescription vmstate_spapr_cpu_dtl = { .version_id = 1, .minimum_version_id = 1, .needed = dtl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dtl_addr, SpaprCpuState), VMSTATE_UINT64(dtl_size, SpaprCpuState), VMSTATE_END_OF_LIST() @@ -165,11 +165,11 @@ static const VMStateDescription vmstate_spapr_cpu_vpa = { .version_id = 1, .minimum_version_id = 1, .needed = vpa_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vpa_addr, SpaprCpuState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_cpu_slb_shadow, &vmstate_spapr_cpu_dtl, NULL @@ -180,10 +180,10 @@ static const VMStateDescription vmstate_spapr_cpu_state = { .name = "spapr_cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_cpu_vpa, NULL } @@ -306,7 +306,7 @@ static PowerPCCPU *spapr_create_vcpu(SpaprCpuCore *sc, int i, Error **errp) * All CPUs start halted. CPU0 is unhalted from the machine level reset code * and the rest are explicitly started up by the guest using an RTAS call. 
*/ - cs->start_powered_off = true; + qdev_prop_set_bit(DEVICE(obj), "start-powered-off", true); cs->cpu_index = cc->core_id + i; if (!spapr_set_vcpu_id(cpu, cs->cpu_index, errp)) { return NULL; diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 2b99d3b4b1a600c29d92757ef0225fe9a6ad9572..1484e3209d9ab45555d4ac41d67c990b540c946e 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -471,7 +471,7 @@ static const VMStateDescription vmstate_spapr_drc_unplug_requested = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_drc_unplug_requested_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_BOOL(unplug_requested, SpaprDrc), VMSTATE_END_OF_LIST() } @@ -504,11 +504,11 @@ static const VMStateDescription vmstate_spapr_drc = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_drc_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(state, SpaprDrc), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_drc_unplug_requested, NULL } @@ -611,7 +611,7 @@ static const VMStateDescription vmstate_spapr_drc_physical = { .version_id = 1, .minimum_version_id = 1, .needed = drc_physical_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(dr_indicator, SpaprDrcPhysical), VMSTATE_END_OF_LIST() } diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index deb464150571b3e7cf3e6c7ca65bd42f877f1a1d..cb0eeee58741bc5adbc301216b55771eaed83f74 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -899,7 +899,7 @@ void spapr_mce_req_event(PowerPCCPU *cpu, bool recovered) } return; } - qemu_cond_wait_iothread(&spapr->fwnmi_machine_check_interlock_cond); + qemu_cond_wait_bql(&spapr->fwnmi_machine_check_interlock_cond); if (spapr->fwnmi_machine_check_addr == -1) { /* * If the machine was reset while waiting for the interlock, diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 522a2396c7f73ea1cfa953f8a99d4780c57b82c0..fcefd1d1c70c8ef6c8af1bef8e667e1cb5230ab2 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -8,7 +8,6 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/error-report.h" -#include "exec/exec-all.h" #include "exec/tb-flush.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index 5e3973fc5fba30354de3dd285b0d297f7f8b1613..e3c01ef44f8130b1bb2f4f95ba30a5185f1d23cc 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -270,7 +270,7 @@ static const VMStateDescription vmstate_spapr_tce_table_ex = { .version_id = 1, .minimum_version_id = 1, .needed = spapr_tce_table_ex_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(bus_offset, SpaprTceTable), VMSTATE_UINT32(page_shift, SpaprTceTable), VMSTATE_END_OF_LIST() @@ -283,7 +283,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { .minimum_version_id = 2, .pre_save = spapr_tce_table_pre_save, .post_load = spapr_tce_table_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { /* Sanity check */ VMSTATE_UINT32_EQUAL(liobn, SpaprTceTable, NULL), @@ -296,7 +296,7 @@ static const VMStateDescription vmstate_spapr_tce_table = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_spapr_tce_table_ex, NULL } diff --git a/hw/ppc/spapr_nvdimm.c 
b/hw/ppc/spapr_nvdimm.c index ad7afe754448beefdcd2adaea914039dbd16c678..7d2dfe5e3d2f725ecbf69467df583b550bd29713 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -528,7 +528,7 @@ static const VMStateDescription vmstate_spapr_nvdimm_flush_state = { .name = "spapr_nvdimm_flush_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(continue_token, SpaprNVDIMMDeviceFlushState), VMSTATE_INT64(hcall_ret, SpaprNVDIMMDeviceFlushState), VMSTATE_UINT32(drcidx, SpaprNVDIMMDeviceFlushState), @@ -541,7 +541,7 @@ const VMStateDescription vmstate_spapr_nvdimm_states = { .version_id = 1, .minimum_version_id = 1, .post_load = spapr_nvdimm_flush_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(hcall_flush_required, SpaprNVDIMMDevice), VMSTATE_UINT64(nvdimm_flush_token, SpaprNVDIMMDevice), VMSTATE_QLIST_V(completed_nvdimm_flush_states, SpaprNVDIMMDevice, 1, diff --git a/hw/ppc/spapr_ovec.c b/hw/ppc/spapr_ovec.c index b2567caa5cf4740c796ebaacdd9a87e88b9b9a4a..88e29536aa71591d77c62d6ce5db41674f1fa33f 100644 --- a/hw/ppc/spapr_ovec.c +++ b/hw/ppc/spapr_ovec.c @@ -36,7 +36,7 @@ const VMStateDescription vmstate_spapr_ovec = { .name = "spapr_option_vector", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BITMAP(bitmap, SpaprOptionVector, 1, bitmap_size), VMSTATE_END_OF_LIST() } diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 6760823e13f92f4872d0d6db0c375ae96ccaa8d5..25e0295d6fd5cab4615a46a756eabf6ebb3821c9 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -2115,7 +2115,7 @@ static const VMStateDescription vmstate_spapr_pci_lsi = { .name = "spapr_pci/lsi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_EQUAL(irq, SpaprPciLsi, NULL), VMSTATE_END_OF_LIST() @@ -2126,7 +2126,7 @@ static const VMStateDescription vmstate_spapr_pci_msi = { .name = "spapr_pci/msi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(key, SpaprPciMsiMig), VMSTATE_UINT32(value.first_irq, SpaprPciMsiMig), VMSTATE_UINT32(value.num, SpaprPciMsiMig), @@ -2216,7 +2216,7 @@ static const VMStateDescription vmstate_spapr_pci = { .pre_save = spapr_pci_pre_save, .post_save = spapr_pci_post_save, .post_load = spapr_pci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_EQUAL(buid, SpaprPhbState, NULL), VMSTATE_UINT32_TEST(mig_liobn, SpaprPhbState, pre_2_8_migration), VMSTATE_UINT64_TEST(mig_mem_win_addr, SpaprPhbState, pre_2_8_migration), diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index f283f7e38d619fd65985146b5e1572c221f95c53..76b2a3487b5d6f21528e9c301341eb27bc8fec1d 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -26,10 +26,12 @@ #include "hw/pci/pci_device.h" #include "hw/vfio/vfio-common.h" #include "qemu/error-report.h" +#include CONFIG_DEVICES /* CONFIG_VFIO_PCI */ /* * Interfaces for IBM EEH (Enhanced Error Handling) */ +#ifdef CONFIG_VFIO_PCI static bool vfio_eeh_container_ok(VFIOContainer *container) { /* @@ -84,27 +86,27 @@ static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) { VFIOAddressSpace *space = vfio_get_address_space(as); - VFIOContainer *container = NULL; + VFIOContainerBase *bcontainer = NULL; 
if (QLIST_EMPTY(&space->containers)) { /* No containers to act on */ goto out; } - container = QLIST_FIRST(&space->containers); + bcontainer = QLIST_FIRST(&space->containers); - if (QLIST_NEXT(container, next)) { + if (QLIST_NEXT(bcontainer, next)) { /* * We don't yet have logic to synchronize EEH state across * multiple containers */ - container = NULL; + bcontainer = NULL; goto out; } out: vfio_put_address_space(space); - return container; + return container_of(bcontainer, VFIOContainer, bcontainer); } static bool vfio_eeh_as_ok(AddressSpace *as) @@ -314,3 +316,37 @@ int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb) return RTAS_OUT_SUCCESS; } + +#else + +bool spapr_phb_eeh_available(SpaprPhbState *sphb) +{ + return false; +} + +void spapr_phb_vfio_reset(DeviceState *qdev) +{ +} + +int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb, + unsigned int addr, int option) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb) +{ + return RTAS_OUT_NOT_SUPPORTED; +} + +#endif /* CONFIG_VFIO_PCI */ diff --git a/hw/ppc/spapr_rng.c b/hw/ppc/spapr_rng.c index df5c4b9687350e11ef98d31229befebb623ed599..c2fda7ad2094afe9ff7c93da661104a13e918289 100644 --- a/hw/ppc/spapr_rng.c +++ b/hw/ppc/spapr_rng.c @@ -82,9 +82,9 @@ static target_ulong h_random(PowerPCCPU *cpu, SpaprMachineState *spapr, while (hrdata.received < 8) { rng_backend_request_entropy(rngstate->backend, 8 - hrdata.received, random_recv, &hrdata); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_sem_wait(&hrdata.sem); - qemu_mutex_lock_iothread(); + bql_lock(); } qemu_sem_destroy(&hrdata.sem); diff --git a/hw/ppc/spapr_rtc.c b/hw/ppc/spapr_rtc.c index d55b4b0c50d8aa354453cd6d469dce1b4c242d2c..deb3ea4e495af3fe680e32234e56d35f69a07de7 100644 --- a/hw/ppc/spapr_rtc.c +++ b/hw/ppc/spapr_rtc.c @@ -157,7 +157,7 @@ static const VMStateDescription vmstate_spapr_rtc = { .name = "spapr/rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(ns_offset, SpaprRtcState), VMSTATE_END_OF_LIST() }, diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c index 278666317ef22cb8b8ed8d6832fbef81009cb3b3..fc1bbc0b61c8e92dc51702d036788c54e801b5fb 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_softmmu.c @@ -334,7 +334,7 @@ static void *hpt_prepare_thread(void *opaque) pending->ret = H_NO_MEM; } - qemu_mutex_lock_iothread(); + bql_lock(); if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) { /* Ready to go */ @@ -344,7 +344,7 @@ static void *hpt_prepare_thread(void *opaque) free_pending_hpt(pending); } - qemu_mutex_unlock_iothread(); + bql_unlock(); return NULL; } diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c index f8ef2b6fa877dbcc7a16b0fe2ee9fef05a6b64a4..3221874848da693b3d458a438f5a6645c15a982f 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -616,7 +616,7 @@ const VMStateDescription vmstate_spapr_vio = { .name = "spapr_vio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Sanity check */ VMSTATE_UINT32_EQUAL(reg, SpaprVioDevice, NULL), VMSTATE_UINT32_EQUAL(irq, SpaprVioDevice, NULL), diff --git a/hw/remote/meson.build b/hw/remote/meson.build index a3aa29aaf173f7f7e610c19b3bffd445609d095a..41eb4971d98307b51be9e0c298e8fb5bce82a730 
100644 --- a/hw/remote/meson.build +++ b/hw/remote/meson.build @@ -11,7 +11,6 @@ remote_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('iommu.c')) remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: libvfio_user_dep) remote_ss.add(when: 'CONFIG_VFIO_USER_SERVER', if_true: files('vfio-user-obj.c'), if_false: files('vfio-user-obj-stub.c')) -remote_ss.add(when: 'CONFIG_ALL', if_true: files('vfio-user-obj-stub.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('memory.c')) specific_ss.add(when: 'CONFIG_MULTIPROCESS', if_true: files('proxy-memory-listener.c')) diff --git a/hw/remote/mpqemu-link.c b/hw/remote/mpqemu-link.c index 9bd98e82197e4dbdb0ad37bb519dec6f31e28d92..4394dc4d821c11e7f7a33fc4eb64e68afd94f76e 100644 --- a/hw/remote/mpqemu-link.c +++ b/hw/remote/mpqemu-link.c @@ -33,7 +33,7 @@ */ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) { - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); struct iovec send[2] = {}; int *fds = NULL; @@ -58,13 +58,13 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) assert(qemu_in_coroutine() || !iothread); /* - * Skip unlocking/locking iothread lock when the IOThread is running + * Skip unlocking/locking BQL when the IOThread is running * in co-routine context. Co-routine context is asserted above * for IOThread case. * Also skip lock handling while in a co-routine in the main context. */ - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } if (!qio_channel_writev_full_all(ioc, send, G_N_ELEMENTS(send), @@ -74,9 +74,9 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp) trace_mpqemu_send_io_error(msg->cmd, msg->size, nfds); } - if (iolock && !iothread && !qemu_in_coroutine()) { + if (drop_bql && !iothread && !qemu_in_coroutine()) { /* See above comment why skip locking here. */ - qemu_mutex_lock_iothread(); + bql_lock(); } return ret; @@ -96,7 +96,7 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, size_t *nfds, Error **errp) { struct iovec iov = { .iov_base = buf, .iov_len = len }; - bool iolock = qemu_mutex_iothread_locked(); + bool drop_bql = bql_locked(); bool iothread = qemu_in_iothread(); int ret = -1; @@ -106,14 +106,14 @@ static ssize_t mpqemu_read(QIOChannel *ioc, void *buf, size_t len, int **fds, */ assert(qemu_in_coroutine() || !iothread); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_unlock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_unlock(); } ret = qio_channel_readv_full_all_eof(ioc, &iov, 1, fds, nfds, errp); - if (iolock && !iothread && !qemu_in_coroutine()) { - qemu_mutex_lock_iothread(); + if (drop_bql && !iothread && !qemu_in_coroutine()) { + bql_lock(); } return (ret <= 0) ? 
ret : iov.iov_len; diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index 8b10c32a3c6eee5446ed3a0ee54433b8e577355e..d9b879e056ba04791ccee282ba1f2cd29201f88a 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -400,7 +400,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } diff --git a/hw/riscv/Kconfig b/hw/riscv/Kconfig index b6a5eb4452e44fd95ac2a78118295fe4f3475d66..a50717be879773b8c9a68fe7614f5abace1fa514 100644 --- a/hw/riscv/Kconfig +++ b/hw/riscv/Kconfig @@ -45,6 +45,7 @@ config RISCV_VIRT select FW_CFG_DMA select PLATFORM_BUS select ACPI + select ACPI_PCI config SHAKTI_C bool diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index 12ea74b0324bcdb756800e124d58bd07725a9b42..3888034c2b905043b7e7d842fb0a14d7a1c10c63 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -28,7 +28,6 @@ #include "exec/address-spaces.h" #include "hw/riscv/boot.h" - static const struct MemmapEntry { hwaddr base; hwaddr size; @@ -47,12 +46,6 @@ static void shakti_c_machine_state_init(MachineState *mstate) ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); MemoryRegion *system_memory = get_system_memory(); - /* Allow only Shakti C CPU for this platform */ - if (strcmp(mstate->cpu_type, TYPE_RISCV_CPU_SHAKTI_C) != 0) { - error_report("This board can only be used with Shakti C CPU"); - exit(1); - } - /* Initialize SoC */ object_initialize_child(OBJECT(mstate), "soc", &sms->soc, TYPE_RISCV_SHAKTI_SOC); @@ -82,9 +75,15 @@ static void shakti_c_machine_instance_init(Object *obj) static void shakti_c_machine_class_init(ObjectClass *klass, void *data) { MachineClass *mc = MACHINE_CLASS(klass); + static const char * const valid_cpu_types[] = { + RISCV_CPU_TYPE_NAME("shakti-c"), + NULL + }; + mc->desc = "RISC-V Board compatible with Shakti SDK"; mc->init = shakti_c_machine_state_init; mc->default_cpu_type = TYPE_RISCV_CPU_SHAKTI_C; + mc->valid_cpu_types = valid_cpu_types; mc->default_ram_id = "riscv.shakti.c.ram"; } diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c index 7331248f59280b4b3589ff767369741b7d70a566..26c7e4482d75a73f4a8a5440d79d26f24110fbf9 100644 --- a/hw/riscv/virt-acpi-build.c +++ b/hw/riscv/virt-acpi-build.c @@ -27,16 +27,21 @@ #include "hw/acpi/acpi-defs.h" #include "hw/acpi/acpi.h" #include "hw/acpi/aml-build.h" +#include "hw/acpi/pci.h" #include "hw/acpi/utils.h" +#include "hw/intc/riscv_aclint.h" +#include "hw/nvram/fw_cfg_acpi.h" +#include "hw/pci-host/gpex.h" +#include "hw/riscv/virt.h" +#include "hw/riscv/numa.h" +#include "hw/virtio/virtio-acpi.h" +#include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "sysemu/reset.h" -#include "migration/vmstate.h" -#include "hw/riscv/virt.h" -#include "hw/riscv/numa.h" -#include "hw/intc/riscv_aclint.h" #define ACPI_BUILD_TABLE_SIZE 0x20000 +#define ACPI_BUILD_INTC_ID(socket, index) ((socket << 24) | (index)) typedef struct AcpiBuildState { /* Copy of table in RAM (for patching) */ @@ -58,17 +63,56 @@ static void acpi_align_size(GArray *blob, unsigned align) static void riscv_acpi_madt_add_rintc(uint32_t uid, const CPUArchIdList *arch_ids, - GArray *entry) + GArray *entry, + RISCVVirtState *s) { + uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1); uint64_t hart_id = arch_ids->cpus[uid].arch_id; + uint32_t imsic_size, local_cpu_id, socket_id; + uint64_t imsic_socket_addr, imsic_addr; + MachineState *ms = MACHINE(s); 
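    /*
     * Worked example (illustrative only, using the expressions added just
     * below): for socket 1, local hart 2 and s->aia_guests == 1, the
     * supervisor-mode IMSIC page advertised in the 36-byte RINTC entry is
     *
     *   imsic_addr = VIRT_IMSIC_S base
     *              + 1 * VIRT_IMSIC_GROUP_MAX_SIZE
     *              + 2 * IMSIC_HART_SIZE(imsic_num_bits(2))
     *
     * with imsic_size = IMSIC_HART_SIZE(imsic_num_bits(2)). When the AIA
     * type is not APLIC+IMSIC, the entry carries zeroes for both fields.
     */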
+ socket_id = arch_ids->cpus[uid].props.node_id; + local_cpu_id = (arch_ids->cpus[uid].arch_id - + riscv_socket_first_hartid(ms, socket_id)) % + riscv_socket_hart_count(ms, socket_id); + imsic_socket_addr = s->memmap[VIRT_IMSIC_S].base + + (socket_id * VIRT_IMSIC_GROUP_MAX_SIZE); + imsic_size = IMSIC_HART_SIZE(guest_index_bits); + imsic_addr = imsic_socket_addr + local_cpu_id * imsic_size; build_append_int_noprefix(entry, 0x18, 1); /* Type */ - build_append_int_noprefix(entry, 20, 1); /* Length */ + build_append_int_noprefix(entry, 36, 1); /* Length */ build_append_int_noprefix(entry, 1, 1); /* Version */ build_append_int_noprefix(entry, 0, 1); /* Reserved */ build_append_int_noprefix(entry, 0x1, 4); /* Flags */ build_append_int_noprefix(entry, hart_id, 8); /* Hart ID */ build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ + /* External Interrupt Controller ID */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + build_append_int_noprefix(entry, + ACPI_BUILD_INTC_ID( + arch_ids->cpus[uid].props.node_id, + local_cpu_id), + 4); + } else if (s->aia_type == VIRT_AIA_TYPE_NONE) { + build_append_int_noprefix(entry, + ACPI_BUILD_INTC_ID( + arch_ids->cpus[uid].props.node_id, + 2 * local_cpu_id + 1), + 4); + } else { + build_append_int_noprefix(entry, 0, 4); + } + + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* IMSIC Base address */ + build_append_int_noprefix(entry, imsic_addr, 8); + /* IMSIC Size */ + build_append_int_noprefix(entry, imsic_size, 4); + } else { + build_append_int_noprefix(entry, 0, 8); + build_append_int_noprefix(entry, 0, 4); + } } static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) @@ -87,7 +131,7 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) aml_int(arch_ids->cpus[i].arch_id))); /* build _MAT object */ - riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf); + riscv_acpi_madt_add_rintc(i, arch_ids, madt_buf, s); aml_append(dev, aml_name_decl("_MAT", aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data))); @@ -97,19 +141,36 @@ static void acpi_dsdt_add_cpus(Aml *scope, RISCVVirtState *s) } } -static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap) +static void +acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, + uint32_t uart_irq) { - Aml *dev = aml_device("FWCF"); - aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002"))); - - /* device present, functioning, decoding, not shown in UI */ - aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); - aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + Aml *dev = aml_device("COM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); Aml *crs = aml_resource_template(); - aml_append(crs, aml_memory32_fixed(fw_cfg_memmap->base, - fw_cfg_memmap->size, AML_READ_WRITE)); + aml_append(crs, aml_memory32_fixed(uart_memmap->base, + uart_memmap->size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &uart_irq, 1)); aml_append(dev, aml_name_decl("_CRS", crs)); + + Aml *pkg = aml_package(2); + aml_append(pkg, aml_string("clock-frequency")); + aml_append(pkg, aml_int(3686400)); + + Aml *UUID = aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"); + + Aml *pkg1 = aml_package(1); + aml_append(pkg1, pkg); + + Aml *package = aml_package(2); + aml_append(package, UUID); + aml_append(package, pkg1); + + aml_append(dev, aml_name_decl("_DSD", package)); aml_append(scope, dev); } @@ -121,6 +182,7 @@ static void acpi_dsdt_add_fw_cfg(Aml *scope, const 
MemMapEntry *fw_cfg_memmap) * 5.2.36 RISC-V Hart Capabilities Table (RHCT) * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view + * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view */ static void build_rhct(GArray *table_data, BIOSLinker *linker, @@ -130,8 +192,10 @@ static void build_rhct(GArray *table_data, MachineState *ms = MACHINE(s); const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); size_t len, aligned_len; - uint32_t isa_offset, num_rhct_nodes; - RISCVCPU *cpu; + uint32_t isa_offset, num_rhct_nodes, cmo_offset = 0; + RISCVCPU *cpu = &s->soc[0].harts[0]; + uint32_t mmu_offset = 0; + uint8_t satp_mode_max; char *isa; AcpiTable table = { .sig = "RHCT", .rev = 1, .oem_id = s->oem_id, @@ -147,6 +211,13 @@ static void build_rhct(GArray *table_data, /* ISA + N hart info */ num_rhct_nodes = 1 + ms->smp.cpus; + if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) { + num_rhct_nodes++; + } + + if (cpu->cfg.satp_mode.supported != 0) { + num_rhct_nodes++; + } /* Number of RHCT nodes*/ build_append_int_noprefix(table_data, num_rhct_nodes, 4); @@ -158,7 +229,6 @@ static void build_rhct(GArray *table_data, isa_offset = table_data->len - table.table_offset; build_append_int_noprefix(table_data, 0, 2); /* Type 0 */ - cpu = &s->soc[0].harts[0]; isa = riscv_isa_string(cpu); len = 8 + strlen(isa) + 1; aligned_len = (len % 2) ? (len + 1) : len; @@ -174,14 +244,87 @@ static void build_rhct(GArray *table_data, build_append_int_noprefix(table_data, 0x0, 1); /* Optional Padding */ } + /* CMO node */ + if (cpu->cfg.ext_zicbom || cpu->cfg.ext_zicboz) { + cmo_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 1, 2); /* Type */ + build_append_int_noprefix(table_data, 10, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + + /* CBOM block size */ + if (cpu->cfg.cbom_blocksize) { + build_append_int_noprefix(table_data, + __builtin_ctz(cpu->cfg.cbom_blocksize), + 1); + } else { + build_append_int_noprefix(table_data, 0, 1); + } + + /* CBOP block size */ + build_append_int_noprefix(table_data, 0, 1); + + /* CBOZ block size */ + if (cpu->cfg.cboz_blocksize) { + build_append_int_noprefix(table_data, + __builtin_ctz(cpu->cfg.cboz_blocksize), + 1); + } else { + build_append_int_noprefix(table_data, 0, 1); + } + } + + /* MMU node structure */ + if (cpu->cfg.satp_mode.supported != 0) { + satp_mode_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map); + mmu_offset = table_data->len - table.table_offset; + build_append_int_noprefix(table_data, 2, 2); /* Type */ + build_append_int_noprefix(table_data, 8, 2); /* Length */ + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + /* MMU Type */ + if (satp_mode_max == VM_1_10_SV57) { + build_append_int_noprefix(table_data, 2, 1); /* Sv57 */ + } else if (satp_mode_max == VM_1_10_SV48) { + build_append_int_noprefix(table_data, 1, 1); /* Sv48 */ + } else if (satp_mode_max == VM_1_10_SV39) { + build_append_int_noprefix(table_data, 0, 1); /* Sv39 */ + } else { + assert(1); + } + } + /* Hart Info Node */ for (int i = 0; i < arch_ids->len; i++) { + len = 16; + int num_offsets = 1; build_append_int_noprefix(table_data, 0xFFFF, 2); /* Type */ - build_append_int_noprefix(table_data, 16, 2); /* Length */ - build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ - 
build_append_int_noprefix(table_data, 1, 2); /* Number of offsets */ - build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ - build_append_int_noprefix(table_data, isa_offset, 4); /* Offsets[0] */ + + /* Length */ + if (cmo_offset) { + len += 4; + num_offsets++; + } + + if (mmu_offset) { + len += 4; + num_offsets++; + } + + build_append_int_noprefix(table_data, len, 2); + build_append_int_noprefix(table_data, 0x1, 2); /* Revision */ + /* Number of offsets */ + build_append_int_noprefix(table_data, num_offsets, 2); + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Offsets */ + build_append_int_noprefix(table_data, isa_offset, 4); + if (cmo_offset) { + build_append_int_noprefix(table_data, cmo_offset, 4); + } + + if (mmu_offset) { + build_append_int_noprefix(table_data, mmu_offset, 4); + } } acpi_table_end(linker, &table); @@ -209,6 +352,8 @@ static void build_dsdt(GArray *table_data, RISCVVirtState *s) { Aml *scope, *dsdt; + MachineState *ms = MACHINE(s); + uint8_t socket_count; const MemMapEntry *memmap = s->memmap; AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; @@ -226,7 +371,30 @@ static void build_dsdt(GArray *table_data, scope = aml_scope("\\_SB"); acpi_dsdt_add_cpus(scope, s); - acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); + fw_cfg_acpi_dsdt_add(scope, &memmap[VIRT_FW_CFG]); + + socket_count = riscv_socket_count(ms); + + acpi_dsdt_add_uart(scope, &memmap[VIRT_UART0], UART0_IRQ); + + if (socket_count == 1) { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ, 0, VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ); + } else if (socket_count == 2) { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0, + VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES); + } else { + virtio_acpi_dsdt_add(scope, memmap[VIRT_VIRTIO].base, + memmap[VIRT_VIRTIO].size, + VIRTIO_IRQ + VIRT_IRQCHIP_NUM_SOURCES, 0, + VIRTIO_COUNT); + acpi_dsdt_add_gpex_host(scope, PCIE_IRQ + VIRT_IRQCHIP_NUM_SOURCES * 2); + } aml_append(dsdt, scope); @@ -242,6 +410,7 @@ static void build_dsdt(GArray *table_data, * 5.2.12 Multiple APIC Description Table (MADT) * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/15 * https://drive.google.com/file/d/1R6k4MshhN3WTT-hwqAquu5nX6xSEqK2l/view + * https://drive.google.com/file/d/1oMGPyOD58JaPgMl1pKasT-VKsIKia7zR/view */ static void build_madt(GArray *table_data, BIOSLinker *linker, @@ -250,6 +419,21 @@ static void build_madt(GArray *table_data, MachineClass *mc = MACHINE_GET_CLASS(s); MachineState *ms = MACHINE(s); const CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(ms); + uint8_t group_index_bits = imsic_num_bits(riscv_socket_count(ms)); + uint8_t guest_index_bits = imsic_num_bits(s->aia_guests + 1); + uint16_t imsic_max_hart_per_socket = 0; + uint8_t hart_index_bits; + uint64_t aplic_addr; + uint32_t gsi_base; + uint8_t socket; + + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + if (imsic_max_hart_per_socket < s->soc[socket].num_harts) { + imsic_max_hart_per_socket = s->soc[socket].num_harts; + } + } + + hart_index_bits = imsic_num_bits(imsic_max_hart_per_socket); AcpiTable table = { .sig = "APIC", .rev = 6, .oem_id = s->oem_id, .oem_table_id = s->oem_table_id }; @@ -261,7 +445,84 @@ static void build_madt(GArray *table_data, /* RISC-V Local INTC structures per HART */ for (int i = 0; i < 
arch_ids->len; i++) { - riscv_acpi_madt_add_rintc(i, arch_ids, table_data); + riscv_acpi_madt_add_rintc(i, arch_ids, table_data, s); + } + + /* IMSIC */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) { + /* IMSIC */ + build_append_int_noprefix(table_data, 0x19, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + /* Number of supervisor mode Interrupt Identities */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2); + /* Number of guest mode Interrupt Identities */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_MSIS, 2); + /* Guest Index Bits */ + build_append_int_noprefix(table_data, guest_index_bits, 1); + /* Hart Index Bits */ + build_append_int_noprefix(table_data, hart_index_bits, 1); + /* Group Index Bits */ + build_append_int_noprefix(table_data, group_index_bits, 1); + /* Group Index Shift */ + build_append_int_noprefix(table_data, IMSIC_MMIO_GROUP_MIN_SHIFT, 1); + } + + if (s->aia_type != VIRT_AIA_TYPE_NONE) { + /* APLICs */ + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + aplic_addr = s->memmap[VIRT_APLIC_S].base + + s->memmap[VIRT_APLIC_S].size * socket; + gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket; + build_append_int_noprefix(table_data, 0x1A, 1); /* Type */ + build_append_int_noprefix(table_data, 36, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, socket, 1); /* APLIC ID */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */ + /* Number of IDCs */ + if (s->aia_type == VIRT_AIA_TYPE_APLIC) { + build_append_int_noprefix(table_data, + s->soc[socket].num_harts, + 2); + } else { + build_append_int_noprefix(table_data, 0, 2); + } + /* Total External Interrupt Sources Supported */ + build_append_int_noprefix(table_data, VIRT_IRQCHIP_NUM_SOURCES, 2); + /* Global System Interrupt Base */ + build_append_int_noprefix(table_data, gsi_base, 4); + /* APLIC Address */ + build_append_int_noprefix(table_data, aplic_addr, 8); + /* APLIC size */ + build_append_int_noprefix(table_data, + s->memmap[VIRT_APLIC_S].size, 4); + } + } else { + /* PLICs */ + for (socket = 0; socket < riscv_socket_count(ms); socket++) { + aplic_addr = s->memmap[VIRT_PLIC].base + + s->memmap[VIRT_PLIC].size * socket; + gsi_base = VIRT_IRQCHIP_NUM_SOURCES * socket; + build_append_int_noprefix(table_data, 0x1B, 1); /* Type */ + build_append_int_noprefix(table_data, 36, 1); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Version */ + build_append_int_noprefix(table_data, socket, 1); /* PLIC ID */ + build_append_int_noprefix(table_data, 0, 8); /* Hardware ID */ + /* Total External Interrupt Sources Supported */ + build_append_int_noprefix(table_data, + VIRT_IRQCHIP_NUM_SOURCES - 1, 2); + build_append_int_noprefix(table_data, 0, 2); /* Max Priority */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + /* PLIC Size */ + build_append_int_noprefix(table_data, s->memmap[VIRT_PLIC].size, 4); + /* PLIC Address */ + build_append_int_noprefix(table_data, aplic_addr, 8); + /* Global System Interrupt Vector Base */ + build_append_int_noprefix(table_data, gsi_base, 4); + } } acpi_table_end(linker, &table); @@ -294,6 +555,16 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables) acpi_add_table(table_offsets, 
tables_blob); build_rhct(tables_blob, tables->linker, s); + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = s->memmap[VIRT_PCIE_MMIO].base, + .size = s->memmap[VIRT_PCIE_MMIO].size, + }; + build_mcfg(tables_blob, tables->linker, &mcfg, s->oem_id, + s->oem_table_id); + } + /* XSDT is pointed to by RSDP */ xsdt = tables_blob->len; build_xsdt(tables_blob, tables->linker, table_offsets, s->oem_id, @@ -374,7 +645,7 @@ static const VMStateDescription vmstate_virt_acpi_build = { .name = "virt_acpi_build", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(patched, AcpiBuildState), VMSTATE_END_OF_LIST() }, diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index d2eac2415619c704349a6bdad23a2fb2cf27ffd2..f9fd1341fce46709c774a470051b2a75f4f9210f 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -38,7 +38,6 @@ #include "kvm/kvm_riscv.h" #include "hw/intc/riscv_aclint.h" #include "hw/intc/riscv_aplic.h" -#include "hw/intc/riscv_imsic.h" #include "hw/intc/sifive_plic.h" #include "hw/misc/sifive_test.h" #include "hw/platform-bus.h" @@ -54,28 +53,6 @@ #include "hw/acpi/aml-build.h" #include "qapi/qapi-visit-common.h" -/* - * The virt machine physical address space used by some of the devices - * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets, - * number of CPUs, and number of IMSIC guest files. - * - * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS, - * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization - * of virt machine physical address space. - */ - -#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT) -#if VIRT_IMSIC_GROUP_MAX_SIZE < \ - IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS) -#error "Can't accommodate single IMSIC group in address space" -#endif - -#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \ - VIRT_IMSIC_GROUP_MAX_SIZE) -#if 0x4000000 < VIRT_IMSIC_MAX_SIZE -#error "Can't accommodate all IMSIC groups in address space" -#endif - /* KVM AIA only supports APLIC MSI. APLIC Wired is always emulated by QEMU. 
*/ static bool virt_use_kvm_aia(RISCVVirtState *s) { @@ -273,6 +250,11 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, cpu_ptr->cfg.cboz_blocksize); } + if (cpu_ptr->cfg.ext_zicbop) { + qemu_fdt_setprop_cell(ms->fdt, cpu_name, "riscv,cbop-block-size", + cpu_ptr->cfg.cbop_blocksize); + } + qemu_fdt_setprop_string(ms->fdt, cpu_name, "compatible", "riscv"); qemu_fdt_setprop_string(ms->fdt, cpu_name, "status", "okay"); qemu_fdt_setprop_cell(ms->fdt, cpu_name, "reg", @@ -460,24 +442,6 @@ static void create_fdt_socket_plic(RISCVVirtState *s, "sifive,plic-1.0.0", "riscv,plic0" }; - if (kvm_enabled()) { - plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); - } else { - plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); - } - - for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { - if (kvm_enabled()) { - plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); - } else { - plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); - plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); - plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); - } - } - plic_phandles[socket] = (*phandle)++; plic_addr = memmap[VIRT_PLIC].base + (memmap[VIRT_PLIC].size * socket); plic_name = g_strdup_printf("/soc/plic@%lx", plic_addr); @@ -490,8 +454,33 @@ static void create_fdt_socket_plic(RISCVVirtState *s, (char **)&plic_compat, ARRAY_SIZE(plic_compat)); qemu_fdt_setprop(ms->fdt, plic_name, "interrupt-controller", NULL, 0); - qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", - plic_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4); + + if (kvm_enabled()) { + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 2); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + plic_cells[cpu * 2 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 2 + 1] = cpu_to_be32(IRQ_S_EXT); + } + + qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", + plic_cells, + s->soc[socket].num_harts * sizeof(uint32_t) * 2); + } else { + plic_cells = g_new0(uint32_t, s->soc[socket].num_harts * 4); + + for (cpu = 0; cpu < s->soc[socket].num_harts; cpu++) { + plic_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_EXT); + plic_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandles[cpu]); + plic_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_S_EXT); + } + + qemu_fdt_setprop(ms->fdt, plic_name, "interrupts-extended", + plic_cells, + s->soc[socket].num_harts * sizeof(uint32_t) * 4); + } + qemu_fdt_setprop_cells(ms->fdt, plic_name, "reg", 0x0, plic_addr, 0x0, memmap[VIRT_PLIC].size); qemu_fdt_setprop_cell(ms->fdt, plic_name, "riscv,ndev", @@ -512,7 +501,7 @@ static void create_fdt_socket_plic(RISCVVirtState *s, g_free(plic_cells); } -static uint32_t imsic_num_bits(uint32_t count) +uint32_t imsic_num_bits(uint32_t count) { uint32_t ret = 0; @@ -1077,21 +1066,45 @@ static void create_fdt(RISCVVirtState *s, const MemMapEntry *memmap) } static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, - hwaddr ecam_base, hwaddr ecam_size, - hwaddr mmio_base, hwaddr mmio_size, - hwaddr high_mmio_base, - hwaddr high_mmio_size, - hwaddr pio_base, - DeviceState *irqchip) + DeviceState *irqchip, + RISCVVirtState *s) { DeviceState *dev; MemoryRegion *ecam_alias, *ecam_reg; MemoryRegion *mmio_alias, *high_mmio_alias, *mmio_reg; + hwaddr ecam_base = s->memmap[VIRT_PCIE_ECAM].base; + hwaddr ecam_size = s->memmap[VIRT_PCIE_ECAM].size; + hwaddr 
mmio_base = s->memmap[VIRT_PCIE_MMIO].base; + hwaddr mmio_size = s->memmap[VIRT_PCIE_MMIO].size; + hwaddr high_mmio_base = virt_high_pcie_memmap.base; + hwaddr high_mmio_size = virt_high_pcie_memmap.size; + hwaddr pio_base = s->memmap[VIRT_PCIE_PIO].base; + hwaddr pio_size = s->memmap[VIRT_PCIE_PIO].size; qemu_irq irq; int i; dev = qdev_new(TYPE_GPEX_HOST); + /* Set GPEX object properties for the virt machine */ + object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_BASE, + ecam_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ECAM_SIZE, + ecam_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), + PCI_HOST_BELOW_4G_MMIO_BASE, + mmio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_BELOW_4G_MMIO_SIZE, + mmio_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), + PCI_HOST_ABOVE_4G_MMIO_BASE, + high_mmio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_ABOVE_4G_MMIO_SIZE, + high_mmio_size, NULL); + object_property_set_uint(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_BASE, + pio_base, NULL); + object_property_set_int(OBJECT(GPEX_HOST(dev)), PCI_HOST_PIO_SIZE, + pio_size, NULL); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); ecam_alias = g_new0(MemoryRegion, 1); @@ -1122,6 +1135,7 @@ static inline DeviceState *gpex_pcie_init(MemoryRegion *sys_mem, gpex_set_irq_num(GPEX_HOST(dev), i, PCIE_IRQ + i); } + GPEX_HOST(dev)->gpex_cfg.bus = PCI_HOST_BRIDGE(GPEX_HOST(dev))->bus; return dev; } @@ -1517,15 +1531,7 @@ static void virt_machine_init(MachineState *machine) qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i)); } - gpex_pcie_init(system_memory, - memmap[VIRT_PCIE_ECAM].base, - memmap[VIRT_PCIE_ECAM].size, - memmap[VIRT_PCIE_MMIO].base, - memmap[VIRT_PCIE_MMIO].size, - virt_high_pcie_memmap.base, - virt_high_pcie_memmap.size, - memmap[VIRT_PCIE_PIO].base, - pcie_irqchip); + gpex_pcie_init(system_memory, pcie_irqchip, s); create_platform_bus(s, mmio_irqchip); diff --git a/hw/rtc/allwinner-rtc.c b/hw/rtc/allwinner-rtc.c index 7e493f0e79d0d2257e4a94e3f855e051e7c33f43..2ac50b30cb80f323d7710624dc392184d7c1b01f 100644 --- a/hw/rtc/allwinner-rtc.c +++ b/hw/rtc/allwinner-rtc.c @@ -305,7 +305,7 @@ static const VMStateDescription allwinner_rtc_vmstate = { .name = "allwinner-rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AwRtcState, AW_RTC_REGS_NUM), VMSTATE_END_OF_LIST() } diff --git a/hw/rtc/aspeed_rtc.c b/hw/rtc/aspeed_rtc.c index fa861e2d49400e6abfa2a5f2862e7443e5216f91..589d9a5a7a8311f87221e71ad4a29a442c035c1e 100644 --- a/hw/rtc/aspeed_rtc.c +++ b/hw/rtc/aspeed_rtc.c @@ -137,7 +137,7 @@ static const MemoryRegionOps aspeed_rtc_ops = { static const VMStateDescription vmstate_aspeed_rtc = { .name = TYPE_ASPEED_RTC, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg, AspeedRtcState, 0x18), VMSTATE_INT64(offset, AspeedRtcState), VMSTATE_END_OF_LIST() diff --git a/hw/rtc/ds1338.c b/hw/rtc/ds1338.c index 36d8121ddda080d60d05f6d69c15155655241a85..e479661c391c40a56b33e6a350f28c410628a5c2 100644 --- a/hw/rtc/ds1338.c +++ b/hw/rtc/ds1338.c @@ -46,7 +46,7 @@ static const VMStateDescription vmstate_ds1338 = { .name = "ds1338", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, DS1338State), VMSTATE_INT64(offset, DS1338State), VMSTATE_UINT8_V(wday_offset, DS1338State, 
2), diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c index cc7101c530aadcf59404d41ad375c7682771860f..319371f97d1dff2888b6d7d124120c5bba583389 100644 --- a/hw/rtc/exynos4210_rtc.c +++ b/hw/rtc/exynos4210_rtc.c @@ -122,7 +122,7 @@ static const VMStateDescription vmstate_exynos4210_rtc_state = { .name = "exynos4210.rtc", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_intp, Exynos4210RTCState), VMSTATE_UINT32(reg_rtccon, Exynos4210RTCState), VMSTATE_UINT32(reg_ticcnt, Exynos4210RTCState), diff --git a/hw/rtc/goldfish_rtc.c b/hw/rtc/goldfish_rtc.c index 19a56402a0c6c3f5972b1abb981e19a88ed69a1b..01acf30b2787f44b599937534409cfe2c8ec930f 100644 --- a/hw/rtc/goldfish_rtc.c +++ b/hw/rtc/goldfish_rtc.c @@ -242,7 +242,7 @@ static const VMStateDescription goldfish_rtc_vmstate = { .version_id = 2, .pre_save = goldfish_rtc_pre_save, .post_load = goldfish_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tick_offset_vmstate, GoldfishRTCState), VMSTATE_UINT64(alarm_next, GoldfishRTCState), VMSTATE_UINT32(alarm_running, GoldfishRTCState), diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c index 1f9e38a735bd75886908e7009f14a26ca1e6261d..ac28c1165bfec818619d5812fe87ac8f180cf474 100644 --- a/hw/rtc/ls7a_rtc.c +++ b/hw/rtc/ls7a_rtc.c @@ -454,7 +454,7 @@ static const VMStateDescription vmstate_ls7a_rtc = { .minimum_version_id = 1, .pre_save = ls7a_rtc_pre_save, .post_load = ls7a_rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(offset_toy, LS7ARtcState), VMSTATE_INT64(offset_rtc, LS7ARtcState), VMSTATE_UINT32_ARRAY(toymatch, LS7ARtcState, TIMER_NUMS), diff --git a/hw/rtc/m48t59.c b/hw/rtc/m48t59.c index 2e2c849985c6c6bc19e150bfb32115b623e3433d..aa44c4b20c4d1a79ec799be6d79ee1ee8d246382 100644 --- a/hw/rtc/m48t59.c +++ b/hw/rtc/m48t59.c @@ -526,7 +526,7 @@ static const VMStateDescription vmstate_m48t59 = { .name = "m48t59", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(lock, M48t59State), VMSTATE_UINT16(addr, M48t59State), VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, size), diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 2d391a83969e92529125a9f3f7ab846d7f84cabf..f4c18692325c40f21c157cc2e3c5fa500cd21f9e 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -817,7 +817,7 @@ static const VMStateDescription vmstate_rtc_irq_reinject_on_ack_count = { .version_id = 1, .minimum_version_id = 1, .needed = rtc_irq_reinject_on_ack_count_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(irq_reinject_on_ack_count, MC146818RtcState), VMSTATE_END_OF_LIST() } @@ -829,7 +829,7 @@ static const VMStateDescription vmstate_rtc = { .minimum_version_id = 1, .pre_save = rtc_pre_save, .post_load = rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(cmos_data, MC146818RtcState), VMSTATE_UINT8(cmos_index, MC146818RtcState), VMSTATE_UNUSED(7*4), @@ -845,7 +845,7 @@ static const VMStateDescription vmstate_rtc = { VMSTATE_UINT64_V(next_alarm_time, MC146818RtcState, 3), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_rtc_irq_reinject_on_ack_count, NULL } diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c index 
b01d0e75d1af40334289df94795fa5e5e3c513e6..837b0bdf9bdec56f0f656cc89c5c8adc205eebd0 100644 --- a/hw/rtc/pl031.c +++ b/hw/rtc/pl031.c @@ -290,7 +290,7 @@ static const VMStateDescription vmstate_pl031_tick_offset = { .minimum_version_id = 1, .needed = pl031_tick_offset_needed, .post_load = pl031_tick_offset_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tick_offset, PL031State), VMSTATE_END_OF_LIST() } @@ -303,7 +303,7 @@ static const VMStateDescription vmstate_pl031 = { .pre_save = pl031_pre_save, .pre_load = pl031_pre_load, .post_load = pl031_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tick_offset_vmstate, PL031State), VMSTATE_UINT32(mr, PL031State), VMSTATE_UINT32(lr, PL031State), @@ -312,7 +312,7 @@ static const VMStateDescription vmstate_pl031 = { VMSTATE_UINT32(is, PL031State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pl031_tick_offset, NULL } diff --git a/hw/rtc/twl92230.c b/hw/rtc/twl92230.c index 64c61c3daebc82429e3b57465f6b4dd4e5cbcf10..efd19a76e61d08e4a6dfef629c6c583b1fedd6e6 100644 --- a/hw/rtc/twl92230.c +++ b/hw/rtc/twl92230.c @@ -768,7 +768,7 @@ static const VMStateDescription vmstate_menelaus_tm = { .name = "menelaus_tm", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_HACK(tm_sec, struct tm), VMSTATE_UINT16_HACK(tm_min, struct tm), VMSTATE_UINT16_HACK(tm_hour, struct tm), @@ -810,7 +810,7 @@ static const VMStateDescription vmstate_menelaus = { .minimum_version_id = 0, .pre_save = menelaus_pre_save, .post_load = menelaus_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(firstbyte, MenelausState), VMSTATE_UINT8(reg, MenelausState), VMSTATE_UINT8_ARRAY(vcore, MenelausState, 5), diff --git a/hw/rtc/xlnx-zynqmp-rtc.c b/hw/rtc/xlnx-zynqmp-rtc.c index 3e7d61a41c1783ffa264cdf43f8b3d60c7a0debe..613c6407a6085cd3771e05dc6e4e19e1085961ab 100644 --- a/hw/rtc/xlnx-zynqmp-rtc.c +++ b/hw/rtc/xlnx-zynqmp-rtc.c @@ -244,7 +244,7 @@ static const VMStateDescription vmstate_rtc = { .minimum_version_id = 1, .pre_save = rtc_pre_save, .post_load = rtc_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPRTC, XLNX_ZYNQMP_RTC_R_MAX), VMSTATE_UINT32(tick_offset, XlnxZynqMPRTC), VMSTATE_END_OF_LIST(), diff --git a/hw/s390x/Kconfig b/hw/s390x/Kconfig index 4c068d7960b973474b84d3cef9d4a3bf31ca4f9f..26ad1044858ce9ba3e78c65ffb9f295c31d7ab4c 100644 --- a/hw/s390x/Kconfig +++ b/hw/s390x/Kconfig @@ -6,6 +6,7 @@ config S390_CCW_VIRTIO imply VFIO_CCW imply WDT_DIAG288 imply PCIE_DEVICES + imply IOMMUFD select PCI_EXPRESS select S390_FLIC select S390_FLIC_KVM if KVM diff --git a/hw/s390x/ccw-device.c b/hw/s390x/ccw-device.c index 95f269ab441e7b558b7ce0fef4e67d4fc33aef87..fb8c1acc64d5002c861a4913f292d8346dbef192 100644 --- a/hw/s390x/ccw-device.c +++ b/hw/s390x/ccw-device.c @@ -66,7 +66,7 @@ const VMStateDescription vmstate_ccw_dev = { .name = "s390_ccw_dev", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(sch, CcwDevice, vmstate_subch_dev, SubchDev), VMSTATE_END_OF_LIST() } diff --git a/hw/s390x/css.c b/hw/s390x/css.c index bcedec2fc82136d97e5ce9e49cf5b7c191120bb3..295530963a6c9515bc2c655a3cd2f7e337f62a7e 100644 --- a/hw/s390x/css.c +++ b/hw/s390x/css.c @@ 
-32,7 +32,7 @@ static const VMStateDescription vmstate_crw = { .name = "s390_crw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(flags, CRW), VMSTATE_UINT16(rsid, CRW), VMSTATE_END_OF_LIST() @@ -43,7 +43,7 @@ static const VMStateDescription vmstate_crw_container = { .name = "s390_crw_container", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW), VMSTATE_END_OF_LIST() }, @@ -59,7 +59,7 @@ static const VMStateDescription vmstate_chp_info = { .name = "s390_chp_info", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(in_use, ChpInfo), VMSTATE_UINT8(type, ChpInfo), VMSTATE_UINT8(is_virtual, ChpInfo), @@ -77,7 +77,7 @@ static const VMStateDescription vmstate_scsw = { .name = "s390_scsw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(flags, SCSW), VMSTATE_UINT16(ctrl, SCSW), VMSTATE_UINT32(cpa, SCSW), @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_pmcw = { .name = "s390_pmcw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intparm, PMCW), VMSTATE_UINT16(flags, PMCW), VMSTATE_UINT16(devno, PMCW), @@ -113,7 +113,7 @@ static const VMStateDescription vmstate_schib = { .name = "s390_schib", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW), VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW), VMSTATE_UINT64(mba, SCHIB), @@ -127,7 +127,7 @@ static const VMStateDescription vmstate_ccw1 = { .name = "s390_ccw1", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(cmd_code, CCW1), VMSTATE_UINT8(flags, CCW1), VMSTATE_UINT16(count, CCW1), @@ -140,7 +140,7 @@ static const VMStateDescription vmstate_ciw = { .name = "s390_ciw", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, CIW), VMSTATE_UINT8(command, CIW), VMSTATE_UINT16(count, CIW), @@ -152,7 +152,7 @@ static const VMStateDescription vmstate_sense_id = { .name = "s390_sense_id", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(reserved, SenseId), VMSTATE_UINT16(cu_type, SenseId), VMSTATE_UINT8(cu_model, SenseId), @@ -168,7 +168,7 @@ static const VMStateDescription vmstate_orb = { .name = "s390_orb", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(intparm, ORB), VMSTATE_UINT16(ctrl0, ORB), VMSTATE_UINT8(lpm, ORB), @@ -188,7 +188,7 @@ static const VMStateDescription vmstate_schdev_orb = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_schdev_orb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB), VMSTATE_END_OF_LIST() } @@ -207,7 +207,7 @@ const VMStateDescription vmstate_subch_dev = { .minimum_version_id = 1, .post_load = subch_dev_post_load, .pre_save = subch_dev_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"), VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"), 
VMSTATE_UINT16(migrated_schid, SubchDev), @@ -223,7 +223,7 @@ const VMStateDescription vmstate_subch_dev = { VMSTATE_UINT8(ccw_no_data_cnt, SubchDev), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_schdev_orb, NULL } @@ -264,12 +264,12 @@ static int pre_save_ind_addr(void *opaque) return 0; } -const VMStateDescription vmstate_ind_addr_tmp = { +static const VMStateDescription vmstate_ind_addr_tmp = { .name = "s390_ind_addr_tmp", .pre_save = pre_save_ind_addr, .post_load = post_load_ind_addr, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(len, IndAddrPtrTmp), VMSTATE_UINT64(addr, IndAddrPtrTmp), VMSTATE_END_OF_LIST() @@ -278,7 +278,7 @@ const VMStateDescription vmstate_ind_addr_tmp = { const VMStateDescription vmstate_ind_addr = { .name = "s390_ind_addr_tmp", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp), VMSTATE_END_OF_LIST() } @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_css_img = { .name = "s390_css_img", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Subchannel sets have no relevant state. */ VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0, vmstate_chp_info, ChpInfo), @@ -330,7 +330,7 @@ static const VMStateDescription vmstate_css = { .name = "s390_css", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container, CrwContainer, sibling), VMSTATE_BOOL(sei_pending, ChannelSubSys), diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index 6891e3cd73b466d8ef816b07b2e2e593c4341818..f9829de9532b65466a65443cecd9609198c5c026 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -367,7 +367,7 @@ static const VMStateDescription vmstate_event_facility_mask64 = { .version_id = 0, .minimum_version_id = 0, .needed = vmstate_event_facility_mask64_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility), VMSTATE_END_OF_LIST() } @@ -378,7 +378,7 @@ static const VMStateDescription vmstate_event_facility_mask_length = { .version_id = 0, .minimum_version_id = 0, .needed = vmstate_event_facility_mask_length_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(mask_length, SCLPEventFacility), VMSTATE_END_OF_LIST() } @@ -388,11 +388,11 @@ static const VMStateDescription vmstate_event_facility = { .name = "vmstate-event-facility", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_event_facility_mask64, &vmstate_event_facility_mask_length, NULL diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 515dcf51b5f561089616ccadc0b144dd0053f3a0..e934bf89d1582700d3d10811c871a18f889ec13b 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -35,7 +35,6 @@ #include "qemu/cutils.h" #include "qemu/option.h" #include "standard-headers/linux/virtio_ids.h" -#include "exec/exec-all.h" #define KERN_IMAGE_START 0x010000UL #define LINUX_MAGIC_ADDR 0x010008UL @@ -60,7 +59,7 @@ static 
const VMStateDescription vmstate_iplb_extended = { .version_id = 0, .minimum_version_id = 0, .needed = iplb_extended_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(reserved_ext, IplParameterBlock, 4096 - 200), VMSTATE_END_OF_LIST() } @@ -70,13 +69,13 @@ static const VMStateDescription vmstate_iplb = { .name = "ipl/iplb", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(reserved1, IplParameterBlock, 110), VMSTATE_UINT16(devno, IplParameterBlock), VMSTATE_UINT8_ARRAY(reserved2, IplParameterBlock, 88), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_iplb_extended, NULL } @@ -86,7 +85,7 @@ static const VMStateDescription vmstate_ipl = { .name = "ipl", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(compat_start_addr, S390IPLState), VMSTATE_UINT64(compat_bios_start_addr, S390IPLState), VMSTATE_STRUCT(iplb, S390IPLState, 0, vmstate_iplb, IplParameterBlock), @@ -703,7 +702,7 @@ static void s390_ipl_prepare_qipl(S390CPU *cpu) cpu_physical_memory_unmap(addr, len, 1, len); } -int s390_ipl_prepare_pv_header(void) +int s390_ipl_prepare_pv_header(Error **errp) { IplParameterBlock *ipib = s390_ipl_get_iplb_pv(); IPLBlockPV *ipib_pv = &ipib->pv; @@ -712,8 +711,7 @@ int s390_ipl_prepare_pv_header(void) cpu_physical_memory_read(ipib_pv->pv_header_addr, hdr, ipib_pv->pv_header_len); - rc = s390_pv_set_sec_parms((uintptr_t)hdr, - ipib_pv->pv_header_len); + rc = s390_pv_set_sec_parms((uintptr_t)hdr, ipib_pv->pv_header_len, errp); g_free(hdr); return rc; } diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index 7fc86e790547f2d40060030bb79aa8673996c7c7..57cd12576973a7eca10fe2653d637bf018a8fcd0 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -107,7 +107,7 @@ typedef union IplParameterBlock IplParameterBlock; int s390_ipl_set_loadparm(uint8_t *loadparm); void s390_ipl_update_diag308(IplParameterBlock *iplb); -int s390_ipl_prepare_pv_header(void); +int s390_ipl_prepare_pv_header(Error **errp); int s390_ipl_pv_unpack(void); void s390_ipl_prepare_cpu(S390CPU *cpu); IplParameterBlock *s390_ipl_get_iplb(void); diff --git a/hw/s390x/s390-ccw.c b/hw/s390x/s390-ccw.c index e2d86d96e728401a4e98dec2574575923f5ece02..5261e66724f1cc3157b9413b0d5fdf5289c92503 100644 --- a/hw/s390x/s390-ccw.c +++ b/hw/s390x/s390-ccw.c @@ -76,7 +76,9 @@ static void s390_ccw_get_dev_info(S390CCWDevice *cdev, Error **errp) { unsigned int cssid, ssid, devid; - char dev_path[PATH_MAX] = {0}, *tmp; + char dev_path[PATH_MAX] = {0}; + g_autofree char *tmp_dir = NULL; + g_autofree char *tmp = NULL; if (!sysfsdev) { error_setg(errp, "No host device provided"); @@ -92,7 +94,8 @@ static void s390_ccw_get_dev_info(S390CCWDevice *cdev, cdev->mdevid = g_path_get_basename(dev_path); - tmp = basename(dirname(dev_path)); + tmp_dir = g_path_get_dirname(dev_path); + tmp = g_path_get_basename(tmp_dir); if (sscanf(tmp, "%2x.%1x.%4x", &cssid, &ssid, &devid) != 3) { error_setg_errno(errp, errno, "Failed to read %s", tmp); return; diff --git a/hw/s390x/s390-skeys.c b/hw/s390x/s390-skeys.c index 8f5159d85dc6a491fe30a654f8dc54f4f09ab2ca..5c535d483e9f7bb0e665330c6b694cc19cc8904c 100644 --- a/hw/s390x/s390-skeys.c +++ b/hw/s390x/s390-skeys.c @@ -153,7 +153,7 @@ void qmp_dump_skeys(const char *filename, Error **errp) goto out; } - assert(qemu_mutex_iothread_locked()); + 
assert(bql_locked()); guest_phys_blocks_init(&guest_phys_blocks); guest_phys_blocks_append(&guest_phys_blocks); diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 7262725d2e8a1cc1fa33fdbe2a074c87dfe69628..eaf61d36400fbbd678809407b781d0bb1fe3f079 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -391,7 +391,7 @@ static int s390_machine_protect(S390CcwMachineState *ms) } /* Set SE header and unpack */ - rc = s390_ipl_prepare_pv_header(); + rc = s390_ipl_prepare_pv_header(&local_err); if (rc) { goto out_err; } @@ -410,6 +410,9 @@ static int s390_machine_protect(S390CcwMachineState *ms) return rc; out_err: + if (local_err) { + error_report_err(local_err); + } s390_machine_unprotect(ms); return rc; } @@ -855,14 +858,26 @@ bool css_migration_enabled(void) } \ type_init(ccw_machine_register_##suffix) +static void ccw_machine_9_0_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_9_0_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE(9_0, "9.0", true); + static void ccw_machine_8_2_instance_options(MachineState *machine) { + ccw_machine_9_0_instance_options(machine); } static void ccw_machine_8_2_class_options(MachineClass *mc) { + ccw_machine_9_0_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_8_2, hw_compat_8_2_len); } -DEFINE_CCW_MACHINE(8_2, "8.2", true); +DEFINE_CCW_MACHINE(8_2, "8.2", false); static void ccw_machine_8_1_instance_options(MachineState *machine) { diff --git a/hw/s390x/sclpquiesce.c b/hw/s390x/sclpquiesce.c index a641089929baa2c847d7661829482d01d1b96072..14936aa94baeeca55b82eb9ca3ecf50ae390277c 100644 --- a/hw/s390x/sclpquiesce.c +++ b/hw/s390x/sclpquiesce.c @@ -72,7 +72,7 @@ static const VMStateDescription vmstate_sclpquiesce = { .name = TYPE_SCLP_QUIESCE, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(event_pending, SCLPEvent), VMSTATE_END_OF_LIST() } diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 80453718a338c9e6952a2a978c13b680e7b058da..b4676909dd6a058a48766b2a573d00dcf3cfab51 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -87,7 +87,7 @@ const VMStateDescription vmstate_virtio_ccw_dev_tmp = { .name = "s390_virtio_ccw_dev_tmp", .pre_save = virtio_ccw_dev_tmp_pre_save, .post_load = virtio_ccw_dev_tmp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(config_vector, VirtioCcwDeviceTmp), VMSTATE_END_OF_LIST() } @@ -98,7 +98,7 @@ const VMStateDescription vmstate_virtio_ccw_dev = { .version_id = 1, .minimum_version_id = 1, .post_load = virtio_ccw_dev_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CCW_DEVICE(parent_obj, VirtioCcwDevice), VMSTATE_PTR_TO_IND_ADDR(indicators, VirtioCcwDevice), VMSTATE_PTR_TO_IND_ADDR(indicators2, VirtioCcwDevice), diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 4e890db0e213a296a2dfab10d5a6cdc5d2383570..93b3429e0f565eed63af4b8757ee67bc48f6a0f1 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -333,7 +333,7 @@ static const VMStateDescription vmstate_esp_pci_scsi = { .version_id = 2, .minimum_version_id = 1, .pre_save = esp_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIESPState), VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)), VMSTATE_UINT8_V(esp.mig_version_id, PCIESPState, 2), diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c index 
9b11d8c5738ab34a3fbf62c4bcaa0b3cea84c4af..3a1c9f7c3b80c75f5fc024c74c4b62aa224fb0c6 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -292,6 +292,15 @@ static void do_command_phase(ESPState *s) esp_fifo_pop_buf(&s->cmdfifo, buf, cmdlen); current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, s->lun); + if (!current_lun) { + /* No such drive */ + s->rregs[ESP_RSTAT] = 0; + s->rregs[ESP_RINTR] = INTR_DC; + s->rregs[ESP_RSEQ] = SEQ_0; + esp_raise_irq(s); + return; + } + s->current_req = scsi_req_new(current_lun, 0, s->lun, buf, cmdlen, s); datalen = scsi_req_enqueue(s->current_req); s->ti_size = datalen; @@ -1237,7 +1246,7 @@ static const VMStateDescription vmstate_esp_pdma = { .version_id = 0, .minimum_version_id = 0, .needed = esp_pdma_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(pdma_cb, ESPState), VMSTATE_END_OF_LIST() } @@ -1248,7 +1257,7 @@ const VMStateDescription vmstate_esp = { .version_id = 6, .minimum_version_id = 3, .post_load = esp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(rregs, ESPState), VMSTATE_BUFFER(wregs, ESPState), VMSTATE_INT32(ti_size, ESPState), @@ -1277,7 +1286,7 @@ const VMStateDescription vmstate_esp = { VMSTATE_UINT8_TEST(lun, ESPState, esp_is_version_6), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_esp_pdma, NULL } @@ -1448,7 +1457,7 @@ static const VMStateDescription vmstate_sysbus_esp_scsi = { .version_id = 2, .minimum_version_id = 1, .pre_save = esp_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_V(esp.mig_version_id, SysBusESPState, 2), VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState), VMSTATE_END_OF_LIST() diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index 634ed49c2e776222239e01cf5011de50fbe09f95..34e3b8928777263cae78cfd2fa9c01db9213c406 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -2205,7 +2205,7 @@ static const VMStateDescription vmstate_lsi_scsi = { .minimum_version_id = 0, .pre_save = lsi_pre_save, .post_load = lsi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, LSIState), VMSTATE_INT32(carry, LSIState), diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 32c70c9e997546d93640bd070132128eee501749..2d0c60717718337bb297d1d95cf7ef4f20a3456d 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -2299,7 +2299,7 @@ static const VMStateDescription vmstate_megasas_gen1 = { .name = "megasas", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), @@ -2317,7 +2317,7 @@ static const VMStateDescription vmstate_megasas_gen2 = { .name = "megasas-gen2", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, MegasasState), VMSTATE_MSIX(parent_obj, MegasasState), diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index 75d3ab8bd180f00a8ce8f7c3e0e74048070fd708..c5d3138c9362f1308ec6095a3fc9be7fe2470dc6 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -1366,7 +1366,7 @@ static const VMStateDescription vmstate_mptsas = { .version_id = 0, .minimum_version_id = 0, .post_load = mptsas_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, MPTSASState), 
VMSTATE_BOOL(msi_in_use, MPTSASState), VMSTATE_UINT32(state, MPTSASState), diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index fc4b77fdb02707fa51585b511ed016ffb7315bdc..0a2eb11c56cf289a7ebdd551bf90e043b196b8df 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -85,6 +85,89 @@ SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun) return d; } +/* + * Invoke @fn() for each enqueued request in device @s. Must be called from the + * main loop thread while the guest is stopped. This is only suitable for + * vmstate ->put(), use scsi_device_for_each_req_async() for other cases. + */ +static void scsi_device_for_each_req_sync(SCSIDevice *s, + void (*fn)(SCSIRequest *, void *), + void *opaque) +{ + SCSIRequest *req; + SCSIRequest *next_req; + + assert(!runstate_is_running()); + assert(qemu_in_main_thread()); + + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) { + fn(req, opaque); + } +} + +typedef struct { + SCSIDevice *s; + void (*fn)(SCSIRequest *, void *); + void *fn_opaque; +} SCSIDeviceForEachReqAsyncData; + +static void scsi_device_for_each_req_async_bh(void *opaque) +{ + g_autofree SCSIDeviceForEachReqAsyncData *data = opaque; + SCSIDevice *s = data->s; + AioContext *ctx; + SCSIRequest *req; + SCSIRequest *next; + + /* + * If the AioContext changed before this BH was called then reschedule into + * the new AioContext before accessing ->requests. This can happen when + * scsi_device_for_each_req_async() is called and then the AioContext is + * changed before BHs are run. + */ + ctx = blk_get_aio_context(s->conf.blk); + if (ctx != qemu_get_current_aio_context()) { + aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh, + g_steal_pointer(&data)); + return; + } + + QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { + data->fn(req, data->fn_opaque); + } + + /* Drop the reference taken by scsi_device_for_each_req_async() */ + object_unref(OBJECT(s)); +} + +/* + * Schedule @fn() to be invoked for each enqueued request in device @s. @fn() + * runs in the AioContext that is executing the request. + */ +static void scsi_device_for_each_req_async(SCSIDevice *s, + void (*fn)(SCSIRequest *, void *), + void *opaque) +{ + assert(qemu_in_main_thread()); + + SCSIDeviceForEachReqAsyncData *data = + g_new(SCSIDeviceForEachReqAsyncData, 1); + + data->s = s; + data->fn = fn; + data->fn_opaque = opaque; + + /* + * Hold a reference to the SCSIDevice until + * scsi_device_for_each_req_async_bh() finishes. 
+ */ + object_ref(OBJECT(s)); + + aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk), + scsi_device_for_each_req_async_bh, + data); +} + static void scsi_device_realize(SCSIDevice *s, Error **errp) { SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s); @@ -144,20 +227,18 @@ void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, qbus_set_bus_hotplug_handler(BUS(bus)); } -static void scsi_dma_restart_bh(void *opaque) +void scsi_req_retry(SCSIRequest *req) { - SCSIDevice *s = opaque; - SCSIRequest *req, *next; - - qemu_bh_delete(s->bh); - s->bh = NULL; + req->retry = true; +} - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { - scsi_req_ref(req); - if (req->retry) { - req->retry = false; - switch (req->cmd.mode) { +/* Called in the AioContext that is executing the request */ +static void scsi_dma_restart_req(SCSIRequest *req, void *opaque) +{ + scsi_req_ref(req); + if (req->retry) { + req->retry = false; + switch (req->cmd.mode) { case SCSI_XFER_FROM_DEV: case SCSI_XFER_TO_DEV: scsi_req_continue(req); @@ -166,37 +247,22 @@ static void scsi_dma_restart_bh(void *opaque) scsi_req_dequeue(req); scsi_req_enqueue(req); break; - } } - scsi_req_unref(req); } - aio_context_release(blk_get_aio_context(s->conf.blk)); - /* Drop the reference that was acquired in scsi_dma_restart_cb */ - object_unref(OBJECT(s)); -} - -void scsi_req_retry(SCSIRequest *req) -{ - /* No need to save a reference, because scsi_dma_restart_bh just - * looks at the request list. */ - req->retry = true; + scsi_req_unref(req); } static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) { SCSIDevice *s = opaque; + assert(qemu_in_main_thread()); + if (!running) { return; } - if (!s->bh) { - AioContext *ctx = blk_get_aio_context(s->conf.blk); - /* The reference is dropped in scsi_dma_restart_bh.*/ - object_ref(OBJECT(s)); - s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s, - &DEVICE(s)->mem_reentrancy_guard); - qemu_bh_schedule(s->bh); - } + + scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL); } static bool scsi_bus_is_address_free(SCSIBus *bus, @@ -1657,17 +1723,16 @@ void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense) } } +static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque) +{ + scsi_req_cancel_async(req, NULL); +} + void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense) { - SCSIRequest *req; + scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL); - aio_context_acquire(blk_get_aio_context(sdev->conf.blk)); - while (!QTAILQ_EMPTY(&sdev->requests)) { - req = QTAILQ_FIRST(&sdev->requests); - scsi_req_cancel_async(req, NULL); - } blk_drain(sdev->conf.blk); - aio_context_release(blk_get_aio_context(sdev->conf.blk)); scsi_device_set_ua(sdev, sense); } @@ -1737,31 +1802,33 @@ static char *scsibus_get_fw_dev_path(DeviceState *dev) /* SCSI request list. For simplicity, pv points to the whole device */ +static void put_scsi_req(SCSIRequest *req, void *opaque) +{ + QEMUFile *f = opaque; + + assert(!req->io_canceled); + assert(req->status == -1 && req->host_status == -1); + assert(req->enqueued); + + qemu_put_sbyte(f, req->retry ? 
1 : 2); + qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); + qemu_put_be32s(f, &req->tag); + qemu_put_be32s(f, &req->lun); + if (req->bus->info->save_request) { + req->bus->info->save_request(f, req); + } + if (req->ops->save_request) { + req->ops->save_request(f, req); + } +} + static int put_scsi_requests(QEMUFile *f, void *pv, size_t size, const VMStateField *field, JSONWriter *vmdesc) { SCSIDevice *s = pv; - SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus); - SCSIRequest *req; - QTAILQ_FOREACH(req, &s->requests, next) { - assert(!req->io_canceled); - assert(req->status == -1 && req->host_status == -1); - assert(req->enqueued); - - qemu_put_sbyte(f, req->retry ? 1 : 2); - qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf)); - qemu_put_be32s(f, &req->tag); - qemu_put_be32s(f, &req->lun); - if (bus->info->save_request) { - bus->info->save_request(f, req); - } - if (req->ops->save_request) { - req->ops->save_request(f, req); - } - } + scsi_device_for_each_req_sync(s, put_scsi_req, f); qemu_put_sbyte(f, 0); - return 0; } @@ -1826,7 +1893,7 @@ static const VMStateDescription vmstate_scsi_sense_state = { .version_id = 1, .minimum_version_id = 1, .needed = scsi_sense_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, SCSI_SENSE_BUF_SIZE_OLD, SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD), @@ -1838,7 +1905,7 @@ const VMStateDescription vmstate_scsi_device = { .name = "SCSIDevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(unit_attention.key, SCSIDevice), VMSTATE_UINT8(unit_attention.asc, SCSIDevice), VMSTATE_UINT8(unit_attention.ascq, SCSIDevice), @@ -1856,7 +1923,7 @@ const VMStateDescription vmstate_scsi_device = { }, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_scsi_sense_state, NULL } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index 6691f5edb841b744a25a090d9c5f2d294a18ec5d..4bd7af9d0c23e49df617e90b0047c1ceab604d8d 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -273,7 +273,9 @@ static void scsi_aio_complete(void *opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -286,7 +288,6 @@ static void scsi_aio_complete(void *opaque, int ret) scsi_req_complete(&r->req, GOOD); done: - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); scsi_req_unref(&r->req); } @@ -354,7 +355,6 @@ done: scsi_req_unref(&r->req); } -/* Called with AioContext lock held */ static void scsi_dma_complete(void *opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; @@ -373,8 +373,13 @@ static void scsi_dma_complete(void *opaque, int ret) static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) { + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); + assert(r->req.aiocb == NULL); if (scsi_disk_req_check_error(r, ret, false)) { goto done; @@ -394,8 +399,6 @@ static void scsi_read_complete(void *opaque, int ret) 
SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -406,7 +409,6 @@ static void scsi_read_complete(void *opaque, int ret) trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); } scsi_read_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } /* Actually issue a read to the block device. */ @@ -448,8 +450,6 @@ static void scsi_do_read_cb(void *opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert (r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -459,7 +459,6 @@ static void scsi_do_read_cb(void *opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_do_read(opaque, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } /* Read more data from scsi device into buffer. */ @@ -505,8 +504,13 @@ static void scsi_read_data(SCSIRequest *req) static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) { + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); uint32_t n; + /* The request must only run in the BlockBackend's AioContext */ + assert(blk_get_aio_context(s->qdev.conf.blk) == + qemu_get_current_aio_context()); + assert (r->req.aiocb == NULL); if (scsi_disk_req_check_error(r, ret, false)) { goto done; @@ -533,8 +537,6 @@ static void scsi_write_complete(void * opaque, int ret) SCSIDiskReq *r = (SCSIDiskReq *)opaque; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert (r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -544,7 +546,6 @@ static void scsi_write_complete(void * opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); } scsi_write_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_write_data(SCSIRequest *req) @@ -1742,8 +1743,6 @@ static void scsi_unmap_complete(void *opaque, int ret) SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -1754,7 +1753,6 @@ static void scsi_unmap_complete(void *opaque, int ret) block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); scsi_unmap_complete_noio(data, ret); } - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) @@ -1822,8 +1820,6 @@ static void scsi_write_same_complete(void *opaque, int ret) SCSIDiskReq *r = data->r; SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); - aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; @@ -1847,7 +1843,6 @@ static void scsi_write_same_complete(void *opaque, int ret) data->sector << BDRV_SECTOR_BITS, &data->qiov, 0, scsi_write_same_complete, data); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); return; } @@ -1857,7 +1852,6 @@ done: scsi_req_unref(&r->req); qemu_vfree(data->iov.iov_base); g_free(data); - aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); } static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) @@ -2344,14 +2338,10 @@ static void scsi_disk_reset(DeviceState *dev) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, 
qdev.qdev, dev); uint64_t nb_sectors; - AioContext *ctx; scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); blk_get_geometry(s->qdev.conf.blk, &nb_sectors); - aio_context_release(ctx); nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; if (nb_sectors) { @@ -2550,15 +2540,13 @@ static void scsi_unrealize(SCSIDevice *dev) static void scsi_hd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx = NULL; + /* can happen for devices without drive. The error message for missing * backend will be issued in scsi_realize */ if (s->qdev.conf.blk) { - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); if (!blkconf_blocksizes(&s->qdev.conf, errp)) { - goto out; + return; } } s->qdev.blocksize = s->qdev.conf.logical_block_size; @@ -2567,16 +2555,11 @@ static void scsi_hd_realize(SCSIDevice *dev, Error **errp) s->product = g_strdup("QEMU HARDDISK"); } scsi_realize(&s->qdev, errp); -out: - if (ctx) { - aio_context_release(ctx); - } } static void scsi_cd_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx; int ret; uint32_t blocksize = 2048; @@ -2592,8 +2575,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) blocksize = dev->conf.physical_block_size; } - ctx = blk_get_aio_context(dev->conf.blk); - aio_context_acquire(ctx); s->qdev.blocksize = blocksize; s->qdev.type = TYPE_ROM; s->features |= 1 << SCSI_DISK_F_REMOVABLE; @@ -2601,7 +2582,6 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) s->product = g_strdup("QEMU CD-ROM"); } scsi_realize(&s->qdev, errp); - aio_context_release(ctx); } @@ -2732,7 +2712,6 @@ static int get_device_type(SCSIDiskState *s) static void scsi_block_realize(SCSIDevice *dev, Error **errp) { SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); - AioContext *ctx; int sg_version; int rc; @@ -2747,9 +2726,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) "be removed in a future version"); } - ctx = blk_get_aio_context(s->qdev.conf.blk); - aio_context_acquire(ctx); - /* check we are using a driver managing SG_IO (version 3 and after) */ rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); if (rc < 0) { @@ -2757,18 +2733,18 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) if (rc != -EPERM) { error_append_hint(errp, "Is this a SCSI device?\n"); } - goto out; + return; } if (sg_version < 30000) { error_setg(errp, "scsi generic interface too old"); - goto out; + return; } /* get device type from INQUIRY data */ rc = get_device_type(s); if (rc < 0) { error_setg(errp, "INQUIRY failed"); - goto out; + return; } /* Make a guess for the block size, we'll fix it when the guest sends. 
@@ -2788,9 +2764,6 @@ static void scsi_block_realize(SCSIDevice *dev, Error **errp) scsi_realize(&s->qdev, errp); scsi_generic_read_device_inquiry(&s->qdev); - -out: - aio_context_release(ctx); } typedef struct SCSIBlockReq { @@ -2810,7 +2783,6 @@ static void scsi_block_sgio_complete(void *opaque, int ret) { SCSIBlockReq *req = (SCSIBlockReq *)opaque; SCSIDiskReq *r = &req->req; - SCSIDevice *s = r->req.dev; sg_io_hdr_t *io_hdr = &req->io_header; if (ret == 0) { @@ -2827,13 +2799,10 @@ static void scsi_block_sgio_complete(void *opaque, int ret) } if (ret > 0) { - aio_context_acquire(blk_get_aio_context(s->conf.blk)); if (scsi_handle_rw_error(r, ret, true)) { - aio_context_release(blk_get_aio_context(s->conf.blk)); scsi_req_unref(&r->req); return; } - aio_context_release(blk_get_aio_context(s->conf.blk)); /* Ignore error. */ ret = 0; @@ -3168,7 +3137,7 @@ static const VMStateDescription vmstate_scsi_disk_state = { .name = "scsi-disk", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), VMSTATE_BOOL(media_changed, SCSIDiskState), VMSTATE_BOOL(media_event, SCSIDiskState), diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 2417f0ad84799059a626d41099cd537bf4bdb836..b7b04e1d637ab0cf576d6cd7d0a89d9c58d664c4 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -109,15 +109,11 @@ done: static void scsi_command_complete(void *opaque, int ret) { SCSIGenericReq *r = (SCSIGenericReq *)opaque; - SCSIDevice *s = r->req.dev; - - aio_context_acquire(blk_get_aio_context(s->conf.blk)); assert(r->req.aiocb != NULL); r->req.aiocb = NULL; scsi_command_complete_noio(r, ret); - aio_context_release(blk_get_aio_context(s->conf.blk)); } static int execute_command(BlockBackend *blk, @@ -274,14 +270,12 @@ static void scsi_read_complete(void * opaque, int ret) SCSIDevice *s = r->req.dev; int len; - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; if (ret || r->req.io_canceled) { scsi_command_complete_noio(r, ret); - goto done; + return; } len = r->io_header.dxfer_len - r->io_header.resid; @@ -320,7 +314,7 @@ static void scsi_read_complete(void * opaque, int ret) r->io_header.status != GOOD || len == 0) { scsi_command_complete_noio(r, 0); - goto done; + return; } /* Snoop READ CAPACITY output to set the blocksize. */ @@ -356,9 +350,6 @@ static void scsi_read_complete(void * opaque, int ret) req_complete: scsi_req_data(&r->req, len); scsi_req_unref(&r->req); - -done: - aio_context_release(blk_get_aio_context(s->conf.blk)); } /* Read more data from scsi device into buffer. */ @@ -391,14 +382,12 @@ static void scsi_write_complete(void * opaque, int ret) trace_scsi_generic_write_complete(ret); - aio_context_acquire(blk_get_aio_context(s->conf.blk)); - assert(r->req.aiocb != NULL); r->req.aiocb = NULL; if (ret || r->req.io_canceled) { scsi_command_complete_noio(r, ret); - goto done; + return; } if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 && @@ -408,9 +397,6 @@ static void scsi_write_complete(void * opaque, int ret) } scsi_command_complete_noio(r, ret); - -done: - aio_context_release(blk_get_aio_context(s->conf.blk)); } /* Write data to a scsi device. Returns nonzero on failure. 
diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index 5bbbef64ef34f59d6f48707cce1cc1ed591109d2..c75a6c88079df38d89bee33a28a8e7d590637c8b 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -605,7 +605,7 @@ static const VMStateDescription vmstate_spapr_vscsi_req = { .name = "spapr_vscsi_req", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(crq.raw, vscsi_req), VMSTATE_BUFFER(viosrp_iu_buf, vscsi_req), VMSTATE_UINT32(qtag, vscsi_req), @@ -1259,7 +1259,7 @@ static const VMStateDescription vmstate_spapr_vscsi = { .name = "spapr_vscsi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(vdev, VSCSIState), /* VSCSI state */ /* ???? */ diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 3126df9e1d9d5553a673128200b494fea22c1ff4..58a00336c2db2502c0a8ce329a710eac47d78280 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -91,13 +91,13 @@ static int vhost_scsi_start(VHostSCSI *s) ret = vhost_scsi_common_start(vsc, &local_err); if (ret < 0) { - error_reportf_err(local_err, "Error starting vhost-scsi"); + error_reportf_err(local_err, "Error starting vhost-scsi: "); return ret; } ret = vhost_scsi_set_endpoint(s); if (ret < 0) { - error_reportf_err(local_err, "Error setting vhost-scsi endpoint"); + error_report("Error setting vhost-scsi endpoint"); vhost_scsi_common_stop(vsc); } @@ -158,13 +158,66 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = { .name = "virtio-vhost_scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, .pre_save = vhost_scsi_pre_save, }; +static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue) +{ + struct vhost_dev *dev = &vsc->dev; + struct vhost_vring_worker vq_worker; + struct vhost_worker_state worker; + int i, ret; + + /* Use default worker */ + if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) { + return 0; + } + + /* + * ctl/evt share the first worker since it will be rare for them + * to send cmds while IO is running. + */ + for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) { + memset(&worker, 0, sizeof(worker)); + + ret = dev->vhost_ops->vhost_new_worker(dev, &worker); + if (ret == -ENOTTY) { + /* + * worker ioctls are not implemented so just ignore and + * continue device setup. + */ + warn_report("vhost-scsi: Backend supports a single worker. " + "Ignoring worker_per_virtqueue=true setting."); + ret = 0; + break; + } else if (ret) { + break; + } + + memset(&vq_worker, 0, sizeof(vq_worker)); + vq_worker.worker_id = worker.worker_id; + vq_worker.index = i; + + ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker); + if (ret == -ENOTTY) { + /* + * It's a bug for the kernel to have supported the worker creation + * ioctl but not attach. 
+ */ + dev->vhost_ops->vhost_free_worker(dev, &worker); + break; + } else if (ret) { + break; + } + } + + return ret; +} + static void vhost_scsi_realize(DeviceState *dev, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); @@ -232,6 +285,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) goto free_vqs; } + ret = vhost_scsi_set_workers(vsc, vs->conf.worker_per_virtqueue); + if (ret < 0) { + error_setg(errp, "vhost-scsi: vhost worker setup failed: %s", + strerror(-ret)); + goto free_vqs; + } + /* At present, channel and lun both are 0 for bootable vhost-scsi disk */ vsc->channel = 0; vsc->lun = 0; @@ -297,6 +357,8 @@ static Property vhost_scsi_properties[] = { VIRTIO_SCSI_F_T10_PI, false), DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false), + DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon, + conf.worker_per_virtqueue, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 780f10559d256016902989d20c495bb533c0099a..a63b1f494824bf81a32bc453490cb07e743557e4 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -83,7 +83,8 @@ static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) if (should_start) { ret = vhost_user_scsi_start(s, &local_err); if (ret < 0) { - error_reportf_err(local_err, "unable to start vhost-user-scsi: %s", + error_reportf_err(local_err, + "unable to start vhost-user-scsi: %s: ", strerror(-ret)); qemu_chr_fe_disconnect(&vs->conf.chardev); } @@ -378,7 +379,7 @@ static const VMStateDescription vmstate_vhost_scsi = { .name = "virtio-scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/scsi/virtio-scsi-dataplane.c b/hw/scsi/virtio-scsi-dataplane.c index 1e684beebe2bac430ba5021338282fce3ebe32a9..2806a121b2483729be3cc1d0205a9006bfe73f2d 100644 --- a/hw/scsi/virtio-scsi-dataplane.c +++ b/hw/scsi/virtio-scsi-dataplane.c @@ -20,7 +20,7 @@ #include "scsi/constants.h" #include "hw/virtio/virtio-bus.h" -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_scsi_dataplane_setup(VirtIOSCSI *s, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); @@ -93,7 +93,7 @@ static void virtio_scsi_dataplane_stop_bh(void *opaque) } } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ int virtio_scsi_dataplane_start(VirtIODevice *vdev) { int i; @@ -149,23 +149,17 @@ int virtio_scsi_dataplane_start(VirtIODevice *vdev) memory_region_transaction_commit(); - /* - * These fields are visible to the IOThread so we rely on implicit barriers - * in aio_context_acquire() on the write side and aio_notify_accept() on - * the read side. 
- */ s->dataplane_starting = false; s->dataplane_started = true; + smp_wmb(); /* paired with aio_notify_accept() */ if (s->bus.drain_count == 0) { - aio_context_acquire(s->ctx); virtio_queue_aio_attach_host_notifier(vs->ctrl_vq, s->ctx); virtio_queue_aio_attach_host_notifier_no_poll(vs->event_vq, s->ctx); for (i = 0; i < vs->conf.num_queues; i++) { virtio_queue_aio_attach_host_notifier(vs->cmd_vqs[i], s->ctx); } - aio_context_release(s->ctx); } return 0; @@ -191,7 +185,7 @@ fail_guest_notifiers: return -ENOSYS; } -/* Context: QEMU global mutex held */ +/* Context: BQL held */ void virtio_scsi_dataplane_stop(VirtIODevice *vdev) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 9c751bf29699697e40a24bc5149583f9380b7cee..690aceec45c9fe1280d8489f9326f7ac21f501b2 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -123,6 +123,30 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req) virtio_scsi_free_req(req); } +static void virtio_scsi_complete_req_bh(void *opaque) +{ + VirtIOSCSIReq *req = opaque; + + virtio_scsi_complete_req(req); +} + +/* + * Called from virtio_scsi_do_one_tmf_bh() in main loop thread. The main loop + * thread cannot touch the virtqueue since that could race with an IOThread. + */ +static void virtio_scsi_complete_req_from_main_loop(VirtIOSCSIReq *req) +{ + VirtIOSCSI *s = req->dev; + + if (!s->ctx || s->ctx == qemu_get_aio_context()) { + /* No need to schedule a BH when there is no IOThread */ + virtio_scsi_complete_req(req); + } else { + /* Run request completion in the IOThread */ + aio_wait_bh_oneshot(s->ctx, virtio_scsi_complete_req_bh, req); + } +} + static void virtio_scsi_bad_req(VirtIOSCSIReq *req) { virtio_error(VIRTIO_DEVICE(req->dev), "wrong size for virtio-scsi headers"); @@ -338,10 +362,7 @@ static void virtio_scsi_do_one_tmf_bh(VirtIOSCSIReq *req) out: object_unref(OBJECT(d)); - - virtio_scsi_acquire(s); - virtio_scsi_complete_req(req); - virtio_scsi_release(s); + virtio_scsi_complete_req_from_main_loop(req); } /* Some TMFs must be processed from the main loop thread */ @@ -354,18 +375,16 @@ static void virtio_scsi_do_tmf_bh(void *opaque) GLOBAL_STATE_CODE(); - virtio_scsi_acquire(s); + WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { + QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { + QTAILQ_REMOVE(&s->tmf_bh_list, req, next); + QTAILQ_INSERT_TAIL(&reqs, req, next); + } - QTAILQ_FOREACH_SAFE(req, &s->tmf_bh_list, next, tmp) { - QTAILQ_REMOVE(&s->tmf_bh_list, req, next); - QTAILQ_INSERT_TAIL(&reqs, req, next); + qemu_bh_delete(s->tmf_bh); + s->tmf_bh = NULL; } - qemu_bh_delete(s->tmf_bh); - s->tmf_bh = NULL; - - virtio_scsi_release(s); - QTAILQ_FOREACH_SAFE(req, &reqs, next, tmp) { QTAILQ_REMOVE(&reqs, req, next); virtio_scsi_do_one_tmf_bh(req); @@ -379,8 +398,7 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s) GLOBAL_STATE_CODE(); - virtio_scsi_acquire(s); - + /* Called after ioeventfd has been stopped, so tmf_bh_lock is not needed */ if (s->tmf_bh) { qemu_bh_delete(s->tmf_bh); s->tmf_bh = NULL; @@ -393,19 +411,19 @@ static void virtio_scsi_reset_tmf_bh(VirtIOSCSI *s) req->resp.tmf.response = VIRTIO_SCSI_S_TARGET_FAILURE; virtio_scsi_complete_req(req); } - - virtio_scsi_release(s); } static void virtio_scsi_defer_tmf_to_bh(VirtIOSCSIReq *req) { VirtIOSCSI *s = req->dev; - QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next); + WITH_QEMU_LOCK_GUARD(&s->tmf_bh_lock) { + QTAILQ_INSERT_TAIL(&s->tmf_bh_list, req, next); - if (!s->tmf_bh) { - s->tmf_bh = 
qemu_bh_new(virtio_scsi_do_tmf_bh, s); - qemu_bh_schedule(s->tmf_bh); + if (!s->tmf_bh) { + s->tmf_bh = qemu_bh_new(virtio_scsi_do_tmf_bh, s); + qemu_bh_schedule(s->tmf_bh); + } } } @@ -624,9 +642,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_ctrl_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_complete_cmd_req(VirtIOSCSIReq *req) @@ -864,9 +880,7 @@ static void virtio_scsi_handle_cmd(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_cmd_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_get_config(VirtIODevice *vdev, @@ -1013,9 +1027,7 @@ static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq) return; } - virtio_scsi_acquire(s); virtio_scsi_handle_event_vq(s, vq); - virtio_scsi_release(s); } static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) @@ -1034,9 +1046,7 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) }, }; - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); - virtio_scsi_release(s); } } @@ -1053,17 +1063,13 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); - AioContext *old_context; int ret; if (s->ctx && !s->dataplane_fenced) { if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { return; } - old_context = blk_get_aio_context(sd->conf.blk); - aio_context_acquire(old_context); ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); - aio_context_release(old_context); if (ret < 0) { return; } @@ -1079,10 +1085,8 @@ static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, }, }; - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); } } @@ -1104,17 +1108,13 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, qdev_simple_device_unplug_cb(hotplug_dev, dev, errp); if (s->ctx) { - virtio_scsi_acquire(s); /* If other users keep the BlockBackend in the iothread, that's ok */ blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); - virtio_scsi_release(s); } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { - virtio_scsi_acquire(s); virtio_scsi_push_event(s, &info); scsi_bus_set_ua(&s->bus, SENSE_CODE(REPORTED_LUNS_CHANGED)); - virtio_scsi_release(s); } } @@ -1235,6 +1235,7 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) Error *err = NULL; QTAILQ_INIT(&s->tmf_bh_list); + qemu_mutex_init(&s->tmf_bh_lock); virtio_scsi_common_realize(dev, virtio_scsi_handle_ctrl, @@ -1277,6 +1278,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev) qbus_set_hotplug_handler(BUS(&s->bus), NULL); virtio_scsi_common_unrealize(dev); + qemu_mutex_destroy(&s->tmf_bh_lock); } static Property virtio_scsi_properties[] = { @@ -1303,7 +1305,7 @@ static const VMStateDescription vmstate_virtio_scsi = { .name = "virtio-scsi", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index 4de34536e98d844841264b06446ae443f9c40b7b..cd7bf6aa015337a13cbe1d8816fba0970bc8f3d7 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1249,7 +1249,7 @@ static bool 
pvscsi_vmstate_test_pci_device(void *opaque, int version_id) static const VMStateDescription vmstate_pvscsi_pcie_device = { .name = "pvscsi/pcie", .needed = pvscsi_vmstate_need_pcie_device, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState), VMSTATE_END_OF_LIST() } @@ -1261,7 +1261,7 @@ static const VMStateDescription vmstate_pvscsi = { .minimum_version_id = 0, .pre_save = pvscsi_pre_save, .post_load = pvscsi_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_TEST(parent_obj, PVSCSIState, pvscsi_vmstate_test_pci_device, 0, vmstate_pci_device, PCIDevice), @@ -1290,7 +1290,7 @@ static const VMStateDescription vmstate_pvscsi = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_pvscsi_pcie_device, NULL } diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 1a576d62ae2fc9c83bb1c581f1338c7a5d7243f8..a1b7230633e7e645335a147c93b16875ce122f4d 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -773,7 +773,7 @@ static const VMStateDescription vmstate_allwinner_sdhost = { .name = "allwinner-sdhost", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(global_ctl, AwSdHostState), VMSTATE_UINT32(clock_ctl, AwSdHostState), VMSTATE_UINT32(timeout, AwSdHostState), diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index e53206d9594638b6934f3b03b6ff8e84a1e2734a..3b63926c3a267a6018ce715509c40d4a7d8cafb7 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -177,7 +177,7 @@ static void aspeed_sdhci_reset(DeviceState *dev) static const VMStateDescription vmstate_aspeed_sdhci = { .name = TYPE_ASPEED_SDHCI, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSDHCIState, ASPEED_SDHCI_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index a600cf39e23ab66bddf456dd370e2af1980bf2e7..11c54dd4a73267abd76f6a1733ff61887a83db64 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -381,7 +381,7 @@ static const VMStateDescription vmstate_bcm2835_sdhost = { .name = TYPE_BCM2835_SDHOST, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cmd, BCM2835SDHostState), VMSTATE_UINT32(cmdarg, BCM2835SDHostState), VMSTATE_UINT32(status, BCM2835SDHostState), diff --git a/hw/sd/cadence_sdhci.c b/hw/sd/cadence_sdhci.c index ef4e0d74e3e748870019167ec3293018fbcc2c47..7c8bc5464b946d65e290e0e44825801529176d0f 100644 --- a/hw/sd/cadence_sdhci.c +++ b/hw/sd/cadence_sdhci.c @@ -159,7 +159,7 @@ static void cadence_sdhci_realize(DeviceState *dev, Error **errp) static const VMStateDescription vmstate_cadence_sdhci = { .name = TYPE_CADENCE_SDHCI, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CadenceSDHCIState, CADENCE_SDHCI_NUM_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c index 9958680090f28d11f4c2970798f70a122c1a13f4..e93dab8dbd531f2bcc65cf9dc4ff49054e5845e9 100644 --- a/hw/sd/npcm7xx_sdhci.c +++ b/hw/sd/npcm7xx_sdhci.c @@ -142,7 +142,7 @@ static void npcm7xx_sdhci_reset(DeviceState *dev) static const VMStateDescription vmstate_npcm7xx_sdhci = { .name = TYPE_NPCM7XX_SDHCI, .version_id = 0, - .fields = (VMStateField[]) { 
+ .fields = (const VMStateField[]) { VMSTATE_UINT32(regs.boottoctrl, NPCM7xxSDHCIState), VMSTATE_END_OF_LIST(), }, diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index 2b33814d830edb5b08d4687edaebb1128466ae69..e3633c2e6fcb56c2f7c692245180e70682409e2c 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_pl181 = { .name = "pl181", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(clock, PL181State), VMSTATE_UINT32(power, PL181State), VMSTATE_UINT32(cmdarg, PL181State), diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 5e8ea691886b0902c4e090768b483017fb8db382..82529708c8adb5f70206e6e11dafff2b8aca69b5 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -84,7 +84,7 @@ static const VMStateDescription vmstate_pxa2xx_mmci = { .name = "pxa2xx-mmci", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(status, PXA2xxMMCIState), VMSTATE_UINT32(clkrt, PXA2xxMMCIState), VMSTATE_UINT32(spi, PXA2xxMMCIState), diff --git a/hw/sd/sd.c b/hw/sd/sd.c index 1106ff7d785dac16b80c65afaab7d015ba0ad900..807b5d3de321661017109232e168093868809514 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -681,7 +681,7 @@ static const VMStateDescription sd_ocr_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sd_ocr_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ocr, SDState), VMSTATE_TIMER_PTR(ocr_power_timer, SDState), VMSTATE_END_OF_LIST() @@ -706,7 +706,7 @@ static const VMStateDescription sd_vmstate = { .version_id = 2, .minimum_version_id = 2, .pre_load = sd_vmstate_pre_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mode, SDState), VMSTATE_INT32(state, SDState), VMSTATE_UINT8_ARRAY(cid, SDState, 16), @@ -733,7 +733,7 @@ static const VMStateDescription sd_vmstate = { VMSTATE_BOOL(enable, SDState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &sd_ocr_vmstate, NULL }, diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 40473b0db099a6289181ffe2795e19ccbe532ae9..c5e0bc018b09cb74e599edc0d6e6fc2798e3235d 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -1457,7 +1457,7 @@ static const VMStateDescription sdhci_pending_insert_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = sdhci_pending_insert_vmstate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(pending_insert_state, SDHCIState), VMSTATE_END_OF_LIST() }, @@ -1467,7 +1467,7 @@ const VMStateDescription sdhci_vmstate = { .name = "sdhci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(sdmasysad, SDHCIState), VMSTATE_UINT16(blksize, SDHCIState), VMSTATE_UINT16(blkcnt, SDHCIState), @@ -1498,7 +1498,7 @@ const VMStateDescription sdhci_vmstate = { VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &sdhci_pending_insert_vmstate, NULL }, diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index a6cc1ad6c898bc9d91008d7500d3f543f8149ee3..2dd070f978cc18a7d329d4378e5ec7672f411f89 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -350,7 +350,7 @@ static const VMStateDescription vmstate_ssi_sd = { .version_id = 7, .minimum_version_id = 7, 
.post_load = ssi_sd_post_load, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINT32(mode, ssi_sd_state), VMSTATE_INT32(cmd, ssi_sd_state), VMSTATE_UINT8_ARRAY(cmdarg, ssi_sd_state, 4), diff --git a/hw/sensor/adm1266.c b/hw/sensor/adm1266.c index 5ae4f82ba1696a446a523d6709d0465406d37d53..5454b73a639836e3028dc5fabdf9b0357d866883 100644 --- a/hw/sensor/adm1266.c +++ b/hw/sensor/adm1266.c @@ -202,7 +202,7 @@ static const VMStateDescription vmstate_adm1266 = { .name = "ADM1266", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, ADM1266State), VMSTATE_END_OF_LIST() } diff --git a/hw/sensor/adm1272.c b/hw/sensor/adm1272.c index 8f4a1c2cd4bd915f543e9d205e50dc15992f8b38..1f7c8abb838e82717bb3ea7258fcfb2fddbbdac3 100644 --- a/hw/sensor/adm1272.c +++ b/hw/sensor/adm1272.c @@ -457,7 +457,7 @@ static const VMStateDescription vmstate_adm1272 = { .name = "ADM1272", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, ADM1272State), VMSTATE_UINT64(ein_ext, ADM1272State), VMSTATE_UINT32(pin_ext, ADM1272State), diff --git a/hw/sensor/dps310.c b/hw/sensor/dps310.c index addee99b196bfbd404830713d51a5fce98d95220..01c776dd7a839ebe96f34d1d38fc4af00d22c3d5 100644 --- a/hw/sensor/dps310.c +++ b/hw/sensor/dps310.c @@ -188,7 +188,7 @@ static const VMStateDescription vmstate_dps310 = { .name = "DPS310", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, DPS310State), VMSTATE_UINT8_ARRAY(regs, DPS310State, NUM_REGISTERS), VMSTATE_UINT8(pointer, DPS310State), diff --git a/hw/sensor/emc141x.c b/hw/sensor/emc141x.c index 7ce8f4e97943f0faf7d88d18a738a2fbda81c057..95079558e87cd2f588c73b0ae4ddc0469e9f54e6 100644 --- a/hw/sensor/emc141x.c +++ b/hw/sensor/emc141x.c @@ -228,7 +228,7 @@ static const VMStateDescription vmstate_emc141x = { .name = "EMC141X", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, EMC141XState), VMSTATE_UINT8(data, EMC141XState), VMSTATE_UINT8(pointer, EMC141XState), diff --git a/hw/sensor/lsm303dlhc_mag.c b/hw/sensor/lsm303dlhc_mag.c index bb8d48b2fdb0fa54b565915cc6484e45437c8b4a..343ff989904c69340af72f55c43dc44affdeda28 100644 --- a/hw/sensor/lsm303dlhc_mag.c +++ b/hw/sensor/lsm303dlhc_mag.c @@ -442,7 +442,7 @@ static const VMStateDescription vmstate_lsm303dlhc_mag = { .name = "LSM303DLHC_MAG", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_I2C_SLAVE(parent_obj, LSM303DLHCMagState), VMSTATE_UINT8(len, LSM303DLHCMagState), diff --git a/hw/sensor/max31785.c b/hw/sensor/max31785.c index 8b95e324814b4a83f686b8df61f22cf5f1604fff..916ed4d457ba3c9ab840d2e3e583aebaac881af3 100644 --- a/hw/sensor/max31785.c +++ b/hw/sensor/max31785.c @@ -487,7 +487,7 @@ static const VMStateDescription vmstate_max31785 = { .name = TYPE_MAX31785, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, MAX31785State), VMSTATE_UINT16_ARRAY(mfr_mode, MAX31785State, MAX31785_TOTAL_NUM_PAGES), diff --git a/hw/sensor/max34451.c b/hw/sensor/max34451.c index 9db52ef6778721935dfebde8d7d7979724bd520f..031ae53f594c9ca9f5f474a58b6774c606fd53d8 100644 --- a/hw/sensor/max34451.c +++ b/hw/sensor/max34451.c @@ -654,7 +654,7 @@ static const 
VMStateDescription vmstate_max34451 = { .name = TYPE_MAX34451, .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_PMBUS_DEVICE(parent, MAX34451State), VMSTATE_UINT16_ARRAY(power_good_on, MAX34451State, MAX34451_NUM_PWR_DEVICES), diff --git a/hw/sensor/tmp105.c b/hw/sensor/tmp105.c index 20564494899f2e8ea988208ec69d80b8bed9dd54..a8730d0b7f97362110163a08e1b5e06f52a2a166 100644 --- a/hw/sensor/tmp105.c +++ b/hw/sensor/tmp105.c @@ -238,7 +238,7 @@ static const VMStateDescription vmstate_tmp105_detect_falling = { .version_id = 1, .minimum_version_id = 1, .needed = detect_falling_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(detect_falling, TMP105State), VMSTATE_END_OF_LIST() } @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_tmp105 = { .version_id = 0, .minimum_version_id = 0, .post_load = tmp105_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, TMP105State), VMSTATE_UINT8_ARRAY(buf, TMP105State, 2), VMSTATE_UINT8(pointer, TMP105State), @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_tmp105 = { VMSTATE_I2C_SLAVE(i2c, TMP105State), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_tmp105_detect_falling, NULL } diff --git a/hw/sensor/tmp421.c b/hw/sensor/tmp421.c index a3db57dcb5a70718ec8b5c353314b362b848822f..b6f0b62ab11b08243c803eda468fb20716d5c86b 100644 --- a/hw/sensor/tmp421.c +++ b/hw/sensor/tmp421.c @@ -290,7 +290,7 @@ static const VMStateDescription vmstate_tmp421 = { .name = "TMP421", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(len, TMP421State), VMSTATE_UINT8_ARRAY(buf, TMP421State, 2), VMSTATE_UINT8(pointer, TMP421State), diff --git a/hw/smbios/meson.build b/hw/smbios/meson.build index 6eeae4b35c22766a9806217a19f76a23c915291f..70469674621d121ed3fd78671e2cc2c9c70c67a4 100644 --- a/hw/smbios/meson.build +++ b/hw/smbios/meson.build @@ -6,8 +6,3 @@ smbios_ss.add(when: 'CONFIG_IPMI', system_ss.add_all(when: 'CONFIG_SMBIOS', if_true: smbios_ss) system_ss.add(when: 'CONFIG_SMBIOS', if_false: files('smbios-stub.c')) - -system_ss.add(when: 'CONFIG_ALL', if_true: files( - 'smbios-stub.c', - 'smbios_type_38-stub.c', -)) diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c index 1e39d2e2d0ac4d0c52fcba0379dfd773cdc2b1ba..2dfb742566c400f4a2097b9b36e3ac417db4e9f2 100644 --- a/hw/sparc/leon3.c +++ b/hw/sparc/leon3.c @@ -164,9 +164,9 @@ static void leon3_cache_control_int(CPUSPARCState *env) } } -static void leon3_irq_ack(void *irq_manager, int intno) +static void leon3_irq_ack(CPUSPARCState *env, int intno) { - grlib_irqmp_ack((DeviceState *)irq_manager, intno); + grlib_irqmp_ack(env->irq_manager, intno); } /* @@ -208,9 +208,9 @@ static void leon3_set_pil_in(void *opaque, int n, int level) } } -static void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno) +static void leon3_irq_manager(CPUSPARCState *env, int intno) { - leon3_irq_ack(irq_manager, intno); + leon3_irq_ack(env, intno); leon3_cache_control_int(env); } diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 17bf5f28791a0650ff8f4670fb0a78ed258d9f7c..550af01690ffe85e05a9c217cbf1c5253e36f964 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -577,12 +577,9 @@ static void idreg_realize(DeviceState *ds, Error **errp) { IDRegState *s = MACIO_ID_REGISTER(ds); SysBusDevice *dev = 
SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.idreg", - sizeof(idreg_data), &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.idreg", + sizeof(idreg_data), errp)) { return; } @@ -631,12 +628,9 @@ static void afx_realize(DeviceState *ds, Error **errp) { AFXState *s = TCX_AFX(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.afx", 4, - &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->mem, OBJECT(ds), "sun4m.afx", + 4, errp)) { return; } @@ -715,12 +709,9 @@ static void prom_realize(DeviceState *ds, Error **errp) { PROMState *s = OPENPROM(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4m.prom", - PROM_SIZE_MAX, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4m.prom", + PROM_SIZE_MAX, errp)) { return; } @@ -804,7 +795,7 @@ static void cpu_devinit(const char *cpu_type, unsigned int id, qemu_register_reset(sun4m_cpu_reset, cpu); object_property_set_bool(OBJECT(cpu), "start-powered-off", id != 0, - &error_fatal); + &error_abort); qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); cpu_sparc_set_id(env, id); *cpu_irqs = qemu_allocate_irqs(cpu_set_irq, cpu, MAX_PILS); diff --git a/hw/sparc/sun4m_iommu.c b/hw/sparc/sun4m_iommu.c index eb40f9377c11ad788c706c38a310723cb4751bd6..06703b1d96e43a41be6d9c8afdc3e5ff515b5f6b 100644 --- a/hw/sparc/sun4m_iommu.c +++ b/hw/sparc/sun4m_iommu.c @@ -331,7 +331,7 @@ static const VMStateDescription vmstate_iommu = { .name = "iommu", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IOMMUState, IOMMU_NREGS), VMSTATE_UINT64(iostart, IOMMUState), VMSTATE_END_OF_LIST() diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index c871170378490e9c31b01b688493c5c7ce647b32..24d53bf5fd08cd9cbdec550917fa5973c41d8893 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -454,12 +454,9 @@ static void prom_realize(DeviceState *ds, Error **errp) { PROMState *s = OPENPROM(ds); SysBusDevice *dev = SYS_BUS_DEVICE(ds); - Error *local_err = NULL; - memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4u.prom", - PROM_SIZE_MAX, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (!memory_region_init_ram_nomigrate(&s->prom, OBJECT(ds), "sun4u.prom", + PROM_SIZE_MAX, errp)) { return; } diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 2a4001b774a2f5c66a6c794763e99276b2bb1d94..3c93936fd1ff98d20b6d6a940768f488d433d879 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -1201,7 +1201,7 @@ static const VMStateDescription vmstate_aspeed_smc = { .name = "aspeed.smc", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, AspeedSMCState, ASPEED_SMC_R_MAX), VMSTATE_UINT8(snoop_index, AspeedSMCState), VMSTATE_UINT8(snoop_dummies, AspeedSMCState), diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c index c300ec294d8733375ded0a68d835cad85bc63c27..863b5fd60e963a767dd992a03380987ab390a84e 100644 --- a/hw/ssi/ibex_spi_host.c +++ b/hw/ssi/ibex_spi_host.c @@ -570,7 +570,7 @@ static const VMStateDescription 
vmstate_ibex = { .name = TYPE_IBEX_SPI_HOST, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS), VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState, num_cs, 0, vmstate_info_uint32, uint32_t), diff --git a/hw/ssi/imx_spi.c b/hw/ssi/imx_spi.c index 189423bb3a539c9f8d7123053061dd090580204e..d8a7583ff3413cbd42f65653721d65d1362342b5 100644 --- a/hw/ssi/imx_spi.c +++ b/hw/ssi/imx_spi.c @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_imx_spi = { .name = TYPE_IMX_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(tx_fifo, IMXSPIState), VMSTATE_FIFO32(rx_fifo, IMXSPIState), VMSTATE_INT16(burst_length, IMXSPIState), diff --git a/hw/ssi/mss-spi.c b/hw/ssi/mss-spi.c index b2432c5a132837d649a5cd6d3955d43b53b5fbfd..1d25ba23aa5a38dd9254268f47bb75270d15cdb7 100644 --- a/hw/ssi/mss-spi.c +++ b/hw/ssi/mss-spi.c @@ -390,7 +390,7 @@ static const VMStateDescription vmstate_mss_spi = { .name = TYPE_MSS_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO32(tx_fifo, MSSSpiState), VMSTATE_FIFO32(rx_fifo, MSSSpiState), VMSTATE_UINT32_ARRAY(regs, MSSSpiState, R_SPI_MAX), diff --git a/hw/ssi/npcm7xx_fiu.c b/hw/ssi/npcm7xx_fiu.c index 4eedb2927e73135999b20c1edf68f58d9b975357..81dd972ee8cb1902f3fac6b13764f144b670afeb 100644 --- a/hw/ssi/npcm7xx_fiu.c +++ b/hw/ssi/npcm7xx_fiu.c @@ -534,7 +534,7 @@ static const VMStateDescription vmstate_npcm7xx_fiu = { .name = "npcm7xx-fiu", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(active_cs, NPCM7xxFIUState), VMSTATE_UINT32_ARRAY(regs, NPCM7xxFIUState, NPCM7XX_FIU_NR_REGS), VMSTATE_END_OF_LIST(), diff --git a/hw/ssi/npcm_pspi.c b/hw/ssi/npcm_pspi.c index 3fb935043ab75f2a69724ff15cf82b2eef806321..41a53235303beac8c5731d1f4c0bbe5f173cb604 100644 --- a/hw/ssi/npcm_pspi.c +++ b/hw/ssi/npcm_pspi.c @@ -192,7 +192,7 @@ static const VMStateDescription vmstate_npcm_pspi = { .name = "npcm-pspi", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(regs, NPCMPSPIState, NPCM_PSPI_NR_REGS), VMSTATE_END_OF_LIST(), }, diff --git a/hw/ssi/pl022.c b/hw/ssi/pl022.c index 8954ffebb1fbfd320ec767bfcb88f684a136bd4f..b8be8ddf0eaf85cbfd5e4e1723b4d2fc9b2dc64b 100644 --- a/hw/ssi/pl022.c +++ b/hw/ssi/pl022.c @@ -249,7 +249,7 @@ static const VMStateDescription vmstate_pl022 = { .version_id = 1, .minimum_version_id = 1, .post_load = pl022_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr0, PL022State), VMSTATE_UINT32(cr1, PL022State), VMSTATE_UINT32(bitmask, PL022State), diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index 1f3e540ab8a10f19857def310c46c0eee41aaff1..3f357e8f16abaa259e7766cb6c8274b3d964c17c 100644 --- a/hw/ssi/ssi.c +++ b/hw/ssi/ssi.c @@ -172,7 +172,7 @@ const VMStateDescription vmstate_ssi_peripheral = { .name = "SSISlave", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(cs, SSIPeripheral), VMSTATE_END_OF_LIST() } diff --git a/hw/ssi/stm32f2xx_spi.c b/hw/ssi/stm32f2xx_spi.c index cd6e8443db3003103958414ed16f87f360ba571d..a37139fe5acd67f09f30897907ac68e533bd026f 100644 --- a/hw/ssi/stm32f2xx_spi.c +++ b/hw/ssi/stm32f2xx_spi.c @@ -174,7 
+174,7 @@ static const VMStateDescription vmstate_stm32f2xx_spi = { .name = TYPE_STM32F2XX_SPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(spi_cr1, STM32F2XXSPIState), VMSTATE_UINT32(spi_cr2, STM32F2XXSPIState), VMSTATE_UINT32(spi_sr, STM32F2XXSPIState), diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index d4de2e7aabc2905e20b02daca29d1c236daee4b1..2e0687ac9075fd07aab1f12f2430d2b6220faa55 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_xilinx_spi = { .name = "xilinx_spi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(tx_fifo, XilinxSPI), VMSTATE_FIFO8(rx_fifo, XilinxSPI), VMSTATE_UINT32_ARRAY(regs, XilinxSPI, R_MAX), diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index 0bdfad7e2e5245ddf28909d7fb7fe448ba34d900..71952a410d8711be4f0f45c1ed0b372600d8b771 100644 --- a/hw/ssi/xilinx_spips.c +++ b/hw/ssi/xilinx_spips.c @@ -1369,7 +1369,7 @@ static const VMStateDescription vmstate_xilinx_spips = { .version_id = 2, .minimum_version_id = 2, .post_load = xilinx_spips_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(tx_fifo, XilinxSPIPS), VMSTATE_FIFO8(rx_fifo, XilinxSPIPS), VMSTATE_UINT32_ARRAY(regs, XilinxSPIPS, XLNX_SPIPS_R_MAX), @@ -1395,7 +1395,7 @@ static const VMStateDescription vmstate_xilinx_qspips = { .name = "xilinx_qspips", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, XilinxQSPIPS, 0, vmstate_xilinx_spips, XilinxSPIPS), VMSTATE_END_OF_LIST() @@ -1407,7 +1407,7 @@ static const VMStateDescription vmstate_xlnx_zynqmp_qspips = { .version_id = 1, .minimum_version_id = 1, .post_load = xlnx_zynqmp_qspips_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(parent_obj, XlnxZynqMPQSPIPS, 0, vmstate_xilinx_qspips, XilinxQSPIPS), VMSTATE_FIFO8(tx_fifo_g, XlnxZynqMPQSPIPS), diff --git a/hw/ssi/xlnx-versal-ospi.c b/hw/ssi/xlnx-versal-ospi.c index 1a61679c2fecce8e818a465cb4283966f9fb9d6f..c7b95b1f377c6753f6cfb9104e21f1131084becb 100644 --- a/hw/ssi/xlnx-versal-ospi.c +++ b/hw/ssi/xlnx-versal-ospi.c @@ -1787,7 +1787,7 @@ static const VMStateDescription vmstate_ind_op = { .name = "OSPIIndOp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(flash_addr, IndOp), VMSTATE_UINT32(num_bytes, IndOp), VMSTATE_UINT32(done_bytes, IndOp), @@ -1800,7 +1800,7 @@ static const VMStateDescription vmstate_xlnx_versal_ospi = { .name = TYPE_XILINX_VERSAL_OSPI, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FIFO8(rx_fifo, XlnxVersalOspi), VMSTATE_FIFO8(tx_fifo, XlnxVersalOspi), VMSTATE_FIFO8(rx_sram, XlnxVersalOspi), diff --git a/hw/timer/a9gtimer.c b/hw/timer/a9gtimer.c index 5e959b6d09fcf9e587fbfffe0d6b1bc8a981902e..a2ac5bdfb990804d580bf448ad693de24793a245 100644 --- a/hw/timer/a9gtimer.c +++ b/hw/timer/a9gtimer.c @@ -328,7 +328,7 @@ static const VMStateDescription vmstate_a9_gtimer_per_cpu = { .name = "arm.cortex-a9-global-timer.percpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9GTimerPerCPU), VMSTATE_UINT64(compare, A9GTimerPerCPU), VMSTATE_UINT32(status, 
A9GTimerPerCPU), @@ -342,7 +342,7 @@ static const VMStateDescription vmstate_a9_gtimer_control = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_a9_gtimer_control_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, A9GTimerState), VMSTATE_END_OF_LIST() } @@ -352,7 +352,7 @@ static const VMStateDescription vmstate_a9_gtimer = { .name = "arm.cortex-a9-global-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, A9GTimerState), VMSTATE_UINT64(counter, A9GTimerState), VMSTATE_UINT64(ref_counter, A9GTimerState), @@ -362,7 +362,7 @@ static const VMStateDescription vmstate_a9_gtimer = { A9GTimerPerCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_a9_gtimer_control, NULL } diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c index 971f78462ab40b5d631f62c63e5066b15980dbdc..a524de13817827dbe45912cf55a29f2addb6bec1 100644 --- a/hw/timer/allwinner-a10-pit.c +++ b/hw/timer/allwinner-a10-pit.c @@ -200,7 +200,7 @@ static const VMStateDescription vmstate_a10_pit = { .name = "a10.pit", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(irq_enable, AwA10PITState), VMSTATE_UINT32(irq_status, AwA10PITState), VMSTATE_UINT32_ARRAY(control, AwA10PITState, AW_A10_PIT_TIMER_NR), diff --git a/hw/timer/arm_mptimer.c b/hw/timer/arm_mptimer.c index cdfca3000bee288e1ab976f2334f3bfd2b7e7165..bca4cee0e4ee1e4be61a8694cce73913de0c39ec 100644 --- a/hw/timer/arm_mptimer.c +++ b/hw/timer/arm_mptimer.c @@ -281,7 +281,7 @@ static const VMStateDescription vmstate_timerblock = { .name = "arm_mptimer_timerblock", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, TimerBlock), VMSTATE_UINT32(status, TimerBlock), VMSTATE_PTIMER(timer, TimerBlock), @@ -293,7 +293,7 @@ static const VMStateDescription vmstate_arm_mptimer = { .name = "arm_mptimer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_UINT32(timerblock, ARMMPTimerState, num_cpu, 3, vmstate_timerblock, TimerBlock), VMSTATE_END_OF_LIST() diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c index 9afe8da831fb93a941cf35201ac6828595f6ac70..0940e03f1d9a9dcb3c17f891fb5780ea3e901097 100644 --- a/hw/timer/arm_timer.c +++ b/hw/timer/arm_timer.c @@ -163,7 +163,7 @@ static const VMStateDescription vmstate_arm_timer = { .name = "arm_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(control, arm_timer_state), VMSTATE_UINT32(limit, arm_timer_state), VMSTATE_INT32(int_level, arm_timer_state), @@ -282,7 +282,7 @@ static const VMStateDescription vmstate_sp804 = { .name = "sp804", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_ARRAY(level, SP804State, 2), VMSTATE_END_OF_LIST() } diff --git a/hw/timer/armv7m_systick.c b/hw/timer/armv7m_systick.c index 5dfe39afe36d9ca9de4c6f5e0e260d0a56232956..f6b1acef27122fffa67620228307f5cc697a714f 100644 --- a/hw/timer/armv7m_systick.c +++ b/hw/timer/armv7m_systick.c @@ -275,7 +275,7 @@ static const VMStateDescription vmstate_systick = { .name = "armv7m_systick", .version_id = 3, .minimum_version_id = 
3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(refclk, SysTickState), VMSTATE_CLOCK(cpuclk, SysTickState), VMSTATE_UINT32(control, SysTickState), diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c index 72161f07bbee828466f98fdb4ca1fc80abf7d478..fc5c94bdf36ac1c52830f83342c23069f7b99755 100644 --- a/hw/timer/aspeed_timer.c +++ b/hw/timer/aspeed_timer.c @@ -645,7 +645,7 @@ static const VMStateDescription vmstate_aspeed_timer = { .name = "aspeed.timer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(id, AspeedTimer), VMSTATE_INT32(level, AspeedTimer), VMSTATE_TIMER(timer, AspeedTimer), @@ -659,7 +659,7 @@ static const VMStateDescription vmstate_aspeed_timer_state = { .name = "aspeed.timerctrl", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, AspeedTimerCtrlState), VMSTATE_UINT32(ctrl2, AspeedTimerCtrlState), VMSTATE_UINT32(ctrl3, AspeedTimerCtrlState), diff --git a/hw/timer/bcm2835_systmr.c b/hw/timer/bcm2835_systmr.c index 67669a57ff351d61f10c068c8c4a871c80769323..3ec64604ee512beabc3d67e708061db518c047ba 100644 --- a/hw/timer/bcm2835_systmr.c +++ b/hw/timer/bcm2835_systmr.c @@ -146,7 +146,7 @@ static const VMStateDescription bcm2835_systmr_vmstate = { .name = "bcm2835_sys_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg.ctrl_status, BCM2835SystemTimerState), VMSTATE_UINT32_ARRAY(reg.compare, BCM2835SystemTimerState, BCM2835_SYSTIMER_COUNT), diff --git a/hw/timer/cadence_ttc.c b/hw/timer/cadence_ttc.c index e57a0f5f09f74271bf171058580c14acfa0c89e2..54dbd4c564632dfe93c6e606874cab86de7a5582 100644 --- a/hw/timer/cadence_ttc.c +++ b/hw/timer/cadence_ttc.c @@ -425,7 +425,7 @@ static const VMStateDescription vmstate_cadence_timer = { .minimum_version_id = 1, .pre_save = cadence_timer_pre_save, .post_load = cadence_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_clock, CadenceTimerState), VMSTATE_UINT32(reg_count, CadenceTimerState), VMSTATE_UINT32(reg_value, CadenceTimerState), @@ -443,7 +443,7 @@ static const VMStateDescription vmstate_cadence_ttc = { .name = "cadence_TTC", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(timer, CadenceTTCState, 3, 0, vmstate_cadence_timer, CadenceTimerState), diff --git a/hw/timer/cmsdk-apb-dualtimer.c b/hw/timer/cmsdk-apb-dualtimer.c index d4a509c798ea70a6fd0bbf3c4caef288d4072355..ddf9070c3c0f362677900d3ad79641fc7ac1d27a 100644 --- a/hw/timer/cmsdk-apb-dualtimer.c +++ b/hw/timer/cmsdk-apb-dualtimer.c @@ -508,7 +508,7 @@ static const VMStateDescription cmsdk_dualtimermod_vmstate = { .name = "cmsdk-apb-dualtimer-module", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, CMSDKAPBDualTimerModule), VMSTATE_UINT32(load, CMSDKAPBDualTimerModule), VMSTATE_UINT32(value, CMSDKAPBDualTimerModule), @@ -522,7 +522,7 @@ static const VMStateDescription cmsdk_apb_dualtimer_vmstate = { .name = "cmsdk-apb-dualtimer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(timclk, CMSDKAPBDualTimer), VMSTATE_STRUCT_ARRAY(timermod, CMSDKAPBDualTimer, CMSDK_APB_DUALTIMER_NUM_MODULES, diff --git 
a/hw/timer/cmsdk-apb-timer.c b/hw/timer/cmsdk-apb-timer.c index 68aa1a76360d303613a57b856b811ddc922e82cc..814545c783216de8fe8fbf25429d11c17e45faaf 100644 --- a/hw/timer/cmsdk-apb-timer.c +++ b/hw/timer/cmsdk-apb-timer.c @@ -250,7 +250,7 @@ static const VMStateDescription cmsdk_apb_timer_vmstate = { .name = "cmsdk-apb-timer", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, CMSDKAPBTimer), VMSTATE_CLOCK(pclk, CMSDKAPBTimer), VMSTATE_UINT32(ctrl, CMSDKAPBTimer), diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c index 973eab4386e1acc499cb1ee7b22e0f841cfc3e0d..9fc5c1d8a46185fe02380aa843f8bab8a5e1e158 100644 --- a/hw/timer/digic-timer.c +++ b/hw/timer/digic-timer.c @@ -39,7 +39,7 @@ static const VMStateDescription vmstate_digic_timer = { .name = "digic.timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, DigicTimerState), VMSTATE_UINT32(control, DigicTimerState), VMSTATE_UINT32(relvalue, DigicTimerState), diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c index f035b745601f3d07a8da0884f0c2d8871237f629..da7c946af523d8c56a4d0d2d5ddf342a8f62c32a 100644 --- a/hw/timer/etraxfs_timer.c +++ b/hw/timer/etraxfs_timer.c @@ -88,7 +88,7 @@ static const VMStateDescription vmstate_etraxfs = { .name = "etraxfs", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer_t0, ETRAXTimerState), VMSTATE_PTIMER(ptimer_t1, ETRAXTimerState), VMSTATE_PTIMER(ptimer_wd, ETRAXTimerState), diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c index 446bbd2b96cf3c65d8c15b1ddb13c39f2937065f..75098cdb555cc344004941cc4f874e1a85781dbd 100644 --- a/hw/timer/exynos4210_mct.c +++ b/hw/timer/exynos4210_mct.c @@ -264,7 +264,7 @@ static const VMStateDescription vmstate_tick_timer = { .name = "exynos4210.mct.tick_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cnt_run, struct tick_timer), VMSTATE_UINT32(int_run, struct tick_timer), VMSTATE_UINT32(last_icnto, struct tick_timer), @@ -283,7 +283,7 @@ static const VMStateDescription vmstate_lregs = { .name = "exynos4210.mct.lregs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(cnt, struct lregs, L_REG_CNT_AMOUNT), VMSTATE_UINT32(tcon, struct lregs), VMSTATE_UINT32(int_cstat, struct lregs), @@ -297,7 +297,7 @@ static const VMStateDescription vmstate_exynos4210_mct_lt = { .name = "exynos4210.mct.lt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, Exynos4210MCTLT), VMSTATE_STRUCT(tick_timer, Exynos4210MCTLT, 0, vmstate_tick_timer, @@ -314,7 +314,7 @@ static const VMStateDescription vmstate_gregs = { .name = "exynos4210.mct.lregs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(cnt, struct gregs), VMSTATE_UINT32(cnt_wstat, struct gregs), VMSTATE_UINT32(tcon, struct gregs), @@ -332,7 +332,7 @@ static const VMStateDescription vmstate_exynos4210_mct_gt = { .name = "exynos4210.mct.lt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(reg, Exynos4210MCTGT, 0, vmstate_gregs, struct gregs), VMSTATE_UINT64(count, 
Exynos4210MCTGT), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_exynos4210_mct_state = { .name = "exynos4210.mct", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(reg_mct_cfg, Exynos4210MCTState), VMSTATE_STRUCT_ARRAY(l_timer, Exynos4210MCTState, 2, 0, vmstate_exynos4210_mct_lt, Exynos4210MCTLT), diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c index 3528d0f33ab66268e11a4e3bb7f13e48f6c7326c..ca330e9446c44e22a00980406dbb895e885f37ec 100644 --- a/hw/timer/exynos4210_pwm.c +++ b/hw/timer/exynos4210_pwm.c @@ -123,7 +123,7 @@ static const VMStateDescription vmstate_exynos4210_pwm = { .name = "exynos4210.pwm.pwm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, Exynos4210PWM), VMSTATE_UINT32(freq, Exynos4210PWM), VMSTATE_PTIMER(ptimer, Exynos4210PWM), @@ -137,7 +137,7 @@ static const VMStateDescription vmstate_exynos4210_pwm_state = { .name = "exynos4210.pwm", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(reg_tcfg, Exynos4210PWMState, 2), VMSTATE_UINT32(reg_tcon, Exynos4210PWMState), VMSTATE_UINT32(reg_tint_cstat, Exynos4210PWMState), diff --git a/hw/timer/hpet.c b/hw/timer/hpet.c index 6998094233a7dbd3d3faa9ad1ea114c330fdc04e..f2f1580f81729794ada0c88e4ed87611e9bbe23c 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -296,7 +296,7 @@ static const VMStateDescription vmstate_hpet_rtc_irq_level = { .version_id = 1, .minimum_version_id = 1, .needed = hpet_rtc_irq_level_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(rtc_irq_level, HPETState), VMSTATE_END_OF_LIST() } @@ -307,7 +307,7 @@ static const VMStateDescription vmstate_hpet_offset = { .version_id = 1, .minimum_version_id = 1, .needed = hpet_offset_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(hpet_offset, HPETState), VMSTATE_END_OF_LIST() } @@ -317,7 +317,7 @@ static const VMStateDescription vmstate_hpet_timer = { .name = "hpet_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(tn, HPETTimer), VMSTATE_UINT64(config, HPETTimer), VMSTATE_UINT64(cmp, HPETTimer), @@ -336,7 +336,7 @@ static const VMStateDescription vmstate_hpet = { .pre_save = hpet_pre_save, .pre_load = hpet_pre_load, .post_load = hpet_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(config, HPETState), VMSTATE_UINT64(isr, HPETState), VMSTATE_UINT64(hpet_counter, HPETState), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_hpet = { vmstate_hpet_timer, HPETTimer), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_hpet_rtc_irq_level, &vmstate_hpet_offset, NULL diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c index b25da448c86a9121e46b77f97766c6ffb158d91d..28fdabc321879efe3a35196a420f56860ebf8665 100644 --- a/hw/timer/i8254_common.c +++ b/hw/timer/i8254_common.c @@ -180,7 +180,7 @@ static const VMStateDescription vmstate_pit_channel = { .name = "pit channel", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(count, PITChannelState), VMSTATE_UINT16(latched_count, PITChannelState), VMSTATE_UINT8(count_latched, 
PITChannelState), @@ -228,7 +228,7 @@ static const VMStateDescription vmstate_pit_common = { .minimum_version_id = 2, .pre_save = pit_dispatch_pre_save, .post_load = pit_dispatch_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_V(channels[0].irq_disabled, PITCommonState, 3), VMSTATE_STRUCT_ARRAY(channels, PITCommonState, 3, 2, vmstate_pit_channel, PITChannelState), diff --git a/hw/timer/ibex_timer.c b/hw/timer/ibex_timer.c index d8b8e4e1f6020b814044572edd0beb3f923537d4..4917388d45a83e2e93913115b05bfa5b73caa1de 100644 --- a/hw/timer/ibex_timer.c +++ b/hw/timer/ibex_timer.c @@ -252,7 +252,7 @@ static const VMStateDescription vmstate_ibex_timer = { .version_id = 2, .minimum_version_id = 2, .post_load = ibex_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(timer_ctrl, IbexTimerState), VMSTATE_UINT32(timer_cfg0, IbexTimerState), VMSTATE_UINT32(timer_compare_lower0, IbexTimerState), diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index 640e4399c248c46254ffc0827ec7841f3918ec56..bd625203aaf655a16fcbf3b77e2532c1478c9daa 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -383,7 +383,7 @@ static const VMStateDescription vmstate_imx_timer_epit = { .name = TYPE_IMX_EPIT, .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, IMXEPITState), VMSTATE_UINT32(sr, IMXEPITState), VMSTATE_UINT32(lr, IMXEPITState), diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c index 7222b1b3874afc2930f6610e4bf7105ecadbd808..a8edaec8673a6b175a904b4fa89115a130f5d1b0 100644 --- a/hw/timer/imx_gpt.c +++ b/hw/timer/imx_gpt.c @@ -63,7 +63,7 @@ static const VMStateDescription vmstate_imx_timer_gpt = { .name = TYPE_IMX_GPT, .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(cr, IMXGPTState), VMSTATE_UINT32(pr, IMXGPTState), VMSTATE_UINT32(sr, IMXGPTState), diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c index ee7438f1684fb2828fdcf0fb5e112fc2ec6d87ec..b66aed56eadfb32a8b08761d77eb7615067e7098 100644 --- a/hw/timer/mss-timer.c +++ b/hw/timer/mss-timer.c @@ -260,7 +260,7 @@ static const VMStateDescription vmstate_timers = { .name = "mss-timer-block", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(ptimer, struct Msf2Timer), VMSTATE_UINT32_ARRAY(regs, struct Msf2Timer, R_TIM1_MAX), VMSTATE_END_OF_LIST() @@ -271,7 +271,7 @@ static const VMStateDescription vmstate_mss_timer = { .name = TYPE_MSS_TIMER, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(freq_hz, MSSTimerState), VMSTATE_STRUCT_ARRAY(timers, MSSTimerState, NUM_TIMERS, 0, vmstate_timers, struct Msf2Timer), diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c index a8bd93aeb2cfd761c200b16c21a3428291a8d81f..779c6049fab28372bc769e6c9f9ceb89f574d74e 100644 --- a/hw/timer/npcm7xx_timer.c +++ b/hw/timer/npcm7xx_timer.c @@ -637,7 +637,7 @@ static const VMStateDescription vmstate_npcm7xx_base_timer = { .name = "npcm7xx-base-timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(qtimer, NPCM7xxBaseTimer), VMSTATE_INT64(expires_ns, NPCM7xxBaseTimer), VMSTATE_INT64(remaining_ns, NPCM7xxBaseTimer), @@ -649,7 +649,7 @@ static const VMStateDescription vmstate_npcm7xx_timer = { .name = 
"npcm7xx-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(base_timer, NPCM7xxTimer, 0, vmstate_npcm7xx_base_timer, NPCM7xxBaseTimer), @@ -663,7 +663,7 @@ static const VMStateDescription vmstate_npcm7xx_watchdog_timer = { .name = "npcm7xx-watchdog-timer", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(base_timer, NPCM7xxWatchdogTimer, 0, vmstate_npcm7xx_base_timer, NPCM7xxBaseTimer), @@ -676,7 +676,7 @@ static const VMStateDescription vmstate_npcm7xx_timer_ctrl = { .name = "npcm7xx-timer-ctrl", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(tisr, NPCM7xxTimerCtrlState), VMSTATE_CLOCK(clock, NPCM7xxTimerCtrlState), VMSTATE_STRUCT_ARRAY(timer, NPCM7xxTimerCtrlState, diff --git a/hw/timer/nrf51_timer.c b/hw/timer/nrf51_timer.c index 50c6772383eec6d582aca5b4f5913917d4c91ac9..a33166a8817ea405016ac7b7da3055cf9537f331 100644 --- a/hw/timer/nrf51_timer.c +++ b/hw/timer/nrf51_timer.c @@ -361,7 +361,7 @@ static const VMStateDescription vmstate_nrf51_timer = { .name = TYPE_NRF51_TIMER, .version_id = 1, .post_load = nrf51_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, NRF51TimerState), VMSTATE_INT64(timer_start_ns, NRF51TimerState), VMSTATE_INT64(update_counter_ns, NRF51TimerState), diff --git a/hw/timer/pxa2xx_timer.c b/hw/timer/pxa2xx_timer.c index 2ae5ae3212311eeaf18e56a7b583fb7c8c3477c9..6a7d5551f43b55cf64f3d2106a9382e475f3b20e 100644 --- a/hw/timer/pxa2xx_timer.c +++ b/hw/timer/pxa2xx_timer.c @@ -501,7 +501,7 @@ static const VMStateDescription vmstate_pxa2xx_timer0_regs = { .name = "pxa2xx_timer0", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(value, PXA2xxTimer0), VMSTATE_END_OF_LIST(), }, @@ -511,7 +511,7 @@ static const VMStateDescription vmstate_pxa2xx_timer4_regs = { .name = "pxa2xx_timer4", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(tm, PXA2xxTimer4, 1, vmstate_pxa2xx_timer0_regs, PXA2xxTimer0), VMSTATE_INT32(oldclock, PXA2xxTimer4), @@ -533,7 +533,7 @@ static const VMStateDescription vmstate_pxa2xx_timer_regs = { .version_id = 1, .minimum_version_id = 1, .post_load = pxa25x_timer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(clock, PXA2xxTimerInfo), VMSTATE_INT32(oldclock, PXA2xxTimerInfo), VMSTATE_UINT64(lastload, PXA2xxTimerInfo), diff --git a/hw/timer/renesas_cmt.c b/hw/timer/renesas_cmt.c index 69eabc678a637909dbfd7b29e657491b7ed47333..08832932d2a4d10d4d26cc22031a45e0c66bdc0f 100644 --- a/hw/timer/renesas_cmt.c +++ b/hw/timer/renesas_cmt.c @@ -242,7 +242,7 @@ static const VMStateDescription vmstate_rcmt = { .name = "rx-cmt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(cmstr, RCMTState), VMSTATE_UINT16_ARRAY(cmcr, RCMTState, CMT_CH), VMSTATE_UINT16_ARRAY(cmcnt, RCMTState, CMT_CH), diff --git a/hw/timer/renesas_tmr.c b/hw/timer/renesas_tmr.c index 43b31213bc90c1e07db5aecdb1b6a3d07515e8c8..1d47d0615a496f64a0b2125c65dc69478effeb60 100644 --- a/hw/timer/renesas_tmr.c +++ b/hw/timer/renesas_tmr.c @@ -447,7 +447,7 @@ static const VMStateDescription vmstate_rtmr = { .name = "rx-tmr", .version_id = 1, 
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(tick, RTMRState), VMSTATE_UINT8_ARRAY(tcnt, RTMRState, TMR_CH), VMSTATE_UINT8_ARRAY(tcora, RTMRState, TMR_CH), diff --git a/hw/timer/sifive_pwm.c b/hw/timer/sifive_pwm.c index c664480ccf526b9e34c36b3692dd4198e7e60469..e8610c37dd3fc1c47005d27dfb42ec1a2b48eff0 100644 --- a/hw/timer/sifive_pwm.c +++ b/hw/timer/sifive_pwm.c @@ -395,7 +395,7 @@ static const VMStateDescription vmstate_sifive_pwm = { .name = TYPE_SIFIVE_PWM, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_ARRAY(timer, SiFivePwmState, 4), VMSTATE_UINT64(tick_offset, SiFivePwmState), VMSTATE_UINT32(pwmcfg, SiFivePwmState), diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c index 8c4f6eb06b6c40ddb056171939cea34cf1c75a5a..5507b0145b5d003388ecd5ed6e24c15247b658f8 100644 --- a/hw/timer/slavio_timer.c +++ b/hw/timer/slavio_timer.c @@ -344,7 +344,7 @@ static const VMStateDescription vmstate_timer = { .name ="timer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(limit, CPUTimerState), VMSTATE_UINT32(count, CPUTimerState), VMSTATE_UINT32(counthigh, CPUTimerState), @@ -359,7 +359,7 @@ static const VMStateDescription vmstate_slavio_timer = { .name ="slavio_timer", .version_id = 3, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(cputimer, SLAVIO_TIMERState, MAX_CPUS + 1, 3, vmstate_timer, CPUTimerState), VMSTATE_END_OF_LIST() diff --git a/hw/timer/sse-counter.c b/hw/timer/sse-counter.c index 16c0e8ad15db9ea7ae038003f7dfdf7585729400..daceedf964ef4799e92d61be14a7495422c2735a 100644 --- a/hw/timer/sse-counter.c +++ b/hw/timer/sse-counter.c @@ -442,7 +442,7 @@ static const VMStateDescription sse_counter_vmstate = { .name = "sse-counter", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(clk, SSECounter), VMSTATE_END_OF_LIST() } diff --git a/hw/timer/sse-timer.c b/hw/timer/sse-timer.c index e92e83747d2a088198442e75e4a62613ce9b2920..cb20a9eb79eb748ed3527b90847adedf9658b224 100644 --- a/hw/timer/sse-timer.c +++ b/hw/timer/sse-timer.c @@ -428,7 +428,7 @@ static const VMStateDescription sse_timer_vmstate = { .name = "sse-timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, SSETimer), VMSTATE_UINT32(cntfrq, SSETimer), VMSTATE_UINT32(cntp_ctl, SSETimer), diff --git a/hw/timer/stellaris-gptm.c b/hw/timer/stellaris-gptm.c index fd71c79be4878dc5a1b26ed75b056ba688b680c0..f28958cefcac9bf7a89852e8b0694b4ff246418f 100644 --- a/hw/timer/stellaris-gptm.c +++ b/hw/timer/stellaris-gptm.c @@ -250,7 +250,7 @@ static const VMStateDescription vmstate_stellaris_gptm = { .name = "stellaris_gptm", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(config, gptm_state), VMSTATE_UINT32_ARRAY(mode, gptm_state, 2), VMSTATE_UINT32(control, gptm_state), diff --git a/hw/timer/stm32f2xx_timer.c b/hw/timer/stm32f2xx_timer.c index ba8694dcd349c73e16cedb2c84abb4ee3a1ad465..de4208b1a61a099c40e8da00b9d52e1213497337 100644 --- a/hw/timer/stm32f2xx_timer.c +++ b/hw/timer/stm32f2xx_timer.c @@ -274,7 +274,7 @@ static const VMStateDescription vmstate_stm32f2xx_timer = { .name = TYPE_STM32F2XX_TIMER, .version_id = 1, 
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(tick_offset, STM32F2XXTimerState), VMSTATE_UINT32(tim_cr1, STM32F2XXTimerState), VMSTATE_UINT32(tim_cr2, STM32F2XXTimerState), diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c index ea930da545aff358d41c4d4c41676b87151d6bf5..5cd5a2533b8d87ab7a7ad046c31c953d9a5b75a7 100644 --- a/hw/tpm/tpm_crb.c +++ b/hw/tpm/tpm_crb.c @@ -220,7 +220,7 @@ static int tpm_crb_pre_save(void *opaque) static const VMStateDescription vmstate_tpm_crb = { .name = "tpm-crb", .pre_save = tpm_crb_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CRBState, TPM_CRB_R_MAX), VMSTATE_END_OF_LIST(), } diff --git a/hw/tpm/tpm_spapr.c b/hw/tpm/tpm_spapr.c index dea7b1333b802adc66c3cec88c912dffb1e19939..e084e987e6e6534c7fefe5dc74802fb0d406fe4d 100644 --- a/hw/tpm/tpm_spapr.c +++ b/hw/tpm/tpm_spapr.c @@ -353,7 +353,7 @@ static const VMStateDescription vmstate_spapr_vtpm = { .name = "tpm-spapr", .pre_save = tpm_spapr_pre_save, .post_load = tpm_spapr_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_SPAPR_VIO(vdev, SpaprTpmState), VMSTATE_UINT8(state, SpaprTpmState), diff --git a/hw/tpm/tpm_tis_common.c b/hw/tpm/tpm_tis_common.c index 279ce436b545fc94c41dbb25e04042d7b000be3d..1bfa28bfd9533eb3587284dd546c9442e80b4273 100644 --- a/hw/tpm/tpm_tis_common.c +++ b/hw/tpm/tpm_tis_common.c @@ -879,7 +879,7 @@ int tpm_tis_pre_save(TPMState *s) const VMStateDescription vmstate_locty = { .name = "tpm-tis/locty", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(state, TPMLocality), VMSTATE_UINT32(inte, TPMLocality), VMSTATE_UINT32(ints, TPMLocality), diff --git a/hw/tpm/tpm_tis_i2c.c b/hw/tpm/tpm_tis_i2c.c index 4ecea7fa3e9d646a2f09e9f21ea34a36589a289a..4bb09655b40285eed1617ae336a90fd91c0c5384 100644 --- a/hw/tpm/tpm_tis_i2c.c +++ b/hw/tpm/tpm_tis_i2c.c @@ -115,7 +115,7 @@ static const VMStateDescription vmstate_tpm_tis_i2c = { .version_id = 0, .pre_save = tpm_tis_i2c_pre_save, .post_load = tpm_tis_i2c_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateI2C), VMSTATE_UINT16(state.rw_offset, TPMStateI2C), VMSTATE_UINT8(state.active_locty, TPMStateI2C), diff --git a/hw/tpm/tpm_tis_isa.c b/hw/tpm/tpm_tis_isa.c index 0367401586eb9fe89d2579c437f19a0744505e9a..8887b3c9c49fbc382b555a178e7cc3b9068f3cd4 100644 --- a/hw/tpm/tpm_tis_isa.c +++ b/hw/tpm/tpm_tis_isa.c @@ -53,7 +53,7 @@ static const VMStateDescription vmstate_tpm_tis_isa = { .name = "tpm-tis", .version_id = 0, .pre_save = tpm_tis_pre_save_isa, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateISA), VMSTATE_UINT16(state.rw_offset, TPMStateISA), VMSTATE_UINT8(state.active_locty, TPMStateISA), diff --git a/hw/tpm/tpm_tis_sysbus.c b/hw/tpm/tpm_tis_sysbus.c index 2fc550f119704a0bcf229efa68b8fca5f14040f5..941f7f7f62c7538d2678b2e2e184ad9e19b14111 100644 --- a/hw/tpm/tpm_tis_sysbus.c +++ b/hw/tpm/tpm_tis_sysbus.c @@ -52,7 +52,7 @@ static const VMStateDescription vmstate_tpm_tis_sysbus = { .name = "tpm-tis", .version_id = 0, .pre_save = tpm_tis_pre_save_sysbus, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(state.buffer, TPMStateSysBus), VMSTATE_UINT16(state.rw_offset, TPMStateSysBus), VMSTATE_UINT8(state.active_locty, TPMStateSysBus), diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 
92d6ed5626141f8622c55bedb9dc23326b3338c8..59c39945ddd7befa83cb9f85c87b412360bf2f05 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -69,7 +69,7 @@ const VMStateDescription vmstate_usb_device = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_device_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, USBDevice), VMSTATE_INT32(state, USBDevice), VMSTATE_INT32(remote_wakeup, USBDevice), diff --git a/hw/usb/ccid-card-passthru.c b/hw/usb/ccid-card-passthru.c index 07ee42f304f9cd30d64fe0ec32570cb01978bffa..a515703904208547c10f37d0f0f9b2405715ca39 100644 --- a/hw/usb/ccid-card-passthru.c +++ b/hw/usb/ccid-card-passthru.c @@ -378,7 +378,7 @@ static const VMStateDescription passthru_vmstate = { .name = "ccid-card-passthru", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(vscard_in_data, PassthruState), VMSTATE_UINT32(vscard_in_pos, PassthruState), VMSTATE_UINT32(vscard_in_hdr, PassthruState), diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c index bdd6d1ffafe432b75a5321305c5d0193e3e27bb1..9e358c934efda3a605b077f76d952ca570b76718 100644 --- a/hw/usb/dev-hid.c +++ b/hw/usb/dev-hid.c @@ -756,7 +756,7 @@ static const VMStateDescription vmstate_usb_ptr = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_ptr_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHIDState), VMSTATE_HID_POINTER_DEVICE(hid, USBHIDState), VMSTATE_END_OF_LIST() @@ -767,7 +767,7 @@ static const VMStateDescription vmstate_usb_kbd = { .name = "usb-kbd", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHIDState), VMSTATE_HID_KEYBOARD_DEVICE(hid, USBHIDState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 5703e0e826ec6484c5b7e92b248656fd382414eb..06e9537d03562befd95fb8f278c3ee41060a232e 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -623,7 +623,7 @@ static const VMStateDescription vmstate_usb_hub_port = { .name = "usb-hub-port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(wPortStatus, USBHubPort), VMSTATE_UINT16(wPortChange, USBHubPort), VMSTATE_END_OF_LIST() @@ -642,7 +642,7 @@ static const VMStateDescription vmstate_usb_hub_port_timer = { .version_id = 1, .minimum_version_id = 1, .needed = usb_hub_port_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(port_timer, USBHubState), VMSTATE_END_OF_LIST() }, @@ -652,13 +652,13 @@ static const VMStateDescription vmstate_usb_hub = { .name = "usb-hub", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHubState), VMSTATE_STRUCT_ARRAY(ports, USBHubState, MAX_PORTS, 0, vmstate_usb_hub_port, USBHubPort), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_usb_hub_port_timer, NULL } diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c index 1cac1cd4350545996fad0e2c5414282ba80184f3..7e4a0765ae678263d3ecdf1f98499d2601756f97 100644 --- a/hw/usb/dev-mtp.c +++ b/hw/usb/dev-mtp.c @@ -2072,7 +2072,7 @@ static const VMStateDescription vmstate_usb_mtp = { .unmigratable = 1, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_USB_DEVICE(dev, MTPState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index be0a4fc3bc4dc63dab1f6759f6cbf9fe496c7694..c0d63e04251a2fbe9dc06094a5eb3aa66dcbee14 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -1367,7 +1367,7 @@ static const VMStateDescription bulk_in_vmstate = { .name = "CCID BulkIn state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BUFFER(data, BulkIn), VMSTATE_UINT32(len, BulkIn), VMSTATE_UINT32(pos, BulkIn), @@ -1379,7 +1379,7 @@ static const VMStateDescription answer_vmstate = { .name = "CCID Answer state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(slot, Answer), VMSTATE_UINT8(seq, Answer), VMSTATE_END_OF_LIST() @@ -1390,7 +1390,7 @@ static const VMStateDescription usb_device_vmstate = { .name = "usb_device", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(addr, USBDevice), VMSTATE_BUFFER(setup_buf, USBDevice), VMSTATE_BUFFER(data_buf, USBDevice), @@ -1404,7 +1404,7 @@ static const VMStateDescription ccid_vmstate = { .minimum_version_id = 1, .post_load = ccid_post_load, .pre_save = ccid_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(dev, USBCCIDState, 1, usb_device_vmstate, USBDevice), VMSTATE_UINT8(debug, USBCCIDState), VMSTATE_BUFFER(bulk_out_data, USBCCIDState), diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c index a496c811a713ad2faae7028ff25f5d6dbd09aa06..341e505bd0a5703cc1f3aadfcc8efe4b8a85deca 100644 --- a/hw/usb/dev-storage.c +++ b/hw/usb/dev-storage.c @@ -572,7 +572,7 @@ static const VMStateDescription vmstate_usb_msd = { .name = "usb-storage", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, MSDState), VMSTATE_UINT32(mode, MSDState), VMSTATE_UINT32(scsi_len, MSDState), diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index f013ded91ebe9bc7cf6f3b8e9e26752327b58c0c..1804cb67997f0e823658541ad70b4a0d365f5b81 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -947,7 +947,7 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) static const VMStateDescription vmstate_usb_uas = { .name = "usb-uas", .unmigratable = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(dev, UASDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index a0c4e782b2a2e361317ddd49ecdf968903d67ef4..222eef82a552ea65d87526ce134713174f7135dd 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -1391,7 +1391,7 @@ static const VMStateDescription vmstate_dwc2_state_packet = { .name = "dwc2/packet", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(devadr, DWC2Packet), VMSTATE_UINT32(epnum, DWC2Packet), VMSTATE_UINT32(epdir, DWC2Packet), @@ -1411,7 +1411,7 @@ const VMStateDescription vmstate_dwc2_state = { .name = "dwc2", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(glbreg, DWC2State, DWC2_GLBREG_SIZE / sizeof(uint32_t)), VMSTATE_UINT32_ARRAY(fszreg, DWC2State, diff --git a/hw/usb/hcd-dwc3.c b/hw/usb/hcd-dwc3.c index 279263489e46d1c2cb8b9660a43907b42ffe14f1..09d8e25b971e139708b489a2a9740560d776ca58 
100644 --- a/hw/usb/hcd-dwc3.c +++ b/hw/usb/hcd-dwc3.c @@ -648,7 +648,7 @@ static void usb_dwc3_init(Object *obj) static const VMStateDescription vmstate_usb_dwc3 = { .name = "usb-dwc3", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, USBDWC3, USB_DWC3_R_MAX), VMSTATE_UINT8(cfg.mode, USBDWC3), VMSTATE_UINT32(cfg.dwc_usb3_user, USBDWC3), diff --git a/hw/usb/hcd-ehci-pci.c b/hw/usb/hcd-ehci-pci.c index 345444a5739594223061d997717f9ae25c197447..0b26db74d89da9675a89f464c6950ae4b37bb3da 100644 --- a/hw/usb/hcd-ehci-pci.c +++ b/hw/usb/hcd-ehci-pci.c @@ -144,7 +144,7 @@ static const VMStateDescription vmstate_ehci_pci = { .name = "ehci", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pcidev, EHCIPCIState), VMSTATE_STRUCT(ehci, EHCIPCIState, 2, vmstate_ehci, EHCIState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c index a12e21884894ce0d4a54a5f1eb40d93d92868117..bfb774504cbe50579632fc064266f448e49e7e18 100644 --- a/hw/usb/hcd-ehci-sysbus.c +++ b/hw/usb/hcd-ehci-sysbus.c @@ -25,7 +25,7 @@ static const VMStateDescription vmstate_ehci_sysbus = { .name = "ehci-sysbus", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(ehci, EHCISysBusState, 2, vmstate_ehci, EHCIState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 19b4534c20c10d78d5e3d1fdca7aefc168e9265a..870c72cb5962a0baeed625724a137d2e7a9f5720 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -2451,7 +2451,7 @@ static void usb_ehci_vm_state_change(void *opaque, bool running, RunState state) * USB-devices which have async handled packages have a packet in the * ep queue to match the completion with. 
*/ - if (state == RUN_STATE_RUNNING) { + if (running) { ehci_advance_async_state(ehci); } @@ -2473,7 +2473,7 @@ const VMStateDescription vmstate_ehci = { .minimum_version_id = 1, .pre_save = usb_ehci_pre_save, .post_load = usb_ehci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* mmio registers */ VMSTATE_UINT32(usbcmd, EHCIState), VMSTATE_UINT32(usbsts, EHCIState), diff --git a/hw/usb/hcd-ohci-pci.c b/hw/usb/hcd-ohci-pci.c index 6b630d35a7faf48e7b0c79736836cd38fee5df4d..33ed9b6f5a52f14a206d79abfd8347482c76e6ae 100644 --- a/hw/usb/hcd-ohci-pci.c +++ b/hw/usb/hcd-ohci-pci.c @@ -120,7 +120,7 @@ static const VMStateDescription vmstate_ohci = { .name = "ohci", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, OHCIPCIState), VMSTATE_STRUCT(state, OHCIPCIState, 1, vmstate_ohci_state, OHCIState), VMSTATE_END_OF_LIST() diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 7ff1b65ced158d6c6dbb8628c38643e9b119c3c8..d73b53f33c83a12831ca4677d3776adcb85198bc 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1984,7 +1984,7 @@ static const VMStateDescription vmstate_ohci_state_port = { .name = "ohci-core/port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(ctrl, OHCIPort), VMSTATE_END_OF_LIST() }, @@ -2002,7 +2002,7 @@ static const VMStateDescription vmstate_ohci_eof_timer = { .version_id = 1, .minimum_version_id = 1, .needed = ohci_eof_timer_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(eof_timer, OHCIState), VMSTATE_END_OF_LIST() }, @@ -2012,7 +2012,7 @@ const VMStateDescription vmstate_ohci_state = { .name = "ohci-core", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(sof_time, OHCIState), VMSTATE_UINT32(ctl, OHCIState), VMSTATE_UINT32(status, OHCIState), @@ -2049,7 +2049,7 @@ const VMStateDescription vmstate_ohci_state = { VMSTATE_BOOL(async_complete, OHCIState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_ohci_eof_timer, NULL } diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 77baaa7a6b1099927c420decfc956e6f74429349..7d3c026daeda374420e7a39f0d57854c2d443928 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -339,7 +339,7 @@ static const VMStateDescription vmstate_uhci_port = { .name = "uhci port", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(ctrl, UHCIPort), VMSTATE_END_OF_LIST() } @@ -361,7 +361,7 @@ static const VMStateDescription vmstate_uhci = { .version_id = 3, .minimum_version_id = 1, .post_load = uhci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, UHCIState), VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState, NULL), VMSTATE_STRUCT_ARRAY(ports, UHCIState, NB_PORTS, 1, diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c index 643d4643e4d6cdfe660c6cc169874bd37799bf5a..4423983308af3cf70d44cf63b6b0ea4fcff34d82 100644 --- a/hw/usb/hcd-xhci-pci.c +++ b/hw/usb/hcd-xhci-pci.c @@ -178,7 +178,7 @@ static const VMStateDescription vmstate_xhci_pci = { .name = "xhci", .version_id = 1, .post_load = xhci_pci_vmstate_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, 
XHCIPciState), VMSTATE_MSIX(parent_obj, XHCIPciState), VMSTATE_STRUCT(xhci, XHCIPciState, 1, vmstate_xhci, XHCIState), diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c index faf57b47975dc53dc43bd8fb1c6bd06451eb085b..d93bae31f936a5acae38047714161968ba16f9c4 100644 --- a/hw/usb/hcd-xhci-sysbus.c +++ b/hw/usb/hcd-xhci-sysbus.c @@ -91,7 +91,7 @@ static Property xhci_sysbus_props[] = { static const VMStateDescription vmstate_xhci_sysbus = { .name = "xhci-sysbus", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT(xhci, XHCISysbusState, 1, vmstate_xhci, XHCIState), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c index 4b60114207bde0a733f35574a71cfbe31d6b0701..ad40232eb695d8df3f3eb115c1b09ff01a18a92c 100644 --- a/hw/usb/hcd-xhci.c +++ b/hw/usb/hcd-xhci.c @@ -3522,7 +3522,7 @@ static int usb_xhci_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_xhci_ring = { .name = "xhci-ring", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(dequeue, XHCIRing), VMSTATE_BOOL(ccs, XHCIRing), VMSTATE_END_OF_LIST() @@ -3532,7 +3532,7 @@ static const VMStateDescription vmstate_xhci_ring = { static const VMStateDescription vmstate_xhci_port = { .name = "xhci-port", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(portsc, XHCIPort), VMSTATE_END_OF_LIST() } @@ -3541,7 +3541,7 @@ static const VMStateDescription vmstate_xhci_port = { static const VMStateDescription vmstate_xhci_slot = { .name = "xhci-slot", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(enabled, XHCISlot), VMSTATE_BOOL(addressed, XHCISlot), VMSTATE_END_OF_LIST() @@ -3551,7 +3551,7 @@ static const VMStateDescription vmstate_xhci_slot = { static const VMStateDescription vmstate_xhci_event = { .name = "xhci-event", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(type, XHCIEvent), VMSTATE_UINT32(ccode, XHCIEvent), VMSTATE_UINT64(ptr, XHCIEvent), @@ -3571,7 +3571,7 @@ static bool xhci_er_full(void *opaque, int version_id) static const VMStateDescription vmstate_xhci_intr = { .name = "xhci-intr", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* registers */ VMSTATE_UINT32(iman, XHCIInterrupter), VMSTATE_UINT32(imod, XHCIInterrupter), @@ -3604,7 +3604,7 @@ const VMStateDescription vmstate_xhci = { .name = "xhci-core", .version_id = 1, .post_load = usb_xhci_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_UINT32(ports, XHCIState, numports, 1, vmstate_xhci_port, XHCIPort), VMSTATE_STRUCT_VARRAY_UINT32(slots, XHCIState, numslots, 1, diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index d7060a42d57e072303e509fde25eae91318847e6..80122b41259a1bb01c57e6a14b322ecf151c5c89 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1753,7 +1753,7 @@ static const VMStateDescription vmstate_usb_host = { .version_id = 1, .minimum_version_id = 1, .post_load = usb_host_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_USB_DEVICE(parent_obj, USBHostDevice), VMSTATE_END_OF_LIST() } diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c index 1a97b36a1194f6c4ad84ec703ef037c995db94fc..18917d7599ec659c1ad861f1e5e5811d2782b79e 100644 --- a/hw/usb/imx-usb-phy.c +++ b/hw/usb/imx-usb-phy.c @@ -20,7 +20,7 @@ static const 
VMStateDescription vmstate_imx_usbphy = { .name = TYPE_IMX_USBPHY, .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(usbphy, IMXUSBPHYState, USBPHY_MAX), VMSTATE_END_OF_LIST() }, diff --git a/hw/usb/meson.build b/hw/usb/meson.build index e94149ebdeb67986ab3f553a9011240068e76e59..2c13c528785eba8b20317474e50b1307a6abc47a 100644 --- a/hw/usb/meson.build +++ b/hw/usb/meson.build @@ -44,7 +44,9 @@ system_ss.add(when: 'CONFIG_USB_STORAGE_UAS', if_true: files('dev-uas.c')) system_ss.add(when: 'CONFIG_USB_AUDIO', if_true: files('dev-audio.c')) system_ss.add(when: 'CONFIG_USB_SERIAL', if_true: files('dev-serial.c')) system_ss.add(when: 'CONFIG_USB_NETWORK', if_true: files('dev-network.c')) -system_ss.add(when: ['CONFIG_POSIX', 'CONFIG_USB_STORAGE_MTP'], if_true: files('dev-mtp.c')) +if host_os != 'windows' + system_ss.add(when: 'CONFIG_USB_STORAGE_MTP', if_true: files('dev-mtp.c')) +endif # smartcard system_ss.add(when: 'CONFIG_USB_SMARTCARD', if_true: files('dev-smartcard-reader.c')) @@ -58,7 +60,9 @@ endif # U2F system_ss.add(when: 'CONFIG_USB_U2F', if_true: files('u2f.c')) -system_ss.add(when: ['CONFIG_LINUX', 'CONFIG_USB_U2F'], if_true: [libudev, files('u2f-passthru.c')]) +if host_os == 'linux' + system_ss.add(when: 'CONFIG_USB_U2F', if_true: [libudev, files('u2f-passthru.c')]) +endif if u2f.found() system_ss.add(when: 'CONFIG_USB_U2F', if_true: [u2f, files('u2f-emulated.c')]) endif diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index c9893df86778096cb992ad1cebbe02018f572bcf..0f2dd2e5040e355e2bc8155262e02ca9bb740300 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -1403,7 +1403,7 @@ static void usbredir_vm_state_change(void *priv, bool running, RunState state) { USBRedirDevice *dev = priv; - if (state == RUN_STATE_RUNNING && dev->parser != NULL) { + if (running && dev->parser != NULL) { usbredirparser_do_write(dev->parser); /* Flush any pending writes */ } } @@ -2373,7 +2373,7 @@ static const VMStateDescription usbredir_bulk_receiving_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = usbredir_bulk_receiving_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(bulk_receiving_started, struct endp_data), VMSTATE_END_OF_LIST() } @@ -2391,7 +2391,7 @@ static const VMStateDescription usbredir_stream_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = usbredir_stream_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(max_streams, struct endp_data), VMSTATE_END_OF_LIST() } @@ -2401,7 +2401,7 @@ static const VMStateDescription usbredir_ep_vmstate = { .name = "usb-redir-ep", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(type, struct endp_data), VMSTATE_UINT8(interval, struct endp_data), VMSTATE_UINT8(interface, struct endp_data), @@ -2424,7 +2424,7 @@ static const VMStateDescription usbredir_ep_vmstate = { VMSTATE_INT32(bufpq_target_size, struct endp_data), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &usbredir_bulk_receiving_vmstate, &usbredir_stream_vmstate, NULL @@ -2481,7 +2481,7 @@ static const VMStateDescription usbredir_ep_packet_id_queue_vmstate = { .name = "usb-redir-packet-id-queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "queue", .version_id = 0, @@ -2501,7 
+2501,7 @@ static const VMStateDescription usbredir_device_info_vmstate = {
.name = "usb-redir-device-info",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT8(speed, struct usb_redir_device_connect_header),
VMSTATE_UINT8(device_class, struct usb_redir_device_connect_header),
VMSTATE_UINT8(device_subclass, struct usb_redir_device_connect_header),
@@ -2520,7 +2520,7 @@ static const VMStateDescription usbredir_interface_info_vmstate = {
.name = "usb-redir-interface-info",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32(interface_count,
struct usb_redir_interface_info_header),
VMSTATE_UINT8_ARRAY(interface,
@@ -2543,7 +2543,7 @@ static const VMStateDescription usbredir_vmstate = {
.minimum_version_id = 1,
.pre_save = usbredir_pre_save,
.post_load = usbredir_post_load,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_USB_DEVICE(dev, USBRedirDevice),
VMSTATE_TIMER_PTR(attach_timer, USBRedirDevice),
{
diff --git a/hw/usb/u2f-passthru.c b/hw/usb/u2f-passthru.c
index fc93429c9c017a0eb296292ea8e43a0f5ddf4bf7..b7025d303d07c817d212b46fef60d5a109768d68 100644
--- a/hw/usb/u2f-passthru.c
+++ b/hw/usb/u2f-passthru.c
@@ -512,7 +512,7 @@ static const VMStateDescription u2f_passthru_vmstate = {
.version_id = 1,
.minimum_version_id = 1,
.post_load = u2f_passthru_post_load,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_U2F_KEY(base, U2FPassthruState),
VMSTATE_END_OF_LIST()
}
diff --git a/hw/usb/u2f.c b/hw/usb/u2f.c
index 56001249a449a480eed7cf9c21108c1aea432cd7..1fb59cf404fb03575ac2506c5a382a3ccd102201 100644
--- a/hw/usb/u2f.c
+++ b/hw/usb/u2f.c
@@ -305,7 +305,7 @@ const VMStateDescription vmstate_u2f_key = {
.name = "u2f-key",
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_USB_DEVICE(dev, U2FKeyState),
VMSTATE_UINT8(idle, U2FKeyState),
VMSTATE_UINT8_2DARRAY(pending_in, U2FKeyState,
diff --git a/hw/usb/xlnx-versal-usb2-ctrl-regs.c b/hw/usb/xlnx-versal-usb2-ctrl-regs.c
index 1c094aa1a63fefcd36a2d7deba2c881ed9fd96ab..6fc453817eaf5ea3dc5f13e80ba4f51bcd036891 100644
--- a/hw/usb/xlnx-versal-usb2-ctrl-regs.c
+++ b/hw/usb/xlnx-versal-usb2-ctrl-regs.c
@@ -196,7 +196,7 @@ static const VMStateDescription vmstate_usb2_ctrl_regs = {
.name = TYPE_XILINX_VERSAL_USB2_CTRL_REGS,
.version_id = 1,
.minimum_version_id = 1,
- .fields = (VMStateField[]) {
+ .fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, VersalUsb2CtrlRegs, USB2_REGS_R_MAX),
VMSTATE_END_OF_LIST(),
}
diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c
index bbf69ff55ae8a922d0ab6d6f966a9a2283cdd2a3..e157aa1ff79c1ad96b9102d54d4c11da1bd719ba 100644
--- a/hw/vfio/ap.c
+++ b/hw/vfio/ap.c
@@ -11,10 +11,12 @@ */
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include
#include
#include "qapi/error.h"
#include "hw/vfio/vfio-common.h"
+#include "sysemu/iommufd.h"
#include "hw/s390x/ap-device.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
@@ -158,18 +160,9 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp)
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev);
VFIODevice *vbasedev = &vapdev->vdev;
- vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
- vbasedev->ops = &vfio_ap_ops;
- vbasedev->type = VFIO_DEVICE_TYPE_AP;
- vbasedev->dev = dev;
-
- /*
- * vfio-ap devices operate in a way compatible with discarding of
- * memory in RAM blocks, as no pages are pinned in the host.
- * This needs to be set before vfio_get_device() for vfio common to
- * handle ram_block_discard_disable().
- */
- vapdev->vdev.ram_block_discard_allowed = true;
+ if (vfio_device_get_name(vbasedev, errp) < 0) {
+ return;
+ }
ret = vfio_attach_device(vbasedev->name, vbasedev, &address_space_memory, errp);
@@ -204,6 +197,10 @@ static void vfio_ap_unrealize(DeviceState *dev)
static Property vfio_ap_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev),
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOAPDevice, vdev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
@@ -224,11 +221,36 @@ static const VMStateDescription vfio_ap_vmstate = {
.unmigratable = 1,
};
+static void vfio_ap_instance_init(Object *obj)
+{
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(obj);
+ VFIODevice *vbasedev = &vapdev->vdev;
+
+ /*
+ * vfio-ap devices operate in a way compatible with discarding of
+ * memory in RAM blocks, as no pages are pinned in the host.
+ * This needs to be set before vfio_get_device() for vfio common to
+ * handle ram_block_discard_disable().
+ */
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_AP, &vfio_ap_ops,
+ DEVICE(vapdev), true);
+}
+
+#ifdef CONFIG_IOMMUFD
+static void vfio_ap_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_AP_DEVICE(obj)->vdev, str, errp);
+}
+#endif
+
static void vfio_ap_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, vfio_ap_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_ap_set_fd);
+#endif
dc->vmsd = &vfio_ap_vmstate;
dc->desc = "VFIO-based AP device assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
@@ -243,6 +265,7 @@ static const TypeInfo vfio_ap_info = {
.name = TYPE_VFIO_AP_DEVICE,
.parent = TYPE_AP_DEVICE,
.instance_size = sizeof(VFIOAPDevice),
+ .instance_init = vfio_ap_instance_init,
.class_init = vfio_ap_class_init,
};
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index d857bb8d0fe4ff71b3cf635d6da43eccbe593d5f..90e4a534371684c08e112364e1537eb8979f73f4 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -15,12 +15,14 @@ */
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include
#include
#include
#include "qapi/error.h"
#include "hw/vfio/vfio-common.h"
+#include "sysemu/iommufd.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/vfio-ccw.h"
#include "hw/qdev-properties.h"
@@ -588,22 +590,9 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
}
}
- vbasedev->ops = &vfio_ccw_ops;
- vbasedev->type = VFIO_DEVICE_TYPE_CCW;
- vbasedev->name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
- vcdev->cdev.hostid.ssid,
- vcdev->cdev.hostid.devid);
- vbasedev->dev = dev;
-
- /*
- * All vfio-ccw devices are believed to operate in a way compatible with
- * discarding of memory in RAM blocks, ie. pages pinned in the host are
- * in the current working set of the guest driver and therefore never
- * overlap e.g., with pages available to the guest balloon driver. This
- * needs to be set before vfio_get_device() for vfio common to handle
- * ram_block_discard_disable().
- */
- vbasedev->ram_block_discard_allowed = true;
+ if (vfio_device_get_name(vbasedev, errp) < 0) {
+ return;
+ }
ret = vfio_attach_device(cdev->mdevid, vbasedev, &address_space_memory, errp);
@@ -677,6 +666,10 @@ static void vfio_ccw_unrealize(DeviceState *dev)
static Property vfio_ccw_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
@@ -685,12 +678,39 @@ static const VMStateDescription vfio_ccw_vmstate = {
.unmigratable = 1,
};
+static void vfio_ccw_instance_init(Object *obj)
+{
+ VFIOCCWDevice *vcdev = VFIO_CCW(obj);
+ VFIODevice *vbasedev = &vcdev->vdev;
+
+ /*
+ * All vfio-ccw devices are believed to operate in a way compatible with
+ * discarding of memory in RAM blocks, ie. pages pinned in the host are
+ * in the current working set of the guest driver and therefore never
+ * overlap e.g., with pages available to the guest balloon driver. This
+ * needs to be set before vfio_get_device() for vfio common to handle
+ * ram_block_discard_disable().
+ */
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
+ DEVICE(vcdev), true);
+}
+
+#ifdef CONFIG_IOMMUFD
+static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
+}
+#endif
+
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
device_class_set_props(dc, vfio_ccw_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
+#endif
dc->vmsd = &vfio_ccw_vmstate;
dc->desc = "VFIO-based subchannel assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
@@ -708,6 +728,7 @@ static const TypeInfo vfio_ccw_info = {
.name = TYPE_VFIO_CCW,
.parent = TYPE_S390_CCW,
.instance_size = sizeof(VFIOCCWDevice),
+ .instance_init = vfio_ccw_instance_init,
.class_init = vfio_ccw_class_init,
};
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index e70fdf5e0cacf756fb8dcce2bfd906c65ae71684..0b3352f2a9d278f252a460e339732f1ccac0a96d 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -73,7 +73,7 @@ bool vfio_mig_active(void)
return false;
}
- QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
if (vbasedev->migration_blocker) {
return false;
}
@@ -94,7 +94,7 @@ static bool vfio_multiple_devices_migration_is_supported(void)
unsigned int device_num = 0;
bool all_support_p2p = true;
- QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) {
if (vbasedev->migration) {
device_num++;
@@ -145,7 +145,7 @@ void vfio_unblock_multiple_devices_migration(void)
bool vfio_viommu_preset(VFIODevice *vbasedev)
{
- return vbasedev->container->space->as != &address_space_memory;
+ return vbasedev->bcontainer->space->as != &address_space_memory;
}
static void vfio_set_migration_error(int err)
@@ -177,7 +177,7 @@ bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}
-static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
+static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
MigrationState *ms = migrate_get_current();
@@ -187,7 +187,7 @@ static 
bool vfio_devices_all_dirty_tracking(VFIOContainer *container) return false; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { VFIOMigration *migration = vbasedev->migration; if (!migration) { @@ -203,11 +203,11 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container) return true; } -bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container) +bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer) { VFIODevice *vbasedev; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (!vbasedev->dirty_pages_supported) { return false; } @@ -220,7 +220,8 @@ bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container) * Check if all VFIO devices are running and migration is active, which is * essentially equivalent to the migration being in pre-copy phase. */ -bool vfio_devices_all_running_and_mig_active(VFIOContainer *container) +bool +vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer) { VFIODevice *vbasedev; @@ -228,7 +229,7 @@ bool vfio_devices_all_running_and_mig_active(VFIOContainer *container) return false; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { VFIOMigration *migration = vbasedev->migration; if (!migration) { @@ -292,7 +293,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr, static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) { VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); - VFIOContainer *container = giommu->container; + VFIOContainerBase *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; void *vaddr; int ret; @@ -322,21 +323,22 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) * of vaddr will always be there, even if the memory object is * destroyed and its backing memory munmap-ed. */ - ret = vfio_dma_map(container, iova, - iotlb->addr_mask + 1, vaddr, - read_only); + ret = vfio_container_dma_map(bcontainer, iova, + iotlb->addr_mask + 1, vaddr, + read_only); if (ret) { - error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%s)", - container, iova, + bcontainer, iova, iotlb->addr_mask + 1, vaddr, ret, strerror(-ret)); } } else { - ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb); + ret = vfio_container_dma_unmap(bcontainer, iova, + iotlb->addr_mask + 1, iotlb); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, + bcontainer, iova, iotlb->addr_mask + 1, ret, strerror(-ret)); vfio_set_migration_error(ret); } @@ -350,14 +352,15 @@ static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; const hwaddr size = int128_get64(section->size); const hwaddr iova = section->offset_within_address_space; int ret; /* Unmap with a single call. 
*/ - ret = vfio_dma_unmap(vrdl->container, iova, size , NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL); if (ret) { - error_report("%s: vfio_dma_unmap() failed: %s", __func__, + error_report("%s: vfio_container_dma_unmap() failed: %s", __func__, strerror(-ret)); } } @@ -367,6 +370,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); + VFIOContainerBase *bcontainer = vrdl->bcontainer; const hwaddr end = section->offset_within_region + int128_get64(section->size); hwaddr start, next, iova; @@ -385,8 +389,8 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, section->offset_within_address_space; vaddr = memory_region_get_ram_ptr(section->mr) + start; - ret = vfio_dma_map(vrdl->container, iova, next - start, - vaddr, section->readonly); + ret = vfio_container_dma_map(bcontainer, iova, next - start, + vaddr, section->readonly); if (ret) { /* Rollback */ vfio_ram_discard_notify_discard(rdl, section); @@ -396,7 +400,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, return 0; } -static void vfio_register_ram_discard_listener(VFIOContainer *container, +static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); @@ -409,7 +413,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE)); vrdl = g_new0(VFIORamDiscardListener, 1); - vrdl->container = container; + vrdl->bcontainer = bcontainer; vrdl->mr = section->mr; vrdl->offset_within_address_space = section->offset_within_address_space; vrdl->size = int128_get64(section->size); @@ -417,14 +421,14 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, section->mr); g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity)); - g_assert(container->pgsizes && - vrdl->granularity >= 1ULL << ctz64(container->pgsizes)); + g_assert(bcontainer->pgsizes && + vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes)); ram_discard_listener_init(&vrdl->listener, vfio_ram_discard_notify_populate, vfio_ram_discard_notify_discard, true); ram_discard_manager_register_listener(rdm, &vrdl->listener, section); - QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next); + QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next); /* * Sanity-check if we have a theoretically problematic setup where we could @@ -439,7 +443,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, * number of sections in the address space we could have over time, * also consuming DMA mappings. */ - if (container->dma_max_mappings) { + if (bcontainer->dma_max_mappings) { unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512; #ifdef CONFIG_KVM @@ -448,7 +452,7 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, } #endif - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { hwaddr start, end; start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space, @@ -460,23 +464,23 @@ static void vfio_register_ram_discard_listener(VFIOContainer *container, } if (vrdl_mappings + max_memslots - vrdl_count > - container->dma_max_mappings) { + bcontainer->dma_max_mappings) { warn_report("%s: possibly running out of DMA mappings. E.g., try" " increasing the 'block-size' of virtio-mem devies." 
" Maximum possible DMA mappings: %d, Maximum possible" - " memslots: %d", __func__, container->dma_max_mappings, + " memslots: %d", __func__, bcontainer->dma_max_mappings, max_memslots); } } } -static void vfio_unregister_ram_discard_listener(VFIOContainer *container, +static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); VFIORamDiscardListener *vrdl = NULL; - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { if (vrdl->mr == section->mr && vrdl->offset_within_address_space == section->offset_within_address_space) { @@ -538,7 +542,7 @@ static bool vfio_listener_valid_section(MemoryRegionSection *section, return true; } -static bool vfio_get_section_iova_range(VFIOContainer *container, +static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, MemoryRegionSection *section, hwaddr *out_iova, hwaddr *out_end, Int128 *out_llend) @@ -566,7 +570,8 @@ static bool vfio_get_section_iova_range(VFIOContainer *container, static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); hwaddr iova, end; Int128 llend, llsize; void *vaddr; @@ -577,7 +582,8 @@ static void vfio_listener_region_add(MemoryListener *listener, return; } - if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { if (memory_region_is_ram_device(section->mr)) { trace_vfio_listener_region_add_no_dma_map( memory_region_name(section->mr), @@ -588,7 +594,7 @@ static void vfio_listener_region_add(MemoryListener *listener, return; } - if (vfio_container_add_section_window(container, section, &err)) { + if (vfio_container_add_section_window(bcontainer, section, &err)) { goto fail; } @@ -610,7 +616,7 @@ static void vfio_listener_region_add(MemoryListener *listener, giommu->iommu_mr = iommu_mr; giommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; - giommu->container = container; + giommu->bcontainer = bcontainer; llend = int128_add(int128_make64(section->offset_within_region), section->size); llend = int128_sub(llend, int128_one()); @@ -623,16 +629,17 @@ static void vfio_listener_region_add(MemoryListener *listener, iommu_idx); ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr, - container->pgsizes, + bcontainer->pgsizes, &err); if (ret) { g_free(giommu); goto fail; } - if (container->iova_ranges) { + if (bcontainer->iova_ranges) { ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr, - container->iova_ranges, &err); + bcontainer->iova_ranges, + &err); if (ret) { g_free(giommu); goto fail; @@ -645,7 +652,7 @@ static void vfio_listener_region_add(MemoryListener *listener, g_free(giommu); goto fail; } - QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); + QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next); memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); return; @@ -659,7 +666,7 @@ static void vfio_listener_region_add(MemoryListener *listener, * about changes. 
*/ if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_register_ram_discard_listener(container, section); + vfio_register_ram_discard_listener(bcontainer, section); return; } @@ -672,7 +679,7 @@ static void vfio_listener_region_add(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); if (memory_region_is_ram_device(section->mr)) { - hwaddr pgmask = (1ULL << ctz64(container->pgsizes)) - 1; + hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) { trace_vfio_listener_region_add_no_dma_map( @@ -684,12 +691,12 @@ static void vfio_listener_region_add(MemoryListener *listener, } } - ret = vfio_dma_map(container, iova, int128_get64(llsize), - vaddr, section->readonly); + ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize), + vaddr, section->readonly); if (ret) { - error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", " + error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%s)", - container, iova, int128_get64(llsize), vaddr, ret, + bcontainer, iova, int128_get64(llsize), vaddr, ret, strerror(-ret)); if (memory_region_is_ram_device(section->mr)) { /* Allow unexpected mappings not to be fatal for RAM devices */ @@ -711,9 +718,9 @@ fail: * can gracefully fail. Runtime, there's not much we can do other * than throw a hardware error. */ - if (!container->initialized) { - if (!container->error) { - error_propagate_prepend(&container->error, err, + if (!bcontainer->initialized) { + if (!bcontainer->error) { + error_propagate_prepend(&bcontainer->error, err, "Region %s: ", memory_region_name(section->mr)); } else { @@ -728,7 +735,8 @@ fail: static void vfio_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); hwaddr iova, end; Int128 llend, llsize; int ret; @@ -741,7 +749,7 @@ static void vfio_listener_region_del(MemoryListener *listener, if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; - QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { memory_region_unregister_iommu_notifier(section->mr, @@ -761,7 +769,8 @@ static void vfio_listener_region_del(MemoryListener *listener, */ } - if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) { + if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end, + &llend)) { return; } @@ -772,10 +781,10 @@ static void vfio_listener_region_del(MemoryListener *listener, if (memory_region_is_ram_device(section->mr)) { hwaddr pgmask; - pgmask = (1ULL << ctz64(container->pgsizes)) - 1; + pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1; try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask)); } else if (memory_region_has_ram_discard_manager(section->mr)) { - vfio_unregister_ram_discard_listener(container, section); + vfio_unregister_ram_discard_listener(bcontainer, section); /* Unregistering will trigger an unmap. */ try_unmap = false; } @@ -784,27 +793,29 @@ static void vfio_listener_region_del(MemoryListener *listener, if (int128_eq(llsize, int128_2_64())) { /* The unmap ioctl doesn't accept a full 64-bit span. 
*/ llsize = int128_rshift(llsize, 1); - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, + int128_get64(llsize), NULL); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, int128_get64(llsize), ret, + bcontainer, iova, int128_get64(llsize), ret, strerror(-ret)); } iova += int128_get64(llsize); } - ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL); + ret = vfio_container_dma_unmap(bcontainer, iova, + int128_get64(llsize), NULL); if (ret) { - error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", " + error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, int128_get64(llsize), ret, + bcontainer, iova, int128_get64(llsize), ret, strerror(-ret)); } } memory_region_unref(section->mr); - vfio_container_del_section_window(container, section); + vfio_container_del_section_window(bcontainer, section); } typedef struct VFIODirtyRanges { @@ -817,13 +828,13 @@ typedef struct VFIODirtyRanges { } VFIODirtyRanges; typedef struct VFIODirtyRangesListener { - VFIOContainer *container; + VFIOContainerBase *bcontainer; VFIODirtyRanges ranges; MemoryListener listener; } VFIODirtyRangesListener; static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, - VFIOContainer *container) + VFIOContainerBase *bcontainer) { VFIOPCIDevice *pcidev; VFIODevice *vbasedev; @@ -831,7 +842,7 @@ static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, owner = memory_region_owner(section->mr); - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { continue; } @@ -854,7 +865,7 @@ static void vfio_dirty_tracking_update(MemoryListener *listener, hwaddr iova, end, *min, *max; if (!vfio_listener_valid_section(section, "tracking_update") || - !vfio_get_section_iova_range(dirty->container, section, + !vfio_get_section_iova_range(dirty->bcontainer, section, &iova, &end, NULL)) { return; } @@ -878,7 +889,7 @@ static void vfio_dirty_tracking_update(MemoryListener *listener, * The alternative would be an IOVATree but that has a much bigger runtime * overhead and unnecessary complexity. 
*/ - if (vfio_section_is_vfio_pci(section, dirty->container) && + if (vfio_section_is_vfio_pci(section, dirty->bcontainer) && iova >= UINT32_MAX) { min = &range->minpci64; max = &range->maxpci64; @@ -902,7 +913,7 @@ static const MemoryListener vfio_dirty_tracking_listener = { .region_add = vfio_dirty_tracking_update, }; -static void vfio_dirty_tracking_init(VFIOContainer *container, +static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, VFIODirtyRanges *ranges) { VFIODirtyRangesListener dirty; @@ -912,10 +923,10 @@ static void vfio_dirty_tracking_init(VFIOContainer *container, dirty.ranges.min64 = UINT64_MAX; dirty.ranges.minpci64 = UINT64_MAX; dirty.listener = vfio_dirty_tracking_listener; - dirty.container = container; + dirty.bcontainer = bcontainer; memory_listener_register(&dirty.listener, - container->space->as); + bcontainer->space->as); *ranges = dirty.ranges; @@ -927,7 +938,7 @@ static void vfio_dirty_tracking_init(VFIOContainer *container, memory_listener_unregister(&dirty.listener); } -static void vfio_devices_dma_logging_stop(VFIOContainer *container) +static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) { uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), sizeof(uint64_t))] = {}; @@ -938,7 +949,7 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container) feature->flags = VFIO_DEVICE_FEATURE_SET | VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (!vbasedev->dirty_tracking) { continue; } @@ -952,7 +963,7 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container) } static struct vfio_device_feature * -vfio_device_feature_dma_logging_start_create(VFIOContainer *container, +vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer, VFIODirtyRanges *tracking) { struct vfio_device_feature *feature; @@ -1025,21 +1036,21 @@ static void vfio_device_feature_dma_logging_start_destroy( g_free(feature); } -static int vfio_devices_dma_logging_start(VFIOContainer *container) +static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer) { struct vfio_device_feature *feature; VFIODirtyRanges ranges; VFIODevice *vbasedev; int ret = 0; - vfio_dirty_tracking_init(container, &ranges); - feature = vfio_device_feature_dma_logging_start_create(container, + vfio_dirty_tracking_init(bcontainer, &ranges); + feature = vfio_device_feature_dma_logging_start_create(bcontainer, &ranges); if (!feature) { return -errno; } - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { if (vbasedev->dirty_tracking) { continue; } @@ -1056,7 +1067,7 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container) out: if (ret) { - vfio_devices_dma_logging_stop(container); + vfio_devices_dma_logging_stop(bcontainer); } vfio_device_feature_dma_logging_start_destroy(feature); @@ -1066,13 +1077,14 @@ out: static void vfio_listener_log_global_start(MemoryListener *listener) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret; - if (vfio_devices_all_device_dirty_tracking(container)) { - ret = vfio_devices_dma_logging_start(container); + if (vfio_devices_all_device_dirty_tracking(bcontainer)) { + ret = vfio_devices_dma_logging_start(bcontainer); } else { - ret = 
vfio_set_dirty_page_tracking(container, true); + ret = vfio_container_set_dirty_page_tracking(bcontainer, true); } if (ret) { @@ -1084,13 +1096,14 @@ static void vfio_listener_log_global_start(MemoryListener *listener) static void vfio_listener_log_global_stop(MemoryListener *listener) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret = 0; - if (vfio_devices_all_device_dirty_tracking(container)) { - vfio_devices_dma_logging_stop(container); + if (vfio_devices_all_device_dirty_tracking(bcontainer)) { + vfio_devices_dma_logging_stop(bcontainer); } else { - ret = vfio_set_dirty_page_tracking(container, false); + ret = vfio_container_set_dirty_page_tracking(bcontainer, false); } if (ret) { @@ -1126,14 +1139,14 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, return 0; } -int vfio_devices_query_dirty_bitmap(VFIOContainer *container, +int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size) { VFIODevice *vbasedev; int ret; - QLIST_FOREACH(vbasedev, &container->device_list, container_next) { + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { ret = vfio_device_dma_logging_report(vbasedev, iova, size, vbmap->bitmap); if (ret) { @@ -1149,16 +1162,16 @@ int vfio_devices_query_dirty_bitmap(VFIOContainer *container, return 0; } -int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, +int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, uint64_t size, ram_addr_t ram_addr) { bool all_device_dirty_tracking = - vfio_devices_all_device_dirty_tracking(container); + vfio_devices_all_device_dirty_tracking(bcontainer); uint64_t dirty_pages; VFIOBitmap vbmap; int ret; - if (!container->dirty_pages_supported && !all_device_dirty_tracking) { + if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { cpu_physical_memory_set_dirty_range(ram_addr, size, tcg_enabled() ? 
DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE); @@ -1171,9 +1184,9 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, } if (all_device_dirty_tracking) { - ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size); + ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size); } else { - ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size); + ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size); } if (ret) { @@ -1183,8 +1196,7 @@ int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, vbmap.pages); - trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size, - ram_addr, dirty_pages); + trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages); out: g_free(vbmap.bitmap); @@ -1201,7 +1213,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) vfio_giommu_dirty_notifier *gdn = container_of(n, vfio_giommu_dirty_notifier, n); VFIOGuestIOMMU *giommu = gdn->giommu; - VFIOContainer *container = giommu->container; + VFIOContainerBase *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; ram_addr_t translated_addr; int ret = -EINVAL; @@ -1216,12 +1228,12 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) rcu_read_lock(); if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) { - ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1, + ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1, translated_addr); if (ret) { error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx") = %d (%s)", - container, iova, iotlb->addr_mask + 1, ret, + bcontainer, iova, iotlb->addr_mask + 1, ret, strerror(-ret)); } } @@ -1246,16 +1258,17 @@ static int vfio_ram_discard_get_dirty_bitmap(MemoryRegionSection *section, * Sync the whole mapped region (spanning multiple individual mappings) * in one go. 
*/ - return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr); + return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr); } -static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, - MemoryRegionSection *section) +static int +vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); VFIORamDiscardListener *vrdl = NULL; - QLIST_FOREACH(vrdl, &container->vrdl_list, next) { + QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) { if (vrdl->mr == section->mr && vrdl->offset_within_address_space == section->offset_within_address_space) { @@ -1276,7 +1289,7 @@ static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container, &vrdl); } -static int vfio_sync_dirty_bitmap(VFIOContainer *container, +static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, MemoryRegionSection *section) { ram_addr_t ram_addr; @@ -1284,7 +1297,7 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; - QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) { + QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) { if (MEMORY_REGION(giommu->iommu_mr) == section->mr && giommu->n.start == section->offset_within_region) { Int128 llend; @@ -1308,13 +1321,13 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, } return 0; } else if (memory_region_has_ram_discard_manager(section->mr)) { - return vfio_sync_ram_discard_listener_dirty_bitmap(container, section); + return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section); } ram_addr = memory_region_get_ram_addr(section->mr) + section->offset_within_region; - return vfio_get_dirty_bitmap(container, + return vfio_get_dirty_bitmap(bcontainer, REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), int128_get64(section->size), ram_addr); } @@ -1322,15 +1335,16 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container, static void vfio_listener_log_sync(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, listener); + VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, + listener); int ret; if (vfio_listener_skipped_section(section)) { return; } - if (vfio_devices_all_dirty_tracking(container)) { - ret = vfio_sync_dirty_bitmap(container, section); + if (vfio_devices_all_dirty_tracking(bcontainer)) { + ret = vfio_sync_dirty_bitmap(bcontainer, section); if (ret) { error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret, strerror(-ret)); @@ -1352,13 +1366,13 @@ void vfio_reset_handler(void *opaque) { VFIODevice *vbasedev; - QLIST_FOREACH(vbasedev, &vfio_device_list, next) { + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { if (vbasedev->dev->realized) { vbasedev->ops->vfio_compute_needs_reset(vbasedev); } } - QLIST_FOREACH(vbasedev, &vfio_device_list, next) { + QLIST_FOREACH(vbasedev, &vfio_device_list, global_next) { if (vbasedev->dev->realized && vbasedev->needs_reset) { vbasedev->ops->vfio_hot_reset_multi(vbasedev); } @@ -1449,10 +1463,13 @@ VFIOAddressSpace *vfio_get_address_space(AddressSpace *as) void vfio_put_address_space(VFIOAddressSpace *space) { - if (QLIST_EMPTY(&space->containers)) { - QLIST_REMOVE(space, list); - g_free(space); + if (!QLIST_EMPTY(&space->containers)) { + return; } + + QLIST_REMOVE(space, list); + g_free(space); + if 
(QLIST_EMPTY(&vfio_address_spaces)) { qemu_unregister_reset(vfio_reset_handler, NULL); } @@ -1481,3 +1498,26 @@ retry: return info; } + +int vfio_attach_device(char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + const VFIOIOMMUClass *ops = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_LEGACY)); + + if (vbasedev->iommufd) { + ops = VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + } + + assert(ops); + + return ops->attach_device(name, vbasedev, as, errp); +} + +void vfio_detach_device(VFIODevice *vbasedev) +{ + if (!vbasedev->bcontainer) { + return; + } + vbasedev->bcontainer->ops->detach_device(vbasedev); +} diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c new file mode 100644 index 0000000000000000000000000000000000000000..913ae49077c4f09b7b27517c1231cfbe4befb7fb --- /dev/null +++ b/hw/vfio/container-base.c @@ -0,0 +1,111 @@ +/* + * VFIO BASE CONTAINER + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "hw/vfio/vfio-container-base.h" + +int vfio_container_dma_map(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly) +{ + g_assert(bcontainer->ops->dma_map); + return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly); +} + +int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + g_assert(bcontainer->ops->dma_unmap); + return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb); +} + +int vfio_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp) +{ + if (!bcontainer->ops->add_window) { + return 0; + } + + return bcontainer->ops->add_window(bcontainer, section, errp); +} + +void vfio_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) +{ + if (!bcontainer->ops->del_window) { + return; + } + + return bcontainer->ops->del_window(bcontainer, section); +} + +int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, + bool start) +{ + if (!bcontainer->dirty_pages_supported) { + return 0; + } + + g_assert(bcontainer->ops->set_dirty_page_tracking); + return bcontainer->ops->set_dirty_page_tracking(bcontainer, start); +} + +int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size) +{ + g_assert(bcontainer->ops->query_dirty_bitmap); + return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size); +} + +void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space, + const VFIOIOMMUClass *ops) +{ + bcontainer->ops = ops; + bcontainer->space = space; + bcontainer->error = NULL; + bcontainer->dirty_pages_supported = false; + bcontainer->dma_max_mappings = 0; + bcontainer->iova_ranges = NULL; + QLIST_INIT(&bcontainer->giommu_list); + QLIST_INIT(&bcontainer->vrdl_list); +} + +void vfio_container_destroy(VFIOContainerBase *bcontainer) +{ + VFIOGuestIOMMU *giommu, *tmp; + + QLIST_REMOVE(bcontainer, next); + + QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) { + memory_region_unregister_iommu_notifier( + MEMORY_REGION(giommu->iommu_mr), &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + } + + g_list_free_full(bcontainer->iova_ranges, g_free); +} + +static const TypeInfo 
types[] = { + { + .name = TYPE_VFIO_IOMMU, + .parent = TYPE_INTERFACE, + .class_size = sizeof(VFIOIOMMUClass), + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/container.c b/hw/vfio/container.c index 242010036af33faa325a34008af40c2cc67a02ea..bd25b9fbad2e717e63c2ab0e331186e5f63cef49 100644 --- a/hw/vfio/container.c +++ b/hw/vfio/container.c @@ -33,6 +33,7 @@ #include "trace.h" #include "qapi/error.h" #include "migration/migration.h" +#include "pci.h" VFIOGroupList vfio_group_list = QLIST_HEAD_INITIALIZER(vfio_group_list); @@ -60,10 +61,11 @@ static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) } } -static int vfio_dma_unmap_bitmap(VFIOContainer *container, +static int vfio_dma_unmap_bitmap(const VFIOContainer *container, hwaddr iova, ram_addr_t size, IOMMUTLBEntry *iotlb) { + const VFIOContainerBase *bcontainer = &container->bcontainer; struct vfio_iommu_type1_dma_unmap *unmap; struct vfio_bitmap *bitmap; VFIOBitmap vbmap; @@ -91,7 +93,7 @@ static int vfio_dma_unmap_bitmap(VFIOContainer *container, bitmap->size = vbmap.size; bitmap->data = (__u64 *)vbmap.bitmap; - if (vbmap.size > container->max_dirty_bitmap_size) { + if (vbmap.size > bcontainer->max_dirty_bitmap_size) { error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); ret = -E2BIG; goto unmap_exit; @@ -115,9 +117,12 @@ unmap_exit: /* * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 */ -int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, - ram_addr_t size, IOMMUTLBEntry *iotlb) +static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap), .flags = 0, @@ -127,9 +132,9 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, bool need_dirty_sync = false; int ret; - if (iotlb && vfio_devices_all_running_and_mig_active(container)) { - if (!vfio_devices_all_device_dirty_tracking(container) && - container->dirty_pages_supported) { + if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) { + if (!vfio_devices_all_device_dirty_tracking(bcontainer) && + bcontainer->dirty_pages_supported) { return vfio_dma_unmap_bitmap(container, iova, size, iotlb); } @@ -151,8 +156,8 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, */ if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && container->iommu_type == VFIO_TYPE1v2_IOMMU) { - trace_vfio_dma_unmap_overflow_workaround(); - unmap.size -= 1ULL << ctz64(container->pgsizes); + trace_vfio_legacy_dma_unmap_overflow_workaround(); + unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); continue; } error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno)); @@ -160,7 +165,7 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, } if (need_dirty_sync) { - ret = vfio_get_dirty_bitmap(container, iova, size, + ret = vfio_get_dirty_bitmap(bcontainer, iova, size, iotlb->translated_addr); if (ret) { return ret; @@ -170,9 +175,11 @@ int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, return 0; } -int vfio_dma_map(VFIOContainer *container, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly) +static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map), 
.flags = VFIO_DMA_MAP_FLAG_READ, @@ -191,7 +198,8 @@ int vfio_dma_map(VFIOContainer *container, hwaddr iova, * the VGA ROM space. */ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || - (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 && + (errno == EBUSY && + vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 && ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { return 0; } @@ -200,17 +208,17 @@ int vfio_dma_map(VFIOContainer *container, hwaddr iova, return -errno; } -int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) +static int +vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, + bool start) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); int ret; struct vfio_iommu_type1_dirty_bitmap dirty = { .argsz = sizeof(dirty), }; - if (!container->dirty_pages_supported) { - return 0; - } - if (start) { dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; } else { @@ -227,9 +235,12 @@ int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start) return ret; } -int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap, - hwaddr iova, hwaddr size) +static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size) { + const VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); struct vfio_iommu_type1_dirty_bitmap *dbitmap; struct vfio_iommu_type1_dirty_bitmap_get *range; int ret; @@ -296,7 +307,7 @@ bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info, } static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, - VFIOContainer *container) + VFIOContainerBase *bcontainer) { struct vfio_info_cap_header *hdr; struct vfio_iommu_type1_info_cap_iova_range *cap; @@ -314,8 +325,8 @@ static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, range_set_bounds(range, cap->iova_ranges[i].start, cap->iova_ranges[i].end); - container->iova_ranges = - range_list_insert(container->iova_ranges, range); + bcontainer->iova_ranges = + range_list_insert(bcontainer->iova_ranges, range); } return true; @@ -358,10 +369,34 @@ static int vfio_get_iommu_type(VFIOContainer *container, return -EINVAL; } -static int vfio_init_container(VFIOContainer *container, int group_fd, - Error **errp) +/* + * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type + */ +static const VFIOIOMMUClass *vfio_get_iommu_class(int iommu_type, Error **errp) +{ + ObjectClass *klass = NULL; + + switch (iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_IOMMU: + klass = object_class_by_name(TYPE_VFIO_IOMMU_LEGACY); + break; + case VFIO_SPAPR_TCE_v2_IOMMU: + case VFIO_SPAPR_TCE_IOMMU: + klass = object_class_by_name(TYPE_VFIO_IOMMU_SPAPR); + break; + default: + g_assert_not_reached(); + }; + + return VFIO_IOMMU_CLASS(klass); +} + +static int vfio_set_iommu(VFIOContainer *container, int group_fd, + VFIOAddressSpace *space, Error **errp) { int iommu_type, ret; + const VFIOIOMMUClass *vioc; iommu_type = vfio_get_iommu_type(container, errp); if (iommu_type < 0) { @@ -390,6 +425,14 @@ static int vfio_init_container(VFIOContainer *container, int group_fd, } container->iommu_type = iommu_type; + + vioc = vfio_get_iommu_class(iommu_type, errp); + if (!vioc) { + error_setg(errp, "No available IOMMU models"); + return -EINVAL; + } + + vfio_container_init(&container->bcontainer, space, vioc); return 0; } @@ -442,6 +485,7 @@ static void vfio_get_iommu_info_migration(VFIOContainer 
*container, { struct vfio_info_cap_header *hdr; struct vfio_iommu_type1_info_cap_migration *cap_mig; + VFIOContainerBase *bcontainer = &container->bcontainer; hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); if (!hdr) { @@ -456,22 +500,46 @@ static void vfio_get_iommu_info_migration(VFIOContainer *container, * qemu_real_host_page_size to mark those dirty. */ if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { - container->dirty_pages_supported = true; - container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; - container->dirty_pgsizes = cap_mig->pgsize_bitmap; + bcontainer->dirty_pages_supported = true; + bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; + bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; } } -static void vfio_free_container(VFIOContainer *container) +static int vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) { - g_list_free_full(container->iova_ranges, g_free); - g_free(container); + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + g_autofree struct vfio_iommu_type1_info *info = NULL; + int ret; + + ret = vfio_get_iommu_info(container, &info); + if (ret) { + error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); + return ret; + } + + if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { + bcontainer->pgsizes = info->iova_pgsizes; + } else { + bcontainer->pgsizes = qemu_real_host_page_size(); + } + + if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { + bcontainer->dma_max_mappings = 65535; + } + + vfio_get_info_iova_range(info, bcontainer); + + vfio_get_iommu_info_migration(container, info); + return 0; } static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, Error **errp) { VFIOContainer *container; + VFIOContainerBase *bcontainer; int ret, fd; VFIOAddressSpace *space; @@ -508,7 +576,8 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, * details once we know which type of IOMMU we are using. 
*/ - QLIST_FOREACH(container, &space->containers, next) { + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOContainer, bcontainer); if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { ret = vfio_ram_block_discard_disable(container, true); if (ret) { @@ -544,16 +613,10 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, } container = g_malloc0(sizeof(*container)); - container->space = space; container->fd = fd; - container->error = NULL; - container->dirty_pages_supported = false; - container->dma_max_mappings = 0; - container->iova_ranges = NULL; - QLIST_INIT(&container->giommu_list); - QLIST_INIT(&container->vrdl_list); - - ret = vfio_init_container(container, group->fd, errp); + bcontainer = &container->bcontainer; + + ret = vfio_set_iommu(container, group->fd, space, errp); if (ret) { goto free_container_exit; } @@ -564,82 +627,48 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, goto free_container_exit; } - switch (container->iommu_type) { - case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_IOMMU: - { - struct vfio_iommu_type1_info *info; - - ret = vfio_get_iommu_info(container, &info); - if (ret) { - error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); - goto enable_discards_exit; - } + assert(bcontainer->ops->setup); - if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { - container->pgsizes = info->iova_pgsizes; - } else { - container->pgsizes = qemu_real_host_page_size(); - } - - if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) { - container->dma_max_mappings = 65535; - } - - vfio_get_info_iova_range(info, container); - - vfio_get_iommu_info_migration(container, info); - g_free(info); - break; - } - case VFIO_SPAPR_TCE_v2_IOMMU: - case VFIO_SPAPR_TCE_IOMMU: - { - ret = vfio_spapr_container_init(container, errp); - if (ret) { - goto enable_discards_exit; - } - break; - } + ret = bcontainer->ops->setup(bcontainer, errp); + if (ret) { + goto enable_discards_exit; } vfio_kvm_device_add_group(group); QLIST_INIT(&container->group_list); - QLIST_INSERT_HEAD(&space->containers, container, next); + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); group->container = container; QLIST_INSERT_HEAD(&container->group_list, group, container_next); - container->listener = vfio_memory_listener; - - memory_listener_register(&container->listener, container->space->as); + bcontainer->listener = vfio_memory_listener; + memory_listener_register(&bcontainer->listener, bcontainer->space->as); - if (container->error) { + if (bcontainer->error) { ret = -1; - error_propagate_prepend(errp, container->error, + error_propagate_prepend(errp, bcontainer->error, "memory listener initialization failed: "); goto listener_release_exit; } - container->initialized = true; + bcontainer->initialized = true; return 0; listener_release_exit: QLIST_REMOVE(group, container_next); - QLIST_REMOVE(container, next); + QLIST_REMOVE(bcontainer, next); vfio_kvm_device_del_group(group); - memory_listener_unregister(&container->listener); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU || - container->iommu_type == VFIO_SPAPR_TCE_IOMMU) { - vfio_spapr_container_deinit(container); + memory_listener_unregister(&bcontainer->listener); + if (bcontainer->ops->release) { + bcontainer->ops->release(bcontainer); } enable_discards_exit: vfio_ram_block_discard_disable(container, false); free_container_exit: - vfio_free_container(container); + g_free(container); close_fd_exit: close(fd); @@ -653,6 +682,7 @@ 
put_space_exit: static void vfio_disconnect_container(VFIOGroup *group) { VFIOContainer *container = group->container; + VFIOContainerBase *bcontainer = &container->bcontainer; QLIST_REMOVE(group, container_next); group->container = NULL; @@ -663,10 +693,9 @@ static void vfio_disconnect_container(VFIOGroup *group) * group. */ if (QLIST_EMPTY(&container->group_list)) { - memory_listener_unregister(&container->listener); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU || - container->iommu_type == VFIO_SPAPR_TCE_IOMMU) { - vfio_spapr_container_deinit(container); + memory_listener_unregister(&bcontainer->listener); + if (bcontainer->ops->release) { + bcontainer->ops->release(bcontainer); } } @@ -676,21 +705,13 @@ static void vfio_disconnect_container(VFIOGroup *group) } if (QLIST_EMPTY(&container->group_list)) { - VFIOAddressSpace *space = container->space; - VFIOGuestIOMMU *giommu, *tmp; - - QLIST_REMOVE(container, next); + VFIOAddressSpace *space = bcontainer->space; - QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { - memory_region_unregister_iommu_notifier( - MEMORY_REGION(giommu->iommu_mr), &giommu->n); - QLIST_REMOVE(giommu, giommu_next); - g_free(giommu); - } + vfio_container_destroy(bcontainer); trace_vfio_disconnect_container(container->fd); close(container->fd); - vfio_free_container(container); + g_free(container); vfio_put_address_space(space); } @@ -705,7 +726,7 @@ static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp) QLIST_FOREACH(group, &vfio_group_list, next) { if (group->groupid == groupid) { /* Found it. Now is it already in the right context? */ - if (group->container->space->as == as) { + if (group->container->bcontainer.space->as == as) { return group; } else { error_setg(errp, "group %d used in multiple address spaces", @@ -848,7 +869,8 @@ static void vfio_put_base_device(VFIODevice *vbasedev) static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) { - char *tmp, group_path[PATH_MAX], *group_name; + char *tmp, group_path[PATH_MAX]; + g_autofree char *group_name = NULL; int ret, groupid; ssize_t len; @@ -864,7 +886,7 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) group_path[len] = 0; - group_name = basename(group_path); + group_name = g_path_get_basename(group_path); if (sscanf(group_name, "%d", &groupid) != 1) { error_setg_errno(errp, errno, "failed to read %s", group_path); return -errno; @@ -877,13 +899,13 @@ static int vfio_device_groupid(VFIODevice *vbasedev, Error **errp) * @name and @vbasedev->name are likely to be different depending * on the type of the device, hence the need for passing @name */ -int vfio_attach_device(char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp) +static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) { int groupid = vfio_device_groupid(vbasedev, errp); VFIODevice *vbasedev_iter; VFIOGroup *group; - VFIOContainer *container; + VFIOContainerBase *bcontainer; int ret; if (groupid < 0) { @@ -910,26 +932,214 @@ int vfio_attach_device(char *name, VFIODevice *vbasedev, return ret; } - container = group->container; - vbasedev->container = container; - QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next); + bcontainer = &group->container->bcontainer; + vbasedev->bcontainer = bcontainer; + QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); return ret; } -void vfio_detach_device(VFIODevice 
*vbasedev) +static void vfio_legacy_detach_device(VFIODevice *vbasedev) { VFIOGroup *group = vbasedev->group; - if (!vbasedev->container) { - return; - } - QLIST_REMOVE(vbasedev, global_next); QLIST_REMOVE(vbasedev, container_next); - vbasedev->container = NULL; + vbasedev->bcontainer = NULL; trace_vfio_detach_device(vbasedev->name, group->groupid); vfio_put_base_device(vbasedev); vfio_put_group(group); } + +static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + VFIOGroup *group; + struct vfio_pci_hot_reset_info *info = NULL; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int32_t *fds; + int ret, i, count; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + + if (ret) { + goto out_single; + } + devices = &info->devices[0]; + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + /* Verify that we have all the groups required */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + trace_vfio_pci_hot_reset_dep_devices(host.domain, + host.bus, host.slot, host.function, devices[i].group_id); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + if (!vdev->has_pm_reset) { + error_report("vfio: Cannot reset device %s, " + "depends on group %d which is not owned.", + vdev->vbasedev.name, devices[i].group_id); + } + ret = -EPERM; + goto out; + } + + /* Prep dependent devices for reset and clear our marker. */ + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + break; + } + } + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Determine how many group fds need to be passed */ + count = 0; + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + count++; + break; + } + } + } + + reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); + reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); + fds = &reset->group_fds[0]; + + /* Fill in group fds */ + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + fds[reset->count++] = group->fd; + break; + } + } + } + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + if (ret) { + ret = -errno; + } + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? 
strerror(errno) : "Success"); + +out: + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + break; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + vfio_pci_post_reset(tmp); + break; + } + } + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +static void vfio_iommu_legacy_class_init(ObjectClass *klass, void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->setup = vfio_legacy_setup; + vioc->dma_map = vfio_legacy_dma_map; + vioc->dma_unmap = vfio_legacy_dma_unmap; + vioc->attach_device = vfio_legacy_attach_device; + vioc->detach_device = vfio_legacy_detach_device; + vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; + vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; + vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_LEGACY, + .parent = TYPE_VFIO_IOMMU, + .class_init = vfio_iommu_legacy_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/display.c b/hw/vfio/display.c index 7a10fa8604aa54cdc31f1c82e05b50d43e15ae5e..1aa440c66340d491178ff5704f30c4cc49476707 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -560,7 +560,7 @@ const VMStateDescription vfio_display_vmstate = { .version_id = 1, .minimum_version_id = 1, .needed = migrate_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(ramfb, VFIODisplay, ramfb_vmstate, RAMFBState), VMSTATE_END_OF_LIST(), } diff --git a/hw/vfio/helpers.c b/hw/vfio/helpers.c index 168847e7c51ef35afbea276745c1aa7e6cd94ce0..678987080228e9f69890bd955de452fe7ec06cca 100644 --- a/hw/vfio/helpers.c +++ b/hw/vfio/helpers.c @@ -27,6 +27,7 @@ #include "trace.h" #include "qapi/error.h" #include "qemu/error-report.h" +#include "monitor/monitor.h" /* * Common VFIO interrupt disable @@ -609,3 +610,56 @@ bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type) return ret; } + +int vfio_device_get_name(VFIODevice *vbasedev, Error **errp) +{ + struct stat st; + + if (vbasedev->fd < 0) { + if (stat(vbasedev->sysfsdev, &st) < 0) { + error_setg_errno(errp, errno, "no such host device"); + error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); + return -errno; + } + /* User may specify a name, e.g: VFIO platform device */ + if (!vbasedev->name) { + vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); + } + } else { + if (!vbasedev->iommufd) { + error_setg(errp, "Use FD passing only with iommufd backend"); + return -EINVAL; + } + /* + * Give a name with fd so any function printing out vbasedev->name + * will not break. 
+ */ + if (!vbasedev->name) { + vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd); + } + } + + return 0; +} + +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp) +{ + int fd = monitor_fd_param(monitor_cur(), str, errp); + + if (fd < 0) { + error_prepend(errp, "Could not parse remote object fd %s:", str); + return; + } + vbasedev->fd = fd; +} + +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard) +{ + vbasedev->type = type; + vbasedev->ops = ops; + vbasedev->dev = dev; + vbasedev->fd = -1; + + vbasedev->ram_block_discard_allowed = ram_discard; +} diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c new file mode 100644 index 0000000000000000000000000000000000000000..9bfddc1360895413176a9f170e29e89027384a66 --- /dev/null +++ b/hw/vfio/iommufd.c @@ -0,0 +1,641 @@ +/* + * iommufd container backend + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#include <linux/vfio.h> + +#include "hw/vfio/vfio-common.h" +#include "qemu/error-report.h" +#include "trace.h" +#include "qapi/error.h" +#include "sysemu/iommufd.h" +#include "hw/qdev-core.h" +#include "sysemu/reset.h" +#include "qemu/cutils.h" +#include "qemu/chardev_open.h" +#include "pci.h" + +static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly) +{ + const VFIOIOMMUFDContainer *container = + container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + + return iommufd_backend_map_dma(container->be, + container->ioas_id, + iova, size, vaddr, readonly); +} + +static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb) +{ + const VFIOIOMMUFDContainer *container = + container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + + /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */ + return iommufd_backend_unmap_dma(container->be, + container->ioas_id, iova, size); +} + +static int iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp) +{ + return vfio_kvm_device_add_fd(vbasedev->fd, errp); +} + +static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev) +{ + Error *err = NULL; + + if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) { + error_report_err(err); + } +} + +static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp) +{ + IOMMUFDBackend *iommufd = vbasedev->iommufd; + struct vfio_device_bind_iommufd bind = { + .argsz = sizeof(bind), + .flags = 0, + }; + int ret; + + ret = iommufd_backend_connect(iommufd, errp); + if (ret) { + return ret; + } + + /* + * Add device to kvm-vfio to be prepared for the tracking + * in KVM. Especially for some emulated devices, it requires + * to have kvm information in the device open.
+ */ + ret = iommufd_cdev_kvm_device_add(vbasedev, errp); + if (ret) { + goto err_kvm_device_add; + } + + /* Bind device to iommufd */ + bind.iommufd = iommufd->fd; + ret = ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind); + if (ret) { + error_setg_errno(errp, errno, "error bind device fd=%d to iommufd=%d", + vbasedev->fd, bind.iommufd); + goto err_bind; + } + + vbasedev->devid = bind.out_devid; + trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name, + vbasedev->fd, vbasedev->devid); + return ret; +err_bind: + iommufd_cdev_kvm_device_del(vbasedev); +err_kvm_device_add: + iommufd_backend_disconnect(iommufd); + return ret; +} + +static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev) +{ + /* Unbind is automatically conducted when device fd is closed */ + iommufd_cdev_kvm_device_del(vbasedev); + iommufd_backend_disconnect(vbasedev->iommufd); +} + +static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp) +{ + long int ret = -ENOTTY; + char *path, *vfio_dev_path = NULL, *vfio_path = NULL; + DIR *dir = NULL; + struct dirent *dent; + gchar *contents; + gsize length; + int major, minor; + dev_t vfio_devt; + + path = g_strdup_printf("%s/vfio-dev", sysfs_path); + dir = opendir(path); + if (!dir) { + error_setg_errno(errp, errno, "couldn't open directory %s", path); + goto out_free_path; + } + + while ((dent = readdir(dir))) { + if (!strncmp(dent->d_name, "vfio", 4)) { + vfio_dev_path = g_strdup_printf("%s/%s/dev", path, dent->d_name); + break; + } + } + + if (!vfio_dev_path) { + error_setg(errp, "failed to find vfio-dev/vfioX/dev"); + goto out_close_dir; + } + + if (!g_file_get_contents(vfio_dev_path, &contents, &length, NULL)) { + error_setg(errp, "failed to load \"%s\"", vfio_dev_path); + goto out_free_dev_path; + } + + if (sscanf(contents, "%d:%d", &major, &minor) != 2) { + error_setg(errp, "failed to get major:minor for \"%s\"", vfio_dev_path); + goto out_free_dev_path; + } + g_free(contents); + vfio_devt = makedev(major, minor); + + vfio_path = g_strdup_printf("/dev/vfio/devices/%s", dent->d_name); + ret = open_cdev(vfio_path, vfio_devt); + if (ret < 0) { + error_setg(errp, "Failed to open %s", vfio_path); + } + + trace_iommufd_cdev_getfd(vfio_path, ret); + g_free(vfio_path); + +out_free_dev_path: + g_free(vfio_dev_path); +out_close_dir: + closedir(dir); +out_free_path: + if (*errp) { + error_prepend(errp, VFIO_MSG_PREFIX, path); + } + g_free(path); + + return ret; +} + +static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id, + Error **errp) +{ + int ret, iommufd = vbasedev->iommufd->fd; + struct vfio_device_attach_iommufd_pt attach_data = { + .argsz = sizeof(attach_data), + .flags = 0, + .pt_id = id, + }; + + /* Attach device to an IOAS or hwpt within iommufd */ + ret = ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data); + if (ret) { + error_setg_errno(errp, errno, + "[iommufd=%d] error attach %s (%d) to id=%d", + iommufd, vbasedev->name, vbasedev->fd, id); + } else { + trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name, + vbasedev->fd, id); + } + return ret; +} + +static int iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp) +{ + int ret, iommufd = vbasedev->iommufd->fd; + struct vfio_device_detach_iommufd_pt detach_data = { + .argsz = sizeof(detach_data), + .flags = 0, + }; + + ret = ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data); + if (ret) { + error_setg_errno(errp, errno, "detach %s failed", vbasedev->name); + } else { + trace_iommufd_cdev_detach_ioas_hwpt(iommufd, 
vbasedev->name); + } + return ret; +} + +static int iommufd_cdev_attach_container(VFIODevice *vbasedev, + VFIOIOMMUFDContainer *container, + Error **errp) +{ + return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp); +} + +static void iommufd_cdev_detach_container(VFIODevice *vbasedev, + VFIOIOMMUFDContainer *container) +{ + Error *err = NULL; + + if (iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) { + error_report_err(err); + } +} + +static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + + if (!QLIST_EMPTY(&bcontainer->device_list)) { + return; + } + memory_listener_unregister(&bcontainer->listener); + vfio_container_destroy(bcontainer); + iommufd_backend_free_id(container->be, container->ioas_id); + g_free(container); +} + +static int iommufd_cdev_ram_block_discard_disable(bool state) +{ + /* + * We support coordinated discarding of RAM via the RamDiscardManager. + */ + return ram_block_uncoordinated_discard_disable(state); +} + +static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container, + uint32_t ioas_id, Error **errp) +{ + VFIOContainerBase *bcontainer = &container->bcontainer; + struct iommu_ioas_iova_ranges *info; + struct iommu_iova_range *iova_ranges; + int ret, sz, fd = container->be->fd; + + info = g_malloc0(sizeof(*info)); + info->size = sizeof(*info); + info->ioas_id = ioas_id; + + ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info); + if (ret && errno != EMSGSIZE) { + goto error; + } + + sz = info->num_iovas * sizeof(struct iommu_iova_range); + info = g_realloc(info, sizeof(*info) + sz); + info->allowed_iovas = (uintptr_t)(info + 1); + + ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info); + if (ret) { + goto error; + } + + iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas; + + for (int i = 0; i < info->num_iovas; i++) { + Range *range = g_new(Range, 1); + + range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last); + bcontainer->iova_ranges = + range_list_insert(bcontainer->iova_ranges, range); + } + bcontainer->pgsizes = info->out_iova_alignment; + + g_free(info); + return 0; + +error: + ret = -errno; + g_free(info); + error_setg_errno(errp, errno, "Cannot get IOVA ranges"); + return ret; +} + +static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + VFIOContainerBase *bcontainer; + VFIOIOMMUFDContainer *container; + VFIOAddressSpace *space; + struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; + int ret, devfd; + uint32_t ioas_id; + Error *err = NULL; + const VFIOIOMMUClass *iommufd_vioc = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + + if (vbasedev->fd < 0) { + devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp); + if (devfd < 0) { + return devfd; + } + vbasedev->fd = devfd; + } else { + devfd = vbasedev->fd; + } + + ret = iommufd_cdev_connect_and_bind(vbasedev, errp); + if (ret) { + goto err_connect_bind; + } + + space = vfio_get_address_space(as); + + /* try to attach to an existing container in this space */ + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + if (bcontainer->ops != iommufd_vioc || + vbasedev->iommufd != container->be) { + continue; + } + if (iommufd_cdev_attach_container(vbasedev, container, &err)) { + const char *msg = error_get_pretty(err); + + trace_iommufd_cdev_fail_attach_existing_container(msg); + error_free(err); + err = NULL; + } else { + ret 
= iommufd_cdev_ram_block_discard_disable(true); + if (ret) { + error_setg(errp, + "Cannot set discarding of RAM broken (%d)", ret); + goto err_discard_disable; + } + goto found_container; + } + } + + /* Need to allocate a new dedicated container */ + ret = iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp); + if (ret < 0) { + goto err_alloc_ioas; + } + + trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id); + + container = g_malloc0(sizeof(*container)); + container->be = vbasedev->iommufd; + container->ioas_id = ioas_id; + + bcontainer = &container->bcontainer; + vfio_container_init(bcontainer, space, iommufd_vioc); + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); + + ret = iommufd_cdev_attach_container(vbasedev, container, errp); + if (ret) { + goto err_attach_container; + } + + ret = iommufd_cdev_ram_block_discard_disable(true); + if (ret) { + goto err_discard_disable; + } + + ret = iommufd_cdev_get_info_iova_range(container, ioas_id, &err); + if (ret) { + error_append_hint(&err, + "Fallback to default 64bit IOVA range and 4K page size\n"); + warn_report_err(err); + err = NULL; + bcontainer->pgsizes = qemu_real_host_page_size(); + } + + bcontainer->listener = vfio_memory_listener; + memory_listener_register(&bcontainer->listener, bcontainer->space->as); + + if (bcontainer->error) { + ret = -1; + error_propagate_prepend(errp, bcontainer->error, + "memory listener initialization failed: "); + goto err_listener_register; + } + + bcontainer->initialized = true; + +found_container: + ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info); + if (ret) { + error_setg_errno(errp, errno, "error getting device info"); + goto err_listener_register; + } + + /* + * TODO: examine RAM_BLOCK_DISCARD stuff, should we do group level + * for discarding incompatibility check as well? 
+ */ + if (vbasedev->ram_block_discard_allowed) { + iommufd_cdev_ram_block_discard_disable(false); + } + + vbasedev->group = 0; + vbasedev->num_irqs = dev_info.num_irqs; + vbasedev->num_regions = dev_info.num_regions; + vbasedev->flags = dev_info.flags; + vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET); + vbasedev->bcontainer = bcontainer; + QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next); + QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); + + trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs, + vbasedev->num_regions, vbasedev->flags); + return 0; + +err_listener_register: + iommufd_cdev_ram_block_discard_disable(false); +err_discard_disable: + iommufd_cdev_detach_container(vbasedev, container); +err_attach_container: + iommufd_cdev_container_destroy(container); +err_alloc_ioas: + vfio_put_address_space(space); + iommufd_cdev_unbind_and_disconnect(vbasedev); +err_connect_bind: + close(vbasedev->fd); + return ret; +} + +static void iommufd_cdev_detach(VFIODevice *vbasedev) +{ + VFIOContainerBase *bcontainer = vbasedev->bcontainer; + VFIOAddressSpace *space = bcontainer->space; + VFIOIOMMUFDContainer *container = container_of(bcontainer, + VFIOIOMMUFDContainer, + bcontainer); + QLIST_REMOVE(vbasedev, global_next); + QLIST_REMOVE(vbasedev, container_next); + vbasedev->bcontainer = NULL; + + if (!vbasedev->ram_block_discard_allowed) { + iommufd_cdev_ram_block_discard_disable(false); + } + + iommufd_cdev_detach_container(vbasedev, container); + iommufd_cdev_container_destroy(container); + vfio_put_address_space(space); + + iommufd_cdev_unbind_and_disconnect(vbasedev); + close(vbasedev->fd); +} + +static VFIODevice *iommufd_cdev_pci_find_by_devid(__u32 devid) +{ + VFIODevice *vbasedev_iter; + const VFIOIOMMUClass *iommufd_vioc = + VFIO_IOMMU_CLASS(object_class_by_name(TYPE_VFIO_IOMMU_IOMMUFD)); + + QLIST_FOREACH(vbasedev_iter, &vfio_device_list, global_next) { + if (vbasedev_iter->bcontainer->ops != iommufd_vioc) { + continue; + } + if (devid == vbasedev_iter->devid) { + return vbasedev_iter; + } + } + return NULL; +} + +static VFIOPCIDevice * +iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev, + VFIODevice *reset_dev) +{ + VFIODevice *vbasedev_tmp; + + if (dep_dev->devid == reset_dev->devid || + dep_dev->devid == VFIO_PCI_DEVID_OWNED) { + return NULL; + } + + vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid); + if (!vbasedev_tmp || !vbasedev_tmp->dev->realized || + vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) { + return NULL; + } + + return container_of(vbasedev_tmp, VFIOPCIDevice, vbasedev); +} + +static int iommufd_cdev_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + struct vfio_pci_hot_reset_info *info = NULL; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int ret, i; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? 
"one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + + if (ret) { + goto out_single; + } + + assert(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID); + + devices = &info->devices[0]; + + if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) { + if (!vdev->has_pm_reset) { + for (i = 0; i < info->count; i++) { + if (devices[i].devid == VFIO_PCI_DEVID_NOT_OWNED) { + error_report("vfio: Cannot reset device %s, " + "depends on device %04x:%02x:%02x.%x " + "which is not owned.", + vdev->vbasedev.name, devices[i].segment, + devices[i].bus, PCI_SLOT(devices[i].devfn), + PCI_FUNC(devices[i].devfn)); + } + } + } + ret = -EPERM; + goto out_single; + } + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + for (i = 0; i < info->count; i++) { + VFIOPCIDevice *tmp; + + trace_iommufd_cdev_pci_hot_reset_dep_devices(devices[i].segment, + devices[i].bus, + PCI_SLOT(devices[i].devfn), + PCI_FUNC(devices[i].devfn), + devices[i].devid); + + /* + * If a VFIO cdev device is resettable, all the dependent devices + * are either bound to same iommufd or within same iommu_groups as + * one of the iommufd bound devices. + */ + assert(devices[i].devid != VFIO_PCI_DEVID_NOT_OWNED); + + tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); + if (!tmp) { + continue; + } + + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Use zero length array for hot reset with iommufd backend */ + reset = g_malloc0(sizeof(*reset)); + reset->argsz = sizeof(*reset); + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + if (ret) { + ret = -errno; + } + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? 
strerror(errno) : "Success"); + + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + VFIOPCIDevice *tmp; + + tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev); + if (!tmp) { + continue; + } + vfio_pci_post_reset(tmp); + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +static void vfio_iommu_iommufd_class_init(ObjectClass *klass, void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->dma_map = iommufd_cdev_map; + vioc->dma_unmap = iommufd_cdev_unmap; + vioc->attach_device = iommufd_cdev_attach; + vioc->detach_device = iommufd_cdev_detach; + vioc->pci_hot_reset = iommufd_cdev_pci_hot_reset; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_IOMMUFD, + .parent = TYPE_VFIO_IOMMU, + .class_init = vfio_iommu_iommufd_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build index 2a6912c94027d6213144f910d64625a469cc2b1f..bb98493b53e858c53181e224f9cb46892838a8be 100644 --- a/hw/vfio/meson.build +++ b/hw/vfio/meson.build @@ -2,10 +2,14 @@ vfio_ss = ss.source_set() vfio_ss.add(files( 'helpers.c', 'common.c', + 'container-base.c', 'container.c', - 'spapr.c', 'migration.c', )) +vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c')) +vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files( + 'iommufd.c', +)) vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( 'display.c', 'pci-quirks.c', diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 28d422b39f9f70e94a2f396b0fb064c5de17dc28..70e6b1a709f9b67e4c9eb41033d76347275cac42 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -163,6 +163,19 @@ reset_device: return ret; } +/* + * Some device state transitions require resetting the device if they fail. + * This function sets the device in new_state and resets the device if that + * fails. Reset is done by using ERROR as the recover state. + */ +static int +vfio_migration_set_state_or_reset(VFIODevice *vbasedev, + enum vfio_device_mig_state new_state) +{ + return vfio_migration_set_state(vbasedev, new_state, + VFIO_DEVICE_STATE_ERROR); +} + static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t data_size) { @@ -422,12 +435,7 @@ static void vfio_save_cleanup(void *opaque) * after migration has completed, so it won't increase downtime. */ if (migration->device_state == VFIO_DEVICE_STATE_STOP_COPY) { - /* - * If setting the device in STOP state fails, the device should be - * reset. To do so, use ERROR state as a recover state. - */ - vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_STOP, - VFIO_DEVICE_STATE_ERROR); + vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_STOP); } g_free(migration->data_buffer); @@ -699,12 +707,7 @@ static void vfio_vmstate_change_prepare(void *opaque, bool running, VFIO_DEVICE_STATE_PRE_COPY_P2P : VFIO_DEVICE_STATE_RUNNING_P2P; - /* - * If setting the device in new_state fails, the device should be reset. - * To do so, use ERROR state as a recover state. - */ - ret = vfio_migration_set_state(vbasedev, new_state, - VFIO_DEVICE_STATE_ERROR); + ret = vfio_migration_set_state_or_reset(vbasedev, new_state); if (ret) { /* * Migration should be aborted in this case, but vm_state_notify() @@ -736,12 +739,7 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state) VFIO_DEVICE_STATE_STOP; } - /* - * If setting the device in new_state fails, the device should be reset. - * To do so, use ERROR state as a recover state. 
- */ - ret = vfio_migration_set_state(vbasedev, new_state, - VFIO_DEVICE_STATE_ERROR); + ret = vfio_migration_set_state_or_reset(vbasedev, new_state); if (ret) { /* * Migration should be aborted in this case, but vm_state_notify() @@ -770,12 +768,7 @@ static void vfio_migration_state_notifier(Notifier *notifier, void *data) case MIGRATION_STATUS_CANCELLING: case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_FAILED: - /* - * If setting the device in RUNNING state fails, the device should - * be reset. To do so, use ERROR state as a recover state. - */ - vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RUNNING, - VFIO_DEVICE_STATE_ERROR); + vfio_migration_set_state_or_reset(vbasedev, VFIO_DEVICE_STATE_RUNNING); } } diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index c62c02f7b692c98bba1b931ebb1a4254a7f56061..d7fe06715c4b9cde66a68c31aaf405315921b0d6 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include #include @@ -42,6 +43,7 @@ #include "qapi/error.h" #include "migration/blocker.h" #include "migration/qemu-file.h" +#include "sysemu/iommufd.h" #define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" @@ -2374,7 +2376,7 @@ static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp) return 0; } -static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) +void vfio_pci_pre_reset(VFIOPCIDevice *vdev) { PCIDevice *pdev = &vdev->pdev; uint16_t cmd; @@ -2411,7 +2413,7 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev) vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2); } -static void vfio_pci_post_reset(VFIOPCIDevice *vdev) +void vfio_pci_post_reset(VFIOPCIDevice *vdev) { Error *err = NULL; int nr; @@ -2435,7 +2437,7 @@ static void vfio_pci_post_reset(VFIOPCIDevice *vdev) vfio_quirk_reset(vdev); } -static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) +bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) { char tmp[13]; @@ -2445,22 +2447,13 @@ static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name) return (strcmp(tmp, name) == 0); } -static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) +int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, + struct vfio_pci_hot_reset_info **info_p) { - VFIOGroup *group; struct vfio_pci_hot_reset_info *info; - struct vfio_pci_dependent_device *devices; - struct vfio_pci_hot_reset *reset; - int32_t *fds; - int ret, i, count; - bool multi = false; + int ret, count; - trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? 
"one" : "multi"); - - if (!single) { - vfio_pci_pre_reset(vdev); - } - vdev->vbasedev.needs_reset = false; + assert(info_p && !*info_p); info = g_malloc0(sizeof(*info)); info->argsz = sizeof(*info); @@ -2468,163 +2461,36 @@ static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret && errno != ENOSPC) { ret = -errno; + g_free(info); if (!vdev->has_pm_reset) { error_report("vfio: Cannot reset device %s, " "no available reset mechanism.", vdev->vbasedev.name); } - goto out_single; + return ret; } count = info->count; - info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices))); - info->argsz = sizeof(*info) + (count * sizeof(*devices)); - devices = &info->devices[0]; + info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0]))); + info->argsz = sizeof(*info) + (count * sizeof(info->devices[0])); ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info); if (ret) { ret = -errno; + g_free(info); error_report("vfio: hot reset info failed: %m"); - goto out_single; - } - - trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); - - /* Verify that we have all the groups required */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - trace_vfio_pci_hot_reset_dep_devices(host.domain, - host.bus, host.slot, host.function, devices[i].group_id); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - if (!vdev->has_pm_reset) { - error_report("vfio: Cannot reset device %s, " - "depends on group %d which is not owned.", - vdev->vbasedev.name, devices[i].group_id); - } - ret = -EPERM; - goto out; - } - - /* Prep dependent devices for reset and clear our marker. */ - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - if (single) { - ret = -EINVAL; - goto out_single; - } - vfio_pci_pre_reset(tmp); - tmp->vbasedev.needs_reset = false; - multi = true; - break; - } - } - } - - if (!single && !multi) { - ret = -EINVAL; - goto out_single; - } - - /* Determine how many group fds need to be passed */ - count = 0; - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - count++; - break; - } - } - } - - reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); - reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); - fds = &reset->group_fds[0]; - - /* Fill in group fds */ - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - fds[reset->count++] = group->fd; - break; - } - } + return ret; } - /* Bus reset! */ - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); - g_free(reset); - - trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, - ret ? 
strerror(errno) : "Success"); - -out: - /* Re-enable INTx on affected devices */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - break; - } + *info_p = info; + return 0; +} - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - vfio_pci_post_reset(tmp); - break; - } - } - } -out_single: - if (!single) { - vfio_pci_post_reset(vdev); - } - g_free(info); +static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single) +{ + VFIODevice *vbasedev = &vdev->vbasedev; + const VFIOIOMMUClass *ops = vbasedev->bcontainer->ops; - return ret; + return ops->pci_hot_reset(vbasedev, single); } /* @@ -2695,7 +2561,7 @@ const VMStateDescription vmstate_vfio_display = { .version_id = 1, .minimum_version_id = 1, .needed = vfio_display_migration_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_STRUCT_POINTER(dpy, VFIOPCIDevice, vfio_display_vmstate, VFIODisplay), VMSTATE_END_OF_LIST() @@ -2706,12 +2572,12 @@ const VMStateDescription vmstate_vfio_pci_config = { .name = "VFIOPCIDevice", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_vfio_display, NULL } @@ -3078,17 +2944,19 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) VFIODevice *vbasedev = &vdev->vbasedev; char *tmp, *subsys; Error *err = NULL; - struct stat st; int i, ret; bool is_mdev; char uuid[UUID_STR_LEN]; char *name; - if (!vbasedev->sysfsdev) { + if (vbasedev->fd < 0 && !vbasedev->sysfsdev) { if (!(~vdev->host.domain || ~vdev->host.bus || ~vdev->host.slot || ~vdev->host.function)) { error_setg(errp, "No provided host device"); error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F " +#ifdef CONFIG_IOMMUFD + "or -device vfio-pci,fd=DEVICE_FD " +#endif "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n"); return; } @@ -3098,17 +2966,10 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) vdev->host.slot, vdev->host.function); } - if (stat(vbasedev->sysfsdev, &st) < 0) { - error_setg_errno(errp, errno, "no such host device"); - error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev); + if (vfio_device_get_name(vbasedev, errp) < 0) { return; } - vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - vbasedev->ops = &vfio_pci_ops; - vbasedev->type = VFIO_DEVICE_TYPE_PCI; - vbasedev->dev = DEVICE(vdev); - /* * Mediated devices *might* operate compatibly with discarding of RAM, but * we cannot know for certain, it depends on whether the mdev vendor driver @@ -3456,6 +3317,7 @@ static void vfio_instance_init(Object *obj) { PCIDevice *pci_dev = PCI_DEVICE(obj); VFIOPCIDevice *vdev = VFIO_PCI(obj); + VFIODevice *vbasedev = &vdev->vbasedev; 
device_add_bootindex_property(obj, &vdev->bootindex, "bootindex", NULL, @@ -3465,6 +3327,9 @@ static void vfio_instance_init(Object *obj) vdev->host.slot = ~0U; vdev->host.function = ~0U; + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops, + DEVICE(vdev), false); + vdev->nv_gpudirect_clique = 0xFF; /* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command @@ -3517,14 +3382,20 @@ static Property vfio_pci_dev_properties[] = { qdev_prop_nv_gpudirect_clique, uint8_t), DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo, OFF_AUTOPCIBAR_OFF), - /* - * TODO - support passed fds... is this necessary? - * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name), - * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name), - */ +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; +#ifdef CONFIG_IOMMUFD +static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp); +} +#endif + static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -3532,6 +3403,9 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, void *data) dc->reset = vfio_pci_reset; device_class_set_props(dc, vfio_pci_dev_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd); +#endif dc->desc = "VFIO-based PCI device assignment"; set_bit(DEVICE_CATEGORY_MISC, dc->categories); pdc->realize = vfio_realize; diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index fba8737ab2cb23118c0819f600379773d718ed18..6e64a2654e690af11b72710530a41135b726e96f 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -218,6 +218,12 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr); extern const PropertyInfo qdev_prop_nv_gpudirect_clique; +void vfio_pci_pre_reset(VFIOPCIDevice *vdev); +void vfio_pci_post_reset(VFIOPCIDevice *vdev); +bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name); +int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev, + struct vfio_pci_hot_reset_info **info_p); + int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp); int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index 8e3d4ac45824ec69afb523f8f0e668327122cd02..a8d9b7da633e0717421acbe9a951334b074b6607 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -15,11 +15,13 @@ */ #include "qemu/osdep.h" +#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ #include "qapi/error.h" #include #include #include "hw/vfio/vfio-platform.h" +#include "sysemu/iommufd.h" #include "migration/vmstate.h" #include "qemu/error-report.h" #include "qemu/lockable.h" @@ -529,14 +531,13 @@ static VFIODeviceOps vfio_platform_ops = { */ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) { - struct stat st; int ret; - /* @sysfsdev takes precedence over @host */ - if (vbasedev->sysfsdev) { + /* @fd takes precedence over @sysfsdev which takes precedence over @host */ + if (vbasedev->fd < 0 && vbasedev->sysfsdev) { g_free(vbasedev->name); vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - } else { + } else if (vbasedev->fd < 0) { if (!vbasedev->name || strchr(vbasedev->name, '/')) { error_setg(errp, "wrong host device name"); return -EINVAL; @@ -546,10 +547,9 @@ static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp) vbasedev->name); } - if 
(stat(vbasedev->sysfsdev, &st) < 0) { - error_setg_errno(errp, errno, - "failed to get the sysfs host device file status"); - return -errno; + ret = vfio_device_get_name(vbasedev, errp); + if (ret) { + return ret; } ret = vfio_attach_device(vbasedev->name, vbasedev, @@ -581,10 +581,6 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp) VFIODevice *vbasedev = &vdev->vbasedev; int i, ret; - vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM; - vbasedev->dev = dev; - vbasedev->ops = &vfio_platform_ops; - qemu_mutex_init(&vdev->intp_mutex); trace_vfio_platform_realize(vbasedev->sysfsdev ? @@ -649,9 +645,29 @@ static Property vfio_platform_dev_properties[] = { DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice, mmap_timeout, 1100), DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true), +#ifdef CONFIG_IOMMUFD + DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd, + TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), +#endif DEFINE_PROP_END_OF_LIST(), }; +static void vfio_platform_instance_init(Object *obj) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(obj); + VFIODevice *vbasedev = &vdev->vbasedev; + + vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PLATFORM, &vfio_platform_ops, + DEVICE(vdev), false); +} + +#ifdef CONFIG_IOMMUFD +static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp) +{ + vfio_device_set_fd(&VFIO_PLATFORM_DEVICE(obj)->vbasedev, str, errp); +} +#endif + static void vfio_platform_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -659,6 +675,9 @@ static void vfio_platform_class_init(ObjectClass *klass, void *data) dc->realize = vfio_platform_realize; device_class_set_props(dc, vfio_platform_dev_properties); +#ifdef CONFIG_IOMMUFD + object_class_property_add_str(klass, "fd", NULL, vfio_platform_set_fd); +#endif dc->vmsd = &vfio_platform_vmstate; dc->desc = "VFIO-based platform device assignment"; sbc->connect_irq_notifier = vfio_start_irqfd_injection; @@ -671,6 +690,7 @@ static const TypeInfo vfio_platform_dev_info = { .name = TYPE_VFIO_PLATFORM, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(VFIOPlatformDevice), + .instance_init = vfio_platform_instance_init, .class_init = vfio_platform_class_init, .class_size = sizeof(VFIOPlatformDeviceClass), }; diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index 83da2f7ec213dab2acca9b96a1d07a22a49d22c4..0d949bb728212534a7e2296e491aa8d95f45945d 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -24,6 +24,12 @@ #include "qapi/error.h" #include "trace.h" +typedef struct VFIOSpaprContainer { + VFIOContainer container; + MemoryListener prereg_listener; + QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; +} VFIOSpaprContainer; + static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section) { if (memory_region_is_iommu(section->mr)) { @@ -44,8 +50,10 @@ static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa) static void vfio_prereg_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - prereg_listener); + VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer, + prereg_listener); + VFIOContainer *container = &scontainer->container; + VFIOContainerBase *bcontainer = &container->bcontainer; const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -88,9 +96,9 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, * can gracefully fail. 
Runtime, there's not much we can do other * than throw a hardware error. */ - if (!container->initialized) { - if (!container->error) { - error_setg_errno(&container->error, -ret, + if (!bcontainer->initialized) { + if (!bcontainer->error) { + error_setg_errno(&bcontainer->error, -ret, "Memory registering failed"); } } else { @@ -102,8 +110,9 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, static void vfio_prereg_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - prereg_listener); + VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer, + prereg_listener); + VFIOContainer *container = &scontainer->container; const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -146,12 +155,12 @@ static const MemoryListener vfio_prereg_listener = { .region_del = vfio_prereg_listener_region_del, }; -static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova, +static void vfio_host_win_add(VFIOSpaprContainer *scontainer, hwaddr min_iova, hwaddr max_iova, uint64_t iova_pgsizes) { VFIOHostDMAWindow *hostwin; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (ranges_overlap(hostwin->min_iova, hostwin->max_iova - hostwin->min_iova + 1, min_iova, @@ -165,15 +174,15 @@ static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova, hostwin->min_iova = min_iova; hostwin->max_iova = max_iova; hostwin->iova_pgsizes = iova_pgsizes; - QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next); + QLIST_INSERT_HEAD(&scontainer->hostwin_list, hostwin, hostwin_next); } -static int vfio_host_win_del(VFIOContainer *container, +static int vfio_host_win_del(VFIOSpaprContainer *scontainer, hwaddr min_iova, hwaddr max_iova) { VFIOHostDMAWindow *hostwin; - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) { QLIST_REMOVE(hostwin, hostwin_next); g_free(hostwin); @@ -184,7 +193,7 @@ static int vfio_host_win_del(VFIOContainer *container, return -1; } -static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container, +static VFIOHostDMAWindow *vfio_find_hostwin(VFIOSpaprContainer *container, hwaddr iova, hwaddr end) { VFIOHostDMAWindow *hostwin; @@ -226,6 +235,7 @@ static int vfio_spapr_create_window(VFIOContainer *container, hwaddr *pgsize) { int ret = 0; + VFIOContainerBase *bcontainer = &container->bcontainer; IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask; unsigned entries, bits_total, bits_per_level, max_levels; @@ -239,13 +249,13 @@ static int vfio_spapr_create_window(VFIOContainer *container, if (pagesize > rampagesize) { pagesize = rampagesize; } - pgmask = container->pgsizes & (pagesize | (pagesize - 1)); + pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1)); pagesize = pgmask ? 
(1ULL << (63 - clz64(pgmask))) : 0; if (!pagesize) { error_report("Host doesn't support page size 0x%"PRIx64 ", the supported mask is 0x%lx", memory_region_iommu_get_min_page_size(iommu_mr), - container->pgsizes); + bcontainer->pgsizes); return -EINVAL; } @@ -313,10 +323,15 @@ static int vfio_spapr_create_window(VFIOContainer *container, return 0; } -int vfio_container_add_section_window(VFIOContainer *container, - MemoryRegionSection *section, - Error **errp) +static int +vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); VFIOHostDMAWindow *hostwin; hwaddr pgsize = 0; int ret; @@ -332,7 +347,7 @@ int vfio_container_add_section_window(VFIOContainer *container, iova = section->offset_within_address_space; end = iova + int128_get64(section->size) - 1; - if (!vfio_find_hostwin(container, iova, end)) { + if (!vfio_find_hostwin(scontainer, iova, end)) { error_setg(errp, "Container %p can't map guest IOVA region" " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container, iova, end); @@ -346,7 +361,7 @@ int vfio_container_add_section_window(VFIOContainer *container, } /* For now intersections are not allowed, we may relax this later */ - QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) { + QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) { if (ranges_overlap(hostwin->min_iova, hostwin->max_iova - hostwin->min_iova + 1, section->offset_within_address_space, @@ -368,7 +383,7 @@ int vfio_container_add_section_window(VFIOContainer *container, return ret; } - vfio_host_win_add(container, section->offset_within_address_space, + vfio_host_win_add(scontainer, section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1, pgsize); #ifdef CONFIG_KVM @@ -401,16 +416,22 @@ int vfio_container_add_section_window(VFIOContainer *container, return 0; } -void vfio_container_del_section_window(VFIOContainer *container, - MemoryRegionSection *section) +static void +vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); + if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) { return; } vfio_spapr_remove_window(container, section->offset_within_address_space); - if (vfio_host_win_del(container, + if (vfio_host_win_del(scontainer, section->offset_within_address_space, section->offset_within_address_space + int128_get64(section->size) - 1) < 0) { @@ -419,13 +440,36 @@ void vfio_container_del_section_window(VFIOContainer *container, } } -int vfio_spapr_container_init(VFIOContainer *container, Error **errp) +static void vfio_spapr_container_release(VFIOContainerBase *bcontainer) +{ + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); + VFIOHostDMAWindow *hostwin, *next; + + if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + memory_listener_unregister(&scontainer->prereg_listener); + } + QLIST_FOREACH_SAFE(hostwin, &scontainer->hostwin_list, hostwin_next, + next) { + QLIST_REMOVE(hostwin, hostwin_next); + g_free(hostwin); + } +} + +static int 
vfio_spapr_container_setup(VFIOContainerBase *bcontainer, + Error **errp) { + VFIOContainer *container = container_of(bcontainer, VFIOContainer, + bcontainer); + VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, + container); struct vfio_iommu_spapr_tce_info info; bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; int ret, fd = container->fd; - QLIST_INIT(&container->hostwin_list); + QLIST_INIT(&scontainer->hostwin_list); /* * The host kernel code implementing VFIO_IOMMU_DISABLE is called @@ -439,13 +483,13 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) return -errno; } } else { - container->prereg_listener = vfio_prereg_listener; + scontainer->prereg_listener = vfio_prereg_listener; - memory_listener_register(&container->prereg_listener, + memory_listener_register(&scontainer->prereg_listener, &address_space_memory); - if (container->error) { + if (bcontainer->error) { ret = -1; - error_propagate_prepend(errp, container->error, + error_propagate_prepend(errp, bcontainer->error, "RAM memory listener initialization failed: "); goto listener_unregister_exit; } @@ -461,7 +505,7 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) } if (v2) { - container->pgsizes = info.ddw.pgsizes; + bcontainer->pgsizes = info.ddw.pgsizes; /* * There is a default window in just created container. * To make region_add/del simpler, we better remove this @@ -476,8 +520,8 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) } } else { /* The default table uses 4K pages */ - container->pgsizes = 0x1000; - vfio_host_win_add(container, info.dma32_window_start, + bcontainer->pgsizes = 0x1000; + vfio_host_win_add(scontainer, info.dma32_window_start, info.dma32_window_start + info.dma32_window_size - 1, 0x1000); @@ -487,21 +531,27 @@ int vfio_spapr_container_init(VFIOContainer *container, Error **errp) listener_unregister_exit: if (v2) { - memory_listener_unregister(&container->prereg_listener); + memory_listener_unregister(&scontainer->prereg_listener); } return ret; } -void vfio_spapr_container_deinit(VFIOContainer *container) +static void vfio_iommu_spapr_class_init(ObjectClass *klass, void *data) { - VFIOHostDMAWindow *hostwin, *next; + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); - if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { - memory_listener_unregister(&container->prereg_listener); - } - QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next, - next) { - QLIST_REMOVE(hostwin, hostwin_next); - g_free(hostwin); - } -} + vioc->add_window = vfio_spapr_container_add_section_window; + vioc->del_window = vfio_spapr_container_del_section_window; + vioc->release = vfio_spapr_container_release; + vioc->setup = vfio_spapr_container_setup; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_SPAPR, + .parent = TYPE_VFIO_IOMMU_LEGACY, + .class_init = vfio_iommu_spapr_class_init, + }, +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 0eb2387cf24c920b0904ec4012b0fcd3f2e8b3cf..8fdde5445697789edeb4c6383566c1b417cc1595 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -116,8 +116,8 @@ vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Re vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" vfio_get_dev_region(const char *name, int 
index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" -vfio_dma_unmap_overflow_workaround(void) "" -vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 +vfio_legacy_dma_unmap_overflow_workaround(void) "" +vfio_get_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 # platform.c @@ -164,3 +164,14 @@ vfio_state_pending_estimate(const char *name, uint64_t precopy, uint64_t postcop vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" stopcopy size 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64 vfio_vmstate_change(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" vfio_vmstate_change_prepare(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s" + +#iommufd.c + +iommufd_cdev_connect_and_bind(int iommufd, const char *name, int devfd, int devid) " [iommufd=%d] Successfully bound device %s (fd=%d): output devid=%d" +iommufd_cdev_getfd(const char *dev, int devfd) " %s (fd=%d)" +iommufd_cdev_attach_ioas_hwpt(int iommufd, const char *name, int devfd, int id) " [iommufd=%d] Successfully attached device %s (%d) to id=%d" +iommufd_cdev_detach_ioas_hwpt(int iommufd, const char *name) " [iommufd=%d] Successfully detached %s" +iommufd_cdev_fail_attach_existing_container(const char *msg) " %s" +iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d" +iommufd_cdev_device_info(char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d" +iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d" diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index c0055a78326c1344d2df79090443bdc4817a5cd0..47baf00366f8d4f653893cad9cd46b949b86fe93 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -74,8 +74,7 @@ specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: system_virtio_ss) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c')) system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) -system_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c')) system_ss.add(files('virtio-hmp-cmds.c')) specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: specific_virtio_ss) +system_ss.add(when: 'CONFIG_ACPI', if_true: files('virtio-acpi.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 637cac4edf0f3459915d9d840bfc02501e7ca7a5..77905d1994c75dc25b3dcb936a367cb1929f9385 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -30,16 +30,16 @@ vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p" # vhost-vdpa.c 
-vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 -vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 -vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 -vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 -vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 +vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 +vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d" -vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 +vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64 vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64 vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 -vhost_vdpa_init(void *dev, void *vdpa) "dev: %p vdpa: %p" +vhost_vdpa_init(void *dev, void *s, void *vdpa) "dev: %p, common dev: %p vdpa: %p" vhost_vdpa_cleanup(void *dev, void *vdpa) "dev: %p vdpa: %p" vhost_vdpa_memslots_limit(void *dev, int ret) "dev: %p = 0x%x" vhost_vdpa_set_mem_table(void *dev, uint32_t nregions, uint32_t padding) "dev: %p nregions: %"PRIu32" padding: 0x%"PRIx32 diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c index f22d5d5bc0ac66e8997037d84c732271d45e0ba0..eb9ecea83b3dd84f4a911e03a4f1f93ad68b3cb7 100644 --- a/hw/virtio/vdpa-dev.c +++ b/hw/virtio/vdpa-dev.c @@ -66,7 +66,6 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) if (*errp) { return; } - v->vdpa.device_fd = v->vhostfd; v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd, 
VHOST_VDPA_GET_DEVICE_ID, errp); @@ -114,7 +113,9 @@ static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) strerror(-ret)); goto free_vqs; } - v->vdpa.iova_range = iova_range; + v->vdpa.shared = g_new0(VhostVDPAShared, 1); + v->vdpa.shared->device_fd = v->vhostfd; + v->vdpa.shared->iova_range = iova_range; ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL); if (ret < 0) { @@ -162,6 +163,7 @@ vhost_cleanup: vhost_dev_cleanup(&v->dev); free_vqs: g_free(vqs); + g_free(v->vdpa.shared); out: qemu_close(v->vhostfd); v->vhostfd = -1; @@ -184,6 +186,7 @@ static void vhost_vdpa_device_unrealize(DeviceState *dev) g_free(s->config); g_free(s->dev.vqs); vhost_dev_cleanup(&s->dev); + g_free(s->vdpa.shared); qemu_close(s->vhostfd); s->vhostfd = -1; } @@ -341,7 +344,7 @@ static const VMStateDescription vmstate_vhost_vdpa_device = { .unmigratable = 1, .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index 17f3fc6a082353ac0624fc70e935506aaf3639a2..833804dd40f2ec1f2567e719896ed35725a3cca4 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -158,6 +158,30 @@ static int vhost_kernel_set_vring_busyloop_timeout(struct vhost_dev *dev, return vhost_kernel_call(dev, VHOST_SET_VRING_BUSYLOOP_TIMEOUT, s); } +static int vhost_kernel_new_worker(struct vhost_dev *dev, + struct vhost_worker_state *worker) +{ + return vhost_kernel_call(dev, VHOST_NEW_WORKER, worker); +} + +static int vhost_kernel_free_worker(struct vhost_dev *dev, + struct vhost_worker_state *worker) +{ + return vhost_kernel_call(dev, VHOST_FREE_WORKER, worker); +} + +static int vhost_kernel_attach_vring_worker(struct vhost_dev *dev, + struct vhost_vring_worker *worker) +{ + return vhost_kernel_call(dev, VHOST_ATTACH_VRING_WORKER, worker); +} + +static int vhost_kernel_get_vring_worker(struct vhost_dev *dev, + struct vhost_vring_worker *worker) +{ + return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker); +} + static int vhost_kernel_set_features(struct vhost_dev *dev, uint64_t features) { @@ -313,6 +337,10 @@ const VhostOps kernel_ops = { .vhost_set_vring_err = vhost_kernel_set_vring_err, .vhost_set_vring_busyloop_timeout = vhost_kernel_set_vring_busyloop_timeout, + .vhost_get_vring_worker = vhost_kernel_get_vring_worker, + .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker, + .vhost_new_worker = vhost_kernel_new_worker, + .vhost_free_worker = vhost_kernel_free_worker, .vhost_set_features = vhost_kernel_set_features, .vhost_get_features = vhost_kernel_get_features, .vhost_set_backend_cap = vhost_kernel_set_backend_cap, diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c index eb91723855b2ac1329afd2559aa9707b1409dcbc..cca2cd41be24b26e896382e5a47c2a0bceb8a36f 100644 --- a/hw/virtio/vhost-user-fs.c +++ b/hw/virtio/vhost-user-fs.c @@ -373,11 +373,11 @@ static const VMStateDescription vuf_backend_vmstate; static const VMStateDescription vuf_vmstate = { .name = "vhost-user-fs", .version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vuf_backend_vmstate, NULL, } @@ -389,7 +389,7 @@ static const VMStateDescription vuf_backend_vmstate = { .needed = vuf_is_internal_migration, .pre_load = vuf_check_migration_support, 
.pre_save = vuf_check_migration_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "back-end", .info = &(const VMStateInfo) { diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index efc54cd3fb1f745fa44aac1d6e1d594e52c05839..24ac1a22c83140b07d3c47eeee196361db8a2953 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -129,6 +129,14 @@ static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VHostUserRNG *rng = VHOST_USER_RNG(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. + */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } + vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); } @@ -136,6 +144,14 @@ static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx) { VHostUserRNG *rng = VHOST_USER_RNG(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. + */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } + return vhost_virtqueue_pending(&rng->vhost_dev, idx); } diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 819b2d811af4247e52f7fd9f109e844218836d3c..ddae494ca8e8154ce03b88bc781fe9f1e639aceb 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -86,14 +86,14 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, * The caller must set asid = 0 if the device does not support asid. * This is not an ABI break since it is set to 0 by the initializer anyway. */ -int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size, void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; int ret = 0; - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; @@ -101,7 +101,7 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW; msg.iotlb.type = VHOST_IOTLB_UPDATE; - trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova, + trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova, msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type); @@ -118,20 +118,20 @@ int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, * The caller must set asid = 0 if the device does not support asid. * This is not an ABI break since it is set to 0 by the initializer anyway. 
*/ -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size) { struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; int ret = 0; - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; msg.iotlb.type = VHOST_IOTLB_INVALIDATE; - trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova, + trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova, msg.iotlb.size, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { @@ -143,56 +143,55 @@ int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, return ret; } -static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v) +static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s) { - int fd = v->device_fd; + int fd = s->device_fd; struct vhost_msg_v2 msg = { - .type = v->msg_type, + .type = VHOST_IOTLB_MSG_V2, .iotlb.type = VHOST_IOTLB_BATCH_BEGIN, }; - trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type); + trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } } -static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v) +static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s) { - if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && - !v->iotlb_batch_begin_sent) { - vhost_vdpa_listener_begin_batch(v); + if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) && + !s->iotlb_batch_begin_sent) { + vhost_vdpa_listener_begin_batch(s); } - v->iotlb_batch_begin_sent = true; + s->iotlb_batch_begin_sent = true; } static void vhost_vdpa_listener_commit(MemoryListener *listener) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); - struct vhost_dev *dev = v->dev; + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vhost_msg_v2 msg = {}; - int fd = v->device_fd; + int fd = s->device_fd; - if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { + if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) { return; } - if (!v->iotlb_batch_begin_sent) { + if (!s->iotlb_batch_begin_sent) { return; } - msg.type = v->msg_type; + msg.type = VHOST_IOTLB_MSG_V2; msg.iotlb.type = VHOST_IOTLB_BATCH_END; - trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type); + trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); } - v->iotlb_batch_begin_sent = false; + s->iotlb_batch_begin_sent = false; } static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) @@ -200,7 +199,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n); hwaddr iova = iotlb->iova + iommu->iommu_offset; - struct vhost_vdpa *v = iommu->dev; + VhostVDPAShared *s = iommu->dev_shared; void *vaddr; int ret; Int128 llend; @@ -213,10 +212,10 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) RCU_READ_LOCK_GUARD(); /* check if RAM section out of device range */ llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova)); - if (int128_gt(llend, 
int128_make64(v->iova_range.last))) { + if (int128_gt(llend, int128_make64(s->iova_range.last))) { error_report("RAM section out of device range (max=0x%" PRIx64 ", end addr=0x%" PRIx64 ")", - v->iova_range.last, int128_get64(llend)); + s->iova_range.last, int128_get64(llend)); return; } @@ -226,20 +225,20 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) { return; } - ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, iotlb->addr_mask + 1, vaddr, read_only); if (ret) { error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ", %p) = %d (%m)", - v, iova, iotlb->addr_mask + 1, vaddr, ret); + s, iova, iotlb->addr_mask + 1, vaddr, ret); } } else { - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, iotlb->addr_mask + 1); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, iotlb->addr_mask + 1, ret); + s, iova, iotlb->addr_mask + 1, ret); } } } @@ -247,7 +246,7 @@ static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) static void vhost_vdpa_iommu_region_add(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vdpa_iommu *iommu; Int128 end; @@ -271,7 +270,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, iommu_idx); iommu->iommu_offset = section->offset_within_address_space - section->offset_within_region; - iommu->dev = v; + iommu->dev_shared = s; ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL); if (ret) { @@ -279,7 +278,7 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, return; } - QLIST_INSERT_HEAD(&v->iommu_list, iommu, iommu_next); + QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next); memory_region_iommu_replay(iommu->iommu_mr, &iommu->n); return; @@ -288,11 +287,11 @@ static void vhost_vdpa_iommu_region_add(MemoryListener *listener, static void vhost_vdpa_iommu_region_del(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); struct vdpa_iommu *iommu; - QLIST_FOREACH(iommu, &v->iommu_list, iommu_next) + QLIST_FOREACH(iommu, &s->iommu_list, iommu_next) { if (MEMORY_REGION(iommu->iommu_mr) == section->mr && iommu->n.start == section->offset_within_region) { @@ -308,7 +307,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { DMAMap mem_region = {}; - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); hwaddr iova; Int128 llend, llsize; void *vaddr; @@ -316,8 +315,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, int page_size = qemu_target_page_size(); int page_mask = -page_size; - if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, - v->iova_range.last, page_mask)) { + if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, + s->iova_range.last, page_mask)) { return; } if (memory_region_is_iommu(section->mr)) { @@ -327,7 +326,7 @@ static void 
vhost_vdpa_listener_region_add(MemoryListener *listener, if (unlikely((section->offset_within_address_space & ~page_mask) != (section->offset_within_region & ~page_mask))) { - trace_vhost_vdpa_listener_region_add_unaligned(v, section->mr->name, + trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name, section->offset_within_address_space & ~page_mask, section->offset_within_region & ~page_mask); return; @@ -347,18 +346,18 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, section->offset_within_region + (iova - section->offset_within_address_space); - trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend), + trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend), vaddr, section->readonly); llsize = int128_sub(llend, int128_make64(iova)); - if (v->shadow_data) { + if (s->shadow_data) { int r; mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr, mem_region.size = int128_get64(llsize) - 1, mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), - r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region); + r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region); if (unlikely(r != IOVA_OK)) { error_report("Can't allocate a mapping (%d)", r); goto fail; @@ -367,8 +366,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, iova = mem_region.iova; } - vhost_vdpa_iotlb_batch_begin_once(v); - ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + vhost_vdpa_iotlb_batch_begin_once(s); + ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize), vaddr, section->readonly); if (ret) { error_report("vhost vdpa map fail!"); @@ -378,8 +377,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, return; fail_map: - if (v->shadow_data) { - vhost_iova_tree_remove(v->iova_tree, mem_region); + if (s->shadow_data) { + vhost_iova_tree_remove(s->iova_tree, mem_region); } fail: @@ -396,15 +395,15 @@ fail: static void vhost_vdpa_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); + VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener); hwaddr iova; Int128 llend, llsize; int ret; int page_size = qemu_target_page_size(); int page_mask = -page_size; - if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, - v->iova_range.last, page_mask)) { + if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first, + s->iova_range.last, page_mask)) { return; } if (memory_region_is_iommu(section->mr)) { @@ -413,7 +412,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, if (unlikely((section->offset_within_address_space & ~page_mask) != (section->offset_within_region & ~page_mask))) { - trace_vhost_vdpa_listener_region_del_unaligned(v, section->mr->name, + trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name, section->offset_within_address_space & ~page_mask, section->offset_within_region & ~page_mask); return; @@ -422,7 +421,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, iova = ROUND_UP(section->offset_within_address_space, page_size); llend = vhost_vdpa_section_end(section, page_mask); - trace_vhost_vdpa_listener_region_del(v, iova, + trace_vhost_vdpa_listener_region_del(s, iova, int128_get64(int128_sub(llend, int128_one()))); if (int128_ge(int128_make64(iova), llend)) { @@ -431,7 +430,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, llsize = int128_sub(llend, 
int128_make64(iova)); - if (v->shadow_data) { + if (s->shadow_data) { const DMAMap *result; const void *vaddr = memory_region_get_ram_ptr(section->mr) + section->offset_within_region + @@ -441,37 +440,37 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, .size = int128_get64(llsize) - 1, }; - result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region); + result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region); if (!result) { /* The memory listener map wasn't mapped */ return; } iova = result->iova; - vhost_iova_tree_remove(v->iova_tree, *result); + vhost_iova_tree_remove(s->iova_tree, *result); } - vhost_vdpa_iotlb_batch_begin_once(v); + vhost_vdpa_iotlb_batch_begin_once(s); /* * The unmap ioctl doesn't accept a full 64-bit. need to check it */ if (int128_eq(llsize, int128_2_64())) { llsize = int128_rshift(llsize, 1); - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, int128_get64(llsize), ret); + s, iova, int128_get64(llsize), ret); } iova += int128_get64(llsize); } - ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova, int128_get64(llsize)); if (ret) { error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", " "0x%" HWADDR_PRIx ") = %d (%m)", - v, iova, int128_get64(llsize), ret); + s, iova, int128_get64(llsize), ret); } memory_region_unref(section->mr); @@ -492,7 +491,7 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, void *arg) { struct vhost_vdpa *v = dev->opaque; - int fd = v->device_fd; + int fd = v->shared->device_fd; int ret; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); @@ -511,6 +510,10 @@ static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) if (ret < 0) { return ret; } + if ((s & status) == status) { + /* Don't set bits already set */ + return 0; + } s |= status; @@ -579,16 +582,14 @@ static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v) static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) { - struct vhost_vdpa *v; + struct vhost_vdpa *v = opaque; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); - trace_vhost_vdpa_init(dev, opaque); + trace_vhost_vdpa_init(dev, v->shared, opaque); int ret; - v = opaque; v->dev = dev; dev->opaque = opaque ; - v->listener = vhost_vdpa_memory_listener; - v->msg_type = VHOST_IOTLB_MSG_V2; + v->shared->listener = vhost_vdpa_memory_listener; vhost_vdpa_init_svq(dev, v); error_propagate(&dev->migration_blocker, v->migration_blocker); @@ -651,7 +652,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) struct vhost_vdpa *v = dev->opaque; VirtIODevice *vdev = dev->vdev; VhostVDPAHostNotifier *n; - int fd = v->device_fd; + int fd = v->shared->device_fd; void *addr; char *name; @@ -748,10 +749,10 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) trace_vhost_vdpa_cleanup(dev, v); if (vhost_vdpa_first_dev(dev)) { ram_block_discard_disable(false); + memory_listener_unregister(&v->shared->listener); } vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); - memory_listener_unregister(&v->listener); vhost_vdpa_svq_cleanup(dev); dev->opaque = NULL; @@ -828,6 +829,8 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev, static int vhost_vdpa_set_backend_cap(struct vhost_dev 
*dev) { + struct vhost_vdpa *v = dev->opaque; + uint64_t features; uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH | @@ -849,6 +852,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) } dev->backend_cap = features; + v->shared->backend_cap = features; return 0; } @@ -1059,7 +1063,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) const DMAMap needle = { .translated_addr = addr, }; - const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle); + const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree, + &needle); hwaddr size; int r; @@ -1069,13 +1074,14 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) } size = ROUND_UP(result->size, qemu_real_host_page_size()); - r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size); + r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova, + size); if (unlikely(r < 0)) { error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); return; } - vhost_iova_tree_remove(v->iova_tree, *result); + vhost_iova_tree_remove(v->shared->iova_tree, *result); } static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, @@ -1103,19 +1109,19 @@ static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, { int r; - r = vhost_iova_tree_map_alloc(v->iova_tree, needle); + r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle); if (unlikely(r != IOVA_OK)) { error_setg(errp, "Cannot allocate iova (%d)", r); return false; } - r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova, + r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova, needle->size + 1, (void *)(uintptr_t)needle->translated_addr, needle->perm == IOMMU_RO); if (unlikely(r != 0)) { error_setg_errno(errp, -r, "Cannot map region to device"); - vhost_iova_tree_remove(v->iova_tree, *needle); + vhost_iova_tree_remove(v->shared->iova_tree, *needle); } return r == 0; @@ -1216,7 +1222,7 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) goto err; } - vhost_svq_start(svq, dev->vdev, vq, v->iova_tree); + vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree); ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err); if (unlikely(!ok)) { goto err_map; @@ -1279,7 +1285,7 @@ static void vhost_vdpa_suspend(struct vhost_dev *dev) if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) { trace_vhost_vdpa_suspend(dev); - r = ioctl(v->device_fd, VHOST_VDPA_SUSPEND); + r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND); if (unlikely(r)) { error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno); } else { @@ -1319,7 +1325,7 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) "IOMMU and try again"); return -1; } - memory_listener_register(&v->listener, dev->vdev->dma_as); + memory_listener_register(&v->shared->listener, dev->vdev->dma_as); return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } @@ -1338,7 +1344,7 @@ static void vhost_vdpa_reset_status(struct vhost_dev *dev) vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); - memory_listener_unregister(&v->listener); + memory_listener_unregister(&v->shared->listener); } static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index aa16d584eed391aaa189e31eda7dfa088e81c296..d5ca0b5a1055207b8f958933c7091dad98394c1d 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ 
-111,7 +111,7 @@ static const VMStateDescription vmstate_virtio_vhost_vsock = { .name = "virtio-vhost_vsock", .minimum_version_id = VHOST_VSOCK_SAVEVM_VERSION, .version_id = VHOST_VSOCK_SAVEVM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-acpi.c b/hw/virtio/virtio-acpi.c new file mode 100644 index 0000000000000000000000000000000000000000..e18cb38bdbe4bb201dd90b54660e854872ace3f2 --- /dev/null +++ b/hw/virtio/virtio-acpi.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * virtio ACPI Support + * + */ + +#include "hw/virtio/virtio-acpi.h" +#include "hw/acpi/aml-build.h" + +void virtio_acpi_dsdt_add(Aml *scope, const hwaddr base, const hwaddr size, + uint32_t mmio_irq, long int start_index, int num) +{ + hwaddr virtio_base = base; + uint32_t irq = mmio_irq; + long int i; + + for (i = start_index; i < start_index + num; i++) { + Aml *dev = aml_device("VR%02u", (unsigned)i); + aml_append(dev, aml_name_decl("_HID", aml_string("LNRO0005"))); + aml_append(dev, aml_name_decl("_UID", aml_int(i))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(virtio_base, size, AML_READ_WRITE)); + aml_append(crs, + aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + virtio_base += size; + irq++; + } +} diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index d004cf29d2663ef34956fb01f72d4d2e447830b1..486fe3da32ee83369c09fbcf3e56c73a2045cab3 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -817,7 +817,7 @@ static const VMStateDescription vmstate_virtio_balloon_free_page_hint = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_free_page_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(free_page_hint_cmd_id, VirtIOBalloon), VMSTATE_UINT32(free_page_hint_status, VirtIOBalloon), VMSTATE_END_OF_LIST() @@ -829,7 +829,7 @@ static const VMStateDescription vmstate_virtio_balloon_page_poison = { .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_page_poison_support, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(poison_val, VirtIOBalloon), VMSTATE_END_OF_LIST() } @@ -840,12 +840,12 @@ static const VMStateDescription vmstate_virtio_balloon_device = { .version_id = 1, .minimum_version_id = 1, .post_load = virtio_balloon_post_load_device, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(num_pages, VirtIOBalloon), VMSTATE_UINT32(actual, VirtIOBalloon), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_balloon_free_page_hint, &vmstate_virtio_balloon_page_poison, NULL @@ -996,7 +996,7 @@ static const VMStateDescription vmstate_virtio_balloon = { .name = "virtio-balloon", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 0e2cc8d5a890c021c9b900186195f37d093c54f6..fe1313f2ad7bff0322eba72a94d5c4d493b0824f 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -1122,7 +1122,7 @@ static const VMStateDescription vmstate_virtio_crypto = { 
.unmigratable = 1, .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION, .version_id = VIRTIO_CRYPTO_VM_VERSION, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 9d463efc52a7cf69cfd1d256ecaf907e8068a467..8a4bd933c627b49b88c53566b589269f0806db8c 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -1399,7 +1399,7 @@ static void virtio_iommu_instance_init(Object *obj) .name = "interval", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(low, VirtIOIOMMUInterval), \ VMSTATE_UINT64(high, VirtIOIOMMUInterval), \ VMSTATE_END_OF_LIST() \ @@ -1411,7 +1411,7 @@ static void virtio_iommu_instance_init(Object *obj) .name = "mapping", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\ VMSTATE_UINT32(flags, VirtIOIOMMUMapping), \ VMSTATE_END_OF_LIST() \ @@ -1436,7 +1436,7 @@ static const VMStateDescription vmstate_endpoint = { .name = "endpoint", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VirtIOIOMMUEndpoint), VMSTATE_END_OF_LIST() } @@ -1447,7 +1447,7 @@ static const VMStateDescription vmstate_domain = { .version_id = 2, .minimum_version_id = 2, .pre_load = domain_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, VirtIOIOMMUDomain), VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1, vmstate_interval_mapping, @@ -1499,7 +1499,7 @@ static const VMStateDescription vmstate_virtio_iommu_device = { .minimum_version_id = 2, .version_id = 2, .post_load = iommu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2, &vmstate_domain, VirtIOIOMMUDomain), VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2), @@ -1512,7 +1512,7 @@ static const VMStateDescription vmstate_virtio_iommu = { .minimum_version_id = 2, .priority = MIG_PRI_IOMMU, .version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index 75ee38aa46b9d3de63626329ab2ec0f6bb5f18d5..99ab989852b0a60fcab9f2a25509bf3d21096d4d 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -605,8 +605,7 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err); - if (local_err) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err)) { static bool warned; /* @@ -1249,8 +1248,7 @@ static int virtio_mem_prealloc_range_cb(VirtIOMEM *vmem, void *arg, int fd = memory_region_get_fd(&vmem->memdev->mr); Error *local_err = NULL; - qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err); - if (local_err) { + if (!qemu_prealloc_mem(fd, area, size, 1, NULL, &local_err)) { error_report_err(local_err); return -ENOMEM; } @@ -1370,7 +1368,7 @@ static const VMStateDescription vmstate_virtio_mem_sanity_checks = { .name = "virtio-mem-device/sanity-checks", .pre_save = virtio_mem_mig_sanity_checks_pre_save, .post_load = virtio_mem_mig_sanity_checks_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(addr, 
VirtIOMEMMigSanityChecks), VMSTATE_UINT64(region_size, VirtIOMEMMigSanityChecks), VMSTATE_UINT64(block_size, VirtIOMEMMigSanityChecks), @@ -1393,7 +1391,7 @@ static const VMStateDescription vmstate_virtio_mem_device = { .version_id = 1, .priority = MIG_PRI_VIRTIO_MEM, .post_load = virtio_mem_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP_TEST(VirtIOMEM, virtio_mem_vmstate_field_exists, VirtIOMEMMigSanityChecks, vmstate_virtio_mem_sanity_checks), @@ -1423,7 +1421,7 @@ static const VMStateDescription vmstate_virtio_mem_device_early = { .version_id = 1, .early_setup = true, .post_load = virtio_mem_post_load_early, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(VirtIOMEM, VirtIOMEMMigSanityChecks, vmstate_virtio_mem_sanity_checks), VMSTATE_UINT64(size, VirtIOMEM), @@ -1436,7 +1434,7 @@ static const VMStateDescription vmstate_virtio_mem = { .name = "virtio-mem", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 22f15e1e02f1f56df43f28a19b2d18152ec4c234..22f9fbcf5a444f3abe9fd0ca900c4aaf07343da7 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -565,7 +565,7 @@ static const VMStateDescription vmstate_virtio_mmio_queue_state = { .name = "virtio_mmio/queue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(num, VirtIOMMIOQueue), VMSTATE_BOOL(enabled, VirtIOMMIOQueue), VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2), @@ -579,7 +579,7 @@ static const VMStateDescription vmstate_virtio_mmio_state_sub = { .name = "virtio_mmio/state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2), VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0, vmstate_virtio_mmio_queue_state, @@ -592,10 +592,10 @@ static const VMStateDescription vmstate_virtio_mmio = { .name = "virtio_mmio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_mmio_state_sub, NULL } diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index e4338795423d56b13bd67fa12fc5c808525aef69..1a7039fb0c68540b98f9319fab867f363c2dc23a 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -97,7 +97,7 @@ static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { .name = "virtio_pci/modern_queue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(num, VirtIOPCIQueue), VMSTATE_UNUSED(1), /* enabled was stored as be16 */ VMSTATE_BOOL(enabled, VirtIOPCIQueue), @@ -120,7 +120,7 @@ static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_pci_modern_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(dfselect, VirtIOPCIProxy), VMSTATE_UINT32(gfselect, VirtIOPCIProxy), VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), @@ -135,10 +135,10 @@ static const VMStateDescription vmstate_virtio_pci = { .name = "virtio_pci", .version_id = 1, 
.minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_pci_modern_state_sub, NULL } diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index 7e12fc03bfcca8bdc7e34ec34e3cb668d3b35ca3..f74efffef7e77be1204470bb1dfa9c0cfd3835cf 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -242,7 +242,7 @@ static const VMStateDescription vmstate_virtio_rng = { .name = "virtio-rng", .minimum_version_id = 1, .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VIRTIO_DEVICE, VMSTATE_END_OF_LIST() }, diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 3a160f86ed02ddaee4a501f4468a20f3ecd696f2..7549094154c7d5ad5119aff52b526bb13587e5de 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -2594,7 +2594,7 @@ static const VMStateDescription vmstate_virtqueue = { .name = "virtqueue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vring.avail, struct VirtQueue), VMSTATE_UINT64(vring.used, struct VirtQueue), VMSTATE_END_OF_LIST() @@ -2605,7 +2605,7 @@ static const VMStateDescription vmstate_packed_virtqueue = { .name = "packed_virtqueue_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(last_avail_idx, struct VirtQueue), VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue), VMSTATE_UINT16(used_idx, struct VirtQueue), @@ -2620,7 +2620,7 @@ static const VMStateDescription vmstate_virtio_virtqueues = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_virtqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue), VMSTATE_END_OF_LIST() @@ -2632,7 +2632,7 @@ static const VMStateDescription vmstate_virtio_packed_virtqueues = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_packed_virtqueue_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue), VMSTATE_END_OF_LIST() @@ -2643,7 +2643,7 @@ static const VMStateDescription vmstate_ringsize = { .name = "ringsize_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(vring.num_default, struct VirtQueue), VMSTATE_END_OF_LIST() } @@ -2654,7 +2654,7 @@ static const VMStateDescription vmstate_virtio_ringsize = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_ringsize_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice, VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue), VMSTATE_END_OF_LIST() @@ -2697,7 +2697,7 @@ static const VMStateDescription vmstate_virtio_extra_state = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_extra_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { { .name = "extra_state", .version_id = 0, @@ -2716,7 +2716,7 @@ static const VMStateDescription vmstate_virtio_device_endian = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_device_endian_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) 
{ VMSTATE_UINT8(device_endian, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2727,7 +2727,7 @@ static const VMStateDescription vmstate_virtio_64bit_features = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_64bit_features_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(guest_features, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2738,7 +2738,7 @@ static const VMStateDescription vmstate_virtio_broken = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_broken_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(broken, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2749,7 +2749,7 @@ static const VMStateDescription vmstate_virtio_started = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_started_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(started, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2760,7 +2760,7 @@ static const VMStateDescription vmstate_virtio_disabled = { .version_id = 1, .minimum_version_id = 1, .needed = &virtio_disabled_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(disabled, VirtIODevice), VMSTATE_END_OF_LIST() } @@ -2770,10 +2770,10 @@ static const VMStateDescription vmstate_virtio = { .name = "virtio", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_virtio_device_endian, &vmstate_virtio_64bit_features, &vmstate_virtio_virtqueues, diff --git a/hw/watchdog/allwinner-wdt.c b/hw/watchdog/allwinner-wdt.c index 6205765efec7c172016fd87fbd1079e704da31a9..d35711c7c5b9bb324f11cf9ca133f01ce6e33d2e 100644 --- a/hw/watchdog/allwinner-wdt.c +++ b/hw/watchdog/allwinner-wdt.c @@ -313,7 +313,7 @@ static const VMStateDescription allwinner_wdt_vmstate = { .name = "allwinner-wdt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, AwWdtState), VMSTATE_UINT32_ARRAY(regs, AwWdtState, AW_WDT_REGS_NUM), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/cmsdk-apb-watchdog.c b/hw/watchdog/cmsdk-apb-watchdog.c index 5a2cd46eb7664a3a3ae6d39efeaea6f3df90867a..3091e5c3d54a824bfe024c90aeeea988721bbf22 100644 --- a/hw/watchdog/cmsdk-apb-watchdog.c +++ b/hw/watchdog/cmsdk-apb-watchdog.c @@ -361,7 +361,7 @@ static const VMStateDescription cmsdk_apb_watchdog_vmstate = { .name = "cmsdk-apb-watchdog", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CLOCK(wdogclk, CMSDKAPBWatchdog), VMSTATE_PTIMER(timer, CMSDKAPBWatchdog), VMSTATE_UINT32(control, CMSDKAPBWatchdog), diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c index 7aa57a8c514f91493adf61de438c591249e60842..96895d76369d5821986f040910ad0c99934d6968 100644 --- a/hw/watchdog/sbsa_gwdt.c +++ b/hw/watchdog/sbsa_gwdt.c @@ -28,7 +28,7 @@ static const VMStateDescription vmstate_sbsa_gwdt = { .name = "sbsa-gwdt", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, SBSA_GWDTState), VMSTATE_UINT32(wcs, SBSA_GWDTState), VMSTATE_UINT32(worl, SBSA_GWDTState), diff --git a/hw/watchdog/spapr_watchdog.c b/hw/watchdog/spapr_watchdog.c index 55ff1f03c1da7e1382815369acbe48e771b0ec6b..2bb1d3c5325585b9af160b8c9e1d3b43d2606589 100644 --- 
a/hw/watchdog/spapr_watchdog.c +++ b/hw/watchdog/spapr_watchdog.c @@ -226,7 +226,7 @@ static const VMStateDescription vmstate_wdt = { .version_id = 1, .minimum_version_id = 1, .needed = watchdog_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER(timer, SpaprWatchdog), VMSTATE_UINT8(action, SpaprWatchdog), VMSTATE_UINT8(leave_others, SpaprWatchdog), diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 273a49d360189acbfd3682f95fa0319a03473664..d70b656f8e75332b039cecc21db9927ba0524678 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_aspeed_wdt = { .name = "vmstate_aspeed_wdt", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, AspeedWDTState), VMSTATE_UINT32_ARRAY(regs, AspeedWDTState, ASPEED_WDT_REGS_MAX), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c index 76d89fbf785245c7d6d77a7f92ee28d0561648f4..1b73b16fb358a27dbe95af5d14377a23a645ea2f 100644 --- a/hw/watchdog/wdt_diag288.c +++ b/hw/watchdog/wdt_diag288.c @@ -23,7 +23,7 @@ static const VMStateDescription vmstate_diag288 = { .name = "vmstate_diag288", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, DIAG288State), VMSTATE_BOOL(enabled, DIAG288State), VMSTATE_END_OF_LIST() diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c index 54c167cd35832570667e1213c7553e0099d133bd..8bce0509cd5823d41508de054caf9bf9b6df7936 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -418,7 +418,7 @@ static const VMStateDescription vmstate_i6300esb = { */ .version_id = 10000, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PCI_DEVICE(dev, I6300State), VMSTATE_INT32(reboot_enabled, I6300State), VMSTATE_INT32(clock_scale, I6300State), diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index a1750a4957f02a3de173839b43a006762b8bbb9e..eea8da60596cbac218b9647f2c4e46b8ce0ad47f 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -95,7 +95,7 @@ static const VMStateDescription vmstate_ib700 = { .name = "ib700_wdt", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(timer, IB700State), VMSTATE_END_OF_LIST() } diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index 891d7beb2a18c80763a532773b54a9316647d34b..6452fc4721df04e8572ac75128cc1b4dc456acfd 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -234,7 +234,7 @@ static const MemoryRegionOps imx2_wdt_ops = { static const VMStateDescription vmstate_imx2_wdt = { .name = "imx2.wdt", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_PTIMER(timer, IMX2WdtState), VMSTATE_PTIMER(itimer, IMX2WdtState), VMSTATE_BOOL(wicr_locked, IMX2WdtState), diff --git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c index 565dc39c8f622c05db37eea40f4391dc640d6ff6..47e6cb1db3ab19860653ffa8e2a9cd65b26bb986 100644 --- a/hw/xen/xen-hvm-common.c +++ b/hw/xen/xen-hvm-common.c @@ -623,7 +623,7 @@ void xen_hvm_change_state_handler(void *opaque, bool running, xen_set_ioreq_server_state(xen_domid, state->ioservid, - (rstate == RUN_STATE_RUNNING)); + running); } void xen_exit_notifier(Notifier *n, void *data) diff --git a/include/block/aio-wait.h 
b/include/block/aio-wait.h index 5449b6d7428df3e9604c880b630c6f0565bb1dc2..cf5e8bde1ca298c6254888295758c6206061eac0 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -63,9 +63,6 @@ extern AioWait global_aio_wait; * @ctx: the aio context, or NULL if multiple aio contexts (for which the * caller does not hold a lock) are involved in the polling condition. * @cond: wait while this conditional expression is true - * @unlock: whether to unlock and then lock again @ctx. This applies - * only when waiting for another AioContext from the main loop. - * Otherwise it's ignored. * * Wait while a condition is true. Use this to implement synchronous * operations that require event loop activity. @@ -78,7 +75,7 @@ extern AioWait global_aio_wait; * wait on conditions between two IOThreads since that could lead to deadlock, * go via the main loop instead. */ -#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond) ({ \ bool waited_ = false; \ AioWait *wait_ = &global_aio_wait; \ AioContext *ctx_ = (ctx); \ @@ -95,13 +92,7 @@ extern AioWait global_aio_wait; assert(qemu_get_current_aio_context() == \ qemu_get_aio_context()); \ while ((cond)) { \ - if (unlock && ctx_) { \ - aio_context_release(ctx_); \ - } \ aio_poll(qemu_get_aio_context(), true); \ - if (unlock && ctx_) { \ - aio_context_acquire(ctx_); \ - } \ waited_ = true; \ } \ } \ @@ -109,10 +100,11 @@ extern AioWait global_aio_wait; waited_; }) #define AIO_WAIT_WHILE(ctx, cond) \ - AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) + AIO_WAIT_WHILE_INTERNAL(ctx, cond) +/* TODO replace this with AIO_WAIT_WHILE() in a future patch */ #define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ - AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) + AIO_WAIT_WHILE_INTERNAL(ctx, cond) /** * aio_wait_kick: @@ -151,7 +143,7 @@ static inline bool in_aio_context_home_thread(AioContext *ctx) } if (ctx == qemu_get_aio_context()) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } else { return false; } diff --git a/include/block/aio.h b/include/block/aio.h index f08b3580778da8bebb6265985cc7fc4eceecf33e..c802a392e5edb4e499406504a1335151aa7af468 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -278,23 +278,6 @@ void aio_context_ref(AioContext *ctx); */ void aio_context_unref(AioContext *ctx); -/* Take ownership of the AioContext. If the AioContext will be shared between - * threads, and a thread does not want to be interrupted, it will have to - * take ownership around calls to aio_poll(). Otherwise, aio_poll() - * automatically takes care of calling aio_context_acquire and - * aio_context_release. - * - * Note that this is separate from bdrv_drained_begin/bdrv_drained_end. A - * thread still has to call those to avoid being interrupted by the guest. - * - * Bottom halves, timers and callbacks can be created or removed without - * acquiring the AioContext. - */ -void aio_context_acquire(AioContext *ctx); - -/* Relinquish ownership of the AioContext. */ -void aio_context_release(AioContext *ctx); - /** * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will * run only once and as soon as possible. 
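Editorial aside, not part of the patch: a minimal sketch of how a main-loop caller uses AIO_WAIT_WHILE() once the @unlock parameter and aio_context_acquire()/aio_context_release() are gone. The MyRequest structure and its "done" flag are hypothetical; only AIO_WAIT_WHILE(), qatomic_read() and the "main loop holds the BQL" requirement come from the hunks above.

/* Sketch only; would need "block/aio-wait.h" and "qemu/atomic.h". */
typedef struct MyRequest {
    AioContext *ctx;   /* home context of the request, may be NULL */
    bool done;         /* set by a completion callback elsewhere */
} MyRequest;

static void wait_for_request(MyRequest *req)
{
    /*
     * Called from the main loop with the BQL held. With this patch the
     * macro no longer drops and re-acquires any AioContext lock around
     * aio_poll(); it just polls until the condition clears.
     */
    AIO_WAIT_WHILE(req->ctx, !qatomic_read(&req->done));
}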
@@ -716,8 +699,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, * @max_batch: maximum number of requests in a batch, 0 means that the * engine will use its default */ -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp); +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch); /** * aio_context_set_thread_pool_params: diff --git a/include/block/block-common.h b/include/block/block-common.h index d7599564db732059617ece7f4bccfe47a54866d3..a846023a09815b1fc4833789fb0f51d30fec3151 100644 --- a/include/block/block-common.h +++ b/include/block/block-common.h @@ -70,9 +70,6 @@ * automatically takes the graph rdlock when calling the wrapped function. In * the same way, no_co_wrapper_bdrv_wrlock functions automatically take the * graph wrlock. - * - * If the first parameter of the function is a BlockDriverState, BdrvChild or - * BlockBackend pointer, the AioContext lock for it is taken in the wrapper. */ #define no_co_wrapper #define no_co_wrapper_bdrv_rdlock diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h index 6b21fbc73f779003a050df88dea20f32a54e4bff..4ec0b217f059944241461a4863d8eb502c5e4fce 100644 --- a/include/block/block-global-state.h +++ b/include/block/block-global-state.h @@ -31,11 +31,10 @@ /* * Global state (GS) API. These functions run under the BQL. * - * If a function modifies the graph, it also uses drain and/or - * aio_context_acquire/release to be sure it has unique access. - * aio_context locking is needed together with BQL because of - * the thread-safe I/O API that concurrently runs and accesses - * the graph without the BQL. + * If a function modifies the graph, it also uses the graph lock to be sure it + * has unique access. The graph lock is needed together with BQL because of the + * thread-safe I/O API that concurrently runs and accesses the graph without + * the BQL. * * It is important to note that not all of these functions are * necessarily limited to running under the BQL, but they would @@ -268,20 +267,6 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag); int bdrv_debug_resume(BlockDriverState *bs, const char *tag); bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag); -/** - * Locks the AioContext of @bs if it's not the current AioContext. This avoids - * double locking which could lead to deadlocks: This is a coroutine_fn, so we - * know we already own the lock of the current AioContext. - * - * May only be called in the main thread. - */ -void coroutine_fn bdrv_co_lock(BlockDriverState *bs); - -/** - * Unlocks the AioContext of @bs if it's not the current AioContext. - */ -void coroutine_fn bdrv_co_unlock(BlockDriverState *bs); - bool bdrv_child_change_aio_context(BdrvChild *c, AioContext *ctx, GHashTable *visited, Transaction *tran, Error **errp); diff --git a/include/block/block-io.h b/include/block/block-io.h index f8729ccc55ceb733d317a04e7ffc03f953c3b208..b49e0537dd4bf66fce5d4c16450bf0b041ca730b 100644 --- a/include/block/block-io.h +++ b/include/block/block-io.h @@ -31,8 +31,7 @@ /* * I/O API functions. These functions are thread-safe, and therefore - * can run in any thread as long as the thread has called - * aio_context_acquire/release(). + * can run in any thread. * * These functions can only call functions from I/O and Common categories, * but can be invoked by GS, "I/O or GS" and I/O APIs. @@ -333,11 +332,10 @@ bdrv_co_copy_range(BdrvChild *src, int64_t src_offset, * "I/O or GS" API functions. 
These functions can run without * the BQL, but only in one specific iothread/main loop. * - * More specifically, these functions use BDRV_POLL_WHILE(bs), which - * requires the caller to be either in the main thread and hold - * the BlockdriverState (bs) AioContext lock, or directly in the - * home thread that runs the bs AioContext. Calling them from - * another thread in another AioContext would cause deadlocks. + * More specifically, these functions use BDRV_POLL_WHILE(bs), which requires + * the caller to be either in the main thread or directly in the home thread + * that runs the bs AioContext. Calling them from another thread in another + * AioContext would cause deadlocks. * * Therefore, these functions are not proper I/O, because they * can't run in *any* iothreads, but only in a specific one. diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h index 4e31d161c57aeb5f51117973be8ff763d0d6548e..151279d481d031522053f1c3aa9f66aec0204e5f 100644 --- a/include/block/block_int-common.h +++ b/include/block/block_int-common.h @@ -1192,8 +1192,6 @@ struct BlockDriverState { /* The error object in use for blocking operations on backing_hd */ Error *backing_blocker; - /* Protected by AioContext lock */ - /* * If we are reading a disk image, give its size in sectors. * Generally read-only; it is written to by load_snapshot and diff --git a/include/block/blockjob.h b/include/block/blockjob.h index e594c10d2319982ef4bfbf584f08be3aada39dc0..7061ab7201a78d09ebd6a55703f2d694fabfabc2 100644 --- a/include/block/blockjob.h +++ b/include/block/blockjob.h @@ -54,7 +54,7 @@ typedef struct BlockJob { /** * Speed that was set with @block_job_set_speed. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ int64_t speed; @@ -66,7 +66,7 @@ typedef struct BlockJob { /** * Block other operations when block job is running. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ Error *blocker; @@ -89,7 +89,7 @@ typedef struct BlockJob { /** * BlockDriverStates that are involved in this block job. - * Always modified and read under QEMU global mutex (GLOBAL_STATE_CODE). + * Always modified and read under the BQL (GLOBAL_STATE_CODE). */ GSList *nodes; } BlockJob; diff --git a/include/block/graph-lock.h b/include/block/graph-lock.h index 22b5db1ed962f9bf5dd4d8a3d661d7b5dcde7b0e..d7545e82d068946933e4f40235ebe5b2faffdaf5 100644 --- a/include/block/graph-lock.h +++ b/include/block/graph-lock.h @@ -110,34 +110,17 @@ void unregister_aiocontext(AioContext *ctx); * * The wrlock can only be taken from the main loop, with BQL held, as only the * main loop is allowed to modify the graph. - * - * If @bs is non-NULL, its AioContext is temporarily released. - * - * This function polls. Callers must not hold the lock of any AioContext other - * than the current one and the one of @bs. */ void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA -bdrv_graph_wrlock(BlockDriverState *bs); +bdrv_graph_wrlock(void); /* * bdrv_graph_wrunlock: * Write finished, reset global has_writer to 0 and restart * all readers that are waiting. - * - * If @bs is non-NULL, its AioContext is temporarily released. - */ -void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA -bdrv_graph_wrunlock(BlockDriverState *bs); - -/* - * bdrv_graph_wrunlock_ctx: - * Write finished, reset global has_writer to 0 and restart - * all readers that are waiting. 
- * - * If @ctx is non-NULL, its lock is temporarily released. */ void no_coroutine_fn TSA_RELEASE(graph_lock) TSA_NO_TSA -bdrv_graph_wrunlock_ctx(AioContext *ctx); +bdrv_graph_wrunlock(void); /* * bdrv_graph_co_rdlock: diff --git a/include/block/snapshot.h b/include/block/snapshot.h index d49c5599d9bc6ca6916ad3ee218083bb3661c504..304cc6ea61c4a9d9ac96d572d164b897eee304a9 100644 --- a/include/block/snapshot.h +++ b/include/block/snapshot.h @@ -86,8 +86,6 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs, /* * Group operations. All block drivers are involved. - * These functions will properly handle dataplane (take aio_context_acquire - * when appropriate for appropriate block drivers */ bool bdrv_all_can_snapshot(bool has_devices, strList *devices, diff --git a/include/chardev/char-fe.h b/include/chardev/char-fe.h index 0ff6f875116d921c9333de45fa5c9fb5fbfdd67f..ecef1828355121c28a2c597aa2b5b2f16de3e207 100644 --- a/include/chardev/char-fe.h +++ b/include/chardev/char-fe.h @@ -7,8 +7,12 @@ typedef void IOEventHandler(void *opaque, QEMUChrEvent event); typedef int BackendChangeHandler(void *opaque); -/* This is the backend as seen by frontend, the actual backend is - * Chardev */ +/** + * struct CharBackend - back end as seen by front end + * @fe_is_open: the front end is ready for IO + * + * The actual backend is Chardev + */ struct CharBackend { Chardev *chr; IOEventHandler *chr_event; @@ -17,7 +21,7 @@ struct CharBackend { BackendChangeHandler *chr_be_change; void *opaque; int tag; - int fe_open; + bool fe_is_open; }; /** @@ -156,12 +160,13 @@ void qemu_chr_fe_set_echo(CharBackend *be, bool echo); /** * qemu_chr_fe_set_open: + * @be: a CharBackend + * @is_open: the front end open status * - * Set character frontend open status. This is an indication that the - * front end is ready (or not) to begin doing I/O. - * Without associated Chardev, do nothing. + * This is an indication that the front end is ready (or not) to begin + * doing I/O. Without associated Chardev, do nothing. */ -void qemu_chr_fe_set_open(CharBackend *be, int fe_open); +void qemu_chr_fe_set_open(CharBackend *be, bool is_open); /** * qemu_chr_fe_printf: diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 41115d8919407109d83b9e633d0aacfa18dd03ac..fef3138d29fcd0b9f5963c7e7b60cc8cd715486b 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -92,7 +92,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name); * * By the time this function returns, the returned pointer is not protected * by RCU anymore. If the caller is not within an RCU critical section and - * does not hold the iothread lock, it must have other means of protecting the + * does not hold the BQL, it must have other means of protecting the * pointer, such as a reference to the memory region that owns the RAMBlock. 
*/ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset, diff --git a/include/exec/memory.h b/include/exec/memory.h index 831f7c996d9da49cdf9884fdeffa32959865cb07..177be23db709d8bab9cebfe6acbae57611073327 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -43,7 +43,7 @@ typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass; DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass, IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION) -#define TYPE_RAM_DISCARD_MANAGER "qemu:ram-discard-manager" +#define TYPE_RAM_DISCARD_MANAGER "ram-discard-manager" typedef struct RamDiscardManagerClass RamDiscardManagerClass; typedef struct RamDiscardManager RamDiscardManager; DECLARE_OBJ_CHECKERS(RamDiscardManager, RamDiscardManagerClass, @@ -1288,8 +1288,10 @@ void memory_region_init_io(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1310,8 +1312,10 @@ void memory_region_init_ram_nomigrate(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1338,8 +1342,10 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_resizeable_ram(MemoryRegion *mr, +bool memory_region_init_resizeable_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1370,8 +1376,10 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1398,8 +1406,10 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, * * Note that this function does not do anything to cause the data in the * RAM memory region to be migrated; that is the responsibility of the caller. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1494,8 +1504,10 @@ void memory_region_init_alias(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. 
*/ -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1517,8 +1529,10 @@ void memory_region_init_rom_nomigrate(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1576,8 +1590,10 @@ void memory_region_init_iommu(void *_iommu_mr, * give the RAM block a unique name for migration purposes. * We should lift this restriction and allow arbitrary Objects. * If you pass a non-NULL non-device @owner then we will assert. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_ram(MemoryRegion *mr, +bool memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1603,8 +1619,10 @@ void memory_region_init_ram(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1634,8 +1652,10 @@ void memory_region_init_rom(MemoryRegion *mr, * must be unique within any device * @size: size of the region. * @errp: pointer to Error*, to store an error if it happens. + * + * Return: true on success, else false setting @errp with error. */ -void memory_region_init_rom_device(MemoryRegion *mr, +bool memory_region_init_rom_device(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1962,7 +1982,7 @@ int memory_region_get_fd(MemoryRegion *mr); * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. * @@ -1979,7 +1999,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset); * * Use with care; by the time this function returns, the returned pointer is * not protected by RCU anymore. If the caller is not within an RCU critical - * section and does not hold the iothread lock, it must have other means of + * section and does not hold the BQL, it must have other means of * protecting the pointer, such as a reference to the region that includes * the incoming ram_addr_t. * diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h index 69c6a53902939a2de7b38284baff5ed4a86daa63..3eb79723c6a8eca52625a95d7d637ae2d22e6c26 100644 --- a/include/exec/ramblock.h +++ b/include/exec/ramblock.h @@ -34,7 +34,7 @@ struct RAMBlock { ram_addr_t max_length; void (*resized)(const char*, uint64_t length, void *host); uint32_t flags; - /* Protected by iothread lock. */ + /* Protected by the BQL. 
*/ char idstr[256]; /* RCU-enabled, writes protected by the ramlist lock */ QLIST_ENTRY(RAMBlock) next; diff --git a/include/hw/arm/armv7m.h b/include/hw/arm/armv7m.h index e2cebbd15c024fa08127dc7af7a2073a41e6c6c3..5c057ab2ec9e2ddbeacdf8b993bbf4f4c65354b4 100644 --- a/include/hw/arm/armv7m.h +++ b/include/hw/arm/armv7m.h @@ -43,6 +43,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(ARMv7MState, ARMV7M) * a qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET). * + Property "cpu-type": CPU type to instantiate * + Property "num-irq": number of external IRQ lines + * + Property "num-prio-bits": number of priority bits in the NVIC * + Property "memory": MemoryRegion defining the physical address space * that CPU accesses see. (The NVIC, bitbanding and other CPU-internal * devices will be automatically layered on top of this view.) diff --git a/include/hw/arm/stm32l4x5_soc.h b/include/hw/arm/stm32l4x5_soc.h new file mode 100644 index 0000000000000000000000000000000000000000..2fd44a36a9d2af7f35b99a73c50397a02654f5d2 --- /dev/null +++ b/include/hw/arm/stm32l4x5_soc.h @@ -0,0 +1,57 @@ +/* + * STM32L4x5 SoC family + * + * Copyright (c) 2023 Arnaud Minier + * Copyright (c) 2023 Inès Varhol + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * This work is heavily inspired by the stm32f405_soc by Alistair Francis. + * Original code is licensed under the MIT License: + * + * Copyright (c) 2014 Alistair Francis + */ + +/* + * The reference used is the STMicroElectronics RM0351 Reference manual + * for STM32L4x5 and STM32L4x6 advanced Arm ® -based 32-bit MCUs. + * https://www.st.com/en/microcontrollers-microprocessors/stm32l4x5/documentation.html + */ + +#ifndef HW_ARM_STM32L4x5_SOC_H +#define HW_ARM_STM32L4x5_SOC_H + +#include "exec/memory.h" +#include "hw/arm/armv7m.h" +#include "qom/object.h" + +#define TYPE_STM32L4X5_SOC "stm32l4x5-soc" +#define TYPE_STM32L4X5XC_SOC "stm32l4x5xc-soc" +#define TYPE_STM32L4X5XE_SOC "stm32l4x5xe-soc" +#define TYPE_STM32L4X5XG_SOC "stm32l4x5xg-soc" +OBJECT_DECLARE_TYPE(Stm32l4x5SocState, Stm32l4x5SocClass, STM32L4X5_SOC) + +struct Stm32l4x5SocState { + SysBusDevice parent_obj; + + ARMv7MState armv7m; + + MemoryRegion sram1; + MemoryRegion sram2; + MemoryRegion flash; + MemoryRegion flash_alias; + + Clock *sysclk; + Clock *refclk; +}; + +struct Stm32l4x5SocClass { + SysBusDeviceClass parent_class; + + size_t flash_size; +}; + +#endif diff --git a/include/hw/boards.h b/include/hw/boards.h index da85f86efb9185df05a444bd22eeec0557a3e3c8..bcfde8a84d1025fa795e65ca46d8b6877b586c80 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -24,6 +24,12 @@ OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE) extern MachineState *current_machine; +/** + * machine_class_default_cpu_type: Return the machine default CPU type. 
+ * @mc: Machine class + */ +const char *machine_class_default_cpu_type(MachineClass *mc); + void machine_add_audiodev_property(MachineClass *mc); void machine_run_board_init(MachineState *machine, const char *mem_path, Error **errp); bool machine_usb(MachineState *machine); @@ -419,6 +425,9 @@ struct MachineState { } \ type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_8_2[]; +extern const size_t hw_compat_8_2_len; + extern GlobalProperty hw_compat_8_1[]; extern const size_t hw_compat_8_1_len; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index c0c8320413e5f66f3c026ec6b4b89374b9cde7b3..238c02c05ea5189fd3516ca3262cb264f03857aa 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -430,16 +430,13 @@ struct qemu_work_item; * @gdb_regs: Additional GDB registers. * @gdb_num_regs: Number of total registers accessible to GDB. * @gdb_num_g_regs: Number of registers in GDB 'g' packets. - * @next_cpu: Next CPU sharing TB cache. + * @node: QTAILQ of CPUs sharing TB cache. * @opaque: User data. * @mem_io_pc: Host Program Counter at which the memory was accessed. * @accel: Pointer to accelerator specific state. * @kvm_fd: vCPU file descriptor for KVM. * @work_mutex: Lock to prevent multiple access to @work_list. * @work_list: List of pending asynchronous work. - * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes - * to @trace_dstate). - * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). * @plugin_mask: Plugin event bitmap. Modified only via async work. * @ignore_memory_transaction_failures: Cached copy of the MachineState * flag of the same name: allows the board to suppress calling of the @@ -779,6 +776,19 @@ void cpu_reset(CPUState *cpu); */ ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model); +/** + * cpu_model_from_type: + * @typename: The CPU type name + * + * Extract the CPU model name from the CPU type name. The + * CPU type name is either the combination of the CPU model + * name and suffix, or same to the CPU model name. + * + * Returns: CPU model name or NULL if the CPU class doesn't exist + * The user should g_free() the string once no longer needed. + */ +char *cpu_model_from_type(const char *typename); + /** * cpu_create: * @typename: The CPU type. 
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index a10ceeabbfac8fbfa64b2f37aefb34d1065f9a64..916af29f7c0defcc91e0132392422ea4dee556c0 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -210,6 +210,9 @@ void pc_madt_cpu_entry(int uid, const CPUArchIdList *apic_ids, /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_8_2[]; +extern const size_t pc_compat_8_2_len; + extern GlobalProperty pc_compat_8_1[]; extern const size_t pc_compat_8_1_len; diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h index fbdef9a7b3b74ec70cd12878b5a2680d90d4a651..a0a46b888cda26c8cf084a75d690dca54daf40c3 100644 --- a/include/hw/intc/loongarch_extioi.h +++ b/include/hw/intc/loongarch_extioi.h @@ -40,25 +40,29 @@ #define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) #define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) +typedef struct ExtIOICore { + uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT]; + DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS); + qemu_irq parent_irq[LS3A_INTC_IP]; +} ExtIOICore; + #define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI) struct LoongArchExtIOI { SysBusDevice parent_obj; + uint32_t num_cpu; /* hardware state */ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; uint32_t isr[EXTIOI_IRQS / 32]; - uint32_t coreisr[EXTIOI_CPUS][EXTIOI_IRQS_GROUP_COUNT]; uint32_t enable[EXTIOI_IRQS / 32]; uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; uint32_t coremap[EXTIOI_IRQS / 4]; uint32_t sw_pending[EXTIOI_IRQS / 32]; - DECLARE_BITMAP(sw_isr[EXTIOI_CPUS][LS3A_INTC_IP], EXTIOI_IRQS); uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; uint8_t sw_coremap[EXTIOI_IRQS]; - qemu_irq parent_irq[EXTIOI_CPUS][LS3A_INTC_IP]; qemu_irq irq[EXTIOI_IRQS]; - MemoryRegion extioi_iocsr_mem[EXTIOI_CPUS]; + ExtIOICore *cpu; MemoryRegion extioi_system_mem; }; #endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h index 6c6194786e807317a3aa800e94e36ee1bef4652d..1c1e834849e00c6c43c10787d56f01e611928a2b 100644 --- a/include/hw/intc/loongarch_ipi.h +++ b/include/hw/intc/loongarch_ipi.h @@ -47,7 +47,8 @@ struct LoongArchIPI { SysBusDevice parent_obj; MemoryRegion ipi_iocsr_mem; MemoryRegion ipi64_iocsr_mem; - IPICore ipi_core; + uint32_t num_cpu; + IPICore *cpu; }; #endif diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 674f4655e0ee974c215a23234cff2e67fa2d03e1..6ef9a92394d82bb93f2434bddf47d6fff59a1e58 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -25,7 +25,7 @@ #define VIRT_LOWMEM_BASE 0 #define VIRT_LOWMEM_SIZE 0x10000000 -#define VIRT_HIGHMEM_BASE 0x90000000 +#define VIRT_HIGHMEM_BASE 0x80000000 #define VIRT_GED_EVT_ADDR 0x100e0000 #define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) #define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) @@ -50,6 +50,9 @@ struct LoongArchMachineState { DeviceState *platform_bus_dev; PCIBus *pci_bus; PFlashCFI01 *flash; + MemoryRegion system_iocsr; + MemoryRegion iocsr_mem; + AddressSpace as_iocsr; }; #define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt") diff --git a/include/hw/m68k/q800.h b/include/hw/m68k/q800.h index a9661f65f69514f76ae3ad2bcc0bb355402e267b..34365c98608ce64f9fa96d1c513128e5a3c76014 100644 --- a/include/hw/m68k/q800.h +++ b/include/hw/m68k/q800.h @@ -55,6 +55,7 @@ struct Q800MachineState { MOS6522Q800VIA1State via1; MOS6522Q800VIA2State via2; 
dp8393xState dp8393x; + MemoryRegion dp8393x_prom; ESCCState escc; OrIRQState escc_orgate; SysBusESPState esp; diff --git a/include/hw/mips/bios.h b/include/hw/mips/bios.h deleted file mode 100644 index 44acb6815becc383b06bb5ab4290072a13afd961..0000000000000000000000000000000000000000 --- a/include/hw/mips/bios.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef HW_MIPS_BIOS_H -#define HW_MIPS_BIOS_H - -#include "qemu/units.h" -#include "cpu.h" - -#define BIOS_SIZE (4 * MiB) -#if TARGET_BIG_ENDIAN -#define BIOS_FILENAME "mips_bios.bin" -#else -#define BIOS_FILENAME "mipsel_bios.bin" -#endif - -#endif diff --git a/include/hw/misc/imx7_snvs.h b/include/hw/misc/imx7_snvs.h index 14a1d6fe6b03b451c84f3b48c9242c8ae94c1ab6..1272076086a7277ade897c1789fcc18b9912b34e 100644 --- a/include/hw/misc/imx7_snvs.h +++ b/include/hw/misc/imx7_snvs.h @@ -20,7 +20,9 @@ enum IMX7SNVSRegisters { SNVS_LPCR = 0x38, SNVS_LPCR_TOP = BIT(6), - SNVS_LPCR_DP_EN = BIT(5) + SNVS_LPCR_DP_EN = BIT(5), + SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */ + SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */ }; #define TYPE_IMX7_SNVS "imx7.snvs" @@ -31,6 +33,9 @@ struct IMX7SNVSState { SysBusDevice parent_obj; MemoryRegion mmio; + + uint64_t tick_offset; + uint64_t lpcr; }; #endif /* IMX7_SNVS_H */ diff --git a/include/hw/misc/xlnx-versal-cframe-reg.h b/include/hw/misc/xlnx-versal-cframe-reg.h index 0091505246f8fadb3aabec3bd918e0e45609e06b..83f6a077446e29d1b692eaaffb2ba54bdfb97b67 100644 --- a/include/hw/misc/xlnx-versal-cframe-reg.h +++ b/include/hw/misc/xlnx-versal-cframe-reg.h @@ -23,7 +23,7 @@ #include "hw/misc/xlnx-versal-cfu.h" #include "qemu/fifo32.h" -#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx,cframe-reg" +#define TYPE_XLNX_VERSAL_CFRAME_REG "xlnx-cframe-reg" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFrameReg, XLNX_VERSAL_CFRAME_REG) #define TYPE_XLNX_VERSAL_CFRAME_BCAST_REG "xlnx.cframe-bcast-reg" diff --git a/include/hw/misc/xlnx-versal-cfu.h b/include/hw/misc/xlnx-versal-cfu.h index be62bab8c8c2cee2e39de8db2e5881c99d6bab8a..3de3ee49231a3c07d60ce30133a199f90e1e85f6 100644 --- a/include/hw/misc/xlnx-versal-cfu.h +++ b/include/hw/misc/xlnx-versal-cfu.h @@ -22,13 +22,13 @@ #include "hw/misc/xlnx-cfi-if.h" #include "qemu/fifo32.h" -#define TYPE_XLNX_VERSAL_CFU_APB "xlnx,versal-cfu-apb" +#define TYPE_XLNX_VERSAL_CFU_APB "xlnx-versal-cfu-apb" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUAPB, XLNX_VERSAL_CFU_APB) -#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx,versal-cfu-fdro" +#define TYPE_XLNX_VERSAL_CFU_FDRO "xlnx-versal-cfu-fdro" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUFDRO, XLNX_VERSAL_CFU_FDRO) -#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx,versal-cfu-sfr" +#define TYPE_XLNX_VERSAL_CFU_SFR "xlnx-versal-cfu-sfr" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCFUSFR, XLNX_VERSAL_CFU_SFR) REG32(CFU_ISR, 0x0) diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h index 2857f4169a534ae022c9d9feea872f16853c2ed4..dfb8dff197d8026150badb553b15f6e7e46d9e90 100644 --- a/include/hw/misc/xlnx-versal-crl.h +++ b/include/hw/misc/xlnx-versal-crl.h @@ -13,7 +13,7 @@ #include "hw/register.h" #include "target/arm/cpu.h" -#define TYPE_XLNX_VERSAL_CRL "xlnx,versal-crl" +#define TYPE_XLNX_VERSAL_CRL "xlnx-versal-crl" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL) REG32(ERR_CTRL, 0x0) diff --git a/include/hw/nvram/fw_cfg_acpi.h b/include/hw/nvram/fw_cfg_acpi.h new file mode 100644 index 0000000000000000000000000000000000000000..b6553d86fcb0884f72dc7ee65e3f6cb9da3fba93 --- /dev/null +++ 
b/include/hw/nvram/fw_cfg_acpi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ACPI support for fw_cfg + * + */ + +#ifndef FW_CFG_ACPI_H +#define FW_CFG_ACPI_H + +#include "qemu/osdep.h" +#include "exec/hwaddr.h" + +void fw_cfg_acpi_dsdt_add(Aml *scope, const MemMapEntry *fw_cfg_memmap); + +#endif diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h index 58414e468b53dd9a713a948955844eba5ae9d54a..cff7924106adbc6134a14b5468bbf79d66b6600d 100644 --- a/include/hw/nvram/xlnx-efuse.h +++ b/include/hw/nvram/xlnx-efuse.h @@ -30,7 +30,7 @@ #include "sysemu/block-backend.h" #include "hw/qdev-core.h" -#define TYPE_XLNX_EFUSE "xlnx,efuse" +#define TYPE_XLNX_EFUSE "xlnx-efuse" OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE); struct XlnxEFuse { diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h index a873dc5cb01d8e6dd5d7b64202e773206b57d563..86e2261b9a306728dd5258704163456b4c753133 100644 --- a/include/hw/nvram/xlnx-versal-efuse.h +++ b/include/hw/nvram/xlnx-versal-efuse.h @@ -29,8 +29,8 @@ #define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1) -#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx,versal-efuse" -#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx,pmc-efuse-cache" +#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx-versal-efuse" +#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx-pmc-efuse-cache" OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL); OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE); diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h index 6b051ec4f155f617c3145a341a04eaa7c7cbfb56..f5beacc2e6aaf2c649c117d5a70f67b3499d8630 100644 --- a/include/hw/nvram/xlnx-zynqmp-efuse.h +++ b/include/hw/nvram/xlnx-zynqmp-efuse.h @@ -29,7 +29,7 @@ #define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1) -#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx,zynqmp-efuse" +#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx-zynqmp-efuse" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE); struct XlnxZynqMPEFuse { diff --git a/include/hw/pci-host/gpex.h b/include/hw/pci-host/gpex.h index b0240bd7681fb36db4e6b64ed056855e28c363de..dce883573ba24c2ebcf68271b6f788624415942b 100644 --- a/include/hw/pci-host/gpex.h +++ b/include/hw/pci-host/gpex.h @@ -40,6 +40,15 @@ struct GPEXRootState { /*< public >*/ }; +struct GPEXConfig { + MemMapEntry ecam; + MemMapEntry mmio32; + MemMapEntry mmio64; + MemMapEntry pio; + int irq; + PCIBus *bus; +}; + struct GPEXHost { /*< private >*/ PCIExpressHost parent_obj; @@ -55,19 +64,22 @@ struct GPEXHost { int irq_num[GPEX_NUM_IRQS]; bool allow_unmapped_accesses; -}; -struct GPEXConfig { - MemMapEntry ecam; - MemMapEntry mmio32; - MemMapEntry mmio64; - MemMapEntry pio; - int irq; - PCIBus *bus; + struct GPEXConfig gpex_cfg; }; int gpex_set_irq_num(GPEXHost *s, int index, int gsi); void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg); +void acpi_dsdt_add_gpex_host(Aml *scope, uint32_t irq); + +#define PCI_HOST_PIO_BASE "x-pio-base" +#define PCI_HOST_PIO_SIZE "x-pio-size" +#define PCI_HOST_ECAM_BASE "x-ecam-base" +#define PCI_HOST_ECAM_SIZE "x-ecam-size" +#define PCI_HOST_BELOW_4G_MMIO_BASE "x-below-4g-mmio-base" +#define PCI_HOST_BELOW_4G_MMIO_SIZE "x-below-4g-mmio-size" +#define PCI_HOST_ABOVE_4G_MMIO_BASE "x-above-4g-mmio-base" +#define PCI_HOST_ABOVE_4G_MMIO_SIZE "x-above-4g-mmio-size" #endif /* HW_GPEX_H */ diff --git a/include/hw/pci/shpc.h b/include/hw/pci/shpc.h index 89c7a3b7fa9fcd88326f5e2dd500bd910ee7ede4..a0789df1538b2c02ae3b6149b2b63a4aad7aca26 100644 
--- a/include/hw/pci/shpc.h +++ b/include/hw/pci/shpc.h @@ -52,7 +52,7 @@ void shpc_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, void shpc_device_unplug_request_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp); -extern VMStateInfo shpc_vmstate_info; +extern const VMStateInfo shpc_vmstate_info; #define SHPC_VMSTATE(_field, _type, _test) \ VMSTATE_BUFFER_UNSAFE_INFO_TEST(_field, _type, _test, 0, \ shpc_vmstate_info, 0) diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h index b7adbdb7b98057bc95fbd6c7aff46380e074a240..816f5d0e840b056d576254705e02ca8846bb9726 100644 --- a/include/hw/ppc/xive2_regs.h +++ b/include/hw/ppc/xive2_regs.h @@ -10,7 +10,7 @@ #ifndef PPC_XIVE2_REGS_H #define PPC_XIVE2_REGS_H -#include "cpu.h" +#include "qemu/bswap.h" /* * Thread Interrupt Management Area (TIMA) diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h index 91f7a2452d94b777293b5da92fd096b84ff113b4..06c359c190aa97ee9ccd6c25e17184ce409a32e7 100644 --- a/include/hw/qdev-properties-system.h +++ b/include/hw/qdev-properties-system.h @@ -24,6 +24,7 @@ extern const PropertyInfo qdev_prop_off_auto_pcibar; extern const PropertyInfo qdev_prop_pcie_link_speed; extern const PropertyInfo qdev_prop_pcie_link_width; extern const PropertyInfo qdev_prop_cpus390entitlement; +extern const PropertyInfo qdev_prop_iothread_vq_mapping_list; #define DEFINE_PROP_PCI_DEVFN(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_pci_devfn, int32_t) @@ -82,4 +83,8 @@ extern const PropertyInfo qdev_prop_cpus390entitlement; DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_cpus390entitlement, \ CpuS390Entitlement) +#define DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST(_name, _state, _field) \ + DEFINE_PROP(_name, _state, _field, qdev_prop_iothread_vq_mapping_list, \ + IOThreadVirtQueueMappingList *) + #endif diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index 25743a29a000b15d7a15b15e6d83fa3a279d1edd..09aa04ca1e272b1a37353d74dc7c0ec8cb3668fc 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -230,8 +230,8 @@ void qdev_property_add_static(DeviceState *dev, Property *prop); * @target: Device which has properties to be aliased * @source: Object to add alias properties to * - * Add alias properties to the @source object for all qdev properties on - * the @target DeviceState. + * Add alias properties to the @source object for all properties on the @target + * DeviceState. 
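The new qdev_prop_iothread_vq_mapping_list property info above is consumed through the DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST() macro; a sketch of how a device could wire it up follows, with the property name and the VirtIOBlock field chosen for illustration (VirtIOBlkConf gains a matching iothread_vq_mapping_list member later in this series).

/* Hypothetical property table entry for the new mapping-list property. */
static Property virtio_blk_example_properties[] = {
    DEFINE_PROP_IOTHREAD_VQ_MAPPING_LIST("iothread-vq-mapping", VirtIOBlock,
                                         conf.iothread_vq_mapping_list),
    DEFINE_PROP_END_OF_LIST(),
};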
* * This is useful when @target is an internal implementation object * owned by @source, and you want to expose all the properties of that diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index e5c474b26ebc54fb6aa33e7d0b1a6542da54ff1c..f89790fd582a66ba06e79cf1c36e38ecb92d45a3 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -23,6 +23,7 @@ #include "hw/riscv/riscv_hart.h" #include "hw/sysbus.h" #include "hw/block/flash.h" +#include "hw/intc/riscv_imsic.h" #define VIRT_CPUS_MAX_BITS 9 #define VIRT_CPUS_MAX (1 << VIRT_CPUS_MAX_BITS) @@ -60,6 +61,7 @@ struct RISCVVirtState { char *oem_table_id; OnOffAuto acpi; const MemMapEntry *memmap; + struct GPEXHost *gpex_host; }; enum { @@ -127,4 +129,28 @@ enum { bool virt_is_acpi_enabled(RISCVVirtState *s); void virt_acpi_setup(RISCVVirtState *vms); +uint32_t imsic_num_bits(uint32_t count); + +/* + * The virt machine physical address space used by some of the devices + * namely ACLINT, PLIC, APLIC, and IMSIC depend on number of Sockets, + * number of CPUs, and number of IMSIC guest files. + * + * Various limits defined by VIRT_SOCKETS_MAX_BITS, VIRT_CPUS_MAX_BITS, + * and VIRT_IRQCHIP_MAX_GUESTS_BITS are tuned for maximum utilization + * of virt machine physical address space. + */ + +#define VIRT_IMSIC_GROUP_MAX_SIZE (1U << IMSIC_MMIO_GROUP_MIN_SHIFT) +#if VIRT_IMSIC_GROUP_MAX_SIZE < \ + IMSIC_GROUP_SIZE(VIRT_CPUS_MAX_BITS, VIRT_IRQCHIP_MAX_GUESTS_BITS) +#error "Can't accomodate single IMSIC group in address space" +#endif + +#define VIRT_IMSIC_MAX_SIZE (VIRT_SOCKETS_MAX * \ + VIRT_IMSIC_GROUP_MAX_SIZE) +#if 0x4000000 < VIRT_IMSIC_MAX_SIZE +#error "Can't accomodate all IMSIC groups in address space" +#endif + #endif diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 3692ca82f3154064e48f811b448bf7eb80303de3..10c4e8288d87c711dd826b5fe1f52d9a6ae3f6b2 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -69,14 +69,19 @@ struct SCSIDevice { DeviceState qdev; VMChangeStateEntry *vmsentry; - QEMUBH *bh; uint32_t id; BlockConf conf; SCSISense unit_attention; bool sense_is_ua; uint8_t sense[SCSI_SENSE_BUF_SIZE]; uint32_t sense_len; + + /* + * The requests list is only accessed from the AioContext that executes + * requests or from the main loop when IOThread processing is stopped. 
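The locking rule documented above for SCSIDevice->requests means list walks happen either in the device's AioContext or with IOThread processing stopped; a sketch follows, assuming SCSIRequest links into the list through a field named next.

/* Count in-flight requests; the caller must satisfy the access rule above. */
static int scsi_device_count_requests(SCSIDevice *sdev)
{
    SCSIRequest *req;
    int n = 0;

    QTAILQ_FOREACH(req, &sdev->requests, next) {
        n++;
    }
    return n;
}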
+ */ QTAILQ_HEAD(, SCSIRequest) requests; + uint32_t channel; uint32_t lun; int blocksize; diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index a4a22accb9434c5d04bef73172a0ccbe182405cb..9b7ef7d02b5a0ad5266bcc4d06cd6874178978e4 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -30,6 +30,7 @@ #include #endif #include "sysemu/sysemu.h" +#include "hw/vfio/vfio-container-base.h" #define VFIO_MSG_PREFIX "vfio %s: " @@ -72,54 +73,15 @@ typedef struct VFIOMigration { bool initial_data_sent; } VFIOMigration; -typedef struct VFIOAddressSpace { - AddressSpace *as; - QLIST_HEAD(, VFIOContainer) containers; - QLIST_ENTRY(VFIOAddressSpace) list; -} VFIOAddressSpace; - struct VFIOGroup; typedef struct VFIOContainer { - VFIOAddressSpace *space; + VFIOContainerBase bcontainer; int fd; /* /dev/vfio/vfio, empowered by the attached groups */ - MemoryListener listener; - MemoryListener prereg_listener; unsigned iommu_type; - Error *error; - bool initialized; - bool dirty_pages_supported; - uint64_t dirty_pgsizes; - uint64_t max_dirty_bitmap_size; - unsigned long pgsizes; - unsigned int dma_max_mappings; - QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; - QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; QLIST_HEAD(, VFIOGroup) group_list; - QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; - QLIST_ENTRY(VFIOContainer) next; - QLIST_HEAD(, VFIODevice) device_list; - GList *iova_ranges; } VFIOContainer; -typedef struct VFIOGuestIOMMU { - VFIOContainer *container; - IOMMUMemoryRegion *iommu_mr; - hwaddr iommu_offset; - IOMMUNotifier n; - QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; -} VFIOGuestIOMMU; - -typedef struct VFIORamDiscardListener { - VFIOContainer *container; - MemoryRegion *mr; - hwaddr offset_within_address_space; - hwaddr size; - uint64_t granularity; - RamDiscardListener listener; - QLIST_ENTRY(VFIORamDiscardListener) next; -} VFIORamDiscardListener; - typedef struct VFIOHostDMAWindow { hwaddr min_iova; hwaddr max_iova; @@ -127,6 +89,14 @@ typedef struct VFIOHostDMAWindow { QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next; } VFIOHostDMAWindow; +typedef struct IOMMUFDBackend IOMMUFDBackend; + +typedef struct VFIOIOMMUFDContainer { + VFIOContainerBase bcontainer; + IOMMUFDBackend *be; + uint32_t ioas_id; +} VFIOIOMMUFDContainer; + typedef struct VFIODeviceOps VFIODeviceOps; typedef struct VFIODevice { @@ -134,7 +104,7 @@ typedef struct VFIODevice { QLIST_ENTRY(VFIODevice) container_next; QLIST_ENTRY(VFIODevice) global_next; struct VFIOGroup *group; - VFIOContainer *container; + VFIOContainerBase *bcontainer; char *sysfsdev; char *name; DeviceState *dev; @@ -154,6 +124,8 @@ typedef struct VFIODevice { OnOffAuto pre_copy_dirty_page_tracking; bool dirty_pages_supported; bool dirty_tracking; + int devid; + IOMMUFDBackend *iommufd; } VFIODevice; struct VFIODeviceOps { @@ -201,31 +173,10 @@ typedef struct VFIODisplay { } dmabuf; } VFIODisplay; -typedef struct { - unsigned long *bitmap; - hwaddr size; - hwaddr pages; -} VFIOBitmap; - VFIOAddressSpace *vfio_get_address_space(AddressSpace *as); void vfio_put_address_space(VFIOAddressSpace *space); -bool vfio_devices_all_running_and_saving(VFIOContainer *container); - -/* container->fd */ -int vfio_dma_unmap(VFIOContainer *container, hwaddr iova, - ram_addr_t size, IOMMUTLBEntry *iotlb); -int vfio_dma_map(VFIOContainer *container, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly); -int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start); -int vfio_query_dirty_bitmap(VFIOContainer *container, 
VFIOBitmap *vbmap, - hwaddr iova, hwaddr size); /* SPAPR specific */ -int vfio_container_add_section_window(VFIOContainer *container, - MemoryRegionSection *section, - Error **errp); -void vfio_container_del_section_window(VFIOContainer *container, - MemoryRegionSection *section); int vfio_spapr_container_init(VFIOContainer *container, Error **errp); void vfio_spapr_container_deinit(VFIOContainer *container); @@ -259,7 +210,6 @@ typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList; extern VFIOGroupList vfio_group_list; extern VFIODeviceList vfio_device_list; - extern const MemoryListener vfio_memory_listener; extern int vfio_kvm_device_fd; @@ -292,11 +242,19 @@ bool vfio_migration_realize(VFIODevice *vbasedev, Error **errp); void vfio_migration_exit(VFIODevice *vbasedev); int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size); -bool vfio_devices_all_running_and_mig_active(VFIOContainer *container); -bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container); -int vfio_devices_query_dirty_bitmap(VFIOContainer *container, +bool +vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer); +bool +vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer); +int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size); -int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, - uint64_t size, ram_addr_t ram_addr); +int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, + uint64_t size, ram_addr_t ram_addr); + +/* Returns 0 on success, or a negative errno. */ +int vfio_device_get_name(VFIODevice *vbasedev, Error **errp); +void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp); +void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, + DeviceState *dev, bool ram_discard); #endif /* HW_VFIO_VFIO_COMMON_H */ diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h new file mode 100644 index 0000000000000000000000000000000000000000..b2813b0c117985425c842d91f011bb895955d738 --- /dev/null +++ b/include/hw/vfio/vfio-container-base.h @@ -0,0 +1,140 @@ +/* + * VFIO BASE CONTAINER + * + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 
2023 + * + * Authors: Yi Liu + * Eric Auger + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H +#define HW_VFIO_VFIO_CONTAINER_BASE_H + +#include "exec/memory.h" + +typedef struct VFIODevice VFIODevice; +typedef struct VFIOIOMMUClass VFIOIOMMUClass; + +typedef struct { + unsigned long *bitmap; + hwaddr size; + hwaddr pages; +} VFIOBitmap; + +typedef struct VFIOAddressSpace { + AddressSpace *as; + QLIST_HEAD(, VFIOContainerBase) containers; + QLIST_ENTRY(VFIOAddressSpace) list; +} VFIOAddressSpace; + +/* + * This is the base object for vfio container backends + */ +typedef struct VFIOContainerBase { + const VFIOIOMMUClass *ops; + VFIOAddressSpace *space; + MemoryListener listener; + Error *error; + bool initialized; + uint64_t dirty_pgsizes; + uint64_t max_dirty_bitmap_size; + unsigned long pgsizes; + unsigned int dma_max_mappings; + bool dirty_pages_supported; + QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; + QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; + QLIST_ENTRY(VFIOContainerBase) next; + QLIST_HEAD(, VFIODevice) device_list; + GList *iova_ranges; +} VFIOContainerBase; + +typedef struct VFIOGuestIOMMU { + VFIOContainerBase *bcontainer; + IOMMUMemoryRegion *iommu_mr; + hwaddr iommu_offset; + IOMMUNotifier n; + QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; +} VFIOGuestIOMMU; + +typedef struct VFIORamDiscardListener { + VFIOContainerBase *bcontainer; + MemoryRegion *mr; + hwaddr offset_within_address_space; + hwaddr size; + uint64_t granularity; + RamDiscardListener listener; + QLIST_ENTRY(VFIORamDiscardListener) next; +} VFIORamDiscardListener; + +int vfio_container_dma_map(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly); +int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb); +int vfio_container_add_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp); +void vfio_container_del_section_window(VFIOContainerBase *bcontainer, + MemoryRegionSection *section); +int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, + bool start); +int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size); + +void vfio_container_init(VFIOContainerBase *bcontainer, + VFIOAddressSpace *space, + const VFIOIOMMUClass *ops); +void vfio_container_destroy(VFIOContainerBase *bcontainer); + + +#define TYPE_VFIO_IOMMU "vfio-iommu" +#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy" +#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr" +#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd" + +/* + * VFIOContainerBase is not an abstract QOM object because it felt + * unnecessary to expose all the IOMMU backends to the QEMU machine + * and human interface. However, we can still abstract the IOMMU + * backend handlers using a QOM interface class. This provides more + * flexibility when referencing the various implementations. 
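The vfio_container_dma_map()/vfio_container_dma_unmap() wrappers above dispatch to whichever IOMMU backend owns the container; a sketch of a caller, with the helper name and the error reporting purely illustrative.

/* Map a chunk of guest RAM through the container abstraction. */
static int example_map_ram(VFIOContainerBase *bcontainer, hwaddr iova,
                           ram_addr_t size, void *vaddr, bool readonly)
{
    int ret = vfio_container_dma_map(bcontainer, iova, size, vaddr, readonly);

    if (ret) {
        /* The backends are expected to return a negative errno here. */
        error_report("vfio: DMA map at 0x%" HWADDR_PRIx " failed: %s",
                     iova, strerror(-ret));
    }
    return ret;
}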
+ */ +DECLARE_CLASS_CHECKERS(VFIOIOMMUClass, VFIO_IOMMU, TYPE_VFIO_IOMMU) + +struct VFIOIOMMUClass { + InterfaceClass parent_class; + + /* basic feature */ + int (*setup)(VFIOContainerBase *bcontainer, Error **errp); + int (*dma_map)(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + void *vaddr, bool readonly); + int (*dma_unmap)(const VFIOContainerBase *bcontainer, + hwaddr iova, ram_addr_t size, + IOMMUTLBEntry *iotlb); + int (*attach_device)(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp); + void (*detach_device)(VFIODevice *vbasedev); + /* migration feature */ + int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer, + bool start); + int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer, + VFIOBitmap *vbmap, + hwaddr iova, hwaddr size); + /* PCI specific */ + int (*pci_hot_reset)(VFIODevice *vbasedev, bool single); + + /* SPAPR specific */ + int (*add_window)(VFIOContainerBase *bcontainer, + MemoryRegionSection *section, + Error **errp); + void (*del_window)(VFIOContainerBase *bcontainer, + MemoryRegionSection *section); + void (*release)(VFIOContainerBase *bcontainer); +}; +#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */ diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index a86d103f8245b2b8602bd534960bf012e52cceb5..70c2e8ffeee5df3731c6ee43f824359e16cec8b6 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -45,6 +45,8 @@ struct vhost_memory; struct vhost_vring_file; struct vhost_vring_state; struct vhost_vring_addr; +struct vhost_vring_worker; +struct vhost_worker_state; struct vhost_scsi_target; struct vhost_iotlb_msg; struct vhost_virtqueue; @@ -85,6 +87,14 @@ typedef int (*vhost_set_vring_err_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev, struct vhost_vring_state *r); +typedef int (*vhost_attach_vring_worker_op)(struct vhost_dev *dev, + struct vhost_vring_worker *worker); +typedef int (*vhost_get_vring_worker_op)(struct vhost_dev *dev, + struct vhost_vring_worker *worker); +typedef int (*vhost_new_worker_op)(struct vhost_dev *dev, + struct vhost_worker_state *worker); +typedef int (*vhost_free_worker_op)(struct vhost_dev *dev, + struct vhost_worker_state *worker); typedef int (*vhost_set_features_op)(struct vhost_dev *dev, uint64_t features); typedef int (*vhost_get_features_op)(struct vhost_dev *dev, @@ -172,6 +182,10 @@ typedef struct VhostOps { vhost_set_vring_call_op vhost_set_vring_call; vhost_set_vring_err_op vhost_set_vring_err; vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout; + vhost_new_worker_op vhost_new_worker; + vhost_free_worker_op vhost_free_worker; + vhost_get_vring_worker_op vhost_get_vring_worker; + vhost_attach_vring_worker_op vhost_attach_vring_worker; vhost_set_features_op vhost_set_features; vhost_get_features_op vhost_get_features; vhost_set_backend_cap_op vhost_set_backend_cap; diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 5407d54fd79bb6904729055fb05a8927d19ab17d..8f54e5edd4e969608e0be482abd48b9ff164f341 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -30,42 +30,52 @@ typedef struct VhostVDPAHostNotifier { void *addr; } VhostVDPAHostNotifier; -typedef struct vhost_vdpa { +/* Info shared by all vhost_vdpa device models */ +typedef struct vhost_vdpa_shared { int device_fd; - int index; - uint32_t msg_type; - bool iotlb_batch_begin_sent; - uint32_t 
address_space_id; MemoryListener listener; struct vhost_vdpa_iova_range iova_range; - uint64_t acked_features; - bool shadow_vqs_enabled; + QLIST_HEAD(, vdpa_iommu) iommu_list; + + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; + + /* Copy of backend features */ + uint64_t backend_cap; + + bool iotlb_batch_begin_sent; + /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */ bool shadow_data; +} VhostVDPAShared; + +typedef struct vhost_vdpa { + int index; + uint32_t address_space_id; + uint64_t acked_features; + bool shadow_vqs_enabled; /* Device suspended successfully */ bool suspended; - /* IOVA mapping used by the Shadow Virtqueue */ - VhostIOVATree *iova_tree; + VhostVDPAShared *shared; GPtrArray *shadow_vqs; const VhostShadowVirtqueueOps *shadow_vq_ops; void *shadow_vq_ops_opaque; struct vhost_dev *dev; Error *migration_blocker; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; - QLIST_HEAD(, vdpa_iommu) iommu_list; IOMMUNotifier n; } VhostVDPA; int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range); int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx); -int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size, void *vaddr, bool readonly); -int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, +int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova, hwaddr size); typedef struct vdpa_iommu { - struct vhost_vdpa *dev; + VhostVDPAShared *dev_shared; IOMMUMemoryRegion *iommu_mr; hwaddr iommu_offset; IOMMUNotifier n; diff --git a/include/hw/virtio/virtio-acpi.h b/include/hw/virtio/virtio-acpi.h new file mode 100644 index 0000000000000000000000000000000000000000..844e102569ef5e6dd2ca88301d64e555c55a9698 --- /dev/null +++ b/include/hw/virtio/virtio-acpi.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * ACPI support for virtio + */ + +#ifndef VIRTIO_ACPI_H +#define VIRTIO_ACPI_H + +#include "qemu/osdep.h" +#include "exec/hwaddr.h" + +void virtio_acpi_dsdt_add(Aml *scope, const hwaddr virtio_mmio_base, + const hwaddr virtio_mmio_size, uint32_t mmio_irq, + long int start_index, int num); + +#endif diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h index dafec432ce057b15e8a4184712a8f76f9837b05e..5e4091e4da25d800238f8ac0d127f15dd2d5db9c 100644 --- a/include/hw/virtio/virtio-blk.h +++ b/include/hw/virtio/virtio-blk.h @@ -21,6 +21,7 @@ #include "sysemu/block-backend.h" #include "sysemu/block-ram-registrar.h" #include "qom/object.h" +#include "qapi/qapi-types-virtio.h" #define TYPE_VIRTIO_BLK "virtio-blk-device" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOBlock, VIRTIO_BLK) @@ -37,6 +38,7 @@ struct VirtIOBlkConf { BlockConf conf; IOThread *iothread; + IOThreadVirtQueueMappingList *iothread_vq_mapping_list; char *serial; uint32_t request_merging; uint16_t num_queues; @@ -54,7 +56,8 @@ struct VirtIOBlockReq; struct VirtIOBlock { VirtIODevice parent_obj; BlockBackend *blk; - void *rq; + QemuMutex rq_lock; + void *rq; /* protected by rq_lock */ VirtIOBlkConf conf; unsigned short sector_mask; bool original_wce; diff --git a/include/hw/virtio/virtio-scsi.h b/include/hw/virtio/virtio-scsi.h index 779568ab5d28f62f5be86863c68e6dc416fe4d47..7be01059185dc43b77b060ff9f22185983cf7f38 100644 --- a/include/hw/virtio/virtio-scsi.h +++ b/include/hw/virtio/virtio-scsi.h @@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig; struct VirtIOSCSIConf { 
uint32_t num_queues; uint32_t virtqueue_size; + bool worker_per_virtqueue; bool seg_max_adjust; uint32_t max_sectors; uint32_t cmd_per_lun; @@ -85,8 +86,9 @@ struct VirtIOSCSI { /* * TMFs deferred to main loop BH. These fields are protected by - * virtio_scsi_acquire(). + * tmf_bh_lock. */ + QemuMutex tmf_bh_lock; QEMUBH *tmf_bh; QTAILQ_HEAD(, VirtIOSCSIReq) tmf_bh_list; @@ -100,20 +102,6 @@ struct VirtIOSCSI { uint32_t host_features; }; -static inline void virtio_scsi_acquire(VirtIOSCSI *s) -{ - if (s->ctx) { - aio_context_acquire(s->ctx); - } -} - -static inline void virtio_scsi_release(VirtIOSCSI *s) -{ - if (s->ctx) { - aio_context_release(s->ctx); - } -} - void virtio_scsi_common_realize(DeviceState *dev, VirtIOHandleOutput ctrl, VirtIOHandleOutput evt, diff --git a/include/io/task.h b/include/io/task.h index dc7d32ebd07ecfd22c3c9d11c8618a2673952dcf..0b5342ee8432ff8a841cb210ed6bdfbf8aae4d5f 100644 --- a/include/io/task.h +++ b/include/io/task.h @@ -149,7 +149,7 @@ typedef void (*QIOTaskWorker)(QIOTask *task, * lookups) to be easily run non-blocking. Reporting the * results in the main thread context means that the caller * typically does not need to be concerned about thread - * safety wrt the QEMU global mutex. + * safety wrt the BQL. * * For example, the socket_listen() method will block the caller * while DNS lookups take place if given a name, instead of IP diff --git a/include/migration/register.h b/include/migration/register.h index fed1d04a3c3bca8dbcd991c6a037ad8f60f4c8cc..9ab1f79512c605f0c88a45b560c57486fa054441 100644 --- a/include/migration/register.h +++ b/include/migration/register.h @@ -17,7 +17,7 @@ #include "hw/vmstate-if.h" typedef struct SaveVMHandlers { - /* This runs inside the iothread lock. */ + /* This runs inside the BQL. */ SaveStateHandler *save_state; /* @@ -30,7 +30,7 @@ typedef struct SaveVMHandlers { int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque); int (*save_live_complete_precopy)(QEMUFile *f, void *opaque); - /* This runs both outside and inside the iothread lock. */ + /* This runs both outside and inside the BQL. */ bool (*is_active)(void *opaque); bool (*has_postcopy)(void *opaque); @@ -43,14 +43,14 @@ typedef struct SaveVMHandlers { */ bool (*is_active_iterate)(void *opaque); - /* This runs outside the iothread lock in the migration case, and + /* This runs outside the BQL in the migration case, and * within the lock in the savevm case. The callback had better only * use data that is local to the migration thread or protected * by other locks. */ int (*save_live_iterate)(QEMUFile *f, void *opaque); - /* This runs outside the iothread lock! */ + /* This runs outside the BQL! */ /* Note for save_live_pending: * must_precopy: * - must be migrated in precopy or in stopped state diff --git a/include/migration/snapshot.h b/include/migration/snapshot.h index e72083b117a65addeb0092a9e3085e23db344993..9e4dcaaa7510e1aa6738c8c06b74bb60d6b9db1a 100644 --- a/include/migration/snapshot.h +++ b/include/migration/snapshot.h @@ -16,6 +16,7 @@ #define QEMU_MIGRATION_SNAPSHOT_H #include "qapi/qapi-builtin-types.h" +#include "qapi/qapi-types-run-state.h" /** * save_snapshot: Save an internal snapshot. @@ -61,4 +62,10 @@ bool delete_snapshot(const char *name, bool has_devices, strList *devices, Error **errp); +/** + * load_snapshot_resume: Restore runstate after loading snapshot. 
+ * @state: state to restore + */ +void load_snapshot_resume(RunState state); + #endif diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 9821918631655a27af63c8222f294048ea61e169..294d2d848621e8404e1c8d14aeb8c02a83f94695 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -209,7 +209,7 @@ struct VMStateDescription { bool (*dev_unplug_pending)(void *opaque); const VMStateField *fields; - const VMStateDescription **subsections; + const VMStateDescription * const *subsections; }; extern const VMStateInfo vmstate_info_bool; diff --git a/include/qapi/string-output-visitor.h b/include/qapi/string-output-visitor.h index 268dfe9986bb09cb95735c505c3a8fe8bc4d3674..b1ee473b3064ab6a41f2bcf2ee6557f8456ec94c 100644 --- a/include/qapi/string-output-visitor.h +++ b/include/qapi/string-output-visitor.h @@ -26,9 +26,9 @@ typedef struct StringOutputVisitor StringOutputVisitor; * If everything else succeeds, pass @result to visit_complete() to * collect the result of the visit. * - * The string output visitor does not implement support for visiting - * QAPI structs, alternates, null, or arbitrary QTypes. It also - * requires a non-null list argument to visit_start_list(). + * The string output visitor does not implement support for alternates, null, + * or arbitrary QTypes. Struct fields are not shown. It also requires a + * non-null list argument to visit_start_list(). */ Visitor *string_output_visitor_new(bool human, char **result); diff --git a/include/qemu/chardev_open.h b/include/qemu/chardev_open.h new file mode 100644 index 0000000000000000000000000000000000000000..64e8fcfdcb239e47774d55dfc57efff9101ce0a8 --- /dev/null +++ b/include/qemu/chardev_open.h @@ -0,0 +1,16 @@ +/* + * QEMU Chardev Helper + * + * Copyright (C) 2023 Intel Corporation. + * + * Authors: Yi Liu + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + */ + +#ifndef QEMU_CHARDEV_OPEN_H +#define QEMU_CHARDEV_OPEN_H + +int open_cdev(const char *devpath, dev_t cdev); +#endif diff --git a/include/qemu/coroutine-core.h b/include/qemu/coroutine-core.h index 230bb565177448b470eb41100aefb4b12c0e507e..503bad6e0e69cef4c10c778f49cae82b68d7bf3d 100644 --- a/include/qemu/coroutine-core.h +++ b/include/qemu/coroutine-core.h @@ -22,7 +22,7 @@ * rather than callbacks, for operations that need to give up control while * waiting for events to complete. * - * These functions are re-entrant and may be used outside the global mutex. + * These functions are re-entrant and may be used outside the BQL. * * Functions that execute in coroutine context cannot be called * directly from normal functions. Use @coroutine_fn to mark such diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index a65be6697f51f20cf2c91c1632ab66e48afbbb69..e6aff453017d158059148f20924e9cd68287e601 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -26,7 +26,7 @@ * rather than callbacks, for operations that need to give up control while * waiting for events to complete. * - * These functions are re-entrant and may be used outside the global mutex. + * These functions are re-entrant and may be used outside the BQL. * * Functions that execute in coroutine context cannot be called * directly from normal functions. 
Use @coroutine_fn to mark such diff --git a/include/qemu/fifo8.h b/include/qemu/fifo8.h index 16be02f361fc37f2483030ba034e1fd381441ddd..c6295c6ff0cbc8efeed8e761493cdfdff4e05ce0 100644 --- a/include/qemu/fifo8.h +++ b/include/qemu/fifo8.h @@ -71,7 +71,7 @@ uint8_t fifo8_pop(Fifo8 *fifo); * fifo8_pop_buf: * @fifo: FIFO to pop from * @max: maximum number of bytes to pop - * @num: actual number of returned bytes + * @numptr: pointer filled with number of bytes returned (can be NULL) * * Pop a number of elements from the FIFO up to a maximum of max. The buffer * containing the popped data is returned. This buffer points directly into @@ -82,16 +82,43 @@ uint8_t fifo8_pop(Fifo8 *fifo); * around in the ring buffer; in this case only a contiguous part of the data * is returned. * - * The number of valid bytes returned is populated in *num; will always return - * at least 1 byte. max must not be 0 or greater than the number of bytes in - * the FIFO. + * The number of valid bytes returned is populated in *numptr; will always + * return at least 1 byte. max must not be 0 or greater than the number of + * bytes in the FIFO. * * Clients are responsible for checking the availability of requested data * using fifo8_num_used(). * * Returns: A pointer to popped data. */ -const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num); +const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr); + +/** + * fifo8_peek_buf: read upto max bytes from the fifo + * @fifo: FIFO to read from + * @max: maximum number of bytes to peek + * @numptr: pointer filled with number of bytes returned (can be NULL) + * + * Peek into a number of elements from the FIFO up to a maximum of max. + * The buffer containing the data peeked into is returned. This buffer points + * directly into the FIFO backing store. Since data is invalidated once any + * of the fifo8_* APIs are called on the FIFO, it is the caller responsibility + * to access it before doing further API calls. + * + * The function may return fewer bytes than requested when the data wraps + * around in the ring buffer; in this case only a contiguous part of the data + * is returned. + * + * The number of valid bytes returned is populated in *numptr; will always + * return at least 1 byte. max must not be 0 or greater than the number of + * bytes in the FIFO. + * + * Clients are responsible for checking the availability of requested data + * using fifo8_num_used(). + * + * Returns: A pointer to peekable data. + */ +const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr); /** * fifo8_reset: diff --git a/include/qemu/job.h b/include/qemu/job.h index e502787dd87dc431c1d8fa8af92fc64808a28492..9ea98b5927738d58dcf8ab635e4c8ee3e6eef8f4 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -67,8 +67,6 @@ typedef struct Job { /** * The completion function that will be called when the job completes. - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ BlockCompletionFunc *cb; @@ -264,9 +262,6 @@ struct JobDriver { * * This callback will not be invoked if the job has already failed. * If it fails, abort and then clean will be called. - * - * Called with AioContext lock held, since many callbacs implementations - * use bdrv_* functions that require to hold the lock. */ int (*prepare)(Job *job); @@ -277,9 +272,6 @@ struct JobDriver { * * All jobs will complete with a call to either .commit() or .abort() but * never both. 
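Returning to the fifo8_peek_buf()/fifo8_pop_buf() documentation above: since both can return fewer than max bytes when the ring wraps, callers normally consume the FIFO in a loop. A sketch, with the consumer callback purely illustrative.

/* Drain a FIFO, tolerating wrap-around in the backing ring buffer. */
static void example_drain_fifo(Fifo8 *fifo,
                               void (*consume)(const uint8_t *buf, uint32_t len))
{
    while (!fifo8_is_empty(fifo)) {
        uint32_t n;
        const uint8_t *buf = fifo8_pop_buf(fifo, fifo8_num_used(fifo), &n);

        /* buf is only valid until the next fifo8_*() call. */
        consume(buf, n);
    }
}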
- * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*commit)(Job *job); @@ -290,9 +282,6 @@ struct JobDriver { * * All jobs will complete with a call to either .commit() or .abort() but * never both. - * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*abort)(Job *job); @@ -301,9 +290,6 @@ struct JobDriver { * .commit() or .abort(). Regardless of which callback is invoked after * completion, .clean() will always be called, even if the job does not * belong to a transaction group. - * - * Called with AioContext lock held, since many callbacs implementations - * use bdrv_* functions that require to hold the lock. */ void (*clean)(Job *job); @@ -318,17 +304,12 @@ struct JobDriver { * READY). * (If the callback is NULL, the job is assumed to terminate * without I/O.) - * - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ bool (*cancel)(Job *job, bool force); /** * Called when the job is freed. - * Called with AioContext lock held, since many callback implementations - * use bdrv_* functions that require to hold the lock. */ void (*free)(Job *job); }; @@ -424,7 +405,6 @@ void job_ref_locked(Job *job); * Release a reference that was previously acquired with job_ref_locked() or * job_create(). If it's the last reference to the object, it will be freed. * - * Takes AioContext lock internally to invoke a job->driver callback. * Called with job lock held. */ void job_unref_locked(Job *job); diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 68e70e61aa59c81256a46b7fae793a45ab99e8a2..5764db157c97684e1fe5849ddb5af65973d3c5a3 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -248,19 +248,19 @@ GSource *iohandler_get_g_source(void); AioContext *iohandler_get_aio_context(void); /** - * qemu_mutex_iothread_locked: Return lock status of the main loop mutex. + * bql_locked: Return lock status of the Big QEMU Lock (BQL) * - * The main loop mutex is the coarsest lock in QEMU, and as such it + * The Big QEMU Lock (BQL) is the coarsest lock in QEMU, and as such it * must always be taken outside other locks. This function helps * functions take different paths depending on whether the current - * thread is running within the main loop mutex. + * thread is running within the BQL. * * This function should never be used in the block layer, because * unit tests, block layer tools and qemu-storage-daemon do not * have a BQL. * Please instead refer to qemu_in_main_thread(). */ -bool qemu_mutex_iothread_locked(void); +bool bql_locked(void); /** * qemu_in_main_thread: return whether it's possible to safely access @@ -312,78 +312,76 @@ bool qemu_in_main_thread(void); } while (0) /** - * qemu_mutex_lock_iothread: Lock the main loop mutex. + * bql_lock: Lock the Big QEMU Lock (BQL). * - * This function locks the main loop mutex. The mutex is taken by + * This function locks the Big QEMU Lock (BQL). The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). The mutex should be taken + * external events (such as with select). The lock should be taken * by threads other than the main loop thread when calling * qemu_bh_new(), qemu_set_fd_handler() and basically all other * functions documented in this file. 
* - * NOTE: tools currently are single-threaded and qemu_mutex_lock_iothread + * NOTE: tools currently are single-threaded and bql_lock * is a no-op there. */ -#define qemu_mutex_lock_iothread() \ - qemu_mutex_lock_iothread_impl(__FILE__, __LINE__) -void qemu_mutex_lock_iothread_impl(const char *file, int line); +#define bql_lock() bql_lock_impl(__FILE__, __LINE__) +void bql_lock_impl(const char *file, int line); /** - * qemu_mutex_unlock_iothread: Unlock the main loop mutex. + * bql_unlock: Unlock the Big QEMU Lock (BQL). * - * This function unlocks the main loop mutex. The mutex is taken by + * This function unlocks the Big QEMU Lock. The lock is taken by * main() in vl.c and always taken except while waiting on - * external events (such as with select). The mutex should be unlocked + * external events (such as with select). The lock should be unlocked * as soon as possible by threads other than the main loop thread, * because it prevents the main loop from processing callbacks, * including timers and bottom halves. * - * NOTE: tools currently are single-threaded and qemu_mutex_unlock_iothread + * NOTE: tools currently are single-threaded and bql_unlock * is a no-op there. */ -void qemu_mutex_unlock_iothread(void); +void bql_unlock(void); /** - * QEMU_IOTHREAD_LOCK_GUARD + * BQL_LOCK_GUARD * - * Wrap a block of code in a conditional qemu_mutex_{lock,unlock}_iothread. + * Wrap a block of code in a conditional bql_{lock,unlock}. */ -typedef struct IOThreadLockAuto IOThreadLockAuto; +typedef struct BQLLockAuto BQLLockAuto; -static inline IOThreadLockAuto *qemu_iothread_auto_lock(const char *file, - int line) +static inline BQLLockAuto *bql_auto_lock(const char *file, int line) { - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { return NULL; } - qemu_mutex_lock_iothread_impl(file, line); + bql_lock_impl(file, line); /* Anything non-NULL causes the cleanup function to be called */ - return (IOThreadLockAuto *)(uintptr_t)1; + return (BQLLockAuto *)(uintptr_t)1; } -static inline void qemu_iothread_auto_unlock(IOThreadLockAuto *l) +static inline void bql_auto_unlock(BQLLockAuto *l) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } -G_DEFINE_AUTOPTR_CLEANUP_FUNC(IOThreadLockAuto, qemu_iothread_auto_unlock) +G_DEFINE_AUTOPTR_CLEANUP_FUNC(BQLLockAuto, bql_auto_unlock) -#define QEMU_IOTHREAD_LOCK_GUARD() \ - g_autoptr(IOThreadLockAuto) _iothread_lock_auto __attribute__((unused)) \ - = qemu_iothread_auto_lock(__FILE__, __LINE__) +#define BQL_LOCK_GUARD() \ + g_autoptr(BQLLockAuto) _bql_lock_auto __attribute__((unused)) \ + = bql_auto_lock(__FILE__, __LINE__) /* - * qemu_cond_wait_iothread: Wait on condition for the main loop mutex + * qemu_cond_wait_bql: Wait on condition for the Big QEMU Lock (BQL) * - * This function atomically releases the main loop mutex and causes + * This function atomically releases the Big QEMU Lock (BQL) and causes * the calling thread to block on the condition. 
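BQL_LOCK_GUARD() above takes the lock only when the current thread does not already hold it, which suits helpers that can be entered both from the main loop and from other threads; a sketch, with the function and the guarded state purely illustrative.

/* Update BQL-protected state from an arbitrary thread. */
static void example_update_state(DeviceState *dev)
{
    BQL_LOCK_GUARD();   /* no-op if this thread already holds the BQL */

    /* ... touch state that the BQL protects ... */
}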
*/ -void qemu_cond_wait_iothread(QemuCond *cond); +void qemu_cond_wait_bql(QemuCond *cond); /* - * qemu_cond_timedwait_iothread: like the previous, but with timeout + * qemu_cond_timedwait_bql: like the previous, but with timeout */ -void qemu_cond_timedwait_iothread(QemuCond *cond, int ms); +void qemu_cond_timedwait_bql(QemuCond *cond, int ms); /* internal interfaces */ diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index d30ba73eda2c7900b4ba66d576a58753b3bde8dd..9a405bed8937a28f5d655300ff2082e5602cfd24 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -678,8 +678,10 @@ typedef struct ThreadContext ThreadContext; * memory area starting at @area with the size of @sz. After a successful call, * each page in the area was faulted in writable at least once, for example, * after allocating file blocks for mapped files. + * + * Return: true on success, else false setting @errp with error. */ -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, ThreadContext *tc, Error **errp); /** @@ -779,16 +781,6 @@ static inline int platform_does_not_support_system(const char *command) } #endif /* !HAVE_SYSTEM_FUNCTION */ -/** - * If the load average was unobtainable, -1 is returned - */ -#ifndef HAVE_GETLOADAVG_FUNCTION -static inline int getloadavg(double loadavg[], int nelem) -{ - return -1; -} -#endif /* !HAVE_GETLOADAVG_FUNCTION */ - #ifdef __cplusplus } #endif diff --git a/include/qemu/thread.h b/include/qemu/thread.h index dd3822d7cee9010fb4e07158ee86c429c30315f2..fb74e21c08a7662d81b36cd0f543b4f6a8d934e7 100644 --- a/include/qemu/thread.h +++ b/include/qemu/thread.h @@ -47,7 +47,7 @@ typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms, const char *f, int l); -extern QemuMutexLockFunc qemu_bql_mutex_lock_func; +extern QemuMutexLockFunc bql_mutex_lock_func; extern QemuMutexLockFunc qemu_mutex_lock_func; extern QemuMutexTrylockFunc qemu_mutex_trylock_func; extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index 72279f4d25d4b01fb876256511605e4aa78b5720..3afb70160f0bf5feaf7ae284ea19e6e51e822cf5 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -322,6 +322,8 @@ extern "C" { * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian */ #define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index 6b9793842c98496feff47cba2f0a49eecdfac7d6..fc0dcd10aededdb8ea0f2ddb7144dcf44854727d 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -209,7 +209,7 @@ * - add FUSE_HAS_EXPIRE_ONLY * * 7.39 - * - add FUSE_DIRECT_IO_RELAX + * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures */ @@ -405,8 +405,7 @@ struct fuse_file_lock { * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation - * FUSE_DIRECT_IO_RELAX: relax 
restrictions in FOPEN_DIRECT_IO mode, for now - * allow shared mmap + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -445,7 +444,10 @@ struct fuse_file_lock { #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) -#define FUSE_DIRECT_IO_RELAX (1ULL << 36) +#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) + +/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ +#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP /** * CUSE INIT request/reply flags diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index e5f558d9649396faed564c9156ab3d03b9aaa7ab..a39193213ff25ba24233fe28bc4977f38c294702 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -80,6 +80,7 @@ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 +#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */ #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ @@ -637,6 +638,7 @@ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ +#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -930,12 +932,13 @@ /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ -#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ -#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_WIDTH 0x1f00 #define PCI_PASID_CTRL 0x06 /* PASID control register */ -#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ -#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ -#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ +#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ @@ -975,6 +978,8 @@ #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 +#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */ +#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */ #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ @@ -1042,9 +1047,16 @@ #define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ +#define 
PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */ +#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */ #define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ @@ -1088,6 +1100,8 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */ +#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index 5ad07e134aed3525943388a18b5bded129624f58..fd54044936fc50e41b041fdd1d74da1f264b2efe 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range { * DRIVER_OK */ #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6 +/* Device may expose the virtqueue's descriptor area, driver area and + * device area to a different group for ASID binding than where its + * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID. + */ +#define VHOST_BACKEND_F_DESC_ASID 0x7 +/* IOTLB don't flush memory mapping across device reset */ +#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8 #endif diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h index 8a7d0dc8b0070343636a72926dbea66ceb01b184..bfd1ca643e7f4a62127c295bdac8ccc4da068cc0 100644 --- a/include/standard-headers/linux/virtio_config.h +++ b/include/standard-headers/linux/virtio_config.h @@ -103,6 +103,11 @@ */ #define VIRTIO_F_NOTIFICATION_DATA 38 +/* This feature indicates that the driver uses the data provided by the device + * as a virtqueue identifier in available buffer notifications. + */ +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 + /* * This feature indicates that the driver can reset a queue individually. */ diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h index be912cfc957cd78f0cade4c99ad03c0513e03c4d..b7fdfd066878cb89a091edcfe79c11196f3361d7 100644 --- a/include/standard-headers/linux/virtio_pci.h +++ b/include/standard-headers/linux/virtio_pci.h @@ -166,6 +166,17 @@ struct virtio_pci_common_cfg { uint32_t queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof on this: use offsetofend for + * specific fields you need. + */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + uint16_t queue_notify_data; /* read-write */ + uint16_t queue_reset; /* read-write */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; diff --git a/include/sysemu/hostmem.h b/include/sysemu/hostmem.h index 39326f1d4f9c0713fdcf0613623a941157cbeed8..0e411aaa29e1a8734c39cd9fb90046b012c4d5de 100644 --- a/include/sysemu/hostmem.h +++ b/include/sysemu/hostmem.h @@ -47,7 +47,15 @@ OBJECT_DECLARE_TYPE(HostMemoryBackend, HostMemoryBackendClass, struct HostMemoryBackendClass { ObjectClass parent_class; - void (*alloc)(HostMemoryBackend *backend, Error **errp); + /** + * alloc: Allocate memory from backend. + * + * @backend: the #HostMemoryBackend. + * @errp: pointer to Error*, to store an error if it happens. 
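The warning on struct virtio_pci_modern_common_cfg above means a device or driver should size the structure up to the last field it actually uses rather than with sizeof(); the sketch below assumes an offsetofend() helper in the spirit of the Linux kernel macro of the same name.

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
#endif

/* Length of the modern common config up to and including queue_reset. */
static size_t example_modern_common_cfg_len(void)
{
    return offsetofend(struct virtio_pci_modern_common_cfg, queue_reset);
}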
+ * + * Return: true on success, else false setting @errp with error. + */ + bool (*alloc)(HostMemoryBackend *backend, Error **errp); }; /** diff --git a/include/sysemu/iommufd.h b/include/sysemu/iommufd.h new file mode 100644 index 0000000000000000000000000000000000000000..9af27ebd6ccb78ca8e16aa3c62629aab9f7f31e4 --- /dev/null +++ b/include/sysemu/iommufd.h @@ -0,0 +1,36 @@ +#ifndef SYSEMU_IOMMUFD_H +#define SYSEMU_IOMMUFD_H + +#include "qom/object.h" +#include "exec/hwaddr.h" +#include "exec/cpu-common.h" + +#define TYPE_IOMMUFD_BACKEND "iommufd" +OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND) + +struct IOMMUFDBackendClass { + ObjectClass parent_class; +}; + +struct IOMMUFDBackend { + Object parent; + + /*< protected >*/ + int fd; /* /dev/iommu file descriptor */ + bool owned; /* is the /dev/iommu opened internally */ + uint32_t users; + + /*< public >*/ +}; + +int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp); +void iommufd_backend_disconnect(IOMMUFDBackend *be); + +int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, + Error **errp); +void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id); +int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, + ram_addr_t size, void *vaddr, bool readonly); +int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, + hwaddr iova, ram_addr_t size); +#endif diff --git a/include/sysemu/qtest.h b/include/sysemu/qtest.h index 85f05b0e46a82de16e2e15b43c73c1a24cb9f47c..b5d5fd3463759532f0538f64e6d18ddacfb13d3b 100644 --- a/include/sysemu/qtest.h +++ b/include/sysemu/qtest.h @@ -23,6 +23,7 @@ static inline bool qtest_enabled(void) return qtest_allowed; } +#ifndef CONFIG_USER_ONLY void qtest_send_prefix(CharBackend *chr); void G_GNUC_PRINTF(2, 3) qtest_sendf(CharBackend *chr, const char *fmt, ...); void qtest_set_command_cb(bool (*pc_cb)(CharBackend *chr, gchar **words)); @@ -35,5 +36,6 @@ void qtest_server_set_send_handler(void (*send)(void *, const char *), void qtest_server_inproc_recv(void *opaque, const char *buf); int64_t qtest_get_virtual_clock(void); +#endif #endif diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 08aae5869fcac6fe8a1e15ba0e4c13837d2db140..83995ae4bd4273263267a2361642e216fd8b5377 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -70,6 +70,11 @@ int replay_get_instructions(void); /*! Updates instructions counter in replay mode. */ void replay_account_executed_instructions(void); +/** + * replay_can_wait: check if we should pause for wait-io + */ +bool replay_can_wait(void); + /* Processing clocks and other time sources */ /*! Save the specified clock */ diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h index c8c2bd8a61b37000c8fdb266fc2191c50d3eb351..0117d243c4ed7dfa6a2ff6a9fb631a18447fbd5c 100644 --- a/include/sysemu/runstate.h +++ b/include/sysemu/runstate.h @@ -40,6 +40,15 @@ static inline bool shutdown_caused_by_guest(ShutdownCause cause) return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN; } +/* + * In a "live" state, the vcpu clock is ticking, and the runstate notifiers + * think we are running. 
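For orientation, the new runstate helpers in this hunk compose naturally: vm_resume() (documented further down) is specified in terms of runstate_is_live(), whose body follows just below. A minimal sketch of that contract, reusing the existing vm_start() and runstate_set() calls, could read as follows (illustrative only, not necessarily the exact implementation):

    /* Sketch of the vm_resume() contract described in this header:
     * a "live" target state restarts the vCPUs, any other state is
     * merely recorded. */
    void vm_resume(RunState state)
    {
        if (runstate_is_live(state)) {
            vm_start();
        } else {
            runstate_set(state);
        }
    }
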
+ */ +static inline bool runstate_is_live(RunState state) +{ + return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED; +} + void vm_start(void); /** @@ -48,9 +57,20 @@ void vm_start(void); * @step_pending: whether any of the CPUs is about to be single-stepped by gdb */ int vm_prepare_start(bool step_pending); + +/** + * vm_resume: If @state is a live state, start the vm and set the state, + * else just set the state. + * + * @state: the state to restore + */ +void vm_resume(RunState state); + int vm_stop(RunState state); int vm_stop_force_state(RunState state); int vm_shutdown(void); +void vm_set_suspended(bool suspended); +bool vm_get_suspended(void); typedef enum WakeupReason { /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */ diff --git a/include/ui/rect.h b/include/ui/rect.h index 94898f92d0d4e893d4590b1e6d41bf51d07a13d9..68f05d78a8e6b0cadc80c229dc2a1fce889e1adf 100644 --- a/include/ui/rect.h +++ b/include/ui/rect.h @@ -19,7 +19,7 @@ static inline void qemu_rect_init(QemuRect *rect, uint16_t width, uint16_t height) { rect->x = x; - rect->y = x; + rect->y = y; rect->width = width; rect->height = height; } diff --git a/iothread.c b/iothread.c index b753286414a7ec2f3c4de0e20f1478ac35224091..6c1fc8c8561d8a8a3b69f3d90d203b05a47ba52e 100644 --- a/iothread.c +++ b/iothread.c @@ -170,8 +170,7 @@ static void iothread_set_aio_context_params(EventLoopBase *base, Error **errp) } aio_context_set_aio_params(iothread->ctx, - iothread->parent_obj.aio_max_batch, - errp); + iothread->parent_obj.aio_max_batch); aio_context_set_thread_pool_params(iothread->ctx, base->thread_pool_min, base->thread_pool_max, errp); diff --git a/job.c b/job.c index 99a2e54b54a899033a9f20ce6252d5be40c7d60f..660ce22c56b6115155451f9b60c4f237ff99b8eb 100644 --- a/job.c +++ b/job.c @@ -464,12 +464,8 @@ void job_unref_locked(Job *job) assert(!job->txn); if (job->driver->free) { - AioContext *aio_context = job->aio_context; job_unlock(); - /* FIXME: aiocontext lock is required because cb calls blk_unref */ - aio_context_acquire(aio_context); job->driver->free(job); - aio_context_release(aio_context); job_lock(); } @@ -840,12 +836,10 @@ static void job_clean(Job *job) /* * Called with job_mutex held, but releases it temporarily. - * Takes AioContext lock internally to invoke a job->driver callback. */ static int job_finalize_single_locked(Job *job) { int job_ret; - AioContext *ctx = job->aio_context; assert(job_is_completed_locked(job)); @@ -854,7 +848,6 @@ static int job_finalize_single_locked(Job *job) job_ret = job->ret; job_unlock(); - aio_context_acquire(ctx); if (!job_ret) { job_commit(job); @@ -867,7 +860,6 @@ static int job_finalize_single_locked(Job *job) job->cb(job->opaque, job_ret); } - aio_context_release(ctx); job_lock(); /* Emit events only if we actually started */ @@ -886,17 +878,13 @@ static int job_finalize_single_locked(Job *job) /* * Called with job_mutex held, but releases it temporarily. - * Takes AioContext lock internally to invoke a job->driver callback. */ static void job_cancel_async_locked(Job *job, bool force) { - AioContext *ctx = job->aio_context; GLOBAL_STATE_CODE(); if (job->driver->cancel) { job_unlock(); - aio_context_acquire(ctx); force = job->driver->cancel(job, force); - aio_context_release(ctx); job_lock(); } else { /* No .cancel() means the job will behave as if force-cancelled */ @@ -931,7 +919,6 @@ static void job_cancel_async_locked(Job *job, bool force) /* * Called with job_mutex held, but releases it temporarily. 
- * Takes AioContext lock internally to invoke a job->driver callback. */ static void job_completed_txn_abort_locked(Job *job) { @@ -979,15 +966,12 @@ static void job_completed_txn_abort_locked(Job *job) static int job_prepare_locked(Job *job) { int ret; - AioContext *ctx = job->aio_context; GLOBAL_STATE_CODE(); if (job->ret == 0 && job->driver->prepare) { job_unlock(); - aio_context_acquire(ctx); ret = job->driver->prepare(job); - aio_context_release(ctx); job_lock(); job->ret = ret; job_update_rc_locked(job); diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 38e5957526c26a124df7d99e79a7cbfed6b25f6d..c59ea55cd8eb02b4bf52aa7e984831af7e829450 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -491,6 +491,38 @@ struct kvm_smccc_filter { #define KVM_HYPERCALL_EXIT_SMC (1U << 0) #define KVM_HYPERCALL_EXIT_16BIT (1U << 1) +/* + * Get feature ID registers userspace writable mask. + * + * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model + * Feature Register 2"): + * + * "The Feature ID space is defined as the System register space in + * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, + * op2=={0-7}." + * + * This covers all currently known R/O registers that indicate + * anything useful feature wise, including the ID registers. + * + * If we ever need to introduce a new range, it will be described as + * such in the range field. + */ +#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \ + ({ \ + __u64 __op1 = (op1) & 3; \ + __op1 -= (__op1 == 3); \ + (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \ + }) + +#define KVM_ARM_FEATURE_ID_RANGE 0 +#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8) + +struct reg_mask_range { + __u64 addr; /* Pointer to mask array */ + __u32 range; /* Requested range */ + __u32 reserved[13]; +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index abe087c53b4b04348431716c1cb638b99d5a788a..756b013fb8324bd7a320e60cebec2ca692faa149 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_getcwd 17 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_lookup_dcookie 18 -__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) +__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall) #define __NR_eventfd2 19 __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create1 20 @@ -816,15 +816,21 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) - #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) - #define __NR_fchmodat2 452 __SYSCALL(__NR_fchmodat2, sys_fchmodat2) +#define __NR_map_shadow_stack 453 +__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack) +#define __NR_futex_wake 454 +__SYSCALL(__NR_futex_wake, sys_futex_wake) +#define __NR_futex_wait 455 +__SYSCALL(__NR_futex_wait, sys_futex_wait) +#define __NR_futex_requeue 456 +__SYSCALL(__NR_futex_requeue, sys_futex_requeue) #undef __NR_syscalls -#define __NR_syscalls 453 +#define __NR_syscalls 457 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-loongarch/bitsperlong.h b/linux-headers/asm-loongarch/bitsperlong.h new file mode 100644 index 0000000000000000000000000000000000000000..6dc0bb0c13b29dd814f403f2fd4efb3b36be0619 --- /dev/null +++ 
b/linux-headers/asm-loongarch/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h new file mode 100644 index 0000000000000000000000000000000000000000..c6ad2ee6106cb0389f0ce626c6d0df65647d351b --- /dev/null +++ b/linux-headers/asm-loongarch/kvm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / SIMD registers (see definitions below). + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. 
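To make the id encoding described above concrete, here is a hand expansion of the KVM_IOC_CSRID() helper defined just below, for CSR index 0. The constant values come from this header and linux/kvm.h; treat it as an illustrative worked example rather than normative text:

    /*
     * KVM_IOC_CSRID(0)
     *   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (0 << LOONGARCH_REG_SHIFT)
     *   = (0x9000000000000000ULL | 0x10000ULL)   -- arch prefix | CSR register set
     *     | (3ULL << 52)                          -- KVM_REG_SIZE_U64
     *   = 0x9030000000010000ULL
     *
     * bits[63..52] carry the arch/size prefix, bits[31..16] select the CSR
     * register set, and bits[15..3] hold the CSR index.
     */
    uint64_t csr0_id = KVM_IOC_CSRID(0);
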
+ */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/linux-headers/asm-loongarch/mman.h b/linux-headers/asm-loongarch/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..8eebf89f5ab17884a98543f3b37a3b710355083b --- /dev/null +++ b/linux-headers/asm-loongarch/mman.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h new file mode 100644 index 0000000000000000000000000000000000000000..fcb668984f0336fa77740deb7b40b410450cd885 --- /dev/null +++ b/linux-headers/asm-loongarch/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#include diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index 46d8500654c3c0648868c8d5d2a355acbd90dd97..994b6f008f5414d5f1cb6f6d08c5e5298bb70261 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ -381,5 +381,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index c2f7ac673bb551c144d09e8e64b174995efec8c4..41dcf5877a1b80bbb01d20e8f3b77e16d9803d6d 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -357,5 +357,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index 757c68f2add872eb6a2ed66aa51229f7950b4530..ae9d334d96e3444c4eb39f924f5fec183100a094 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ b/linux-headers/asm-mips/unistd_o32.h @@ -427,5 +427,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/unistd_32.h 
b/linux-headers/asm-powerpc/unistd_32.h index 8ef94bbac13839ce53ea6c6b79c6886f29055030..b9b23d66d7d9ae5f052f70577c38d573f3225ee9 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -434,6 +434,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 0e7ee43e884fdded1759437a3efad8da9751467d..cbb4b3e8f7c2f1fa9c572b4c37bdd99637c8925b 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -406,6 +406,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h index 992c5e407104958532d7ba930d50447ebe56fa25..60d3b21dead7d8846050d20a96ef1a0b3ad1ba20 100644 --- a/linux-headers/asm-riscv/kvm.h +++ b/linux-headers/asm-riscv/kvm.h @@ -80,6 +80,7 @@ struct kvm_riscv_csr { unsigned long sip; unsigned long satp; unsigned long scounteren; + unsigned long senvcfg; }; /* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr { unsigned long iprio2h; }; +/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_smstateen_csr { + unsigned long sstateen0; +}; + /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_timer { __u64 frequency; @@ -131,6 +137,8 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZICSR, KVM_RISCV_ISA_EXT_ZIFENCEI, KVM_RISCV_ISA_EXT_ZIHPM, + KVM_RISCV_ISA_EXT_SMSTATEEN, + KVM_RISCV_ISA_EXT_ZICOND, KVM_RISCV_ISA_EXT_MAX, }; @@ -148,6 +156,7 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_PMU, KVM_RISCV_SBI_EXT_EXPERIMENTAL, KVM_RISCV_SBI_EXT_VENDOR, + KVM_RISCV_SBI_EXT_DBCN, KVM_RISCV_SBI_EXT_MAX, }; @@ -178,10 +187,13 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) #define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_REG(name) \ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) #define KVM_REG_RISCV_CSR_AIA_REG(name) \ (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \ + (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long)) /* Timer registers are mapped as type 4 */ #define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) diff --git a/linux-headers/asm-riscv/ptrace.h b/linux-headers/asm-riscv/ptrace.h new file mode 100644 index 0000000000000000000000000000000000000000..1e3166caca8c6e616c4674fe2f4a07a5d7b35f0f --- /dev/null +++ b/linux-headers/asm-riscv/ptrace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PTRACE_H +#define _ASM_RISCV_PTRACE_H + +#ifndef __ASSEMBLY__ + +#include + +#define PTRACE_GETFDPIC 33 + +#define PTRACE_GETFDPIC_EXEC 0 +#define PTRACE_GETFDPIC_INTERP 1 + +/* + * 
User-mode register state for core dumps, ptrace, sigcontext + * + * This decouples struct pt_regs from the userspace ABI. + * struct user_regs_struct must form a prefix of struct pt_regs. + */ +struct user_regs_struct { + unsigned long pc; + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; +}; + +struct __riscv_f_ext_state { + __u32 f[32]; + __u32 fcsr; +}; + +struct __riscv_d_ext_state { + __u64 f[32]; + __u32 fcsr; +}; + +struct __riscv_q_ext_state { + __u64 f[64] __attribute__((aligned(16))); + __u32 fcsr; + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. + */ + __u32 reserved[3]; +}; + +struct __riscv_ctx_hdr { + __u32 magic; + __u32 size; +}; + +struct __riscv_extra_ext_header { + __u32 __padding[129] __attribute__((aligned(16))); + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. + */ + __u32 reserved; + struct __riscv_ctx_hdr hdr; +}; + +union __riscv_fp_state { + struct __riscv_f_ext_state f; + struct __riscv_d_ext_state d; + struct __riscv_q_ext_state q; +}; + +struct __riscv_v_ext_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + void *datap; + /* + * In signal handler, datap will be set a correct user stack offset + * and vector registers will be copied to the address of datap + * pointer. 
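The datap description above implies that user space can index individual vector registers out of the saved state. Assuming the conventional layout of 32 registers stored back to back, v0 first, each vlenb bytes wide, a helper might look like this (illustrative sketch, not part of the header; the layout assumption should be checked against the kernel documentation):

    #include <stdint.h>
    #include <stddef.h>

    /* Return a pointer to saved vector register v[n], assuming v0..v31 are
     * laid out contiguously at datap, vlenb bytes per register. */
    static inline uint8_t *riscv_saved_vreg(struct __riscv_v_ext_state *st,
                                            unsigned int n)
    {
        return (uint8_t *)st->datap + (size_t)n * st->vlenb;
    }
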
+ */ +}; + +struct __riscv_v_regset_state { + unsigned long vstart; + unsigned long vl; + unsigned long vtype; + unsigned long vcsr; + unsigned long vlenb; + char vreg[]; +}; + +/* + * According to spec: The number of bits in a single vector register, + * VLEN >= ELEN, which must be a power of 2, and must be no greater than + * 2^16 = 65536bits = 8192bytes + */ +#define RISCV_MAX_VLENB (8192) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_PTRACE_H */ diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 716fa368ca711658c7fa54fa815b370f34089fd7..c093e6d5f9111ae5be58b0702251e19ecffc98cc 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -425,5 +425,9 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index b2a11b1d139f0787b3b7201a46326b6f821278f4..114c0569a49aa5e8a50f9834e2487f0d9abc12b7 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -373,5 +373,9 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index d749ad1c24ec2dfee13a0665d781e03c235465cf..329649c377be129711ce0f9c76433ace56df4706 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -443,6 +443,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index cea67282ebfeadf28cb631222e535ab1ef8c520c..4583606ce68420d75bccf5c411da675f860744b3 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -366,6 +366,9 @@ #define __NR_cachestat 451 #define __NR_fchmodat2 452 #define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 5b2e79bf4c4610246dfb9962052825d0be85806d..146d74d8e4b0146d80ead022189149e27441b8a1 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -318,6 +318,9 @@ #define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450) #define __NR_cachestat (__X32_SYSCALL_BIT + 451) #define __NR_fchmodat2 (__X32_SYSCALL_BIT + 452) +#define __NR_futex_wake (__X32_SYSCALL_BIT + 454) +#define __NR_futex_wait (__X32_SYSCALL_BIT + 455) +#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h index 218bf7ac98d07c589058af8cb1156582d47423db..806d98d09c01d5532549675a9eb31256bc854170 100644 --- a/linux-headers/linux/iommufd.h +++ b/linux-headers/linux/iommufd.h @@ -47,6 
+47,8 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -347,20 +349,86 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. + * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + +/** + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table + * entry attributes + * @IOMMU_VTD_S1_SRE: Supervisor request + * @IOMMU_VTD_S1_EAFE: Extended access enable + * @IOMMU_VTD_S1_WPE: Write protect enable + */ +enum iommu_hwpt_vtd_s1_flags { + IOMMU_VTD_S1_SRE = 1 << 0, + IOMMU_VTD_S1_EAFE = 1 << 1, + IOMMU_VTD_S1_WPE = 1 << 2, +}; + +/** + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table + * info (IOMMU_HWPT_DATA_VTD_S1) + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags + * @pgtbl_addr: The base address of the stage-1 page table. + * @addr_width: The address width of the stage-1 page table + * @__reserved: Must be 0 + */ +struct iommu_hwpt_vtd_s1 { + __aligned_u64 flags; + __aligned_u64 pgtbl_addr; + __u32 addr_width; + __u32 __reserved; +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, + IOMMU_HWPT_DATA_VTD_S1, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. + * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. 
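Read concretely, the kernel-managed case above boils down to filling the structure with an IOAS id and IOMMU_HWPT_DATA_NONE. A minimal sketch, assuming an open iommufd file descriptor plus existing dev_id/ioas_id values (error handling trimmed):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    /* Allocate a kernel-managed HWPT from @ioas_id for @dev_id; returns the
     * new HWPT id or (uint32_t)-1 on failure.  Sketch only. */
    static uint32_t hwpt_alloc_kernel_managed(int iommufd, uint32_t dev_id,
                                              uint32_t ioas_id)
    {
        struct iommu_hwpt_alloc cmd = {
            .size = sizeof(cmd),
            .flags = 0,                         /* or IOMMU_HWPT_ALLOC_NEST_PARENT */
            .dev_id = dev_id,
            .pt_id = ioas_id,                   /* an IOAS => kernel-managed HWPT */
            .data_type = IOMMU_HWPT_DATA_NONE,  /* data_len/data_uptr stay zero */
        };

        if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd) < 0) {
            return (uint32_t)-1;
        }
        return cmd.out_hwpt_id;
    }
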
*/ struct iommu_hwpt_alloc { __u32 size; @@ -369,13 +437,26 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) +/** + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings + * on a nested_parent domain. + * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html + */ +enum iommu_hw_info_vtd_flags { + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0, +}; + /** * struct iommu_hw_info_vtd - Intel VT-d hardware information * - * @flags: Must be 0 + * @flags: Combination of enum iommu_hw_info_vtd_flags * @__reserved: Must be 0 * * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec @@ -404,6 +485,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -415,6 +510,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +536,81 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. 
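For completeness, enabling dirty tracking on a HWPT with the ioctl defined above is a one-struct affair (sketch, assuming the iommufd fd and a hwpt_id obtained from IOMMU_HWPT_ALLOC):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/iommufd.h>

    static int hwpt_dirty_tracking_enable(int iommufd, uint32_t hwpt_id)
    {
        struct iommu_hwpt_set_dirty_tracking cmd = {
            .size = sizeof(cmd),
            .flags = IOMMU_HWPT_DIRTY_TRACKING_ENABLE,
            .hwpt_id = hwpt_id,
        };

        return ioctl(iommufd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd);
    }
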
+ * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. + * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 0d74ee999aa9e1fbd4e62ee6ef28e759e34b5adc..549fea3a978af9e6b9f2cbc4dab7f71e10ec8158 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1188,6 +1196,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 #ifdef KVM_CAP_IRQ_ROUTING @@ -1358,6 +1367,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1558,6 +1568,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h index 12ccb70099d417a8870fe38a73dd762a8ab3855c..bcb21339ee39fae6739e70c27e98b2d336dcc253 100644 --- a/linux-headers/linux/psp-sev.h +++ b/linux-headers/linux/psp-sev.h @@ -68,6 +68,7 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, SEV_RET_MAX, } sev_ret_code; diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h index 9bb07083ac89e7ea6d51903e19804cd403435978..bf9749dd1422607dd57e1f64b8a39f0b32ab0ede 
100644 --- a/linux-headers/linux/stddef.h +++ b/linux-headers/linux/stddef.h @@ -27,8 +27,13 @@ union { \ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ - } + } ATTRS +#ifdef __cplusplus +/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */ +#define __DECLARE_FLEX_ARRAY(T, member) \ + T member[0] +#else /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union * @@ -49,3 +54,5 @@ #ifndef __counted_by #define __counted_by(m) #endif + +#endif /* _LINUX_STDDEF_H */ diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h index 59978fbaae3379174fe21a00ce668b2db5eba302..953c75fedae9eac851fed94cac653c29aa493148 100644 --- a/linux-headers/linux/userfaultfd.h +++ b/linux-headers/linux/userfaultfd.h @@ -40,7 +40,8 @@ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ UFFD_FEATURE_WP_UNPOPULATED | \ - UFFD_FEATURE_POISON) + UFFD_FEATURE_POISON | \ + UFFD_FEATURE_WP_ASYNC) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -216,6 +217,11 @@ struct uffdio_api { * (i.e. empty ptes). This will be the default behavior for shmem * & hugetlbfs, so this flag only affects anonymous memory behavior * when userfault write-protection mode is registered. + * + * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -232,6 +238,7 @@ struct uffdio_api { #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) #define UFFD_FEATURE_POISON (1<<14) +#define UFFD_FEATURE_WP_ASYNC (1<<15) __u64 features; __u64 ioctls; diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index acf72b4999fa75dfcd4c49426c9fdbc135251248..8e175ece31ce676ba43b5b2709f02ad8e69979c9 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -277,8 +277,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ __u32 cap_offset; /* Offset within info struct of first cap */ - __u64 size; /* Region size (bytes) */ - __u64 offset; /* Region offset from start of device fd */ + __aligned_u64 size; /* Region size (bytes) */ + __aligned_u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) @@ -294,8 +294,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 struct vfio_region_sparse_mmap_area { - __u64 offset; /* Offset of mmap'able area within region */ - __u64 size; /* Size of mmap'able area */ + __aligned_u64 offset; /* Offset of mmap'able area within region */ + __aligned_u64 size; /* Size of mmap'able area */ }; struct vfio_region_info_cap_sparse_mmap { @@ -450,9 +450,9 @@ struct vfio_device_migration_info { VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; - __u64 pending_bytes; - __u64 data_offset; - __u64 data_size; + __aligned_u64 pending_bytes; + __aligned_u64 data_offset; + __aligned_u64 data_size; }; /* @@ -476,7 +476,7 @@ struct vfio_device_migration_info { struct vfio_region_info_cap_nvlink2_ssatgt { struct vfio_info_cap_header header; - __u64 tgt; + __aligned_u64 tgt; }; /* @@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info { __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ /* out */ __u32 
drm_format; /* drm format of plane */ - __u64 drm_format_mod; /* tiled mode */ + __aligned_u64 drm_format_mod; /* tiled mode */ __u32 width; /* width of plane */ __u32 height; /* height of plane */ __u32 stride; /* stride of plane */ @@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info { __u32 region_index; /* region index */ __u32 dmabuf_id; /* dma-buf id */ }; + __u32 reserved; }; #define VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -863,9 +864,10 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ #define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) - __u64 offset; /* device fd offset of write */ - __u64 data; /* data to be written */ + __aligned_u64 offset; /* device fd offset of write */ + __aligned_u64 data; /* data to be written */ __s32 fd; /* -1 for de-assignment */ + __u32 reserved; }; #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) @@ -1434,6 +1436,27 @@ struct vfio_device_feature_mig_data_size { #define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 +/** + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device + * based on the operation specified in op flag. + * + * The functionality is incorporated for devices that needs bus master control, + * but the in-band device interface lacks the support. Consequently, it is not + * applicable to PCI devices, as bus master control for PCI devices is managed + * in-band through the configuration space. At present, this feature is supported + * only for CDX devices. + * When the device's BUS MASTER setting is configured as CLEAR, it will result in + * blocking all incoming DMA requests from the device. On the other hand, configuring + * the device's BUS MASTER setting as SET (enable) will grant the device the + * capability to perform DMA to the host memory. + */ +struct vfio_device_feature_bus_master { + __u32 op; +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */ +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */ +}; +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10 + /* -------- API for Type1 VFIO IOMMU -------- */ /** @@ -1449,7 +1472,7 @@ struct vfio_iommu_type1_info { __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ #define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */ __u32 cap_offset; /* Offset within info struct of first cap */ __u32 pad; }; diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index f5c48b61ab62244104bbf1b2100d3db7286f8c82..649560c685f13b73feaafb96b64c351b6eec2c25 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -219,4 +219,12 @@ */ #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via num field of vhost_vring_state. 
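Following the read-index/write-group convention spelled out above, querying the descriptor-table group of a virtqueue through the ioctl defined just below could look like this (sketch; vdpa_fd is assumed to be an open vhost-vdpa device fd):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Return the group backing the descriptor/driver/device areas of
     * virtqueue @vq_idx, or -1 on error.  Sketch only. */
    static int vdpa_desc_group(int vdpa_fd, unsigned int vq_idx)
    {
        struct vhost_vring_state s = { .index = vq_idx };

        if (ioctl(vdpa_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &s) < 0) {
            return -1;
        }
        return s.num;   /* group id, usable with VHOST_VDPA_SET_GROUP_ASID */
    }
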
+ */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) #endif diff --git a/linux-user/alpha/target_elf.h b/linux-user/alpha/target_elf.h index 344e9f4d395b06266e44dd04c79ca2c299164fc5..b77d638f6d4a8652344509010d0220945b2968a1 100644 --- a/linux-user/alpha/target_elf.h +++ b/linux-user/alpha/target_elf.h @@ -9,6 +9,6 @@ #define ALPHA_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "any"; + return "ev67"; } #endif diff --git a/memory_ldst.c.inc b/memory_ldst.c.inc index 84b868f29464853b60cd070534d1d3a346692a6d..0e6f3940a9a155114f5967500a276059f0f67ca6 100644 --- a/memory_ldst.c.inc +++ b/memory_ldst.c.inc @@ -61,7 +61,7 @@ static inline uint32_t glue(address_space_ldl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -130,7 +130,7 @@ static inline uint64_t glue(address_space_ldq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -186,7 +186,7 @@ uint8_t glue(address_space_ldub, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -234,7 +234,7 @@ static inline uint16_t glue(address_space_lduw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); return val; @@ -295,7 +295,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -339,7 +339,7 @@ static inline void glue(address_space_stl_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -391,7 +391,7 @@ void glue(address_space_stb, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -435,7 +435,7 @@ static inline void glue(address_space_stw_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } @@ -499,7 +499,7 @@ static void glue(address_space_stq_internal, SUFFIX)(ARG1_DECL, *result = r; } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } RCU_READ_UNLOCK(); } diff --git a/meson.build b/meson.build index 6c77d9687ded405fcd6cd8b231929663644c890a..38deb9363cabc9b7f5e2454ecdcd385c143e18f6 100644 --- a/meson.build +++ b/meson.build @@ -9,27 +9,18 @@ add_test_setup('thorough', env: ['G_TEST_SLOW=1', 'SPEED=thorough']) meson.add_postconf_script(find_program('scripts/symlink-install-tree.py')) +#################### +# Global variables # +#################### + not_found = dependency('', required: false) keyval = import('keyval') ss = import('sourceset') fs = import('fs') -targetos = host_machine.system() -sh = find_program('sh') +host_os = host_machine.system() config_host = keyval.load(meson.current_build_dir() / 'config-host.mak') -cc = meson.get_compiler('c') -all_languages = ['c'] -if targetos == 'windows' and add_languages('cpp', required: false, native: false) - all_languages += ['cpp'] - cxx = meson.get_compiler('cpp') -endif -if targetos == 'darwin' and \ - add_languages('objc', required: get_option('cocoa'), native: false) - all_languages += ['objc'] - objc = meson.get_compiler('objc') -endif - # Temporary directory used for files created 
while # configure runs. Since it is in the build directory # we can safely blow away any previous version of it @@ -49,7 +40,6 @@ qemu_moddir = get_option('libdir') / get_option('qemu_suffix') qemu_desktopdir = get_option('datadir') / 'applications' qemu_icondir = get_option('datadir') / 'icons' -config_host_data = configuration_data() genh = [] qapi_trace_events = [] @@ -61,6 +51,127 @@ supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64' cpu = host_machine.cpu_family() target_dirs = config_host['TARGET_DIRS'].split() + +############ +# Programs # +############ + +sh = find_program('sh') +python = import('python').find_installation() + +cc = meson.get_compiler('c') +all_languages = ['c'] +if host_os == 'windows' and add_languages('cpp', required: false, native: false) + all_languages += ['cpp'] + cxx = meson.get_compiler('cpp') +endif +if host_os == 'darwin' and \ + add_languages('objc', required: get_option('cocoa'), native: false) + all_languages += ['objc'] + objc = meson.get_compiler('objc') +endif + +dtrace = not_found +stap = not_found +if 'dtrace' in get_option('trace_backends') + dtrace = find_program('dtrace', required: true) + stap = find_program('stap', required: false) + if stap.found() + # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol + # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility + # instead. QEMU --enable-modules depends on this because the SystemTap + # semaphores are linked into the main binary and not the module's shared + # object. + add_global_arguments('-DSTAP_SDT_V2', + native: false, language: all_languages) + endif +endif + +if get_option('iasl') == '' + iasl = find_program('iasl', required: false) +else + iasl = find_program(get_option('iasl'), required: true) +endif + +edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] +unpack_edk2_blobs = false +foreach target : edk2_targets + if target in target_dirs + bzip2 = find_program('bzip2', required: get_option('install_blobs')) + unpack_edk2_blobs = bzip2.found() + break + endif +endforeach + +##################### +# Option validation # +##################### + +# Fuzzing +if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ + not cc.links(''' + #include + #include + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } + ''', + args: ['-Werror', '-fsanitize=fuzzer']) + error('Your compiler does not support -fsanitize=fuzzer') +endif + +# Tracing backends +if 'ftrace' in get_option('trace_backends') and host_os != 'linux' + error('ftrace is supported only on Linux') +endif +if 'syslog' in get_option('trace_backends') and not cc.compiles(''' + #include + int main(void) { + openlog("qemu", LOG_PID, LOG_DAEMON); + syslog(LOG_INFO, "configure"); + return 0; + }''') + error('syslog is not supported on this system') +endif + +# Miscellaneous Linux-only features +get_option('mpath') \ + .require(host_os == 'linux', error_message: 'Multipath is supported only on Linux') + +multiprocess_allowed = get_option('multiprocess') \ + .require(host_os == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \ + .allowed() + +vfio_user_server_allowed = get_option('vfio_user_server') \ + .require(host_os == 'linux', error_message: 'vfio-user server is supported only on Linux') \ + .allowed() + +have_tpm = get_option('tpm') \ + .require(host_os != 'windows', error_message: 'TPM emulation only available on 
POSIX systems') \ + .allowed() + +# vhost +have_vhost_user = get_option('vhost_user') \ + .disable_auto_if(host_os != 'linux') \ + .require(host_os != 'windows', + error_message: 'vhost-user is not available on Windows').allowed() +have_vhost_vdpa = get_option('vhost_vdpa') \ + .require(host_os == 'linux', + error_message: 'vhost-vdpa is only available on Linux').allowed() +have_vhost_kernel = get_option('vhost_kernel') \ + .require(host_os == 'linux', + error_message: 'vhost-kernel is only available on Linux').allowed() +have_vhost_user_crypto = get_option('vhost_crypto') \ + .require(have_vhost_user, + error_message: 'vhost-crypto requires vhost-user to be enabled').allowed() + +have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel + +have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed() +have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed() +have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() +have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa + +# type of binaries to build have_linux_user = false have_bsd_user = false have_system = false @@ -70,23 +181,27 @@ foreach target : target_dirs have_system = have_system or target.endswith('-softmmu') endforeach have_user = have_linux_user or have_bsd_user + have_tools = get_option('tools') \ .disable_auto_if(not have_system) \ .allowed() have_ga = get_option('guest_agent') \ .disable_auto_if(not have_system and not have_tools) \ - .require(targetos in ['sunos', 'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'], + .require(host_os in ['sunos', 'linux', 'windows', 'freebsd', 'netbsd', 'openbsd'], error_message: 'unsupported OS for QEMU guest agent') \ .allowed() +have_block = have_system or have_tools + enable_modules = get_option('modules') \ - .require(targetos != 'windows', + .require(host_os != 'windows', error_message: 'Modules are not available for Windows') \ .require(not get_option('prefer_static'), error_message: 'Modules are incompatible with static linking') \ .allowed() -have_block = have_system or have_tools -python = import('python').find_installation() +####################################### +# Variables for host and accelerators # +####################################### if cpu not in supported_cpus host_arch = 'unknown' @@ -114,15 +229,11 @@ elif cpu in ['riscv32'] kvm_targets = ['riscv32-softmmu'] elif cpu in ['riscv64'] kvm_targets = ['riscv64-softmmu'] +elif cpu in ['loongarch64'] + kvm_targets = ['loongarch64-softmmu'] else kvm_targets = [] endif - -kvm_targets_c = '""' -if get_option('kvm').allowed() and targetos == 'linux' - kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' -endif -config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) accelerator_targets = { 'CONFIG_KVM': kvm_targets } if cpu in ['x86', 'x86_64'] @@ -151,42 +262,10 @@ endif modular_tcg = [] # Darwin does not support references to thread-local variables in modules -if targetos != 'darwin' +if host_os != 'darwin' modular_tcg = ['i386-softmmu', 'x86_64-softmmu'] endif -edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] -unpack_edk2_blobs = false -foreach target : edk2_targets - if target in target_dirs - bzip2 = find_program('bzip2', required: get_option('install_blobs')) - unpack_edk2_blobs = bzip2.found() - break - endif -endforeach - -dtrace = not_found -stap = not_found -if 'dtrace' in get_option('trace_backends') - dtrace = find_program('dtrace', required: true) - stap = find_program('stap', 
required: false) - if stap.found() - # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol - # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility - # instead. QEMU --enable-modules depends on this because the SystemTap - # semaphores are linked into the main binary and not the module's shared - # object. - add_global_arguments('-DSTAP_SDT_V2', - native: false, language: all_languages) - endif -endif - -if get_option('iasl') == '' - iasl = find_program('iasl', required: false) -else - iasl = find_program(get_option('iasl'), required: true) -endif - ################## # Compiler flags # ################## @@ -223,18 +302,18 @@ qemu_common_flags = [ qemu_cflags = [] qemu_ldflags = [] -if targetos == 'darwin' +if host_os == 'darwin' # Disable attempts to use ObjectiveC features in os/object.h since they # won't work when we're compiling with gcc as a C compiler. if compiler.get_id() == 'gcc' qemu_common_flags += '-DOS_OBJECT_USE_OBJC=0' endif -elif targetos == 'sunos' +elif host_os == 'sunos' # needed for CMSG_ macros in sys/socket.h qemu_common_flags += '-D_XOPEN_SOURCE=600' # needed for TIOCWIN* defines in termios.h qemu_common_flags += '-D__EXTENSIONS__' -elif targetos == 'haiku' +elif host_os == 'haiku' qemu_common_flags += ['-DB_USE_POSITIVE_POSIX_ERRORS', '-D_BSD_SOURCE', '-fPIC'] endif @@ -318,10 +397,10 @@ ucontext_probe = ''' # For POSIX prefer ucontext, but it's not always possible. The fallback # is sigcontext. supported_backends = [] -if targetos == 'windows' +if host_os == 'windows' supported_backends += ['windows'] else - if targetos != 'darwin' and cc.links(ucontext_probe) + if host_os != 'darwin' and cc.links(ucontext_probe) supported_backends += ['ucontext'] endif supported_backends += ['sigaltstack'] @@ -392,13 +471,13 @@ endif # The combination is known as "full relro", because .got.plt is read-only too. qemu_ldflags += cc.get_supported_link_arguments('-Wl,-z,relro', '-Wl,-z,now') -if targetos == 'windows' +if host_os == 'windows' qemu_ldflags += cc.get_supported_link_arguments('-Wl,--no-seh', '-Wl,--nxcompat') qemu_ldflags += cc.get_supported_link_arguments('-Wl,--dynamicbase', '-Wl,--high-entropy-va') endif # Exclude --warn-common with TSan to suppress warnings from the TSan libraries. -if targetos != 'sunos' and not get_option('tsan') +if host_os != 'sunos' and not get_option('tsan') qemu_ldflags += cc.get_supported_link_arguments('-Wl,--warn-common') endif @@ -433,6 +512,46 @@ if get_option('fuzzing') endif endif +if get_option('cfi') + cfi_flags=[] + # Check for dependency on LTO + if not get_option('b_lto') + error('Selected Control-Flow Integrity but LTO is disabled') + endif + if enable_modules + error('Selected Control-Flow Integrity is not compatible with modules') + endif + # Check for cfi flags. 
CFI requires LTO so we can't use + # get_supported_arguments, but need a more complex "compiles" which allows + # custom arguments + if cc.compiles('int main () { return 0; }', name: '-fsanitize=cfi-icall', + args: ['-flto', '-fsanitize=cfi-icall'] ) + cfi_flags += '-fsanitize=cfi-icall' + else + error('-fsanitize=cfi-icall is not supported by the compiler') + endif + if cc.compiles('int main () { return 0; }', + name: '-fsanitize-cfi-icall-generalize-pointers', + args: ['-flto', '-fsanitize=cfi-icall', + '-fsanitize-cfi-icall-generalize-pointers'] ) + cfi_flags += '-fsanitize-cfi-icall-generalize-pointers' + else + error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler') + endif + if get_option('cfi_debug') + if cc.compiles('int main () { return 0; }', + name: '-fno-sanitize-trap=cfi-icall', + args: ['-flto', '-fsanitize=cfi-icall', + '-fno-sanitize-trap=cfi-icall'] ) + cfi_flags += '-fno-sanitize-trap=cfi-icall' + else + error('-fno-sanitize-trap=cfi-icall is not supported by the compiler') + endif + endif + add_global_arguments(cfi_flags, native: false, language: all_languages) + add_global_link_arguments(cfi_flags, native: false, language: all_languages) +endif + add_global_arguments(qemu_common_flags, native: false, language: all_languages) add_global_link_arguments(qemu_ldflags, native: false, language: all_languages) @@ -468,7 +587,7 @@ warn_flags = [ '-Wshadow=local', ] -if targetos != 'darwin' +if host_os != 'darwin' warn_flags += ['-Wthread-safety'] endif @@ -488,7 +607,7 @@ if 'objc' in all_languages # Note sanitizer flags are not applied to Objective-C sources! add_project_arguments(objc.get_supported_arguments(warn_flags), native: false, language: 'objc') endif -if targetos == 'linux' +if host_os == 'linux' add_project_arguments('-isystem', meson.current_source_dir() / 'linux-headers', '-isystem', 'linux-headers', language: all_languages) @@ -518,75 +637,10 @@ if sparse.found() '-Wno-non-pointer-null']) endif -########################################### -# Target-specific checks and dependencies # -########################################### - -# Fuzzing -if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ - not cc.links(''' - #include - #include - int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); - int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } - ''', - args: ['-Werror', '-fsanitize=fuzzer']) - error('Your compiler does not support -fsanitize=fuzzer') -endif - -# Tracing backends -if 'ftrace' in get_option('trace_backends') and targetos != 'linux' - error('ftrace is supported only on Linux') -endif -if 'syslog' in get_option('trace_backends') and not cc.compiles(''' - #include - int main(void) { - openlog("qemu", LOG_PID, LOG_DAEMON); - syslog(LOG_INFO, "configure"); - return 0; - }''') - error('syslog is not supported on this system') -endif - -# Miscellaneous Linux-only features -get_option('mpath') \ - .require(targetos == 'linux', error_message: 'Multipath is supported only on Linux') - -multiprocess_allowed = get_option('multiprocess') \ - .require(targetos == 'linux', error_message: 'Multiprocess QEMU is supported only on Linux') \ - .allowed() - -vfio_user_server_allowed = get_option('vfio_user_server') \ - .require(targetos == 'linux', error_message: 'vfio-user server is supported only on Linux') \ - .allowed() - -have_tpm = get_option('tpm') \ - .require(targetos != 'windows', error_message: 'TPM emulation only available on POSIX systems') \ - .allowed() - -# vhost -have_vhost_user = 
get_option('vhost_user') \ - .disable_auto_if(targetos != 'linux') \ - .require(targetos != 'windows', - error_message: 'vhost-user is not available on Windows').allowed() -have_vhost_vdpa = get_option('vhost_vdpa') \ - .require(targetos == 'linux', - error_message: 'vhost-vdpa is only available on Linux').allowed() -have_vhost_kernel = get_option('vhost_kernel') \ - .require(targetos == 'linux', - error_message: 'vhost-kernel is only available on Linux').allowed() -have_vhost_user_crypto = get_option('vhost_crypto') \ - .require(have_vhost_user, - error_message: 'vhost-crypto requires vhost-user to be enabled').allowed() - -have_vhost = have_vhost_user or have_vhost_vdpa or have_vhost_kernel - -have_vhost_net_user = have_vhost_user and get_option('vhost_net').allowed() -have_vhost_net_vdpa = have_vhost_vdpa and get_option('vhost_net').allowed() -have_vhost_net_kernel = have_vhost_kernel and get_option('vhost_net').allowed() -have_vhost_net = have_vhost_net_kernel or have_vhost_net_user or have_vhost_net_vdpa +##################################### +# Host-specific libraries and flags # +##################################### -# Target-specific libraries and flags libm = cc.find_library('m', required: false) threads = dependency('threads') util = cc.find_library('util', required: false) @@ -596,13 +650,11 @@ version_res = [] coref = [] iokit = [] emulator_link_args = [] -nvmm =not_found -hvf = not_found midl = not_found widl = not_found pathcch = not_found host_dsosuf = '.so' -if targetos == 'windows' +if host_os == 'windows' midl = find_program('midl', required: false) widl = find_program('widl', required: false) pathcch = cc.find_library('pathcch') @@ -614,31 +666,34 @@ if targetos == 'windows' depend_files: files('pc-bios/qemu-nsis.ico'), include_directories: include_directories('.')) host_dsosuf = '.dll' -elif targetos == 'darwin' +elif host_os == 'darwin' coref = dependency('appleframeworks', modules: 'CoreFoundation') iokit = dependency('appleframeworks', modules: 'IOKit', required: false) host_dsosuf = '.dylib' -elif targetos == 'sunos' +elif host_os == 'sunos' socket = [cc.find_library('socket'), cc.find_library('nsl'), cc.find_library('resolv')] -elif targetos == 'haiku' +elif host_os == 'haiku' socket = [cc.find_library('posix_error_mapper'), cc.find_library('network'), cc.find_library('bsd')] -elif targetos == 'openbsd' +elif host_os == 'openbsd' if get_option('tcg').allowed() and target_dirs.length() > 0 # Disable OpenBSD W^X if available emulator_link_args = cc.get_supported_link_arguments('-Wl,-z,wxneeded') endif endif -# Target-specific configuration of accelerators +############################################### +# Host-specific configuration of accelerators # +############################################### + accelerators = [] -if get_option('kvm').allowed() and targetos == 'linux' +if get_option('kvm').allowed() and host_os == 'linux' accelerators += 'CONFIG_KVM' endif -if get_option('whpx').allowed() and targetos == 'windows' +if get_option('whpx').allowed() and host_os == 'windows' if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64' error('WHPX requires 64-bit host') elif cc.has_header('winhvplatform.h', required: get_option('whpx')) and \ @@ -646,6 +701,8 @@ if get_option('whpx').allowed() and targetos == 'windows' accelerators += 'CONFIG_WHPX' endif endif + +hvf = not_found if get_option('hvf').allowed() hvf = dependency('appleframeworks', modules: 'Hypervisor', required: get_option('hvf')) @@ -653,7 +710,9 @@ if get_option('hvf').allowed() accelerators 
+= 'CONFIG_HVF' endif endif -if targetos == 'netbsd' + +nvmm = not_found +if host_os == 'netbsd' nvmm = cc.find_library('nvmm', required: get_option('nvmm')) if nvmm.found() accelerators += 'CONFIG_NVMM' @@ -700,6 +759,85 @@ if 'CONFIG_WHPX' not in accelerators and get_option('whpx').enabled() error('WHPX not available on this platform') endif +xen = not_found +if get_option('xen').enabled() or (get_option('xen').auto() and have_system) + xencontrol = dependency('xencontrol', required: false, + method: 'pkg-config') + if xencontrol.found() + xen_pc = declare_dependency(version: xencontrol.version(), + dependencies: [ + xencontrol, + # disabler: true makes xen_pc.found() return false if any is not found + dependency('xenstore', required: false, + method: 'pkg-config', + disabler: true), + dependency('xenforeignmemory', required: false, + method: 'pkg-config', + disabler: true), + dependency('xengnttab', required: false, + method: 'pkg-config', + disabler: true), + dependency('xenevtchn', required: false, + method: 'pkg-config', + disabler: true), + dependency('xendevicemodel', required: false, + method: 'pkg-config', + disabler: true), + # optional, no "disabler: true" + dependency('xentoolcore', required: false, + method: 'pkg-config')]) + if xen_pc.found() + xen = xen_pc + endif + endif + if not xen.found() + xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1' ] + xen_libs = { + '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], + '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.8.0': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], + } + xen_deps = {} + foreach ver: xen_tests + # cache the various library tests to avoid polluting the logs + xen_test_deps = [] + foreach l: xen_libs[ver] + if l not in xen_deps + xen_deps += { l: cc.find_library(l, required: false) } + endif + xen_test_deps += xen_deps[l] + endforeach + + # Use -D to pick just one of the test programs in scripts/xen-detect.c + xen_version = ver.split('.') + xen_ctrl_version = xen_version[0] + \ + ('0' + xen_version[1]).substring(-2) + \ + ('0' + xen_version[2]).substring(-2) + if cc.links(files('scripts/xen-detect.c'), + args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version, + dependencies: xen_test_deps) + xen = declare_dependency(version: ver, dependencies: xen_test_deps) + break + endif + endforeach + endif + if xen.found() + accelerators += 'CONFIG_XEN' + elif get_option('xen').enabled() + error('could not compile and link Xen test program') + endif +endif +have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ + .require(xen.found(), + error_message: 'Xen PCI passthrough requested but Xen not enabled') \ + .require(host_os == 'linux', + error_message: 'Xen PCI passthrough not available on this platform') \ + .require(cpu == 'x86' or cpu == 'x86_64', + error_message: 'Xen PCI passthrough not available on this platform') \ + .allowed() + ################ # Dependencies # ################ @@ -722,7 +860,7 @@ endif # This workaround is required due to a bug in pkg-config file for glib as it # doesn't define GLIB_STATIC_COMPILATION for pkg-config --static -if targetos == 'windows' and get_option('prefer_static') +if host_os == 'windows' and 
get_option('prefer_static') glib_cflags += ['-DGLIB_STATIC_COMPILATION'] endif @@ -983,12 +1121,12 @@ if vde.found() and not cc.links(''' endif pulse = not_found -if not get_option('pa').auto() or (targetos == 'linux' and have_system) +if not get_option('pa').auto() or (host_os == 'linux' and have_system) pulse = dependency('libpulse', required: get_option('pa'), method: 'pkg-config') endif alsa = not_found -if not get_option('alsa').auto() or (targetos == 'linux' and have_system) +if not get_option('alsa').auto() or (host_os == 'linux' and have_system) alsa = dependency('alsa', required: get_option('alsa'), method: 'pkg-config') endif @@ -998,7 +1136,7 @@ if not get_option('jack').auto() or have_system method: 'pkg-config') endif pipewire = not_found -if not get_option('pipewire').auto() or (targetos == 'linux' and have_system) +if not get_option('pipewire').auto() or (host_os == 'linux' and have_system) pipewire = dependency('libpipewire-0.3', version: '>=0.3.60', required: get_option('pipewire'), method: 'pkg-config') @@ -1043,17 +1181,11 @@ if not get_option('zstd').auto() or have_block endif virgl = not_found -have_vhost_user_gpu = have_tools and targetos == 'linux' and pixman.found() +have_vhost_user_gpu = have_tools and host_os == 'linux' and pixman.found() if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu virgl = dependency('virglrenderer', method: 'pkg-config', required: get_option('virglrenderer')) - if virgl.found() - config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT', - cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d', - prefix: '#include ', - dependencies: virgl)) - endif endif rutabaga = not_found if not get_option('rutabaga_gfx').auto() or have_system or have_vhost_user_gpu @@ -1074,7 +1206,7 @@ if not get_option('curl').auto() or have_block required: get_option('curl')) endif libudev = not_found -if targetos == 'linux' and (have_system or have_tools) +if host_os == 'linux' and (have_system or have_tools) libudev = dependency('libudev', method: 'pkg-config', required: get_option('libudev')) @@ -1082,7 +1214,7 @@ endif mpathlibs = [libudev] mpathpersist = not_found -if targetos == 'linux' and have_tools and get_option('mpath').allowed() +if host_os == 'linux' and have_tools and get_option('mpath').allowed() mpath_test_source = ''' #include #include @@ -1153,7 +1285,7 @@ if have_system and get_option('curses').allowed() return 0; }''' - curses_dep_list = targetos == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] + curses_dep_list = host_os == 'windows' ? ['ncurses', 'ncursesw'] : ['ncursesw'] curses = dependency(curses_dep_list, required: false, method: 'pkg-config') @@ -1170,13 +1302,13 @@ if have_system and get_option('curses').allowed() endif if not curses.found() has_curses_h = cc.has_header('curses.h', args: curses_compile_args) - if targetos != 'windows' and not has_curses_h + if host_os != 'windows' and not has_curses_h message('Trying with /usr/include/ncursesw') curses_compile_args += ['-I/usr/include/ncursesw'] has_curses_h = cc.has_header('curses.h', args: curses_compile_args) endif if has_curses_h - curses_libname_list = (targetos == 'windows' ? ['pdcurses'] : ['ncursesw', 'cursesw']) + curses_libname_list = (host_os == 'windows' ? 
['pdcurses'] : ['ncursesw', 'cursesw']) foreach curses_libname : curses_libname_list libcurses = cc.find_library(curses_libname, required: false) @@ -1399,7 +1531,7 @@ oss = not_found if get_option('oss').allowed() and have_system if not cc.has_header('sys/soundcard.h') # not found - elif targetos == 'netbsd' + elif host_os == 'netbsd' oss = cc.find_library('ossaudio', required: get_option('oss')) else oss = declare_dependency() @@ -1412,7 +1544,7 @@ if get_option('oss').allowed() and have_system endif endif dsound = not_found -if not get_option('dsound').auto() or (targetos == 'windows' and have_system) +if not get_option('dsound').auto() or (host_os == 'windows' and have_system) if cc.has_header('dsound.h') dsound = declare_dependency(link_args: ['-lole32', '-ldxguid']) endif @@ -1425,7 +1557,7 @@ if not get_option('dsound').auto() or (targetos == 'windows' and have_system) endif coreaudio = not_found -if not get_option('coreaudio').auto() or (targetos == 'darwin' and have_system) +if not get_option('coreaudio').auto() or (host_os == 'darwin' and have_system) coreaudio = dependency('appleframeworks', modules: 'CoreAudio', required: get_option('coreaudio')) endif @@ -1519,6 +1651,25 @@ if not gnutls_crypto.found() endif endif +capstone = not_found +if not get_option('capstone').auto() or have_system or have_user + capstone = dependency('capstone', version: '>=3.0.5', + method: 'pkg-config', + required: get_option('capstone')) + + # Some versions of capstone have broken pkg-config file + # that reports a wrong -I path, causing the #include to + # fail later. If the system has such a broken version + # do not use it. + if capstone.found() and not cc.compiles('#include ', + dependencies: [capstone]) + capstone = not_found + if get_option('capstone').enabled() + error('capstone requested, but it does not appear to work') + endif + endif +endif + gmp = dependency('gmp', required: false, method: 'pkg-config') if nettle.found() and gmp.found() hogweed = dependency('hogweed', version: '>=3.4', @@ -1673,98 +1824,18 @@ if not get_option('rdma').auto() or have_system endforeach endif -xen = not_found -if get_option('xen').enabled() or (get_option('xen').auto() and have_system) - xencontrol = dependency('xencontrol', required: false, - method: 'pkg-config') - if xencontrol.found() - xen_pc = declare_dependency(version: xencontrol.version(), - dependencies: [ - xencontrol, - # disabler: true makes xen_pc.found() return false if any is not found - dependency('xenstore', required: false, - method: 'pkg-config', - disabler: true), - dependency('xenforeignmemory', required: false, - method: 'pkg-config', - disabler: true), - dependency('xengnttab', required: false, - method: 'pkg-config', - disabler: true), - dependency('xenevtchn', required: false, - method: 'pkg-config', - disabler: true), - dependency('xendevicemodel', required: false, - method: 'pkg-config', - disabler: true), - # optional, no "disabler: true" - dependency('xentoolcore', required: false, - method: 'pkg-config')]) - if xen_pc.found() - xen = xen_pc - endif - endif - if not xen.found() - xen_tests = [ '4.11.0', '4.10.0', '4.9.0', '4.8.0', '4.7.1' ] - xen_libs = { - '4.11.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], - '4.10.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn', 'xentoolcore' ], - '4.9.0': [ 'xenstore', 'xenctrl', 'xendevicemodel', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], - '4.8.0': [ 'xenstore', 'xenctrl', 
'xenforeignmemory', 'xengnttab', 'xenevtchn' ], - '4.7.1': [ 'xenstore', 'xenctrl', 'xenforeignmemory', 'xengnttab', 'xenevtchn' ], - } - xen_deps = {} - foreach ver: xen_tests - # cache the various library tests to avoid polluting the logs - xen_test_deps = [] - foreach l: xen_libs[ver] - if l not in xen_deps - xen_deps += { l: cc.find_library(l, required: false) } - endif - xen_test_deps += xen_deps[l] - endforeach - - # Use -D to pick just one of the test programs in scripts/xen-detect.c - xen_version = ver.split('.') - xen_ctrl_version = xen_version[0] + \ - ('0' + xen_version[1]).substring(-2) + \ - ('0' + xen_version[2]).substring(-2) - if cc.links(files('scripts/xen-detect.c'), - args: '-DCONFIG_XEN_CTRL_INTERFACE_VERSION=' + xen_ctrl_version, - dependencies: xen_test_deps) - xen = declare_dependency(version: ver, dependencies: xen_test_deps) - break - endif - endforeach - endif - if xen.found() - accelerators += 'CONFIG_XEN' - elif get_option('xen').enabled() - error('could not compile and link Xen test program') - endif -endif -have_xen_pci_passthrough = get_option('xen_pci_passthrough') \ - .require(xen.found(), - error_message: 'Xen PCI passthrough requested but Xen not enabled') \ - .require(targetos == 'linux', - error_message: 'Xen PCI passthrough not available on this platform') \ - .require(cpu == 'x86' or cpu == 'x86_64', - error_message: 'Xen PCI passthrough not available on this platform') \ - .allowed() - - cacard = not_found if not get_option('smartcard').auto() or have_system cacard = dependency('libcacard', required: get_option('smartcard'), version: '>=2.5.1', method: 'pkg-config') endif u2f = not_found -if have_system +if not get_option('u2f').auto() or have_system u2f = dependency('u2f-emu', required: get_option('u2f'), method: 'pkg-config') endif canokey = not_found -if have_system +if not get_option('canokey').auto() or have_system canokey = dependency('canokey-qemu', required: get_option('canokey'), method: 'pkg-config') endif @@ -1841,7 +1912,7 @@ has_statx = cc.has_header_symbol('sys/stat.h', 'STATX_BASIC_STATS', prefix: gnu_ has_statx_mnt_id = cc.has_header_symbol('sys/stat.h', 'STATX_MNT_ID', prefix: gnu_source_prefix) have_vhost_user_blk_server = get_option('vhost_user_blk_server') \ - .require(targetos == 'linux', + .require(host_os == 'linux', error_message: 'vhost_user_blk_server requires linux') \ .require(have_vhost_user, error_message: 'vhost_user_blk_server requires vhost-user support') \ @@ -1869,18 +1940,18 @@ if get_option('fuse_lseek').allowed() endif endif -have_libvduse = (targetos == 'linux') +have_libvduse = (host_os == 'linux') if get_option('libvduse').enabled() - if targetos != 'linux' + if host_os != 'linux' error('libvduse requires linux') endif elif get_option('libvduse').disabled() have_libvduse = false endif -have_vduse_blk_export = (have_libvduse and targetos == 'linux') +have_vduse_blk_export = (have_libvduse and host_os == 'linux') if get_option('vduse_blk_export').enabled() - if targetos != 'linux' + if host_os != 'linux' error('vduse_blk_export requires linux') elif not have_libvduse error('vduse_blk_export requires libvduse support') @@ -1926,6 +1997,8 @@ endif # config-host.h # ################# +config_host_data = configuration_data() + audio_drivers_selected = [] if have_system audio_drivers_available = { @@ -1946,7 +2019,7 @@ if have_system # Default to native drivers first, OSS second, SDL third audio_drivers_priority = \ [ 'pa', 'coreaudio', 'dsound', 'sndio', 'oss' ] + \ - (targetos == 'linux' ? 
[] : [ 'sdl' ]) + (host_os == 'linux' ? [] : [ 'sdl' ]) audio_drivers_default = [] foreach k: audio_drivers_priority if audio_drivers_available[k] @@ -1967,47 +2040,7 @@ endif config_host_data.set('CONFIG_AUDIO_DRIVERS', '"' + '", "'.join(audio_drivers_selected) + '", ') -if get_option('cfi') - cfi_flags=[] - # Check for dependency on LTO - if not get_option('b_lto') - error('Selected Control-Flow Integrity but LTO is disabled') - endif - if enable_modules - error('Selected Control-Flow Integrity is not compatible with modules') - endif - # Check for cfi flags. CFI requires LTO so we can't use - # get_supported_arguments, but need a more complex "compiles" which allows - # custom arguments - if cc.compiles('int main () { return 0; }', name: '-fsanitize=cfi-icall', - args: ['-flto', '-fsanitize=cfi-icall'] ) - cfi_flags += '-fsanitize=cfi-icall' - else - error('-fsanitize=cfi-icall is not supported by the compiler') - endif - if cc.compiles('int main () { return 0; }', - name: '-fsanitize-cfi-icall-generalize-pointers', - args: ['-flto', '-fsanitize=cfi-icall', - '-fsanitize-cfi-icall-generalize-pointers'] ) - cfi_flags += '-fsanitize-cfi-icall-generalize-pointers' - else - error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler') - endif - if get_option('cfi_debug') - if cc.compiles('int main () { return 0; }', - name: '-fno-sanitize-trap=cfi-icall', - args: ['-flto', '-fsanitize=cfi-icall', - '-fno-sanitize-trap=cfi-icall'] ) - cfi_flags += '-fno-sanitize-trap=cfi-icall' - else - error('-fno-sanitize-trap=cfi-icall is not supported by the compiler') - endif - endif - add_global_arguments(cfi_flags, native: false, language: all_languages) - add_global_link_arguments(cfi_flags, native: false, language: all_languages) -endif - -have_host_block_device = (targetos != 'darwin' or +have_host_block_device = (host_os != 'darwin' or cc.has_header('IOKit/storage/IOMedia.h')) dbus_display = get_option('dbus_display') \ @@ -2018,17 +2051,17 @@ dbus_display = get_option('dbus_display') \ .allowed() have_virtfs = get_option('virtfs') \ - .require(targetos == 'linux' or targetos == 'darwin', + .require(host_os == 'linux' or host_os == 'darwin', error_message: 'virtio-9p (virtfs) requires Linux or macOS') \ - .require(targetos == 'linux' or cc.has_function('pthread_fchdir_np'), + .require(host_os == 'linux' or cc.has_function('pthread_fchdir_np'), error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \ - .require(targetos == 'darwin' or libattr.found(), + .require(host_os == 'darwin' or libattr.found(), error_message: 'virtio-9p (virtfs) on Linux requires libattr-devel') \ .disable_auto_if(not have_tools and not have_system) \ .allowed() have_virtfs_proxy_helper = get_option('virtfs_proxy_helper') \ - .require(targetos != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ + .require(host_os != 'darwin', error_message: 'the virtfs proxy helper is incompatible with macOS') \ .require(have_virtfs, error_message: 'the virtfs proxy helper requires that virtfs is enabled') \ .disable_auto_if(not have_tools) \ .require(libcap_ng.found(), error_message: 'the virtfs proxy helper requires libcap-ng') \ @@ -2083,18 +2116,24 @@ if enable_modules endif have_slirp_smbd = get_option('slirp_smbd') \ - .require(targetos != 'windows', error_message: 'Host smbd not supported on this platform.') \ + .require(host_os != 'windows', error_message: 'Host smbd not supported on this platform.') \ .allowed() if have_slirp_smbd smbd_path = 
get_option('smbd') if smbd_path == '' - smbd_path = (targetos == 'sunos' ? '/usr/sfw/sbin/smbd' : '/usr/sbin/smbd') + smbd_path = (host_os == 'sunos' ? '/usr/sfw/sbin/smbd' : '/usr/sbin/smbd') endif config_host_data.set_quoted('CONFIG_SMBD_COMMAND', smbd_path) endif config_host_data.set('HOST_' + host_arch.to_upper(), 1) +kvm_targets_c = '""' +if get_option('kvm').allowed() and host_os == 'linux' + kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' +endif +config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) + if get_option('module_upgrades') and not enable_modules error('Cannot enable module-upgrades as modules are not enabled') endif @@ -2103,15 +2142,16 @@ config_host_data.set('CONFIG_MODULE_UPGRADES', get_option('module_upgrades')) config_host_data.set('CONFIG_ATTR', libattr.found()) config_host_data.set('CONFIG_BDRV_WHITELIST_TOOLS', get_option('block_drv_whitelist_in_tools')) config_host_data.set('CONFIG_BRLAPI', brlapi.found()) -config_host_data.set('CONFIG_BSD', targetos in bsd_oses) +config_host_data.set('CONFIG_BSD', host_os in bsd_oses) +config_host_data.set('CONFIG_CAPSTONE', capstone.found()) config_host_data.set('CONFIG_COCOA', cocoa.found()) -config_host_data.set('CONFIG_DARWIN', targetos == 'darwin') +config_host_data.set('CONFIG_DARWIN', host_os == 'darwin') config_host_data.set('CONFIG_FUZZ', get_option('fuzzing')) config_host_data.set('CONFIG_GCOV', get_option('b_coverage')) config_host_data.set('CONFIG_LIBUDEV', libudev.found()) -config_host_data.set('CONFIG_LINUX', targetos == 'linux') -config_host_data.set('CONFIG_POSIX', targetos != 'windows') -config_host_data.set('CONFIG_WIN32', targetos == 'windows') +config_host_data.set('CONFIG_LINUX', host_os == 'linux') +config_host_data.set('CONFIG_POSIX', host_os != 'windows') +config_host_data.set('CONFIG_WIN32', host_os == 'windows') config_host_data.set('CONFIG_LZO', lzo.found()) config_host_data.set('CONFIG_MPATH', mpathpersist.found()) config_host_data.set('CONFIG_BLKIO', blkio.found()) @@ -2167,8 +2207,9 @@ if seccomp.found() config_host_data.set('CONFIG_SECCOMP_SYSRAWRC', seccomp_has_sysrawrc) endif config_host_data.set('CONFIG_PIXMAN', pixman.found()) +config_host_data.set('CONFIG_SLIRP', slirp.found()) config_host_data.set('CONFIG_SNAPPY', snappy.found()) -config_host_data.set('CONFIG_SOLARIS', targetos == 'sunos') +config_host_data.set('CONFIG_SOLARIS', host_os == 'sunos') if get_option('tcg').allowed() config_host_data.set('CONFIG_TCG', 1) config_host_data.set('CONFIG_TCG_INTERPRETER', tcg_arch == 'tci') @@ -2192,6 +2233,12 @@ config_host_data.set('CONFIG_PNG', png.found()) config_host_data.set('CONFIG_VNC', vnc.found()) config_host_data.set('CONFIG_VNC_JPEG', jpeg.found()) config_host_data.set('CONFIG_VNC_SASL', sasl.found()) +if virgl.found() + config_host_data.set('HAVE_VIRGL_D3D_INFO_EXT', + cc.has_member('struct virgl_renderer_resource_info_ext', 'd3d_tex2d', + prefix: '#include ', + dependencies: virgl)) +endif config_host_data.set('CONFIG_VIRTFS', have_virtfs) config_host_data.set('CONFIG_VTE', vte.found()) config_host_data.set('CONFIG_XKBCOMMON', xkbcommon.found()) @@ -2263,7 +2310,7 @@ config_host_data.set('HAVE_PTY_H', cc.has_header('pty.h')) config_host_data.set('HAVE_SYS_DISK_H', cc.has_header('sys/disk.h')) config_host_data.set('HAVE_SYS_IOCCOM_H', cc.has_header('sys/ioccom.h')) config_host_data.set('HAVE_SYS_KCOV_H', cc.has_header('sys/kcov.h')) -if targetos == 'windows' +if host_os == 'windows' config_host_data.set('HAVE_AFUNIX_H', cc.has_header('afunix.h')) endif @@ -2296,7 +2343,6 @@ 
config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) -config_host_data.set('HAVE_GETLOADAVG_FUNCTION', cc.has_function('getloadavg', prefix: '#include ')) if rbd.found() config_host_data.set('HAVE_RBD_NAMESPACE_EXISTS', cc.has_function('rbd_namespace_exists', @@ -2645,7 +2691,7 @@ config_host_data.set('CONFIG_USBFS', have_linux_user and cc.compiles(''' int main(void) { return 0; }''')) have_keyring = get_option('keyring') \ - .require(targetos == 'linux', error_message: 'keyring is only available on Linux') \ + .require(host_os == 'linux', error_message: 'keyring is only available on Linux') \ .require(cc.compiles(''' #include #include @@ -2754,9 +2800,9 @@ endif if get_option('membarrier').disabled() have_membarrier = false -elif targetos == 'windows' +elif host_os == 'windows' have_membarrier = true -elif targetos == 'linux' +elif host_os == 'linux' have_membarrier = cc.compiles(''' #include #include @@ -2793,7 +2839,7 @@ config_host_data.set('CONFIG_AF_VSOCK', cc.has_header_symbol( have_vss = false have_vss_sdk = false # old xp/2003 SDK -if targetos == 'windows' and 'cpp' in all_languages +if host_os == 'windows' and 'cpp' in all_languages have_vss = cxx.compiles(''' #define __MIDL_user_allocate_free_DEFINED__ #include @@ -2804,7 +2850,7 @@ config_host_data.set('HAVE_VSS_SDK', have_vss_sdk) # Older versions of MinGW do not import _lock_file and _unlock_file properly. # This was fixed for v6.0.0 with commit b48e3ac8969d. -if targetos == 'windows' +if host_os == 'windows' config_host_data.set('HAVE__LOCK_FILE', cc.links(''' #include int main(void) { @@ -2814,7 +2860,7 @@ if targetos == 'windows' }''', name: '_lock_file and _unlock_file')) endif -if targetos == 'windows' +if host_os == 'windows' mingw_has_setjmp_longjmp = cc.links(''' #include int main(void) { @@ -2840,21 +2886,9 @@ endif ######################## minikconf = find_program('scripts/minikconf.py') -config_targetos = { - (targetos == 'windows' ? 'CONFIG_WIN32' : 'CONFIG_POSIX'): 'y' -} -if targetos == 'darwin' - config_targetos += {'CONFIG_DARWIN': 'y'} -elif targetos == 'linux' - config_targetos += {'CONFIG_LINUX': 'y'} -endif -if targetos in bsd_oses - config_targetos += {'CONFIG_BSD': 'y'} -endif -config_all = {} +config_all_accel = {} config_all_devices = {} -config_all_disas = {} config_devices_mak_list = [] config_devices_h = {} config_target_h = {} @@ -2896,7 +2930,7 @@ host_kconfig = \ (have_vhost_vdpa ? ['CONFIG_VHOST_VDPA=y'] : []) + \ (have_vhost_kernel ? ['CONFIG_VHOST_KERNEL=y'] : []) + \ (have_virtfs ? ['CONFIG_VIRTFS=y'] : []) + \ - (targetos == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ + (host_os == 'linux' ? ['CONFIG_LINUX=y'] : []) + \ (have_pvrdma ? ['CONFIG_PVRDMA=y'] : []) + \ (multiprocess_allowed ? ['CONFIG_MULTIPROCESS_ALLOWED=y'] : []) + \ (vfio_user_server_allowed ? 
['CONFIG_VFIO_USER_SERVER_ALLOWED=y'] : []) + \ @@ -2910,7 +2944,7 @@ fdt_required = [] foreach target : target_dirs config_target = { 'TARGET_NAME': target.split('-')[0] } if target.endswith('linux-user') - if targetos != 'linux' + if host_os != 'linux' if default_targets continue endif @@ -2918,7 +2952,7 @@ foreach target : target_dirs endif config_target += { 'CONFIG_LINUX_USER': 'y' } elif target.endswith('bsd-user') - if targetos not in bsd_oses + if host_os not in bsd_oses if default_targets continue endif @@ -2941,7 +2975,7 @@ foreach target : target_dirs foreach sym: accelerators if sym == 'CONFIG_TCG' or target in accelerator_targets.get(sym, []) config_target += { sym: 'y' } - config_all += { sym: 'y' } + config_all_accel += { sym: 'y' } if target in modular_tcg config_target += { 'CONFIG_TCG_MODULAR': 'y' } else @@ -2980,7 +3014,6 @@ foreach target : target_dirs if host_arch.startswith(k) or config_target['TARGET_BASE_ARCH'].startswith(k) foreach sym: v config_target += { sym: 'y' } - config_all_disas += { sym: 'y' } endforeach endif endforeach @@ -3039,25 +3072,6 @@ foreach target : target_dirs endforeach target_dirs = actual_target_dirs -# This configuration is used to build files that are shared by -# multiple binaries, and then extracted out of the "common" -# static_library target. -# -# We do not use all_sources()/all_dependencies(), because it would -# build literally all source files, including devices only used by -# targets that are not built for this compilation. The CONFIG_ALL -# pseudo symbol replaces it. - -config_all += config_all_devices -config_all += config_targetos -config_all += config_all_disas -config_all += { - 'CONFIG_XEN': xen.found(), - 'CONFIG_SYSTEM_ONLY': have_system, - 'CONFIG_USER_ONLY': have_user, - 'CONFIG_ALL': true, -} - target_configs_h = [] foreach target: target_dirs target_configs_h += config_target_h[target] @@ -3070,28 +3084,9 @@ genh += custom_target('config-poison.h', command: [find_program('scripts/make-config-poison.sh'), target_configs_h]) -############## -# Submodules # -############## - -capstone = not_found -if not get_option('capstone').auto() or have_system or have_user - capstone = dependency('capstone', version: '>=3.0.5', - method: 'pkg-config', - required: get_option('capstone')) - - # Some versions of capstone have broken pkg-config file - # that reports a wrong -I path, causing the #include to - # fail later. If the system has such a broken version - # do not use it. 
- if capstone.found() and not cc.compiles('#include ', - dependencies: [capstone]) - capstone = not_found - if get_option('capstone').enabled() - error('capstone requested, but it does not appear to work') - endif - endif -endif +############### +# Subprojects # +############### libvfio_user_dep = not_found if have_system and vfio_user_server_allowed @@ -3135,9 +3130,19 @@ else fdt_opt = 'disabled' endif -config_host_data.set('CONFIG_CAPSTONE', capstone.found()) config_host_data.set('CONFIG_FDT', fdt.found()) -config_host_data.set('CONFIG_SLIRP', slirp.found()) + +vhost_user = not_found +if host_os == 'linux' and have_vhost_user + libvhost_user = subproject('libvhost-user') + vhost_user = libvhost_user.get_variable('vhost_user_dep') +endif + +libvduse = not_found +if have_libvduse + libvduse_proj = subproject('libvduse') + libvduse = libvduse_proj.get_variable('libvduse_dep') +endif ##################### # Generated sources # @@ -3221,39 +3226,6 @@ foreach d : hx_headers endforeach genh += hxdep -################### -# Collect sources # -################### - -authz_ss = ss.source_set() -blockdev_ss = ss.source_set() -block_ss = ss.source_set() -chardev_ss = ss.source_set() -common_ss = ss.source_set() -crypto_ss = ss.source_set() -hwcore_ss = ss.source_set() -io_ss = ss.source_set() -qmp_ss = ss.source_set() -qom_ss = ss.source_set() -system_ss = ss.source_set() -specific_fuzz_ss = ss.source_set() -specific_ss = ss.source_set() -stub_ss = ss.source_set() -trace_ss = ss.source_set() -user_ss = ss.source_set() -util_ss = ss.source_set() - -# accel modules -qtest_module_ss = ss.source_set() -tcg_module_ss = ss.source_set() - -modules = {} -target_modules = {} -hw_arch = {} -target_arch = {} -target_system_arch = {} -target_user_arch = {} - ############### # Trace files # ############### @@ -3358,6 +3330,7 @@ if have_system or have_user 'target/hppa', 'target/i386', 'target/i386/kvm', + 'target/loongarch', 'target/mips/tcg', 'target/nios2', 'target/ppc', @@ -3368,17 +3341,38 @@ if have_system or have_user ] endif -vhost_user = not_found -if targetos == 'linux' and have_vhost_user - libvhost_user = subproject('libvhost-user') - vhost_user = libvhost_user.get_variable('vhost_user_dep') -endif +################### +# Collect sources # +################### -libvduse = not_found -if have_libvduse - libvduse_proj = subproject('libvduse') - libvduse = libvduse_proj.get_variable('libvduse_dep') -endif +authz_ss = ss.source_set() +blockdev_ss = ss.source_set() +block_ss = ss.source_set() +chardev_ss = ss.source_set() +common_ss = ss.source_set() +crypto_ss = ss.source_set() +hwcore_ss = ss.source_set() +io_ss = ss.source_set() +qmp_ss = ss.source_set() +qom_ss = ss.source_set() +system_ss = ss.source_set() +specific_fuzz_ss = ss.source_set() +specific_ss = ss.source_set() +stub_ss = ss.source_set() +trace_ss = ss.source_set() +user_ss = ss.source_set() +util_ss = ss.source_set() + +# accel modules +qtest_module_ss = ss.source_set() +tcg_module_ss = ss.source_set() + +modules = {} +target_modules = {} +hw_arch = {} +target_arch = {} +target_system_arch = {} +target_user_arch = {} # NOTE: the trace/ subdirectory needs the qapi_trace_events variable # that is filled in by qapi/. 
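# Illustrative sketch (editorial aside, not part of the patch): the *_ss
# collections gathered above use Meson's "sourceset" module, which the hunks
# below resolve with .apply(). The names example_ss, CONFIG_FOO, util.c and
# foo.c are hypothetical, assumed here purely for illustration of the API.
#
#   ss = import('sourceset')
#   example_ss = ss.source_set()
#   example_ss.add(files('util.c'))                            # always included
#   example_ss.add(when: 'CONFIG_FOO', if_true: files('foo.c')) # conditional
#   resolved = example_ss.apply({'CONFIG_FOO': 'y'})
#   # resolved.sources()                      -> util.c and foo.c
#   # example_ss.apply({}, strict: false).sources() -> util.c only, since the
#   # CONFIG_FOO rule is dropped when its key is absent from the dictionary.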
@@ -3399,7 +3393,7 @@ if enable_modules modulecommon = declare_dependency(link_whole: libmodulecommon, compile_args: '-DBUILD_DSO') endif -qom_ss = qom_ss.apply(config_targetos, strict: false) +qom_ss = qom_ss.apply({}) libqom = static_library('qom', qom_ss.sources() + genh, dependencies: [qom_ss.dependencies()], name_suffix: 'fa', @@ -3414,10 +3408,10 @@ event_loop_base = static_library('event-loop-base', event_loop_base = declare_dependency(link_whole: event_loop_base, dependencies: [qom]) -stub_ss = stub_ss.apply(config_all, strict: false) +stub_ss = stub_ss.apply({}) util_ss.add_all(trace_ss) -util_ss = util_ss.apply(config_all, strict: false) +util_ss = util_ss.apply({}) libqemuutil = static_library('qemuutil', build_by_default: false, sources: util_ss.sources() + stub_ss.sources() + genh, @@ -3464,8 +3458,11 @@ if have_block # os-posix.c contains POSIX-specific functions used by qemu-storage-daemon, # os-win32.c does not - blockdev_ss.add(when: 'CONFIG_POSIX', if_true: files('os-posix.c')) - system_ss.add(when: 'CONFIG_WIN32', if_true: [files('os-win32.c')]) + if host_os == 'windows' + system_ss.add(files('os-win32.c')) + else + blockdev_ss.add(files('os-posix.c')) + endif endif common_ss.add(files('cpu-common.c')) @@ -3524,9 +3521,9 @@ specific_ss.add_all(when: 'CONFIG_TCG_BUILTIN', if_true: tcg_module_ss) target_modules += { 'accel' : { 'qtest': qtest_module_ss, 'tcg': tcg_real_module_ss }} -######################## -# Library dependencies # -######################## +############################################## +# Internal static_libraries and dependencies # +############################################## modinfo_collect = find_program('scripts/modinfo-collect.py') modinfo_generate = find_program('scripts/modinfo-generate.py') @@ -3541,7 +3538,7 @@ foreach d, list : modules foreach m, module_ss : list if enable_modules - module_ss = module_ss.apply(config_all, strict: false) + module_ss = module_ss.apply(config_all_devices, strict: false) sl = static_library(d + '-' + m, [genh, module_ss.sources()], dependencies: [modulecommon, module_ss.dependencies()], pic: true) if d == 'block' @@ -3577,7 +3574,6 @@ foreach d, list : target_modules foreach target : target_dirs if target.endswith('-softmmu') config_target = config_target_mak[target] - config_target += config_targetos target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])] c_args = ['-DNEED_CPU_H', '-DCONFIG_TARGET="@0@-config-target.h"'.format(target), @@ -3638,7 +3634,7 @@ qemu_syms = custom_target('qemu.syms', output: 'qemu.syms', capture: true, command: [undefsym, nm, '@INPUT@']) -authz_ss = authz_ss.apply(config_targetos, strict: false) +authz_ss = authz_ss.apply({}) libauthz = static_library('authz', authz_ss.sources() + genh, dependencies: [authz_ss.dependencies()], name_suffix: 'fa', @@ -3647,7 +3643,7 @@ libauthz = static_library('authz', authz_ss.sources() + genh, authz = declare_dependency(link_whole: libauthz, dependencies: qom) -crypto_ss = crypto_ss.apply(config_targetos, strict: false) +crypto_ss = crypto_ss.apply({}) libcrypto = static_library('crypto', crypto_ss.sources() + genh, dependencies: [crypto_ss.dependencies()], name_suffix: 'fa', @@ -3656,7 +3652,7 @@ libcrypto = static_library('crypto', crypto_ss.sources() + genh, crypto = declare_dependency(link_whole: libcrypto, dependencies: [authz, qom]) -io_ss = io_ss.apply(config_targetos, strict: false) +io_ss = io_ss.apply({}) libio = static_library('io', io_ss.sources() + genh, dependencies: [io_ss.dependencies()], link_with: 
libqemuutil, @@ -3672,7 +3668,7 @@ migration = declare_dependency(link_with: libmigration, dependencies: [zlib, qom, io]) system_ss.add(migration) -block_ss = block_ss.apply(config_targetos, strict: false) +block_ss = block_ss.apply({}) libblock = static_library('block', block_ss.sources() + genh, dependencies: block_ss.dependencies(), link_depends: block_syms, @@ -3683,7 +3679,7 @@ block = declare_dependency(link_whole: [libblock], link_args: '@block.syms', dependencies: [crypto, io]) -blockdev_ss = blockdev_ss.apply(config_targetos, strict: false) +blockdev_ss = blockdev_ss.apply({}) libblockdev = static_library('blockdev', blockdev_ss.sources() + genh, dependencies: blockdev_ss.dependencies(), name_suffix: 'fa', @@ -3692,7 +3688,7 @@ libblockdev = static_library('blockdev', blockdev_ss.sources() + genh, blockdev = declare_dependency(link_whole: [libblockdev], dependencies: [block, event_loop_base]) -qmp_ss = qmp_ss.apply(config_targetos, strict: false) +qmp_ss = qmp_ss.apply({}) libqmp = static_library('qmp', qmp_ss.sources() + genh, dependencies: qmp_ss.dependencies(), name_suffix: 'fa', @@ -3707,7 +3703,7 @@ libchardev = static_library('chardev', chardev_ss.sources() + genh, chardev = declare_dependency(link_whole: libchardev) -hwcore_ss = hwcore_ss.apply(config_targetos, strict: false) +hwcore_ss = hwcore_ss.apply({}) libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh, name_suffix: 'fa', build_by_default: false) @@ -3737,18 +3733,20 @@ common_ss.add(qom, qemuutil) common_ss.add_all(when: 'CONFIG_SYSTEM_ONLY', if_true: [system_ss]) common_ss.add_all(when: 'CONFIG_USER_ONLY', if_true: user_ss) -common_all = common_ss.apply(config_all, strict: false) +# Note that this library is never used directly (only through extract_objects) +# and is not built by default; therefore, source files not used by the build +# configuration will be in build.ninja, but are never built by default. common_all = static_library('common', build_by_default: false, - sources: common_all.sources() + genh, + sources: common_ss.all_sources() + genh, include_directories: common_user_inc, implicit_include_directories: false, - dependencies: common_all.dependencies(), + dependencies: common_ss.all_dependencies(), name_suffix: 'fa') feature_to_c = find_program('scripts/feature_to_c.py') -if targetos == 'darwin' +if host_os == 'darwin' entitlement = find_program('scripts/entitlement.sh') endif @@ -3764,9 +3762,8 @@ foreach target : target_dirs '-DCONFIG_DEVICES="@0@-config-devices.h"'.format(target)] link_args = emulator_link_args - config_target += config_targetos target_inc = [include_directories('target' / config_target['TARGET_BASE_ARCH'])] - if targetos == 'linux' + if host_os == 'linux' target_inc += include_directories('linux-headers', is_system: true) endif if target.endswith('-softmmu') @@ -3776,9 +3773,11 @@ foreach target : target_dirs arch_deps += t.dependencies() hw_dir = target_name == 'sparc64' ? 
'sparc64' : target_base_arch - hw = hw_arch[hw_dir].apply(config_target, strict: false) - arch_srcs += hw.sources() - arch_deps += hw.dependencies() + if hw_arch.has_key(hw_dir) + hw = hw_arch[hw_dir].apply(config_target, strict: false) + arch_srcs += hw.sources() + arch_deps += hw.dependencies() + endif arch_srcs += config_devices_h[target] link_args += ['@block.syms', '@qemu.syms'] @@ -3796,7 +3795,7 @@ foreach target : target_dirs endif if 'CONFIG_BSD_USER' in config_target base_dir = 'bsd-user' - target_inc += include_directories('bsd-user/' / targetos) + target_inc += include_directories('bsd-user/' / host_os) target_inc += include_directories('bsd-user/host/' / host_arch) dir = base_dir / abi arch_srcs += files(dir / 'signal.c', dir / 'target_arch_cpu.c') @@ -3853,7 +3852,7 @@ foreach target : target_dirs 'sources': files('system/main.c'), 'dependencies': [] }] - if targetos == 'windows' and (sdl.found() or gtk.found()) + if host_os == 'windows' and (sdl.found() or gtk.found()) execs += [{ 'name': 'qemu-system-' + target_name + 'w', 'win_subsystem': 'windows', @@ -3880,7 +3879,7 @@ foreach target : target_dirs endif foreach exe: execs exe_name = exe['name'] - if targetos == 'darwin' + if host_os == 'darwin' exe_name += '-unsigned' endif @@ -3893,7 +3892,7 @@ foreach target : target_dirs link_args: link_args, win_subsystem: exe['win_subsystem']) - if targetos == 'darwin' + if host_os == 'darwin' icon = 'pc-bios/qemu.rsrc' build_input = [emulator, files(icon)] install_input = [ @@ -3949,7 +3948,7 @@ endforeach if get_option('plugins') install_headers('include/qemu/qemu-plugin.h') - if targetos == 'windows' + if host_os == 'windows' # On windows, we want to deliver the qemu_plugin_api.lib file in the qemu installer, # so that plugin authors can compile against it. install_data(win32_qemu_plugin_api_lib, install_dir: 'lib') @@ -3990,7 +3989,7 @@ if have_tools subdir('contrib/vhost-user-scsi') endif - if targetos == 'linux' + if host_os == 'linux' executable('qemu-bridge-helper', files('qemu-bridge-helper.c'), dependencies: [qemuutil, libcap_ng], install: true, @@ -4057,7 +4056,7 @@ summary(summary_info, bool_yn: true, section: 'Build environment') # Directories summary_info += {'Install prefix': get_option('prefix')} summary_info += {'BIOS directory': qemu_datadir} -pathsep = targetos == 'windows' ? ';' : ':' +pathsep = host_os == 'windows' ? 
';' : ':' summary_info += {'firmware path': pathsep.join(get_option('qemu_firmwarepath'))} summary_info += {'binary directory': get_option('prefix') / get_option('bindir')} summary_info += {'library directory': get_option('prefix') / get_option('libdir')} @@ -4065,7 +4064,7 @@ summary_info += {'module directory': qemu_moddir} summary_info += {'libexec directory': get_option('prefix') / get_option('libexecdir')} summary_info += {'include directory': get_option('prefix') / get_option('includedir')} summary_info += {'config directory': get_option('prefix') / get_option('sysconfdir')} -if targetos != 'windows' +if host_os != 'windows' summary_info += {'local state directory': get_option('prefix') / get_option('localstatedir')} summary_info += {'Manual directory': get_option('prefix') / get_option('mandir')} else @@ -4088,7 +4087,7 @@ if config_host.has_key('GDB') endif summary_info += {'iasl': iasl} summary_info += {'genisoimage': config_host['GENISOIMAGE']} -if targetos == 'windows' and have_ga +if host_os == 'windows' and have_ga summary_info += {'wixl': wixl} endif if slirp.found() and have_system @@ -4186,7 +4185,7 @@ if get_option('cfi') endif summary_info += {'strip binaries': get_option('strip')} summary_info += {'sparse': sparse} -summary_info += {'mingw32 support': targetos == 'windows'} +summary_info += {'mingw32 support': host_os == 'windows'} summary(summary_info, bool_yn: true, section: 'Compilation') # snarf the cross-compilation information for tests @@ -4209,18 +4208,18 @@ endif # Targets and accelerators summary_info = {} if have_system - summary_info += {'KVM support': config_all.has_key('CONFIG_KVM')} - summary_info += {'HVF support': config_all.has_key('CONFIG_HVF')} - summary_info += {'WHPX support': config_all.has_key('CONFIG_WHPX')} - summary_info += {'NVMM support': config_all.has_key('CONFIG_NVMM')} + summary_info += {'KVM support': config_all_accel.has_key('CONFIG_KVM')} + summary_info += {'HVF support': config_all_accel.has_key('CONFIG_HVF')} + summary_info += {'WHPX support': config_all_accel.has_key('CONFIG_WHPX')} + summary_info += {'NVMM support': config_all_accel.has_key('CONFIG_NVMM')} summary_info += {'Xen support': xen.found()} if xen.found() summary_info += {'xen ctrl version': xen.version()} endif - summary_info += {'Xen emulation': config_all.has_key('CONFIG_XEN_EMU')} + summary_info += {'Xen emulation': config_all_devices.has_key('CONFIG_XEN_EMU')} endif -summary_info += {'TCG support': config_all.has_key('CONFIG_TCG')} -if config_all.has_key('CONFIG_TCG') +summary_info += {'TCG support': config_all_accel.has_key('CONFIG_TCG')} +if config_all_accel.has_key('CONFIG_TCG') if get_option('tcg_interpreter') summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, slow)'} else @@ -4285,7 +4284,7 @@ summary(summary_info, bool_yn: true, section: 'Crypto') # UI summary_info = {} -if targetos == 'darwin' +if host_os == 'darwin' summary_info += {'Cocoa support': cocoa} endif summary_info += {'SDL support': sdl} @@ -4307,17 +4306,23 @@ summary_info += {'curses support': curses} summary_info += {'brlapi support': brlapi} summary(summary_info, bool_yn: true, section: 'User interface') +# Graphics backends +summary_info = {} +summary_info += {'VirGL support': virgl} +summary_info += {'Rutabaga support': rutabaga} +summary(summary_info, bool_yn: true, section: 'Graphics backends') + # Audio backends summary_info = {} -if targetos not in ['darwin', 'haiku', 'windows'] +if host_os not in ['darwin', 'haiku', 'windows'] summary_info += {'OSS support': oss} 
summary_info += {'sndio support': sndio} -elif targetos == 'darwin' +elif host_os == 'darwin' summary_info += {'CoreAudio support': coreaudio} -elif targetos == 'windows' +elif host_os == 'windows' summary_info += {'DirectSound support': dsound} endif -if targetos == 'linux' +if host_os == 'linux' summary_info += {'ALSA support': alsa} summary_info += {'PulseAudio support': pulse} endif @@ -4327,7 +4332,7 @@ summary(summary_info, bool_yn: true, section: 'Audio backends') # Network backends summary_info = {} -if targetos == 'darwin' +if host_os == 'darwin' summary_info += {'vmnet.framework support': vmnet} endif summary_info += {'AF_XDP support': libxdp} @@ -4342,8 +4347,6 @@ summary_info = {} summary_info += {'libtasn1': tasn1} summary_info += {'PAM': pam} summary_info += {'iconv support': iconv} -summary_info += {'virgl support': virgl} -summary_info += {'rutabaga support': rutabaga} summary_info += {'blkio support': blkio} summary_info += {'curl support': curl} summary_info += {'Multipath support': mpathpersist} @@ -4364,7 +4367,7 @@ summary_info += {'OpenGL support (epoxy)': opengl} summary_info += {'GBM': gbm} summary_info += {'libiscsi support': libiscsi} summary_info += {'libnfs support': libnfs} -if targetos == 'windows' +if host_os == 'windows' if have_ga summary_info += {'QGA VSS support': have_qga_vss} endif @@ -4406,20 +4409,20 @@ if host_arch == 'unknown' endif endif -if not supported_oses.contains(targetos) +if not supported_oses.contains(host_os) message() warning('UNSUPPORTED HOST OS') message() - message('Support for host OS ' + targetos + 'is not currently maintained.') + message('Support for host OS ' + host_os + 'is not currently maintained.') message('configure has succeeded and you can continue to build, but') message('the QEMU project does not guarantee that QEMU will compile or') message('work on this operating system. You can help by volunteering') message('to maintain it and providing a build host for our continuous') message('integration setup. 
This will ensure that future versions of QEMU') - message('will keep working on ' + targetos + '.') + message('will keep working on ' + host_os + '.') endif -if host_arch == 'unknown' or not supported_oses.contains(targetos) +if host_arch == 'unknown' or not supported_oses.contains(host_os) message() message('If you want to help supporting QEMU on this platform, please') message('contact the developers at qemu-devel@nongnu.org.') @@ -4433,8 +4436,8 @@ if get_option('relocatable') and \ warning('bindir not included within prefix, the installation will not be relocatable.') actually_reloc = false endif -if not actually_reloc and (targetos == 'windows' or get_option('relocatable')) - if targetos == 'windows' +if not actually_reloc and (host_os == 'windows' or get_option('relocatable')) + if host_os == 'windows' message() warning('Windows installs should usually be relocatable.') endif diff --git a/meson_options.txt b/meson_options.txt index c9baeda6395634c3478a3c2a3a8c8f57fbe0b592..0a99a059ec8c4c7a48f37d158de3822af5e57dbb 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -101,7 +101,7 @@ option('cfi_debug', type: 'boolean', value: false, description: 'Verbose errors in case of CFI violation') option('multiprocess', type: 'feature', value: 'auto', description: 'Out of process device emulation support') -option('relocatable', type : 'boolean', value : 'true', +option('relocatable', type : 'boolean', value : true, description: 'toggle relocatable install') option('vfio_user_server', type: 'feature', value: 'disabled', description: 'vfio-user server support') diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 24347ab0f756a61509954e1d9da103e15fc99242..2708abf3d762de774ed294d3fdb8e56690d2974c 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s, g_free(buf); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s) { SaveBitmapState *dbms; @@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s) } } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs, const char *bs_name, GHashTable *alias_map) { @@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs, return 0; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int init_dirty_bitmap_migration(DBMSaveState *s) { BlockDriverState *bs; @@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s) BlockBackend *blk; GHashTable *alias_map = NULL; - /* Runs in the migration thread, but holds the iothread lock */ + /* Runs in the migration thread, but holds the BQL */ GLOBAL_STATE_CODE(); GRAPH_RDLOCK_GUARD_MAINLOOP(); @@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque) return s->bulk_completed; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. 
*/ static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque) { @@ -774,7 +774,7 @@ static void dirty_bitmap_state_pending(void *opaque, SaveBitmapState *dbms; uint64_t pending = 0; - qemu_mutex_lock_iothread(); + bql_lock(); QSIMPLEQ_FOREACH(dbms, &s->dbms_list, entry) { uint64_t gran = bdrv_dirty_bitmap_granularity(dbms->bitmap); @@ -784,7 +784,7 @@ static void dirty_bitmap_state_pending(void *opaque, pending += DIV_ROUND_UP(sectors * BDRV_SECTOR_SIZE, gran); } - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_dirty_bitmap_state_pending(pending); diff --git a/migration/block.c b/migration/block.c index a15f9bddcb9d8abaa8e9a31dd9f72ff732395b59..8c6ebafacc1ffe930d1d4f19d968817b14852c69 100644 --- a/migration/block.c +++ b/migration/block.c @@ -66,7 +66,7 @@ typedef struct BlkMigDevState { /* Protected by block migration lock. */ int64_t completed_sectors; - /* During migration this is protected by iothread lock / AioContext. + /* During migration this is protected by bdrv_dirty_bitmap_lock(). * Allocation and free happen during setup and cleanup respectively. */ BdrvDirtyBitmap *dirty_bitmap; @@ -101,7 +101,7 @@ typedef struct BlkMigState { int prev_progress; int bulk_completed; - /* Lock must be taken _inside_ the iothread lock and any AioContexts. */ + /* Lock must be taken _inside_ the BQL. */ QemuMutex lock; } BlkMigState; @@ -117,7 +117,7 @@ static void blk_mig_unlock(void) qemu_mutex_unlock(&block_mig_state.lock); } -/* Must run outside of the iothread lock during the bulk phase, +/* Must run outside of the BQL during the bulk phase, * or the VM will stall. */ @@ -269,8 +269,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) int64_t count; if (bmds->shared_base) { - qemu_mutex_lock_iothread(); - aio_context_acquire(blk_get_aio_context(bb)); + bql_lock(); /* Skip unallocated sectors; intentionally treats failure or * partial sector as an allocated sector */ while (cur_sector < total_sectors && @@ -281,8 +280,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) } cur_sector += count >> BDRV_SECTOR_BITS; } - aio_context_release(blk_get_aio_context(bb)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (cur_sector >= total_sectors) { @@ -313,28 +311,23 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds) block_mig_state.submitted++; blk_mig_unlock(); - /* We do not know if bs is under the main thread (and thus does - * not acquire the AioContext when doing AIO) or rather under - * dataplane. Thus acquire both the iothread mutex and the - * AioContext. - * - * This is ugly and will disappear when we make bdrv_* thread-safe, - * without the need to acquire the AioContext. + /* + * The migration thread does not have an AioContext. Lock the BQL so that + * I/O runs in the main loop AioContext (see + * qemu_get_current_aio_context()). */ - qemu_mutex_lock_iothread(); - aio_context_acquire(blk_get_aio_context(bmds->blk)); + bql_lock(); bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE, nr_sectors * BDRV_SECTOR_SIZE); blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov, 0, blk_mig_read_cb, blk); - aio_context_release(blk_get_aio_context(bmds->blk)); - qemu_mutex_unlock_iothread(); + bql_unlock(); bmds->cur_sector = cur_sector + nr_sectors; return (bmds->cur_sector >= total_sectors); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int set_dirty_tracking(void) { @@ -361,7 +354,7 @@ fail: return ret; } -/* Called with iothread lock taken. 
*/ +/* Called with the BQL taken. */ static void unset_dirty_tracking(void) { @@ -512,7 +505,7 @@ static void blk_mig_reset_dirty_cursor(void) } } -/* Called with iothread lock and AioContext taken. */ +/* Called with the BQL taken. */ static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds, int is_async) @@ -594,7 +587,7 @@ error: return ret; } -/* Called with iothread lock taken. +/* Called with the BQL taken. * * return value: * 0: too much data for max_downtime @@ -606,9 +599,7 @@ static int blk_mig_save_dirty_block(QEMUFile *f, int is_async) int ret = 1; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - aio_context_acquire(blk_get_aio_context(bmds->blk)); ret = mig_save_device_dirty(f, bmds, is_async); - aio_context_release(blk_get_aio_context(bmds->blk)); if (ret <= 0) { break; } @@ -658,7 +649,7 @@ static int flush_blks(QEMUFile *f) return ret; } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static int64_t get_remaining_dirty(void) { @@ -666,9 +657,9 @@ static int64_t get_remaining_dirty(void) int64_t dirty = 0; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - aio_context_acquire(blk_get_aio_context(bmds->blk)); + bdrv_dirty_bitmap_lock(bmds->dirty_bitmap); dirty += bdrv_get_dirty_count(bmds->dirty_bitmap); - aio_context_release(blk_get_aio_context(bmds->blk)); + bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap); } return dirty; @@ -676,12 +667,11 @@ static int64_t get_remaining_dirty(void) -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void block_migration_cleanup_bmds(void) { BlkMigDevState *bmds; BlockDriverState *bs; - AioContext *ctx; unset_dirty_tracking(); @@ -693,20 +683,14 @@ static void block_migration_cleanup_bmds(void) bdrv_op_unblock_all(bs, bmds->blocker); } error_free(bmds->blocker); - - /* Save ctx, because bmds->blk can disappear during blk_unref. */ - ctx = blk_get_aio_context(bmds->blk); - aio_context_acquire(ctx); blk_unref(bmds->blk); - aio_context_release(ctx); - g_free(bmds->blk_name); g_free(bmds->aio_bitmap); g_free(bmds); } } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. */ static void block_migration_cleanup(void *opaque) { BlkMigBlock *blk; @@ -783,12 +767,12 @@ static int block_save_iterate(QEMUFile *f, void *opaque) } ret = 0; } else { - /* Always called with iothread lock taken for + /* Always called with the BQL taken for * simplicity, block_save_complete also calls it. */ - qemu_mutex_lock_iothread(); + bql_lock(); ret = blk_mig_save_dirty_block(f, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); } if (ret < 0) { return ret; @@ -811,7 +795,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque) return (delta_bytes > 0); } -/* Called with iothread lock taken. */ +/* Called with the BQL taken. 
*/ static int block_save_complete(QEMUFile *f, void *opaque) { @@ -860,9 +844,9 @@ static void block_state_pending(void *opaque, uint64_t *must_precopy, /* Estimate pending number of bytes to send */ uint64_t pending; - qemu_mutex_lock_iothread(); + bql_lock(); pending = get_remaining_dirty(); - qemu_mutex_unlock_iothread(); + bql_unlock(); blk_mig_lock(); pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE + diff --git a/migration/channel.c b/migration/channel.c index ca3319a30985c420020281391206ef418a014e3c..f9de064f3b134dfe9a2128e9189d5565dd7f81e7 100644 --- a/migration/channel.c +++ b/migration/channel.c @@ -117,9 +117,12 @@ int migration_channel_read_peek(QIOChannel *ioc, len = qio_channel_readv_full(ioc, &iov, 1, NULL, NULL, QIO_CHANNEL_READ_FLAG_MSG_PEEK, errp); - if (len <= 0 && len != QIO_CHANNEL_ERR_BLOCK) { - error_setg(errp, - "Failed to peek at channel"); + if (len < 0 && len != QIO_CHANNEL_ERR_BLOCK) { + return -1; + } + + if (len == 0) { + error_setg(errp, "Failed to peek at channel"); return -1; } diff --git a/migration/colo.c b/migration/colo.c index 4447e349149a19bf1ed9969e923b1ea8b2296989..315e31fe32e30eda99f81d4fff04525252852bbc 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -420,13 +420,13 @@ static int colo_do_checkpoint_transaction(MigrationState *s, qio_channel_io_seek(QIO_CHANNEL(bioc), 0, 0, NULL); bioc->usage = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (failover_get_state() != FAILOVER_STATUS_NONE) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* * Failover request bh could be called after vm_stop_force_state(), @@ -435,23 +435,23 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (failover_get_state() != FAILOVER_STATUS_NONE) { goto out; } - qemu_mutex_lock_iothread(); + bql_lock(); replication_do_checkpoint_all(&local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } colo_send_message(s->to_dst_file, COLO_MESSAGE_VMSTATE_SEND, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } /* Note: device state is saved into buffer */ ret = qemu_save_device_state(fb); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { goto out; } @@ -504,9 +504,9 @@ static int colo_do_checkpoint_transaction(MigrationState *s, ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); out: @@ -557,15 +557,15 @@ static void colo_process_checkpoint(MigrationState *s) fb = qemu_file_new_output(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_PRIMARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) + @@ -639,14 +639,14 @@ out: void migrate_start_colo_process(MigrationState *s) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_init(&s->colo_checkpoint_event, false); s->colo_delay_timer = timer_new_ms(QEMU_CLOCK_HOST, colo_checkpoint_notify, s); qemu_sem_init(&s->colo_exit_sem, 0); colo_process_checkpoint(s); - qemu_mutex_lock_iothread(); + bql_lock(); } static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, @@ -657,9 
+657,9 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, Error *local_err = NULL; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); vm_stop_force_state(RUN_STATE_COLO); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("run", "stop"); /* FIXME: This is unnecessary for periodic checkpoint mode */ @@ -677,10 +677,10 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); cpu_synchronize_all_states(); ret = qemu_loadvm_state_main(mis->from_src_file, mis); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret < 0) { error_setg(errp, "Load VM's live state (ram) error"); @@ -719,14 +719,14 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, return; } - qemu_mutex_lock_iothread(); + bql_lock(); vmstate_loading = true; colo_flush_ram_cache(); ret = qemu_load_device_state(fb); if (ret < 0) { error_setg(errp, "COLO: load device state failed"); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -734,7 +734,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } @@ -743,7 +743,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } /* Notify all filters of all NIC to do checkpoint */ @@ -752,13 +752,13 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, if (local_err) { error_propagate(errp, local_err); vmstate_loading = false; - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } vmstate_loading = false; vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) { @@ -851,14 +851,14 @@ static void *colo_process_incoming_thread(void *opaque) fb = qemu_file_new_input(QIO_CHANNEL(bioc)); object_unref(OBJECT(bioc)); - qemu_mutex_lock_iothread(); + bql_lock(); replication_start_all(REPLICATION_MODE_SECONDARY, &local_err); if (local_err) { - qemu_mutex_unlock_iothread(); + bql_unlock(); goto out; } vm_start(); - qemu_mutex_unlock_iothread(); + bql_unlock(); trace_colo_vm_state_change("stop", "run"); colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY, @@ -920,7 +920,7 @@ int coroutine_fn colo_incoming_co(void) Error *local_err = NULL; QemuThread th; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); if (!migration_incoming_colo_enabled()) { return 0; @@ -940,12 +940,12 @@ int coroutine_fn colo_incoming_co(void) qemu_coroutine_yield(); mis->colo_incoming_co = NULL; - qemu_mutex_unlock_iothread(); + bql_unlock(); /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&th); - qemu_mutex_lock_iothread(); + bql_lock(); - /* We hold the global iothread lock, so it is safe here */ + /* We hold the global BQL, so it is safe here */ colo_release_ram_cache(); return 0; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 036ac017fc91f111dcfca1b2dc34bf8be8cb0648..1d2e85746fb7b10eb7f149976970f9a92125af8a 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -90,13 +90,13 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, void global_dirty_log_change(unsigned int flag, bool start) { - 
qemu_mutex_lock_iothread(); + bql_lock(); if (start) { memory_global_dirty_log_start(flag); } else { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -106,12 +106,12 @@ void global_dirty_log_change(unsigned int flag, bool start) */ static void global_dirty_log_sync(unsigned int flag, bool one_shot) { - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_sync(false); if (one_shot) { memory_global_dirty_log_stop(flag); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) @@ -129,8 +129,7 @@ static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) return g_new0(DirtyPageRecord, nvcpu); } -static void vcpu_dirty_stat_collect(VcpuStat *stat, - DirtyPageRecord *records, +static void vcpu_dirty_stat_collect(DirtyPageRecord *records, bool start) { CPUState *cpu; @@ -158,7 +157,7 @@ retry: WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) { gen_id = cpu_list_generation_id_get(); records = vcpu_dirty_stat_alloc(stat); - vcpu_dirty_stat_collect(stat, records, true); + vcpu_dirty_stat_collect(records, true); } duration = dirty_stat_wait(calc_time_ms, init_time_ms); @@ -172,7 +171,7 @@ retry: cpu_list_unlock(); goto retry; } - vcpu_dirty_stat_collect(stat, records, false); + vcpu_dirty_stat_collect(records, false); } for (i = 0; i < stat->nvcpu; i++) { @@ -610,7 +609,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) int64_t start_time; DirtyPageRecord dirty_pages; - qemu_mutex_lock_iothread(); + bql_lock(); memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); /* @@ -627,7 +626,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled. */ dirtyrate_manual_reset_protect(); - qemu_mutex_unlock_iothread(); + bql_unlock(); record_dirtypages_bitmap(&dirty_pages, true); diff --git a/migration/global_state.c b/migration/global_state.c index 4e2a9d8ec0ad0ee0a3f691625c4a89482cebc735..3a9796cae280fd055e2da03e39d1d9d52f3ffefa 100644 --- a/migration/global_state.c +++ b/migration/global_state.c @@ -22,7 +22,16 @@ typedef struct { uint32_t size; - uint8_t runstate[100]; + + /* + * runstate was 100 bytes, zero padded, but we trimmed it to add a + * few fields and maintain backwards compatibility. 
+ */ + uint8_t runstate[32]; + uint8_t has_vm_was_suspended; + uint8_t vm_was_suspended; + uint8_t unused[66]; + RunState state; bool received; } GlobalState; @@ -35,6 +44,10 @@ static void global_state_do_store(RunState state) assert(strlen(state_str) < sizeof(global_state.runstate)); strpadcpy((char *)global_state.runstate, sizeof(global_state.runstate), state_str, '\0'); + global_state.has_vm_was_suspended = true; + global_state.vm_was_suspended = vm_get_suspended(); + + memset(global_state.unused, 0, sizeof(global_state.unused)); } void global_state_store(void) @@ -59,24 +72,7 @@ RunState global_state_get_runstate(void) static bool global_state_needed(void *opaque) { - GlobalState *s = opaque; - char *runstate = (char *)s->runstate; - - /* If it is not optional, it is mandatory */ - - if (migrate_get_current()->store_global_state) { - return true; - } - - /* If state is running or paused, it is not needed */ - - if (strcmp(runstate, "running") == 0 || - strcmp(runstate, "paused") == 0) { - return false; - } - - /* for any other state it is needed */ - return true; + return migrate_get_current()->store_global_state; } static int global_state_post_load(void *opaque, int version_id) @@ -93,7 +89,7 @@ static int global_state_post_load(void *opaque, int version_id) sizeof(s->runstate)) == sizeof(s->runstate)) { /* * This condition should never happen during migration, because - * all runstate names are shorter than 100 bytes (the size of + * all runstate names are shorter than 32 bytes (the size of * s->runstate). However, a malicious stream could overflow * the qapi_enum_parse() call, so we force the last character * to a NUL byte. @@ -110,6 +106,14 @@ static int global_state_post_load(void *opaque, int version_id) } s->state = r; + /* + * global_state is saved on the outgoing side before forcing a stopped + * state, so it may have saved state=suspended and vm_was_suspended=0. + * Now we are in a paused state, and when we later call vm_start, it must + * restore the suspended state, so we must set vm_was_suspended=1 here. 
+ */ + vm_set_suspended(s->vm_was_suspended || r == RUN_STATE_SUSPENDED); + return 0; } @@ -131,9 +135,12 @@ static const VMStateDescription vmstate_globalstate = { .post_load = global_state_post_load, .pre_save = global_state_pre_save, .needed = global_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(size, GlobalState), VMSTATE_BUFFER(runstate, GlobalState), + VMSTATE_UINT8(has_vm_was_suspended, GlobalState), + VMSTATE_UINT8(vm_was_suspended, GlobalState), + VMSTATE_BUFFER(unused, GlobalState), VMSTATE_END_OF_LIST() }, }; diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index 86ae832176a03512e4d5a4c9896facebc55edf4d..740a219aa498831955af418e8d67c3953b80f8e6 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -399,15 +399,17 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) void hmp_loadvm(Monitor *mon, const QDict *qdict) { - int saved_vm_running = runstate_is_running(); + RunState saved_state = runstate_get(); + const char *name = qdict_get_str(qdict, "name"); Error *err = NULL; vm_stop(RUN_STATE_RESTORE_VM); - if (load_snapshot(name, NULL, false, NULL, &err) && saved_vm_running) { - vm_start(); + if (load_snapshot(name, NULL, false, NULL, &err)) { + load_snapshot_resume(saved_state); } + hmp_handle_error(mon, err); } @@ -852,14 +854,11 @@ static void vm_completion(ReadLineState *rs, const char *str) for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) { SnapshotInfoList *snapshots, *snapshot; - AioContext *ctx = bdrv_get_aio_context(bs); bool ok = false; - aio_context_acquire(ctx); if (bdrv_can_snapshot(bs)) { ok = bdrv_query_snapshot_info_list(bs, &snapshots, NULL) == 0; } - aio_context_release(ctx); if (!ok) { continue; } diff --git a/migration/migration.c b/migration/migration.c index 3ce04b2aafe2cf40ca47af3743938a54626d2383..219447dea174ee69f3f9760fa9ee8fa0c804bbe7 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -523,28 +523,26 @@ static void qemu_start_incoming_migration(const char *uri, bool has_channels, /* * Having preliminary checks for uri and channel */ - if (uri && has_channels) { - error_setg(errp, "'uri' and 'channels' arguments are mutually " - "exclusive; exactly one of the two should be present in " - "'migrate-incoming' qmp command "); + if (!uri == !channels) { + error_setg(errp, "need either 'uri' or 'channels' argument"); return; - } else if (channels) { + } + + if (channels) { /* To verify that Migrate channel list has only item */ if (channels->next) { error_setg(errp, "Channel list has more than one entries"); return; } addr = channels->value->addr; - } else if (uri) { + } + + if (uri) { /* caller uses the old URI syntax */ if (!migrate_uri_parse(uri, &channel, errp)) { return; } addr = channel->addr; - } else { - error_setg(errp, "neither 'uri' or 'channels' argument are " - "specified in 'migrate-incoming' qmp command "); - return; } /* transport mechanism not suitable for migration? */ @@ -604,7 +602,7 @@ static void process_incoming_migration_bh(void *opaque) */ if (!migrate_late_block_activate() || (autostart && (!global_state_received() || - global_state_get_runstate() == RUN_STATE_RUNNING))) { + runstate_is_live(global_state_get_runstate())))) { /* Make sure all file formats throw away their mutable metadata. * If we get an error here, just don't restart the VM yet. 
*/ bdrv_activate_all(&local_err); @@ -628,7 +626,7 @@ static void process_incoming_migration_bh(void *opaque) dirty_bitmap_mig_before_vm_start(); if (!global_state_received() || - global_state_get_runstate() == RUN_STATE_RUNNING) { + runstate_is_live(global_state_get_runstate())) { if (autostart) { vm_start(); } else { @@ -699,6 +697,13 @@ process_incoming_migration_co(void *opaque) } if (ret < 0) { + MigrationState *s = migrate_get_current(); + + if (migrate_has_error(s)) { + WITH_QEMU_LOCK_GUARD(&s->error_mutex) { + error_report_err(s->error); + } + } error_report("load of migration failed: %s", strerror(-ret)); goto fail; } @@ -724,11 +729,8 @@ fail: /** * migration_incoming_setup: Setup incoming migration * @f: file for main migration channel - * @errp: where to put errors - * - * Returns: %true on success, %false on error. */ -static bool migration_incoming_setup(QEMUFile *f, Error **errp) +static void migration_incoming_setup(QEMUFile *f) { MigrationIncomingState *mis = migration_incoming_get_current(); @@ -736,7 +738,6 @@ static bool migration_incoming_setup(QEMUFile *f, Error **errp) mis->from_src_file = f; } qemu_file_set_blocking(f, false); - return true; } void migration_incoming_process(void) @@ -778,11 +779,9 @@ static bool postcopy_try_recover(void) return false; } -void migration_fd_process_incoming(QEMUFile *f, Error **errp) +void migration_fd_process_incoming(QEMUFile *f) { - if (!migration_incoming_setup(f, errp)) { - return; - } + migration_incoming_setup(f); if (postcopy_try_recover()) { return; } @@ -836,10 +835,9 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) * issue is not possible. */ ret = migration_channel_read_peek(ioc, (void *)&channel_magic, - sizeof(channel_magic), &local_err); + sizeof(channel_magic), errp); if (ret != 0) { - error_propagate(errp, local_err); return; } @@ -849,16 +847,12 @@ void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp) } if (multifd_load_setup(errp) != 0) { - error_setg(errp, "Failed to setup multifd channels"); return; } if (default_channel) { f = qemu_file_new_input(ioc); - - if (!migration_incoming_setup(f, errp)) { - return; - } + migration_incoming_setup(f); } else { /* Multiple connections */ assert(migration_needs_multiple_sockets()); @@ -1294,12 +1288,12 @@ static void migrate_fd_cleanup(MigrationState *s) QEMUFile *tmp; trace_migrate_fd_cleanup(); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (s->migration_thread_running) { qemu_thread_join(&s->thread); s->migration_thread_running = false; } - qemu_mutex_lock_iothread(); + bql_lock(); multifd_save_cleanup(); qemu_mutex_lock(&s->qemu_file_lock); @@ -1588,7 +1582,6 @@ int migrate_init(MigrationState *s, Error **errp) s->migration_thread_running = false; error_free(s->error); s->error = NULL; - s->hostname = NULL; s->vmdesc = NULL; migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); @@ -1836,8 +1829,6 @@ bool migration_is_blocked(Error **errp) static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, bool resume, Error **errp) { - Error *local_err = NULL; - if (blk_inc) { warn_report("parameter 'inc' is deprecated;" " use blockdev-mirror with NBD instead"); @@ -1907,8 +1898,7 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, "current migration capabilities"); return false; } - if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, &local_err)) { - error_propagate(errp, local_err); + if (!migrate_cap_set(MIGRATION_CAPABILITY_BLOCK, true, errp)) { return false; } 
s->must_remove_block_options = true; @@ -1939,28 +1929,26 @@ void qmp_migrate(const char *uri, bool has_channels, /* * Having preliminary checks for uri and channel */ - if (uri && has_channels) { - error_setg(errp, "'uri' and 'channels' arguments are mutually " - "exclusive; exactly one of the two should be present in " - "'migrate' qmp command "); + if (!uri == !channels) { + error_setg(errp, "need either 'uri' or 'channels' argument"); return; - } else if (channels) { + } + + if (channels) { /* To verify that Migrate channel list has only item */ if (channels->next) { error_setg(errp, "Channel list has more than one entries"); return; } addr = channels->value->addr; - } else if (uri) { + } + + if (uri) { /* caller uses the old URI syntax */ if (!migrate_uri_parse(uri, &channel, errp)) { return; } addr = channel->addr; - } else { - error_setg(errp, "neither 'uri' or 'channels' argument are " - "specified in 'migrate' qmp command "); - return; } /* transport mechanism not suitable for migration? */ @@ -2411,12 +2399,11 @@ static int postcopy_start(MigrationState *ms, Error **errp) } trace_postcopy_start(); - qemu_mutex_lock_iothread(); + bql_lock(); trace_postcopy_start_set_run(); migration_downtime_start(ms); - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); global_state_store(); ret = migration_stop_vm(RUN_STATE_FINISH_MIGRATE); if (ret < 0) { @@ -2520,7 +2507,7 @@ static int postcopy_start(MigrationState *ms, Error **errp) migration_downtime_end(ms); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (migrate_postcopy_ram()) { /* @@ -2561,13 +2548,13 @@ fail: error_report_err(local_err); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); return -1; } /** * migration_maybe_pause: Pause if required to by - * migrate_pause_before_switchover called with the iothread locked + * migrate_pause_before_switchover called with the BQL locked * Returns: 0 on success */ static int migration_maybe_pause(MigrationState *s, @@ -2595,14 +2582,14 @@ static int migration_maybe_pause(MigrationState *s, * wait for the 'pause_sem' semaphore. */ if (s->state != MIGRATION_STATUS_CANCELLING) { - qemu_mutex_unlock_iothread(); + bql_unlock(); migrate_set_state(&s->state, *current_active_state, MIGRATION_STATUS_PRE_SWITCHOVER); qemu_sem_wait(&s->pause_sem); migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER, new_state); *current_active_state = new_state; - qemu_mutex_lock_iothread(); + bql_lock(); } return s->state == new_state ? 0 : -EINVAL; @@ -2613,9 +2600,8 @@ static int migration_completion_precopy(MigrationState *s, { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); migration_downtime_start(s); - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); s->vm_old_state = runstate_get(); global_state_store(); @@ -2641,7 +2627,7 @@ static int migration_completion_precopy(MigrationState *s, ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false, s->block_inactive); out_unlock: - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -2649,9 +2635,9 @@ static void migration_completion_postcopy(MigrationState *s) { trace_migration_completion_postcopy_end(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_complete_postcopy(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * Shutdown the postcopy fast path thread. 
This is only needed when dest @@ -2675,14 +2661,14 @@ static void migration_completion_failed(MigrationState *s, */ Error *local_err = NULL; - qemu_mutex_lock_iothread(); + bql_lock(); bdrv_activate_all(&local_err); if (local_err) { error_report_err(local_err); } else { s->block_inactive = false; } - qemu_mutex_unlock_iothread(); + bql_unlock(); } migrate_set_state(&s->state, current_active_state, @@ -3122,7 +3108,7 @@ static void migration_iteration_finish(MigrationState *s) /* If we enabled cpu throttling for auto-converge, turn it off. */ cpu_throttle_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: migration_calculate_complete(s); @@ -3136,7 +3122,7 @@ static void migration_iteration_finish(MigrationState *s) case MIGRATION_STATUS_FAILED: case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_CANCELLING: - if (s->vm_old_state == RUN_STATE_RUNNING) { + if (runstate_is_live(s->vm_old_state)) { if (!runstate_check(RUN_STATE_SHUTDOWN)) { vm_start(); } @@ -3153,7 +3139,7 @@ static void migration_iteration_finish(MigrationState *s) break; } migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + bql_unlock(); } static void bg_migration_iteration_finish(MigrationState *s) @@ -3165,7 +3151,7 @@ static void bg_migration_iteration_finish(MigrationState *s) */ ram_write_tracking_stop(); - qemu_mutex_lock_iothread(); + bql_lock(); switch (s->state) { case MIGRATION_STATUS_COMPLETED: migration_calculate_complete(s); @@ -3184,7 +3170,7 @@ static void bg_migration_iteration_finish(MigrationState *s) } migrate_fd_cleanup_schedule(s); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -3306,9 +3292,9 @@ static void *migration_thread(void *opaque) object_ref(OBJECT(s)); update_iteration_initial_status(s); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_header(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* * If we opened the return path, we need to make sure dst has it @@ -3336,9 +3322,9 @@ static void *migration_thread(void *opaque) qemu_savevm_send_colo_enable(s->to_dst_file); } - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); @@ -3392,7 +3378,7 @@ static void bg_migration_vm_start_bh(void *opaque) qemu_bh_delete(s->vm_start_bh); s->vm_start_bh = NULL; - vm_start(); + vm_resume(s->vm_old_state); migration_downtime_end(s); } @@ -3449,10 +3435,10 @@ static void *bg_migration_thread(void *opaque) ram_write_tracking_prepare(); #endif - qemu_mutex_lock_iothread(); + bql_lock(); qemu_savevm_state_header(s->to_dst_file); qemu_savevm_state_setup(s->to_dst_file); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); @@ -3462,13 +3448,8 @@ static void *bg_migration_thread(void *opaque) trace_migration_thread_setup_complete(); migration_downtime_start(s); - qemu_mutex_lock_iothread(); + bql_lock(); - /* - * If VM is currently in suspended state, then, to make a valid runstate - * transition in vm_stop_force_state() we need to wakeup it up. 
- */ - qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); s->vm_old_state = runstate_get(); global_state_store(); @@ -3505,7 +3486,7 @@ static void *bg_migration_thread(void *opaque) s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s); qemu_bh_schedule(s->vm_start_bh); - qemu_mutex_unlock_iothread(); + bql_unlock(); while (migration_is_active(s)) { MigIterateState iter_state = bg_migration_iteration_run(s); @@ -3534,7 +3515,7 @@ fail: if (early_fail) { migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); - qemu_mutex_unlock_iothread(); + bql_unlock(); } bg_migration_iteration_finish(s); diff --git a/migration/migration.h b/migration/migration.h index cf2c9c88e01d670b5b4c9d3de315a1b37a4f0d21..17972dac34e1341f2991041b17145f59564d271c 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -474,14 +474,12 @@ struct MigrationState { void migrate_set_state(int *state, int old_state, int new_state); -void migration_fd_process_incoming(QEMUFile *f, Error **errp); +void migration_fd_process_incoming(QEMUFile *f); void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp); void migration_incoming_process(void); bool migration_has_all_channels(void); -uint64_t migrate_max_downtime(void); - void migrate_set_error(MigrationState *s, const Error *error); bool migrate_has_error(MigrationState *s); diff --git a/migration/multifd.c b/migration/multifd.c index 409460684f29a4671f3da69fe5da20c02322de62..25cbc6dc6be83cc089b00c03e491ed3c1c5eaafa 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -228,20 +228,20 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp) } if (msg.id > migrate_multifd_channels()) { - error_setg(errp, "multifd: received channel version %u " - "expected %u", msg.version, MULTIFD_VERSION); + error_setg(errp, "multifd: received channel id %u is greater than " + "number of channels %u", msg.id, migrate_multifd_channels()); return -1; } return msg.id; } -static MultiFDPages_t *multifd_pages_init(size_t size) +static MultiFDPages_t *multifd_pages_init(uint32_t n) { MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1); - pages->allocated = size; - pages->offset = g_new0(ram_addr_t, size); + pages->allocated = n; + pages->offset = g_new0(ram_addr_t, n); return pages; } @@ -250,7 +250,6 @@ static void multifd_pages_clear(MultiFDPages_t *pages) { pages->num = 0; pages->allocated = 0; - pages->packet_num = 0; pages->block = NULL; g_free(pages->offset); pages->offset = NULL; @@ -391,7 +390,7 @@ struct { * false. 
*/ -static int multifd_send_pages(QEMUFile *f) +static int multifd_send_pages(void) { int i; static int next_channel; @@ -437,7 +436,7 @@ static int multifd_send_pages(QEMUFile *f) return 1; } -int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset) +int multifd_queue_page(RAMBlock *block, ram_addr_t offset) { MultiFDPages_t *pages = multifd_send_state->pages; bool changed = false; @@ -457,12 +456,12 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset) changed = true; } - if (multifd_send_pages(f) < 0) { + if (multifd_send_pages() < 0) { return -1; } if (changed) { - return multifd_queue_page(f, block, offset); + return multifd_queue_page(block, offset); } return 1; @@ -584,7 +583,7 @@ static int multifd_zero_copy_flush(QIOChannel *c) return ret; } -int multifd_send_sync_main(QEMUFile *f) +int multifd_send_sync_main(void) { int i; bool flush_zero_copy; @@ -593,7 +592,7 @@ int multifd_send_sync_main(QEMUFile *f) return 0; } if (multifd_send_state->pages->num) { - if (multifd_send_pages(f) < 0) { + if (multifd_send_pages() < 0) { error_report("%s: multifd_send_pages fail", __func__); return -1; } @@ -787,6 +786,7 @@ static void multifd_tls_outgoing_handshake(QIOTask *task, trace_multifd_tls_outgoing_handshake_error(ioc, error_get_pretty(err)); + migrate_set_error(migrate_get_current(), err); /* * Error happen, mark multifd_send_thread status as 'quit' although it * is not created, and then tell who pay attention to me. @@ -794,6 +794,7 @@ static void multifd_tls_outgoing_handshake(QIOTask *task, p->quit = true; qemu_sem_post(&multifd_send_state->channels_ready); qemu_sem_post(&p->sem_sync); + error_free(err); } static void *multifd_tls_handshake_thread(void *opaque) @@ -847,14 +848,13 @@ static bool multifd_channel_connect(MultiFDSendParams *p, * so we mustn't call multifd_send_thread until then */ return multifd_tls_channel_connect(p, ioc, errp); - - } else { - migration_ioc_register_yank(ioc); - p->registered_yank = true; - p->c = ioc; - qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, - QEMU_THREAD_JOINABLE); } + + migration_ioc_register_yank(ioc); + p->registered_yank = true; + p->c = ioc; + qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, + QEMU_THREAD_JOINABLE); return true; } @@ -950,12 +950,10 @@ int multifd_save_setup(Error **errp) for (i = 0; i < thread_count; i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - Error *local_err = NULL; int ret; - ret = multifd_send_state->ops->send_setup(p, &local_err); + ret = multifd_send_state->ops->send_setup(p, errp); if (ret) { - error_propagate(errp, local_err); return ret; } } @@ -1194,12 +1192,10 @@ int multifd_load_setup(Error **errp) for (i = 0; i < thread_count; i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - Error *local_err = NULL; int ret; - ret = multifd_recv_state->ops->recv_setup(p, &local_err); + ret = multifd_recv_state->ops->recv_setup(p, errp); if (ret) { - error_propagate(errp, local_err); return ret; } } diff --git a/migration/multifd.h b/migration/multifd.h index a835643b48c85ccbfc20d705eb67af5b09323d17..35d11f103cd0d520a809a7a4f42ceb59cce304b9 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -21,8 +21,8 @@ void multifd_load_shutdown(void); bool multifd_recv_all_channels_created(void); void multifd_recv_new_channel(QIOChannel *ioc, Error **errp); void multifd_recv_sync_main(void); -int multifd_send_sync_main(QEMUFile *f); -int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset); +int 
multifd_send_sync_main(void); +int multifd_queue_page(RAMBlock *block, ram_addr_t offset); /* Multifd Compression flags */ #define MULTIFD_FLAG_SYNC (1 << 0) @@ -58,8 +58,6 @@ typedef struct { uint32_t num; /* number of allocated pages */ uint32_t allocated; - /* global number of generated multifd packets */ - uint64_t packet_num; /* offset of each page */ ram_addr_t *offset; RAMBlock *block; diff --git a/migration/options.c b/migration/options.c index 8d8ec73ad95bd1dc7db9cc9881099524085544ae..3e3e0b93b439c4ccc88919c42cf16b5bdb6d91c5 100644 --- a/migration/options.c +++ b/migration/options.c @@ -833,8 +833,10 @@ uint64_t migrate_max_postcopy_bandwidth(void) MigMode migrate_mode(void) { MigrationState *s = migrate_get_current(); + MigMode mode = s->parameters.mode; - return s->parameters.mode; + assert(mode >= 0 && mode < MIG_MODE__MAX); + return mode; } int migrate_multifd_channels(void) diff --git a/migration/ram.c b/migration/ram.c index 8c7886ab797b8a91d9ecf060e8ca016bd436ae46..c0cdcccb75c555269295f316a7f52450085f9676 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1250,10 +1250,9 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss) return pages; } -static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block, - ram_addr_t offset) +static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset) { - if (multifd_queue_page(file, block, offset) < 0) { + if (multifd_queue_page(block, offset) < 0) { return -1; } stat64_add(&mig_stats.normal_pages, 1); @@ -1336,7 +1335,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss) if (migrate_multifd() && !migrate_multifd_flush_after_each_section()) { QEMUFile *f = rs->pss[RAM_CHANNEL_PRECOPY].pss_channel; - int ret = multifd_send_sync_main(f); + int ret = multifd_send_sync_main(); if (ret < 0) { return ret; } @@ -2067,7 +2066,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss) * still see partially copied pages which is data corruption. 
*/ if (migrate_multifd() && !migration_in_postcopy()) { - return ram_save_multifd_page(pss->pss_channel, block, offset); + return ram_save_multifd_page(block, offset); } return ram_save_page(rs, pss); @@ -2395,7 +2394,7 @@ static void ram_save_cleanup(void *opaque) /* We don't use dirty log with background snapshots */ if (!migrate_background_snapshot()) { - /* caller have hold iothread lock or is in a bh, so there is + /* caller have hold BQL or is in a bh, so there is * no writing race against the migration bitmap */ if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { @@ -2984,9 +2983,9 @@ static int ram_save_setup(QEMUFile *f, void *opaque) migration_ops = g_malloc0(sizeof(MigrationOps)); migration_ops->ram_save_target_page = ram_save_target_page_legacy; - qemu_mutex_unlock_iothread(); - ret = multifd_send_sync_main(f); - qemu_mutex_lock_iothread(); + bql_unlock(); + ret = multifd_send_sync_main(); + bql_lock(); if (ret < 0) { return ret; } @@ -3109,7 +3108,7 @@ out: if (ret >= 0 && migration_is_setup_or_active(migrate_get_current()->state)) { if (migrate_multifd() && migrate_multifd_flush_after_each_section()) { - ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); + ret = multifd_send_sync_main(); if (ret < 0) { return ret; } @@ -3131,7 +3130,7 @@ out: * * Returns zero to indicate success or negative on error * - * Called with iothread lock + * Called with the BQL * * @f: QEMUFile where to send the data * @opaque: RAMState pointer @@ -3183,7 +3182,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) } } - ret = multifd_send_sync_main(rs->pss[RAM_CHANNEL_PRECOPY].pss_channel); + ret = multifd_send_sync_main(); if (ret < 0) { return ret; } @@ -3221,11 +3220,11 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy, uint64_t remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; if (!migration_in_postcopy() && remaining_size < s->threshold_size) { - qemu_mutex_lock_iothread(); + bql_lock(); WITH_RCU_READ_LOCK_GUARD() { migration_bitmap_sync_precopy(rs, false); } - qemu_mutex_unlock_iothread(); + bql_unlock(); remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; } @@ -3453,7 +3452,7 @@ void colo_incoming_start_dirty_log(void) { RAMBlock *block = NULL; /* For memory_global_dirty_log_start below. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); qemu_mutex_lock_ramlist(); memory_global_dirty_log_sync(false); @@ -3467,7 +3466,7 @@ void colo_incoming_start_dirty_log(void) } ram_state->migration_dirty_pages = 0; qemu_mutex_unlock_ramlist(); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* It is need to hold the global lock to call this helper */ diff --git a/migration/rdma.c b/migration/rdma.c index 04debab5d94ce9300754a69e939b12cb2ba5bfd7..a355dcea89896bf468f349daf5909a0d919284fb 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -238,6 +238,7 @@ static const char *control_desc(unsigned int rdma_control) return strs[rdma_control]; } +#if !defined(htonll) static uint64_t htonll(uint64_t v) { union { uint32_t lv[2]; uint64_t llv; } u; @@ -245,13 +246,16 @@ static uint64_t htonll(uint64_t v) u.lv[1] = htonl(v & 0xFFFFFFFFULL); return u.llv; } +#endif +#if !defined(ntohll) static uint64_t ntohll(uint64_t v) { union { uint32_t lv[2]; uint64_t llv; } u; u.llv = v; return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]); } +#endif static void dest_block_to_network(RDMADestBlock *db) { @@ -4035,7 +4039,6 @@ static void rdma_accept_incoming_migration(void *opaque) { RDMAContext *rdma = opaque; QEMUFile *f; - Error *local_err = NULL; trace_qemu_rdma_accept_incoming_migration(); if (qemu_rdma_accept(rdma) < 0) { @@ -4057,10 +4060,7 @@ static void rdma_accept_incoming_migration(void *opaque) } rdma->migration_started_on_destination = 1; - migration_fd_process_incoming(f, &local_err); - if (local_err) { - error_reportf_err(local_err, "RDMA ERROR:"); - } + migration_fd_process_incoming(f); } void rdma_start_incoming_migration(InetSocketAddress *host_port, diff --git a/migration/savevm.c b/migration/savevm.c index eec5503a422081a5e2fce7238820ec6fc8db1b8b..6410705ebeb921749cf5ab1fab86760be11af8ea 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -438,7 +438,7 @@ static const VMStateDescription vmstate_target_page_bits = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_target_page_bits_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(target_page_bits, SaveState), VMSTATE_END_OF_LIST() } @@ -454,7 +454,7 @@ static const VMStateDescription vmstate_capabilites = { .version_id = 1, .minimum_version_id = 1, .needed = vmstate_capabilites_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_V(caps_count, SaveState, 1), VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1, vmstate_info_capability, @@ -499,7 +499,7 @@ static const VMStateDescription vmstate_uuid = { .minimum_version_id = 1, .needed = vmstate_uuid_needed, .post_load = vmstate_uuid_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1), VMSTATE_END_OF_LIST() } @@ -512,12 +512,12 @@ static const VMStateDescription vmstate_configuration = { .post_load = configuration_post_load, .pre_save = configuration_pre_save, .post_save = configuration_post_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(len, SaveState), VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription *[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_target_page_bits, &vmstate_capabilites, &vmstate_uuid, @@ -551,11 +551,11 @@ static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field, } static void 
dump_vmstate_vmss(FILE *out_file, - const VMStateDescription **subsection, + const VMStateDescription *subsection, int indent) { - if (*subsection != NULL) { - dump_vmstate_vmsd(out_file, *subsection, indent, true); + if (subsection != NULL) { + dump_vmstate_vmsd(out_file, subsection, indent, true); } } @@ -597,7 +597,7 @@ static void dump_vmstate_vmsd(FILE *out_file, fprintf(out_file, "\n%*s]", indent, ""); } if (vmsd->subsections != NULL) { - const VMStateDescription **subsection = vmsd->subsections; + const VMStateDescription * const *subsection = vmsd->subsections; bool first; fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, ""); @@ -606,7 +606,7 @@ static void dump_vmstate_vmsd(FILE *out_file, if (!first) { fprintf(out_file, ",\n"); } - dump_vmstate_vmss(out_file, subsection, indent + 2); + dump_vmstate_vmss(out_file, *subsection, indent + 2); subsection++; first = false; } @@ -831,7 +831,7 @@ void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque) static void vmstate_check(const VMStateDescription *vmsd) { const VMStateField *field = vmsd->fields; - const VMStateDescription **subsection = vmsd->subsections; + const VMStateDescription * const *subsection = vmsd->subsections; if (field) { while (field->name) { @@ -3046,10 +3046,9 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, QEMUSnapshotInfo sn1, *sn = &sn1; int ret = -1, ret2; QEMUFile *f; - int saved_vm_running; + RunState saved_state = runstate_get(); uint64_t vm_state_size; g_autoptr(GDateTime) now = g_date_time_new_now_local(); - AioContext *aio_context; GLOBAL_STATE_CODE(); @@ -3092,17 +3091,12 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, if (bs == NULL) { return false; } - aio_context = bdrv_get_aio_context(bs); - - saved_vm_running = runstate_is_running(); global_state_store(); vm_stop(RUN_STATE_SAVE_VM); bdrv_drain_all_begin(); - aio_context_acquire(aio_context); - memset(sn, 0, sizeof(*sn)); /* fill auxiliary fields */ @@ -3139,14 +3133,6 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, goto the_end; } - /* The bdrv_all_create_snapshot() call that follows acquires the AioContext - * for itself. BDRV_POLL_WHILE() does not support nested locking because - * it only releases the lock once. Therefore synchronous I/O will deadlock - * unless we release the AioContext before bdrv_all_create_snapshot(). 
- */ - aio_context_release(aio_context); - aio_context = NULL; - ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, has_devices, devices, errp); if (ret < 0) { @@ -3157,15 +3143,9 @@ bool save_snapshot(const char *name, bool overwrite, const char *vmstate, ret = 0; the_end: - if (aio_context) { - aio_context_release(aio_context); - } - bdrv_drain_all_end(); - if (saved_vm_running) { - vm_start(); - } + vm_resume(saved_state); return ret == 0; } @@ -3258,7 +3238,6 @@ bool load_snapshot(const char *name, const char *vmstate, QEMUSnapshotInfo sn; QEMUFile *f; int ret; - AioContext *aio_context; MigrationIncomingState *mis = migration_incoming_get_current(); if (!bdrv_all_can_snapshot(has_devices, devices, errp)) { @@ -3278,12 +3257,9 @@ bool load_snapshot(const char *name, const char *vmstate, if (!bs_vm_state) { return false; } - aio_context = bdrv_get_aio_context(bs_vm_state); /* Don't even try to load empty VM states */ - aio_context_acquire(aio_context); ret = bdrv_snapshot_find(bs_vm_state, &sn, name); - aio_context_release(aio_context); if (ret < 0) { return false; } else if (sn.vm_state_size == 0) { @@ -3320,10 +3296,8 @@ bool load_snapshot(const char *name, const char *vmstate, ret = -EINVAL; goto err_drain; } - aio_context_acquire(aio_context); ret = qemu_loadvm_state(f); migration_incoming_state_destroy(); - aio_context_release(aio_context); bdrv_drain_all_end(); @@ -3339,6 +3313,14 @@ err_drain: return false; } +void load_snapshot_resume(RunState state) +{ + vm_resume(state); + if (state == RUN_STATE_RUNNING && runstate_get() == RUN_STATE_SUSPENDED) { + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, &error_abort); + } +} + bool delete_snapshot(const char *name, bool has_devices, strList *devices, Error **errp) { @@ -3403,16 +3385,15 @@ static void snapshot_load_job_bh(void *opaque) { Job *job = opaque; SnapshotJob *s = container_of(job, SnapshotJob, common); - int orig_vm_running; + RunState orig_state = runstate_get(); job_progress_set_remaining(&s->common, 1); - orig_vm_running = runstate_is_running(); vm_stop(RUN_STATE_RESTORE_VM); s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp); - if (s->ret && orig_vm_running) { - vm_start(); + if (s->ret) { + load_snapshot_resume(orig_state); } job_progress_update(&s->common, 1); diff --git a/migration/vmstate.c b/migration/vmstate.c index b7723a4187144d4e978713a8994ee200c1249afb..ef26f26ccdca2339a562edc9773b849b68919064 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -452,13 +452,15 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, } static const VMStateDescription * -vmstate_get_subsection(const VMStateDescription **sub, char *idstr) +vmstate_get_subsection(const VMStateDescription * const *sub, + const char *idstr) { - while (sub && *sub) { - if (strcmp(idstr, (*sub)->name) == 0) { - return *sub; + if (sub) { + for (const VMStateDescription *s = *sub; s ; s = *++sub) { + if (strcmp(idstr, s->name) == 0) { + return s; + } } - sub++; } return NULL; } @@ -517,7 +519,7 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc) { - const VMStateDescription **sub = vmsd->subsections; + const VMStateDescription * const *sub = vmsd->subsections; bool vmdesc_has_subsections = false; int ret = 0; diff --git a/nbd/server.c b/nbd/server.c index 895cf0a7525bdf85996564faa05d515042c7a45e..941832f178ea6715de44bfd0511fe25426d772b5 100644 --- 
a/nbd/server.c +++ b/nbd/server.c @@ -122,26 +122,28 @@ struct NBDMetaContexts { }; struct NBDClient { - int refcount; + int refcount; /* atomic */ void (*close_fn)(NBDClient *client, bool negotiated); + QemuMutex lock; + NBDExport *exp; QCryptoTLSCreds *tlscreds; char *tlsauthz; QIOChannelSocket *sioc; /* The underlying data channel */ QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */ - Coroutine *recv_coroutine; + Coroutine *recv_coroutine; /* protected by lock */ CoMutex send_lock; Coroutine *send_coroutine; - bool read_yielding; - bool quiescing; + bool read_yielding; /* protected by lock */ + bool quiescing; /* protected by lock */ QTAILQ_ENTRY(NBDClient) next; - int nb_requests; - bool closing; + int nb_requests; /* protected by lock */ + bool closing; /* protected by lock */ uint32_t check_align; /* If non-zero, check for aligned client requests */ @@ -1415,11 +1417,18 @@ nbd_read_eof(NBDClient *client, void *buffer, size_t size, Error **errp) len = qio_channel_readv(client->ioc, &iov, 1, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { - client->read_yielding = true; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->read_yielding = true; + + /* Prompt main loop thread to re-run nbd_drained_poll() */ + aio_wait_kick(); + } qio_channel_yield(client->ioc, G_IO_IN); - client->read_yielding = false; - if (client->quiescing) { - return -EAGAIN; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->read_yielding = false; + if (client->quiescing) { + return -EAGAIN; + } } continue; } else if (len < 0) { @@ -1501,14 +1510,17 @@ static int coroutine_fn nbd_receive_request(NBDClient *client, NBDRequest *reque #define MAX_NBD_REQUESTS 16 +/* Runs in export AioContext and main loop thread */ void nbd_client_get(NBDClient *client) { - client->refcount++; + qatomic_inc(&client->refcount); } void nbd_client_put(NBDClient *client) { - if (--client->refcount == 0) { + assert(qemu_in_main_thread()); + + if (qatomic_fetch_dec(&client->refcount) == 1) { /* The last reference should be dropped by client->close, * which is called by client_close. */ @@ -1525,17 +1537,47 @@ void nbd_client_put(NBDClient *client) blk_exp_unref(&client->exp->common); } g_free(client->contexts.bitmaps); + qemu_mutex_destroy(&client->lock); g_free(client); } } +/* + * Tries to release the reference to @client, but only if other references + * remain. This is an optimization for the common case where we want to avoid + * the expense of scheduling nbd_client_put() in the main loop thread. + * + * Returns true upon success or false if the reference was not released because + * it is the last reference. + */ +static bool nbd_client_put_nonzero(NBDClient *client) +{ + int old = qatomic_read(&client->refcount); + int expected; + + do { + if (old == 1) { + return false; + } + + expected = old; + old = qatomic_cmpxchg(&client->refcount, expected, expected - 1); + } while (old != expected); + + return true; +} + static void client_close(NBDClient *client, bool negotiated) { - if (client->closing) { - return; - } + assert(qemu_in_main_thread()); + + WITH_QEMU_LOCK_GUARD(&client->lock) { + if (client->closing) { + return; + } - client->closing = true; + client->closing = true; + } /* Force requests to finish. They will drop their own references, * then we'll close the socket and free the NBDClient. 
@@ -1549,6 +1591,7 @@ static void client_close(NBDClient *client, bool negotiated) } } +/* Runs in export AioContext with client->lock held */ static NBDRequestData *nbd_request_get(NBDClient *client) { NBDRequestData *req; @@ -1557,11 +1600,11 @@ static NBDRequestData *nbd_request_get(NBDClient *client) client->nb_requests++; req = g_new0(NBDRequestData, 1); - nbd_client_get(client); req->client = client; return req; } +/* Runs in export AioContext with client->lock held */ static void nbd_request_put(NBDRequestData *req) { NBDClient *client = req->client; @@ -1578,8 +1621,6 @@ static void nbd_request_put(NBDRequestData *req) } nbd_client_receive_next_request(client); - - nbd_client_put(client); } static void blk_aio_attached(AioContext *ctx, void *opaque) @@ -1587,14 +1628,18 @@ static void blk_aio_attached(AioContext *ctx, void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + trace_nbd_blk_aio_attached(exp->name, ctx); exp->common.ctx = ctx; QTAILQ_FOREACH(client, &exp->clients, next) { - assert(client->nb_requests == 0); - assert(client->recv_coroutine == NULL); - assert(client->send_coroutine == NULL); + WITH_QEMU_LOCK_GUARD(&client->lock) { + assert(client->nb_requests == 0); + assert(client->recv_coroutine == NULL); + assert(client->send_coroutine == NULL); + } } } @@ -1602,6 +1647,8 @@ static void blk_aio_detach(void *opaque) { NBDExport *exp = opaque; + assert(qemu_in_main_thread()); + trace_nbd_blk_aio_detach(exp->name, exp->common.ctx); exp->common.ctx = NULL; @@ -1612,8 +1659,12 @@ static void nbd_drained_begin(void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - client->quiescing = true; + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->quiescing = true; + } } } @@ -1622,28 +1673,48 @@ static void nbd_drained_end(void *opaque) NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - client->quiescing = false; - nbd_client_receive_next_request(client); + WITH_QEMU_LOCK_GUARD(&client->lock) { + client->quiescing = false; + nbd_client_receive_next_request(client); + } } } +/* Runs in export AioContext */ +static void nbd_wake_read_bh(void *opaque) +{ + NBDClient *client = opaque; + qio_channel_wake_read(client->ioc); +} + static bool nbd_drained_poll(void *opaque) { NBDExport *exp = opaque; NBDClient *client; + assert(qemu_in_main_thread()); + QTAILQ_FOREACH(client, &exp->clients, next) { - if (client->nb_requests != 0) { - /* - * If there's a coroutine waiting for a request on nbd_read_eof() - * enter it here so we don't depend on the client to wake it up. - */ - if (client->recv_coroutine != NULL && client->read_yielding) { - qio_channel_wake_read(client->ioc); - } + WITH_QEMU_LOCK_GUARD(&client->lock) { + if (client->nb_requests != 0) { + /* + * If there's a coroutine waiting for a request on nbd_read_eof() + * enter it here so we don't depend on the client to wake it up. + * + * Schedule a BH in the export AioContext to avoid missing the + * wake up due to the race between qio_channel_wake_read() and + * qio_channel_yield(). 
+ */ + if (client->recv_coroutine != NULL && client->read_yielding) { + aio_bh_schedule_oneshot(nbd_export_aio_context(client->exp), + nbd_wake_read_bh, client); + } - return true; + return true; + } } } @@ -1654,6 +1725,8 @@ static void nbd_eject_notifier(Notifier *n, void *data) { NBDExport *exp = container_of(n, NBDExport, eject_notifier); + assert(qemu_in_main_thread()); + blk_exp_request_shutdown(&exp->common); } @@ -2539,7 +2612,6 @@ static int coroutine_fn nbd_co_receive_request(NBDRequestData *req, int ret; g_assert(qemu_in_coroutine()); - assert(client->recv_coroutine == qemu_coroutine_self()); ret = nbd_receive_request(client, request, errp); if (ret < 0) { return ret; @@ -2936,15 +3008,23 @@ static coroutine_fn int nbd_handle_request(NBDClient *client, static coroutine_fn void nbd_trip(void *opaque) { NBDClient *client = opaque; - NBDRequestData *req; + NBDRequestData *req = NULL; NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */ int ret; Error *local_err = NULL; + /* + * Note that nbd_client_put() and client_close() must be called from the + * main loop thread. Use aio_co_reschedule_self() to switch AioContext + * before calling these functions. + */ + trace_nbd_trip(); + + qemu_mutex_lock(&client->lock); + if (client->closing) { - nbd_client_put(client); - return; + goto done; } if (client->quiescing) { @@ -2952,14 +3032,27 @@ static coroutine_fn void nbd_trip(void *opaque) * We're switching between AIO contexts. Don't attempt to receive a new * request and kick the main context which may be waiting for us. */ - nbd_client_put(client); client->recv_coroutine = NULL; aio_wait_kick(); - return; + goto done; } req = nbd_request_get(client); - ret = nbd_co_receive_request(req, &request, &local_err); + + /* + * nbd_co_receive_request() returns -EAGAIN when nbd_drained_begin() has + * set client->quiescing but by the time we get back nbd_drained_end() may + * have already cleared client->quiescing. In that case we try again + * because nothing else will spawn an nbd_trip() coroutine until we set + * client->recv_coroutine = NULL further down. 
+ */ + do { + assert(client->recv_coroutine == qemu_coroutine_self()); + qemu_mutex_unlock(&client->lock); + ret = nbd_co_receive_request(req, &request, &local_err); + qemu_mutex_lock(&client->lock); + } while (ret == -EAGAIN && !client->quiescing); + client->recv_coroutine = NULL; if (client->closing) { @@ -2971,15 +3064,16 @@ static coroutine_fn void nbd_trip(void *opaque) } if (ret == -EAGAIN) { - assert(client->quiescing); goto done; } nbd_client_receive_next_request(client); + if (ret == -EIO) { goto disconnect; } + qemu_mutex_unlock(&client->lock); qio_channel_set_cork(client->ioc, true); if (ret < 0) { @@ -2999,6 +3093,10 @@ static coroutine_fn void nbd_trip(void *opaque) g_free(request.contexts->bitmaps); g_free(request.contexts); } + + qio_channel_set_cork(client->ioc, false); + qemu_mutex_lock(&client->lock); + if (ret < 0) { error_prepend(&local_err, "Failed to send reply: "); goto disconnect; @@ -3013,21 +3111,36 @@ static coroutine_fn void nbd_trip(void *opaque) goto disconnect; } - qio_channel_set_cork(client->ioc, false); done: - nbd_request_put(req); - nbd_client_put(client); + if (req) { + nbd_request_put(req); + } + + qemu_mutex_unlock(&client->lock); + + if (!nbd_client_put_nonzero(client)) { + aio_co_reschedule_self(qemu_get_aio_context()); + nbd_client_put(client); + } return; disconnect: if (local_err) { error_reportf_err(local_err, "Disconnect client, due to: "); } + nbd_request_put(req); + qemu_mutex_unlock(&client->lock); + + aio_co_reschedule_self(qemu_get_aio_context()); client_close(client, true); nbd_client_put(client); } +/* + * Runs in export AioContext and main loop thread. Caller must hold + * client->lock. + */ static void nbd_client_receive_next_request(NBDClient *client) { if (!client->recv_coroutine && client->nb_requests < MAX_NBD_REQUESTS && @@ -3053,7 +3166,9 @@ static coroutine_fn void nbd_co_client_start(void *opaque) return; } - nbd_client_receive_next_request(client); + WITH_QEMU_LOCK_GUARD(&client->lock) { + nbd_client_receive_next_request(client); + } } /* @@ -3070,6 +3185,7 @@ void nbd_client_new(QIOChannelSocket *sioc, Coroutine *co; client = g_new0(NBDClient, 1); + qemu_mutex_init(&client->lock); client->refcount = 1; client->tlscreds = tlscreds; if (tlscreds) { diff --git a/net/can/meson.build b/net/can/meson.build index 45693c82c9d2e1d251d24a788e6fa37b154e8820..af3b27921cd1d402e187ecd08a528e2ab4763b96 100644 --- a/net/can/meson.build +++ b/net/can/meson.build @@ -1,5 +1,7 @@ can_ss = ss.source_set() can_ss.add(files('can_core.c', 'can_host.c')) -can_ss.add(when: 'CONFIG_LINUX', if_true: files('can_socketcan.c')) +if host_os == 'linux' + can_ss.add(files('can_socketcan.c')) +endif system_ss.add_all(when: 'CONFIG_CAN_BUS', if_true: can_ss) diff --git a/net/colo-compare.c b/net/colo-compare.c index 7f9e6f89ce058fdaa89b0084cf350298ed20b8ab..f2dfc0ebdcd81d5a4b13842ef883a1455d7838d9 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -1439,12 +1439,10 @@ static void colo_compare_finalize(Object *obj) qemu_bh_delete(s->event_bh); AioContext *ctx = iothread_get_aio_context(s->iothread); - aio_context_acquire(ctx); AIO_WAIT_WHILE(ctx, !s->out_sendco.done); if (s->notify_dev) { AIO_WAIT_WHILE(ctx, !s->notify_sendco.done); } - aio_context_release(ctx); /* Release all unhandled packets after compare thead exited */ g_queue_foreach(&s->conn_list, colo_flush_packets, s); diff --git a/net/meson.build b/net/meson.build index ce99bd4447f484e8842d7a5c47403217343dd0b5..9432a588e4e77b0726365145347670958805c841 100644 --- a/net/meson.build +++ 
b/net/meson.build @@ -41,23 +41,21 @@ system_ss.add(when: libxdp, if_true: files('af-xdp.c')) if have_vhost_net_user system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-user-stub.c')) endif -if targetos == 'windows' +if host_os == 'windows' system_ss.add(files('tap-win32.c')) -elif targetos == 'linux' +elif host_os == 'linux' system_ss.add(files('tap.c', 'tap-linux.c')) -elif targetos in bsd_oses +elif host_os in bsd_oses system_ss.add(files('tap.c', 'tap-bsd.c')) -elif targetos == 'sunos' +elif host_os == 'sunos' system_ss.add(files('tap.c', 'tap-solaris.c')) else system_ss.add(files('tap.c', 'tap-stub.c')) endif if have_vhost_net_vdpa system_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-vdpa.c'), if_false: files('vhost-vdpa-stub.c')) - system_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-vdpa-stub.c')) endif vmnet_files = files( diff --git a/net/stream.c b/net/stream.c index 9204b4c96e4079e343ff1b39733e59de910732fb..97e6ec6679e4dad45001b695e6f1551784453bc8 100644 --- a/net/stream.c +++ b/net/stream.c @@ -165,6 +165,7 @@ static gboolean net_stream_send(QIOChannel *ioc, s->ioc_write_tag = 0; } if (s->listener) { + qemu_set_info_str(&s->nc, "listening"); qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); } @@ -173,7 +174,6 @@ static gboolean net_stream_send(QIOChannel *ioc, net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); s->nc.link_down = true; - qemu_set_info_str(&s->nc, "%s", ""); qapi_event_send_netdev_stream_disconnected(s->nc.name); net_stream_arm_reconnect(s); @@ -272,9 +272,11 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(s->listen_ioc); SocketAddress *addr; int ret; + Error *err = NULL; - if (listen_sioc->fd < 0) { - qemu_set_info_str(&s->nc, "connection error"); + if (qio_task_propagate_error(task, &err)) { + qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); + error_free(err); return; } @@ -292,6 +294,7 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) s->nc.link_down = true; s->listener = qio_net_listener_new(); + qemu_set_info_str(&s->nc, "listening"); net_socket_rs_init(&s->rs, net_stream_rs_finalize, false); qio_net_listener_set_client_func(s->listener, net_stream_listen, s, NULL); qio_net_listener_add(s->listener, listen_sioc); @@ -309,6 +312,7 @@ static int net_stream_server_init(NetClientState *peer, nc = qemu_new_net_client(&net_stream_info, peer, model, name); s = DO_UPCAST(NetStreamState, nc, nc); + qemu_set_info_str(&s->nc, "initializing"); s->listen_ioc = QIO_CHANNEL(listen_sioc); qio_channel_socket_listen_async(listen_sioc, addr, 0, @@ -325,9 +329,11 @@ static void net_stream_client_connected(QIOTask *task, gpointer opaque) SocketAddress *addr; gchar *uri; int ret; + Error *err = NULL; - if (sioc->fd < 0) { - qemu_set_info_str(&s->nc, "connection error"); + if (qio_task_propagate_error(task, &err)) { + qemu_set_info_str(&s->nc, "error: %s", error_get_pretty(err)); + error_free(err); goto error; } @@ -382,6 +388,7 @@ static gboolean net_stream_reconnect(gpointer data) static void net_stream_arm_reconnect(NetStreamState *s) { if (s->reconnect && s->timer_tag == 0) { + qemu_set_info_str(&s->nc, "connecting"); s->timer_tag = g_timeout_add_seconds(s->reconnect, net_stream_reconnect, s); } @@ -400,6 +407,7 @@ static int net_stream_client_init(NetClientState *peer, nc = 
qemu_new_net_client(&net_stream_info, peer, model, name); s = DO_UPCAST(NetStreamState, nc, nc); + qemu_set_info_str(&s->nc, "connecting"); s->ioc = QIO_CHANNEL(sioc); s->nc.link_down = true; diff --git a/net/tap.c b/net/tap.c index c23d0323c2aefe27003f7d8c6a80529d2c40f25b..c698b704753b1943b0398af24914757b5c236295 100644 --- a/net/tap.c +++ b/net/tap.c @@ -219,7 +219,7 @@ static void tap_send(void *opaque) /* * When the host keeps receiving more packets while tap_send() is - * running we can hog the QEMU global mutex. Limit the number of + * running we can hog the BQL. Limit the number of * packets that are processed per tap_send() callback to prevent * stalling the guest. */ diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index d0614d79549b3e240f40e5e9116670f89c3fc356..3726ee5d67634b70ae9cc5ff2424494c521fec70 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -236,10 +236,11 @@ static void vhost_vdpa_cleanup(NetClientState *nc) g_free(s->vhost_net); s->vhost_net = NULL; } - if (s->vhost_vdpa.device_fd >= 0) { - qemu_close(s->vhost_vdpa.device_fd); - s->vhost_vdpa.device_fd = -1; + if (s->vhost_vdpa.index != 0) { + return; } + qemu_close(s->vhost_vdpa.shared->device_fd); + g_free(s->vhost_vdpa.shared); } /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */ @@ -286,15 +287,6 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, return size; } -/** From any vdpa net client, get the netclient of the first queue pair */ -static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s) -{ - NICState *nic = qemu_get_nic(s->nc.peer); - NetClientState *nc0 = qemu_get_peer(nic->ncs, 0); - - return DO_UPCAST(VhostVDPAState, nc, nc0); -} - static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) { struct vhost_vdpa *v = &s->vhost_vdpa; @@ -350,8 +342,8 @@ static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) migration_add_notifier(&s->migration_state, vdpa_net_migration_state_notifier); if (v->shadow_vqs_enabled) { - v->iova_tree = vhost_iova_tree_new(v->iova_range.first, - v->iova_range.last); + v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, + v->shared->iova_range.last); } } @@ -365,22 +357,16 @@ static int vhost_vdpa_net_data_start(NetClientState *nc) if (s->always_svq || migration_is_setup_or_active(migrate_get_current()->state)) { v->shadow_vqs_enabled = true; - v->shadow_data = true; } else { v->shadow_vqs_enabled = false; - v->shadow_data = false; } if (v->index == 0) { + v->shared->shadow_data = v->shadow_vqs_enabled; vhost_vdpa_net_data_start_first(s); return 0; } - if (v->shadow_vqs_enabled) { - VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s); - v->iova_tree = s0->vhost_vdpa.iova_tree; - } - return 0; } @@ -413,9 +399,8 @@ static void vhost_vdpa_net_client_stop(NetClientState *nc) dev = s->vhost_vdpa.dev; if (dev->vq_index + dev->nvqs == dev->vq_index_end) { - g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); - } else { - s->vhost_vdpa.iova_tree = NULL; + g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, + vhost_iova_tree_delete); } } @@ -460,7 +445,7 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v, }; int r; - r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); + r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); if (unlikely(r < 0)) { error_report("Can't set vq group %u asid %u, errno=%d (%s)", asid.index, asid.num, errno, g_strerror(errno)); @@ -470,7 +455,7 @@ static int vhost_vdpa_set_address_space_id(struct vhost_vdpa 
*v, static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) { - VhostIOVATree *tree = v->iova_tree; + VhostIOVATree *tree = v->shared->iova_tree; DMAMap needle = { /* * No need to specify size or to look for more translations since @@ -486,7 +471,8 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) return; } - r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1); + r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova, + map->size + 1); if (unlikely(r != 0)) { error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); } @@ -504,13 +490,13 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, map.translated_addr = (hwaddr)(uintptr_t)buf; map.size = size - 1; map.perm = write ? IOMMU_RW : IOMMU_RO, - r = vhost_iova_tree_map_alloc(v->iova_tree, &map); + r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map); if (unlikely(r != IOVA_OK)) { error_report("Cannot map injected element"); return r; } - r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova, + r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova, vhost_vdpa_net_cvq_cmd_page_len(), buf, !write); if (unlikely(r < 0)) { goto dma_map_err; @@ -519,13 +505,13 @@ static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, return 0; dma_map_err: - vhost_iova_tree_remove(v->iova_tree, map); + vhost_iova_tree_remove(v->shared->iova_tree, map); return r; } static int vhost_vdpa_net_cvq_start(NetClientState *nc) { - VhostVDPAState *s, *s0; + VhostVDPAState *s; struct vhost_vdpa *v; int64_t cvq_group; int r; @@ -536,12 +522,10 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) s = DO_UPCAST(VhostVDPAState, nc, nc); v = &s->vhost_vdpa; - s0 = vhost_vdpa_net_first_nc_vdpa(s); - v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled; - v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled; + v->shadow_vqs_enabled = v->shared->shadow_data; s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; - if (s->vhost_vdpa.shadow_data) { + if (v->shared->shadow_data) { /* SVQ is already configured for all virtqueues */ goto out; } @@ -558,7 +542,7 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc) return 0; } - cvq_group = vhost_vdpa_get_vring_group(v->device_fd, + cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd, v->dev->vq_index_end - 1, &err); if (unlikely(cvq_group < 0)) { @@ -579,24 +563,22 @@ out: return 0; } - if (s0->vhost_vdpa.iova_tree) { - /* - * SVQ is already configured for all virtqueues. Reuse IOVA tree for - * simplicity, whether CVQ shares ASID with guest or not, because: - * - Memory listener need access to guest's memory addresses allocated - * in the IOVA tree. - * - There should be plenty of IOVA address space for both ASID not to - * worry about collisions between them. Guest's translations are - * still validated with virtio virtqueue_pop so there is no risk for - * the guest to access memory that it shouldn't. - * - * To allocate a iova tree per ASID is doable but it complicates the - * code and it is not worth it for the moment. - */ - v->iova_tree = s0->vhost_vdpa.iova_tree; - } else { - v->iova_tree = vhost_iova_tree_new(v->iova_range.first, - v->iova_range.last); + /* + * If other vhost_vdpa already have an iova_tree, reuse it for simplicity, + * whether CVQ shares ASID with guest or not, because: + * - Memory listener need access to guest's memory addresses allocated in + * the IOVA tree. 
+ * - There should be plenty of IOVA address space for both ASID not to + * worry about collisions between them. Guest's translations are still + * validated with virtio virtqueue_pop so there is no risk for the guest + * to access memory that it shouldn't. + * + * To allocate a iova tree per ASID is doable but it complicates the code + * and it is not worth it for the moment. + */ + if (!v->shared->iova_tree) { + v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first, + v->shared->iova_range.last); } r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, @@ -1661,6 +1643,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, bool svq, struct vhost_vdpa_iova_range iova_range, uint64_t features, + VhostVDPAShared *shared, Error **errp) { NetClientState *nc = NULL; @@ -1686,16 +1669,17 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, qemu_set_info_str(nc, TYPE_VHOST_VDPA); s = DO_UPCAST(VhostVDPAState, nc, nc); - s->vhost_vdpa.device_fd = vdpa_device_fd; s->vhost_vdpa.index = queue_pair_index; s->always_svq = svq; s->migration_state.notify = NULL; s->vhost_vdpa.shadow_vqs_enabled = svq; - s->vhost_vdpa.iova_range = iova_range; - s->vhost_vdpa.shadow_data = svq; if (queue_pair_index == 0) { vhost_vdpa_net_valid_svq_features(features, &s->vhost_vdpa.migration_blocker); + s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1); + s->vhost_vdpa.shared->device_fd = vdpa_device_fd; + s->vhost_vdpa.shared->iova_range = iova_range; + s->vhost_vdpa.shared->shadow_data = svq; } else if (!is_datapath) { s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), PROT_READ | PROT_WRITE, @@ -1708,11 +1692,16 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.shadow_vq_ops_opaque = s; s->cvq_isolated = cvq_isolated; } + if (queue_pair_index != 0) { + s->vhost_vdpa.shared = shared; + } + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { qemu_del_net_client(nc); return NULL; } + return nc; } @@ -1824,17 +1813,26 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, ncs = g_malloc0(sizeof(*ncs) * queue_pairs); for (i = 0; i < queue_pairs; i++) { + VhostVDPAShared *shared = NULL; + + if (i) { + shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared; + } ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 2, true, opts->x_svq, - iova_range, features, errp); + iova_range, features, shared, errp); if (!ncs[i]) goto err; } if (has_cvq) { + VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]); + VhostVDPAShared *shared = s0->vhost_vdpa.shared; + nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, vdpa_device_fd, i, 1, false, - opts->x_svq, iova_range, features, errp); + opts->x_svq, iova_range, features, shared, + errp); if (!nc) goto err; } diff --git a/pc-bios/edk2-aarch64-code.fd.bz2 b/pc-bios/edk2-aarch64-code.fd.bz2 index 985e69a66ac6207382464c23b351ea008d1d661b..fdcf89022adbb5e565008e73852f1578645254f1 100644 Binary files a/pc-bios/edk2-aarch64-code.fd.bz2 and b/pc-bios/edk2-aarch64-code.fd.bz2 differ diff --git a/pc-bios/edk2-arm-code.fd.bz2 b/pc-bios/edk2-arm-code.fd.bz2 index ae797a8c8e12d44051a97c1472fb0cc62fb4ae17..9d829f4f08760a2894ddc71fc189b6ae86f12008 100644 Binary files a/pc-bios/edk2-arm-code.fd.bz2 and b/pc-bios/edk2-arm-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-code.fd.bz2 b/pc-bios/edk2-i386-code.fd.bz2 index e703c2f954d49ca0c14373bcfd0b6a95a34c69e5..1a9d3925746694479681033df1fac24b4fa1c07f 100644 
Binary files a/pc-bios/edk2-i386-code.fd.bz2 and b/pc-bios/edk2-i386-code.fd.bz2 differ diff --git a/pc-bios/edk2-i386-secure-code.fd.bz2 b/pc-bios/edk2-i386-secure-code.fd.bz2 index 7230d44615069e0add59261fbfb8c4743374d7ba..fab0a77b3057565067237a6175e929cdde717def 100644 Binary files a/pc-bios/edk2-i386-secure-code.fd.bz2 and b/pc-bios/edk2-i386-secure-code.fd.bz2 differ diff --git a/pc-bios/edk2-riscv-code.fd.bz2 b/pc-bios/edk2-riscv-code.fd.bz2 index c1cc08561d0bc57f976be1b57eb90d537c7d0a9b..6394fbfff383f9c869bcbdfc990c1e0ac3f72ccf 100644 Binary files a/pc-bios/edk2-riscv-code.fd.bz2 and b/pc-bios/edk2-riscv-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-code.fd.bz2 b/pc-bios/edk2-x86_64-code.fd.bz2 index 9b7767a3accfc09f81e5c4d67e3ad5e5aa50ae36..0d325bd48395064d1145f3e788620902835045f4 100644 Binary files a/pc-bios/edk2-x86_64-code.fd.bz2 and b/pc-bios/edk2-x86_64-code.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-microvm.fd.bz2 b/pc-bios/edk2-x86_64-microvm.fd.bz2 index 17460dd380971986d93d1929065d8e8fcdb5e6f9..829429082bf45028b489e447759e507eda770dc5 100644 Binary files a/pc-bios/edk2-x86_64-microvm.fd.bz2 and b/pc-bios/edk2-x86_64-microvm.fd.bz2 differ diff --git a/pc-bios/edk2-x86_64-secure-code.fd.bz2 b/pc-bios/edk2-x86_64-secure-code.fd.bz2 index fd0efeacbf9d15e456b527310ad0e751bc135987..c6b039819c9257fbfa7a60483b0536fc0f6abe09 100644 Binary files a/pc-bios/edk2-x86_64-secure-code.fd.bz2 and b/pc-bios/edk2-x86_64-secure-code.fd.bz2 differ diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img index ab715f0ecdafe01bfab60e2d8ac74eb7ea4cb12a..1b3a8418250db977e0c0c4ae7194ba7e0199c30d 100644 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index 9a2ba3f2a4d0e1e52c909cd71960410e118ef374..60ca1165c820ff29b984566db22b9453541da19a 100644 Binary files a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 5d4e812819ea3721f52612281e72981b07fa22b2..bae158d4577341eb3d752ca13cc66149961f8cb4 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/plugins/meson.build b/plugins/meson.build index 6b2d7a92926d586f0aee6ca1286412244439b0c5..51b4350c2a043f2940ff55d7ca087cd51c6026fa 100644 --- a/plugins/meson.build +++ b/plugins/meson.build @@ -1,7 +1,7 @@ plugin_ldflags = [] # Modules need more symbols than just those in plugins/qemu-plugins.symbols if not enable_modules - if targetos == 'darwin' + if host_os == 'darwin' configure_file( input: files('qemu-plugins.symbols'), output: 'qemu-plugins-ld64.symbols', @@ -14,7 +14,7 @@ if not enable_modules endif if get_option('plugins') - if targetos == 'windows' + if host_os == 'windows' dlltool = find_program('dlltool', required: true) # Generate a .lib file for plugins to link against. diff --git a/qapi/misc.json b/qapi/misc.json index cda2effa8155a8a6175036f678877ec2c7cdd1bf..3622d98d010a6195f78804f61fba7198c0e8b55f 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -134,7 +134,7 @@ ## # @stop: # -# Stop all guest VCPU execution. +# Stop guest VM execution. # # Since: 0.14 # @@ -143,6 +143,9 @@ # the guest remains paused once migration finishes, as if the -S # option was passed on the command line. 
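The @stop and @cont behaviour documented in this qapi/misc.json hunk (the new suspended-state notes continue just below) can be exercised with a few lines of QMP. A minimal sketch in Python, assuming the guest exposes a QMP socket at /tmp/qmp.sock; the socket path and the helper name are illustrative only, not part of QEMU:

import json
import socket

# Minimal QMP client: do the capabilities handshake, then stop and resume
# the VM, checking the run state in between.  Assumes QEMU was started with
# "-qmp unix:/tmp/qmp.sock,server=on,wait=off" (path chosen for this sketch).
def qmp(f, cmd, **args):
    req = {"execute": cmd}
    if args:
        req["arguments"] = args
    f.write(json.dumps(req) + "\n")
    f.flush()
    while True:
        msg = json.loads(f.readline())
        if "return" in msg or "error" in msg:
            return msg          # skip asynchronous events (STOP, RESUME, ...)

with socket.socket(socket.AF_UNIX) as s:
    s.connect("/tmp/qmp.sock")
    f = s.makefile("rw")
    json.loads(f.readline())            # server greeting
    qmp(f, "qmp_capabilities")
    qmp(f, "stop")                      # pauses the whole VM, not single vCPUs
    print(qmp(f, "query-status"))       # e.g. {'return': {'status': 'paused', ...}}
    qmp(f, "cont")                      # resumes, or goes back to "suspended" if it was suspended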
# +# In the "suspended" state, this command will completely stop the VM and +# cause a transition to the "paused" state. (Since 9.0) +# # Example: # # -> { "execute": "stop" } @@ -153,7 +156,7 @@ ## # @cont: # -# Resume guest VCPU execution. +# Resume guest VM execution. # # Since: 0.14 # @@ -165,6 +168,10 @@ # guest starts once migration finishes, removing the effect of the # -S command line option if it was passed. # +# If the VM was previously suspended, and has not been reset or woken, +# this command will transition back to the "suspended" state. +# (Since 9.0) +# # Example: # # -> { "execute": "cont" } diff --git a/qapi/qom.json b/qapi/qom.json index c53ef978ff7e6a81f8c926159fe52c0d349696ac..95516ba325e541e5eeb3e1f588474cdf63ad68a5 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -794,6 +794,23 @@ { 'struct': 'VfioUserServerProperties', 'data': { 'socket': 'SocketAddress', 'device': 'str' } } +## +# @IOMMUFDProperties: +# +# Properties for iommufd objects. +# +# @fd: file descriptor name previously passed via 'getfd' command, +# which represents a pre-opened /dev/iommu. This allows the +# iommufd object to be shared across several subsystems +# (VFIO, VDPA, ...), and the file descriptor to be shared +# with other processes, e.g. DPDK. (default: QEMU opens +# /dev/iommu by itself) +# +# Since: 9.0 +## +{ 'struct': 'IOMMUFDProperties', + 'data': { '*fd': 'str' } } + ## # @RngProperties: # @@ -934,6 +951,7 @@ 'input-barrier', { 'name': 'input-linux', 'if': 'CONFIG_LINUX' }, + 'iommufd', 'iothread', 'main-loop', { 'name': 'memory-backend-epc', @@ -1003,6 +1021,7 @@ 'input-barrier': 'InputBarrierProperties', 'input-linux': { 'type': 'InputLinuxProperties', 'if': 'CONFIG_LINUX' }, + 'iommufd': 'IOMMUFDProperties', 'iothread': 'IothreadProperties', 'main-loop': 'MainLoopProperties', 'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties', diff --git a/qapi/run-state.json b/qapi/run-state.json index f216ba54ec4c16146e0f77f316be61ab42fa7f9b..ca05502e0ad574e820b7dd70ce4be0c937a3575b 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -102,7 +102,7 @@ ## # @StatusInfo: # -# Information about VCPU run state +# Information about VM run state # # @running: true if all VCPUs are runnable, false if not runnable # @@ -130,9 +130,9 @@ ## # @query-status: # -# Query the run status of all VCPUs +# Query the run status of the VM # -# Returns: @StatusInfo reflecting all VCPUs +# Returns: @StatusInfo reflecting the VM # # Since: 0.14 # diff --git a/qapi/string-output-visitor.c b/qapi/string-output-visitor.c index c0cb72dbe4de88dcb473d40155a107339b62abe1..f0c1dea89e5ca0378336e5c56b9836b0d6e0fa78 100644 --- a/qapi/string-output-visitor.c +++ b/qapi/string-output-visitor.c @@ -292,6 +292,20 @@ static bool print_type_null(Visitor *v, const char *name, QNull **obj, return true; } +static bool start_struct(Visitor *v, const char *name, void **obj, + size_t size, Error **errp) +{ + return true; +} + +static void end_struct(Visitor *v, void **obj) +{ + StringOutputVisitor *sov = to_sov(v); + + /* TODO actually print struct fields */ + string_output_set(sov, g_strdup("")); +} + static bool start_list(Visitor *v, const char *name, GenericList **list, size_t size, Error **errp) @@ -379,6 +393,8 @@ Visitor *string_output_visitor_new(bool human, char **result) v->visitor.type_str = print_type_str; v->visitor.type_number = print_type_number; v->visitor.type_null = print_type_null; + v->visitor.start_struct = start_struct; + v->visitor.end_struct = end_struct; v->visitor.start_list = start_list; v->visitor.next_list = 
next_list; v->visitor.end_list = end_list; diff --git a/qapi/virtio.json b/qapi/virtio.json index e6dcee7b83d142f569b3424f0ae31f09000d25f9..19c7c36e364e4829cc436b1c27e7611f9d7d5c4d 100644 --- a/qapi/virtio.json +++ b/qapi/virtio.json @@ -928,3 +928,32 @@ 'data': { 'path': 'str', 'queue': 'uint16', '*index': 'uint16' }, 'returns': 'VirtioQueueElement', 'features': [ 'unstable' ] } + +## +# @IOThreadVirtQueueMapping: +# +# Describes the subset of virtqueues assigned to an IOThread. +# +# @iothread: the id of IOThread object +# +# @vqs: an optional array of virtqueue indices that will be handled by this +# IOThread. When absent, virtqueues are assigned round-robin across all +# IOThreadVirtQueueMappings provided. Either all IOThreadVirtQueueMappings +# must have @vqs or none of them must have it. +# +# Since: 9.0 +## + +{ 'struct': 'IOThreadVirtQueueMapping', + 'data': { 'iothread': 'str', '*vqs': ['uint16'] } } + +## +# @DummyVirtioForceArrays: +# +# Not used by QMP; hack to let us use IOThreadVirtQueueMappingList internally +# +# Since: 9.0 +## + +{ 'struct': 'DummyVirtioForceArrays', + 'data': { 'unused-iothread-vq-mapping': ['IOThreadVirtQueueMapping'] } } diff --git a/qemu-img.c b/qemu-img.c index 5a77f67719316a103255189bea89d3702a0e1eda..b59d52df9419768231f9a6cffdd8214889b3e28c 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -960,7 +960,6 @@ static int img_commit(int argc, char **argv) Error *local_err = NULL; CommonBlockJobCBInfo cbi; bool image_opts = false; - AioContext *aio_context; int64_t rate_limit = 0; fmt = NULL; @@ -1078,12 +1077,9 @@ static int img_commit(int argc, char **argv) .bs = bs, }; - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); commit_active_start("commit", bs, base_bs, JOB_DEFAULT, rate_limit, BLOCKDEV_ON_ERROR_REPORT, NULL, common_block_job_cb, &cbi, false, &local_err); - aio_context_release(aio_context); if (local_err) { goto done; } @@ -4373,9 +4369,9 @@ static int img_amend(int argc, char **argv) amend_opts = qemu_opts_append(amend_opts, bs->drv->amend_opts); opts = qemu_opts_create(amend_opts, NULL, 0, &error_abort); if (!qemu_opts_do_parse(opts, options, NULL, &err)) { + qemu_opts_del(opts); /* Try to parse options using the create options */ amend_opts = qemu_opts_append(amend_opts, bs->drv->create_opts); - qemu_opts_del(opts); opts = qemu_opts_create(amend_opts, NULL, 0, &error_abort); if (qemu_opts_do_parse(opts, options, NULL, NULL)) { error_append_hint(&err, diff --git a/qemu-io.c b/qemu-io.c index 050c70835f933134013e36d64528343213cfe144..6cb1e00385eac5a684fbc8efc82b4034c1275ee3 100644 --- a/qemu-io.c +++ b/qemu-io.c @@ -414,15 +414,7 @@ static void prep_fetchline(void *opaque) static int do_qemuio_command(const char *cmd) { - int ret; - AioContext *ctx = - qemuio_blk ? 
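For the IOThreadVirtQueueMapping struct added to qapi/virtio.json above: when @vqs is absent, the virtqueues are spread round-robin across the supplied IOThreads. A tiny illustration of that assignment rule (plain Python, not QEMU code; the IOThread names are made up):

# Round-robin assignment of virtqueue indices when no mapping lists
# an explicit "vqs" array (illustrative helper, not QEMU API).
def assign_round_robin(iothreads, num_vqs):
    mapping = {name: [] for name in iothreads}
    for vq in range(num_vqs):
        mapping[iothreads[vq % len(iothreads)]].append(vq)
    return mapping

# Two IOThreads, four virtqueues -> {'iot0': [0, 2], 'iot1': [1, 3]}
print(assign_round_robin(["iot0", "iot1"], 4))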
blk_get_aio_context(qemuio_blk) : qemu_get_aio_context(); - - aio_context_acquire(ctx); - ret = qemuio_command(qemuio_blk, cmd); - aio_context_release(ctx); - - return ret; + return qemuio_command(qemuio_blk, cmd); } static int command_loop(void) diff --git a/qemu-nbd.c b/qemu-nbd.c index 186e6468b1abf2fde0d911bececb1e60d9fd266a..bac0b5e3ecda1b3a7624dc7a5c9da4919003fce0 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -1123,9 +1123,7 @@ int main(int argc, char **argv) qdict_put_str(raw_opts, "file", bs->node_name); qdict_put_int(raw_opts, "offset", dev_offset); - aio_context_acquire(qemu_get_aio_context()); bs = bdrv_open(NULL, NULL, raw_opts, flags, &error_fatal); - aio_context_release(qemu_get_aio_context()); blk_remove_bs(blk); blk_insert_bs(blk, bs, &error_fatal); diff --git a/qemu-options.hx b/qemu-options.hx index 42fd09e4de96e962cd5873c49501f6e1dbb5e346..b66570ae006768cfe1263943c018c71df97dfe77 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2087,6 +2087,8 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #endif #if defined(CONFIG_COCOA) "-display cocoa[,full-grab=on|off][,swap-opt-cmd=on|off]\n" + " [,show-cursor=on|off][,left-command-key=on|off]\n" + " [,full-screen=on|off][,zoom-to-fit=on|off]\n" #endif #if defined(CONFIG_OPENGL) "-display egl-headless[,rendernode=]\n" @@ -2094,9 +2096,6 @@ DEF("display", HAS_ARG, QEMU_OPTION_display, #if defined(CONFIG_DBUS_DISPLAY) "-display dbus[,addr=]\n" " [,gl=on|core|es|off][,rendernode=]\n" -#endif -#if defined(CONFIG_COCOA) - "-display cocoa[,show-cursor=on|off][,left-command-key=on|off]\n" #endif "-display none\n" " select display backend type\n" @@ -2191,10 +2190,26 @@ SRST provides drop-down menus and other UI elements to configure and control the VM during runtime. Valid parameters are: + ``full-grab=on|off`` : Capture all key presses, including system combos. + This requires accessibility permissions, since it + performs a global grab on key events. + (default: off) See + https://support.apple.com/en-in/guide/mac-help/mh32356/mac + + ``swap-opt-cmd=on|off`` : Swap the Option and Command keys so that their + key codes match their position on non-Mac + keyboards and you can use Meta/Super and Alt + where you expect them. (default: off) + ``show-cursor=on|off`` : Force showing the mouse cursor ``left-command-key=on|off`` : Disable forwarding left command key to host + ``full-screen=on|off`` : Start in fullscreen mode + + ``zoom-to-fit=on|off`` : Expand video output to the window size, + defaults to "off" + ``egl-headless[,rendernode=]`` Offload all OpenGL operations to a local DRI device. For any graphical display, this display needs to be paired with either @@ -4086,9 +4101,13 @@ DEF("fw_cfg", HAS_ARG, QEMU_OPTION_fwcfg, SRST ``-fw_cfg [name=]name,file=file`` Add named fw\_cfg entry with contents from file file. + If the filename contains comma, you must double it (for instance, + "file=my,,file" to use file "my,file"). ``-fw_cfg [name=]name,string=str`` Add named fw\_cfg entry with contents from string str. + If the string contains comma, you must double it (for instance, + "string=my,,string" to use file "my,string"). The terminating NUL character of the contents of str will not be included as part of the fw\_cfg item data. To insert contents with @@ -5224,6 +5243,18 @@ SRST The ``share`` boolean option is on by default with memfd. + ``-object iommufd,id=id[,fd=fd]`` + Creates an iommufd backend which allows control of DMA mapping + through the ``/dev/iommu`` device. 
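The ``id`` and ``fd`` parameters of this new object are described in the next two paragraphs; a typical consumer is a vfio-pci device that names the backend. A hedged launch sketch in Python: the machine options, guest image, host PCI address and the ``iommufd=`` device property value are placeholders chosen for illustration, not taken verbatim from this patch:

import subprocess

# Start a guest whose vfio-pci device goes through the iommufd backend
# rather than the legacy VFIO container.  All paths and addresses below
# are placeholders.
qemu_argv = [
    "qemu-system-x86_64",
    "-machine", "q35,accel=kvm",
    "-m", "4G",
    "-object", "iommufd,id=iommufd0",
    "-device", "vfio-pci,host=0000:02:00.0,iommufd=iommufd0",
    "-drive", "file=guest.qcow2,format=qcow2,if=virtio",
]
subprocess.run(qemu_argv, check=True)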
+ + The ``id`` parameter is a unique ID which frontends (such as + vfio-pci or vdpa) will use to connect with the iommufd backend. + + The ``fd`` parameter is an optional pre-opened file descriptor + resulting from ``/dev/iommu`` opening. Usually the iommufd is shared + across all subsystems, bringing the benefit of centralized + reference counting. + ``-object rng-builtin,id=id`` Creates a random number generator backend which obtains entropy from QEMU builtin functions. The ``id`` parameter is a unique ID diff --git a/qga/meson.build b/qga/meson.build index ff7a8496e48f23c90b1f176644d13e261d3ffb70..1c3d2a3d1b7f17a50f1d2d9ac3871448a8cf8541 100644 --- a/qga/meson.build +++ b/qga/meson.build @@ -7,7 +7,7 @@ if not have_ga endif have_qga_vss = get_option('qga_vss') \ - .require(targetos == 'windows', + .require(host_os == 'windows', error_message: 'VSS support requires Windows') \ .require('cpp' in all_languages, error_message: 'VSS support requires a C++ compiler') \ @@ -67,29 +67,31 @@ qga_ss.add(files( 'main.c', 'cutils.c', )) -qga_ss.add(when: 'CONFIG_POSIX', if_true: files( - 'channel-posix.c', - 'commands-posix.c', - 'commands-posix-ssh.c', -)) -qga_ss.add(when: 'CONFIG_LINUX', if_true: files( - 'commands-linux.c', -)) -qga_ss.add(when: 'CONFIG_BSD', if_true: files( - 'commands-bsd.c', -)) -qga_ss.add(when: 'CONFIG_WIN32', if_true: files( - 'channel-win32.c', - 'commands-win32.c', - 'service-win32.c', - 'vss-win32.c' -)) +if host_os == 'windows' + qga_ss.add(files( + 'channel-win32.c', + 'commands-win32.c', + 'service-win32.c', + 'vss-win32.c' + )) +else + qga_ss.add(files( + 'channel-posix.c', + 'commands-posix.c', + 'commands-posix-ssh.c', + )) + if host_os == 'linux' + qga_ss.add(files('commands-linux.c')) + elif host_os in bsd_oses + qga_ss.add(files('commands-bsd.c')) + endif +endif -qga_ss = qga_ss.apply(config_targetos, strict: false) +qga_ss = qga_ss.apply({}) gen_tlb = [] qga_libs = [] -if targetos == 'windows' +if host_os == 'windows' qga_libs += ['-lws2_32', '-lwinmm', '-lpowrprof', '-lwtsapi32', '-lwininet', '-liphlpapi', '-lnetapi32', '-lsetupapi', '-lcfgmgr32'] if have_qga_vss @@ -99,7 +101,7 @@ if targetos == 'windows' endif qga_objs = [] -if targetos == 'windows' +if host_os == 'windows' windmc = find_program('windmc', required: true) windres = find_program('windres', required: true) @@ -121,7 +123,7 @@ qga = executable('qemu-ga', qga_ss.sources() + qga_objs, install: true) all_qga += qga -if targetos == 'windows' +if host_os == 'windows' qemu_ga_msi_arch = { 'x86': ['-D', 'Arch=32'], 'x86_64': ['-a', 'x64', '-D', 'Arch=64'] @@ -140,7 +142,7 @@ if targetos == 'windows' qemu_ga_msi_vss = ['-D', 'InstallVss'] deps += qga_vss endif - if glib.version() < '2.73.2' + if glib.version().version_compare('<2.73.2') libpcre = 'libpcre1' else libpcre = 'libpcre2' @@ -183,7 +185,7 @@ test_env.set('G_TEST_BUILDDIR', meson.current_build_dir()) # the leak detector in build-oss-fuzz Gitlab CI test. 
we should re-enable # this when an alternative is implemented or when the underlying glib # issue is identified/fix -#if targetos != 'windows' +#if host_os != 'windows' if false srcs = [files('commands-posix-ssh.c')] i = 0 diff --git a/qom/object.c b/qom/object.c index 95c0dc8285fe70ceec211d56c6de7327045cff05..654e1afaf2eb8e820459433783860a23e0b5713f 100644 --- a/qom/object.c +++ b/qom/object.c @@ -138,9 +138,50 @@ static TypeImpl *type_new(const TypeInfo *info) return ti; } +static bool type_name_is_valid(const char *name) +{ + const int slen = strlen(name); + int plen; + + g_assert(slen > 1); + + /* + * Ideally, the name should start with a letter - however, we've got + * too many names starting with a digit already, so allow digits here, + * too (except '0' which is not used yet) + */ + if (!g_ascii_isalnum(name[0]) || name[0] == '0') { + return false; + } + + plen = strspn(name, "abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789-_."); + + /* Allow some legacy names with '+' in it for compatibility reasons */ + if (name[plen] == '+') { + if (plen == 6 && g_str_has_prefix(name, "power")) { + /* Allow "power5+" and "power7+" CPU names*/ + return true; + } + if (plen >= 17 && g_str_has_prefix(name, "Sun-UltraSparc-I")) { + /* Allow "Sun-UltraSparc-IV+" and "Sun-UltraSparc-IIIi+" */ + return true; + } + } + + return plen == slen; +} + static TypeImpl *type_register_internal(const TypeInfo *info) { TypeImpl *ti; + + if (!type_name_is_valid(info->name)) { + fprintf(stderr, "Registering '%s' with illegal type name\n", info->name); + abort(); + } + ti = type_new(info); type_table_add(ti); diff --git a/replay/replay-char.c b/replay/replay-char.c index a31aded032e7b99b1f6ff067d434084b0a8d2a9c..72b1f832dde6446800211196c4034d6df7f8f75e 100644 --- a/replay/replay-char.c +++ b/replay/replay-char.c @@ -113,8 +113,7 @@ void replay_char_write_event_load(int *res, int *offset) *offset = replay_get_dword(); replay_finish_event(); } else { - error_report("Missing character write event in the replay log"); - exit(1); + replay_sync_error("Missing character write event in the replay log"); } } @@ -135,8 +134,7 @@ int replay_char_read_all_load(uint8_t *buf) replay_finish_event(); return res; } else { - error_report("Missing character read all event in the replay log"); - exit(1); + replay_sync_error("Missing character read all event in the replay log"); } } diff --git a/replay/replay-debugging.c b/replay/replay-debugging.c index 3e60549a4aebfad97123ce05bfd645fd58bf188d..82c66fff26207b7d8827960c711c70424f3d16db 100644 --- a/replay/replay-debugging.c +++ b/replay/replay-debugging.c @@ -144,7 +144,6 @@ static char *replay_find_nearest_snapshot(int64_t icount, char *ret = NULL; int rv; int nb_sns, i; - AioContext *aio_context; *snapshot_icount = -1; @@ -152,11 +151,8 @@ static char *replay_find_nearest_snapshot(int64_t icount, if (!bs) { goto fail; } - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); nb_sns = bdrv_snapshot_list(bs, &sn_tab); - aio_context_release(aio_context); for (i = 0; i < nb_sns; i++) { rv = bdrv_all_has_snapshot(sn_tab[i].name, false, NULL, NULL); diff --git a/replay/replay-internal.c b/replay/replay-internal.c index 77d0c82327ed5aeeee63f446bddc147d592af7eb..13fcbdd8f42f34fcbb5c6e58a322d861332b25e0 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -175,11 +175,12 @@ void replay_fetch_data_kind(void) if (replay_file) { if (!replay_state.has_unread_data) { replay_state.data_kind = replay_get_byte(); + 
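The type_name_is_valid() check added to qom/object.c above encodes the QOM type-name rules: an alphanumeric (non-'0') first character, a body made of letters, digits, '-', '_' and '.', plus a couple of legacy '+' CPU names. A Python transcription of the same check, handy for trying candidate names outside of a QEMU build (the function name is reused here purely for illustration):

import string

# Mirror of the new qom/object.c check: names that fail it now abort
# type registration.
def type_name_is_valid(name: str) -> bool:
    assert len(name) > 1
    if not (name[0].isascii() and name[0].isalnum()) or name[0] == "0":
        return False
    allowed = string.ascii_letters + string.digits + "-_."
    plen = 0
    while plen < len(name) and name[plen] in allowed:
        plen += 1
    if plen < len(name) and name[plen] == "+":
        if plen == 6 and name.startswith("power"):
            return True         # "power5+", "power7+"
        if plen >= 17 and name.startswith("Sun-UltraSparc-I"):
            return True         # "Sun-UltraSparc-IV+", "Sun-UltraSparc-IIIi+"
    return plen == len(name)

assert type_name_is_valid("virtio-net-pci")
assert type_name_is_valid("power7+")
assert not type_name_is_valid("bad name")   # spaces are rejected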
replay_state.current_event++; if (replay_state.data_kind == EVENT_INSTRUCTION) { replay_state.instruction_count = replay_get_dword(); } replay_check_error(); - replay_state.has_unread_data = 1; + replay_state.has_unread_data = true; if (replay_state.data_kind >= EVENT_COUNT) { error_report("Replay: unknown event kind %d", replay_state.data_kind); @@ -191,7 +192,7 @@ void replay_fetch_data_kind(void) void replay_finish_event(void) { - replay_state.has_unread_data = 0; + replay_state.has_unread_data = false; replay_fetch_data_kind(); } @@ -216,7 +217,7 @@ void replay_mutex_lock(void) { if (replay_mode != REPLAY_MODE_NONE) { unsigned long id; - g_assert(!qemu_mutex_iothread_locked()); + g_assert(!bql_locked()); g_assert(!replay_mutex_locked()); qemu_mutex_lock(&lock); id = mutex_tail++; diff --git a/replay/replay-internal.h b/replay/replay-internal.h index b6836354ac5317b1fc4d91c8505b1b8f5118baea..75249b7693612d2fd33ce6d37003d68928aac709 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -25,7 +25,12 @@ typedef enum ReplayAsyncEventKind { REPLAY_ASYNC_COUNT } ReplayAsyncEventKind; -/* Any changes to order/number of events will need to bump REPLAY_VERSION */ +/* + * Any changes to order/number of events will need to bump + * REPLAY_VERSION to prevent confusion with old logs. Also don't + * forget to update replay_event_name() to make your debugging life + * easier. + */ enum ReplayEvents { /* for instruction event */ EVENT_INSTRUCTION, @@ -63,26 +68,33 @@ enum ReplayEvents { EVENT_COUNT }; +/** + * typedef ReplayState - global tracking Replay state + * + * This structure tracks where we are in the current ReplayState + * including the logged events from the recorded replay stream. Some + * of the data is also stored/restored from VMStateDescription when VM + * save/restore events take place. + * + * @cached_clock: Cached clocks values + * @current_icount: number of processed instructions + * @instruction_count: number of instructions until next event + * @current_event: current event index + * @data_kind: current event + * @has_unread_data: true if event not yet processed + * @file_offset: offset into replay log at replay snapshot + * @block_request_id: current serialised block request id + * @read_event_id: current async read event id + */ typedef struct ReplayState { - /*! Cached clock values. */ int64_t cached_clock[REPLAY_CLOCK_COUNT]; - /*! Current icount - number of processed instructions. */ uint64_t current_icount; - /*! Number of instructions to be executed before other events happen. */ int instruction_count; - /*! Type of the currently executed event. */ + unsigned int current_event; unsigned int data_kind; - /*! Flag which indicates that event is not processed yet. */ - unsigned int has_unread_data; - /*! Temporary variable for saving current log offset. */ + bool has_unread_data; uint64_t file_offset; - /*! Next block operation id. - This counter is global, because requests from different - block devices should not get overlapping ids. */ uint64_t block_request_id; - /*! Prior value of the host clock */ - uint64_t host_clock_last; - /*! Asynchronous event id read from the log */ uint64_t read_event_id; } ReplayState; extern ReplayState replay_state; @@ -183,6 +195,16 @@ void replay_event_net_save(void *opaque); /*! Reads network from the file. 
*/ void *replay_event_net_load(void); +/* Diagnostics */ + +/** + * replay_sync_error(): report sync error and exit + * + * When we reach an error condition we want to report it centrally so + * we can also dump some useful information into the logs. + */ +G_NORETURN void replay_sync_error(const char *error); + /* VMState-related functions */ /* Registers replay VMState. diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c index 10a7cf79927ecbb5dcb5a8d7a51cc6d218c3b1a4..ccb4d89dda748b99107c8730b5e855e2371ff6af 100644 --- a/replay/replay-snapshot.c +++ b/replay/replay-snapshot.c @@ -47,16 +47,17 @@ static int replay_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_replay = { .name = "replay", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .pre_save = replay_pre_save, .post_load = replay_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64_ARRAY(cached_clock, ReplayState, REPLAY_CLOCK_COUNT), VMSTATE_UINT64(current_icount, ReplayState), VMSTATE_INT32(instruction_count, ReplayState), + VMSTATE_UINT32(current_event, ReplayState), VMSTATE_UINT32(data_kind, ReplayState), - VMSTATE_UINT32(has_unread_data, ReplayState), + VMSTATE_BOOL(has_unread_data, ReplayState), VMSTATE_UINT64(file_offset, ReplayState), VMSTATE_UINT64(block_request_id, ReplayState), VMSTATE_UINT64(read_event_id, ReplayState), diff --git a/replay/replay.c b/replay/replay.c index 0f7d766efe819b021403660f77d6eac347c71712..3fd241a4fccc229291b794f4f6d26be02bb332d9 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -38,6 +38,107 @@ static GSList *replay_blockers; uint64_t replay_break_icount = -1ULL; QEMUTimer *replay_break_timer; +/* Pretty print event names */ + +static const char *replay_async_event_name(ReplayAsyncEventKind event) +{ + switch (event) { +#define ASYNC_EVENT(_x) case REPLAY_ASYNC_EVENT_ ## _x: return "ASYNC_EVENT_"#_x + ASYNC_EVENT(BH); + ASYNC_EVENT(BH_ONESHOT); + ASYNC_EVENT(INPUT); + ASYNC_EVENT(INPUT_SYNC); + ASYNC_EVENT(CHAR_READ); + ASYNC_EVENT(BLOCK); + ASYNC_EVENT(NET); +#undef ASYNC_EVENT + default: + g_assert_not_reached(); + } +} + +static const char *replay_clock_event_name(ReplayClockKind clock) +{ + switch (clock) { +#define CLOCK_EVENT(_x) case REPLAY_CLOCK_ ## _x: return "CLOCK_" #_x + CLOCK_EVENT(HOST); + CLOCK_EVENT(VIRTUAL_RT); +#undef CLOCK_EVENT + default: + g_assert_not_reached(); + } +} + +/* Pretty print shutdown event names */ +static const char *replay_shutdown_event_name(ShutdownCause cause) +{ + switch (cause) { +#define SHUTDOWN_EVENT(_x) case SHUTDOWN_CAUSE_ ## _x: return "SHUTDOWN_CAUSE_" #_x + SHUTDOWN_EVENT(NONE); + SHUTDOWN_EVENT(HOST_ERROR); + SHUTDOWN_EVENT(HOST_QMP_QUIT); + SHUTDOWN_EVENT(HOST_QMP_SYSTEM_RESET); + SHUTDOWN_EVENT(HOST_SIGNAL); + SHUTDOWN_EVENT(HOST_UI); + SHUTDOWN_EVENT(GUEST_SHUTDOWN); + SHUTDOWN_EVENT(GUEST_RESET); + SHUTDOWN_EVENT(GUEST_PANIC); + SHUTDOWN_EVENT(SUBSYSTEM_RESET); + SHUTDOWN_EVENT(SNAPSHOT_LOAD); +#undef SHUTDOWN_EVENT + default: + g_assert_not_reached(); + } +} + +static const char *replay_checkpoint_event_name(enum ReplayCheckpoint checkpoint) +{ + switch (checkpoint) { +#define CHECKPOINT_EVENT(_x) case CHECKPOINT_ ## _x: return "CHECKPOINT_" #_x + CHECKPOINT_EVENT(CLOCK_WARP_START); + CHECKPOINT_EVENT(CLOCK_WARP_ACCOUNT); + CHECKPOINT_EVENT(RESET_REQUESTED); + CHECKPOINT_EVENT(SUSPEND_REQUESTED); + CHECKPOINT_EVENT(CLOCK_VIRTUAL); + CHECKPOINT_EVENT(CLOCK_HOST); + CHECKPOINT_EVENT(CLOCK_VIRTUAL_RT); + 
CHECKPOINT_EVENT(INIT); + CHECKPOINT_EVENT(RESET); +#undef CHECKPOINT_EVENT + default: + g_assert_not_reached(); + } +} + +static const char *replay_event_name(enum ReplayEvents event) +{ + /* First deal with the simple ones */ + switch (event) { +#define EVENT(_x) case EVENT_ ## _x: return "EVENT_"#_x + EVENT(INSTRUCTION); + EVENT(INTERRUPT); + EVENT(EXCEPTION); + EVENT(CHAR_WRITE); + EVENT(CHAR_READ_ALL); + EVENT(AUDIO_OUT); + EVENT(AUDIO_IN); + EVENT(RANDOM); +#undef EVENT + default: + if (event >= EVENT_ASYNC && event <= EVENT_ASYNC_LAST) { + return replay_async_event_name(event - EVENT_ASYNC); + } else if (event >= EVENT_SHUTDOWN && event <= EVENT_SHUTDOWN_LAST) { + return replay_shutdown_event_name(event - EVENT_SHUTDOWN); + } else if (event >= EVENT_CLOCK && event <= EVENT_CLOCK_LAST) { + return replay_clock_event_name(event - EVENT_CLOCK); + } else if (event >= EVENT_CHECKPOINT && event <= EVENT_CHECKPOINT_LAST) { + return replay_checkpoint_event_name(event - EVENT_CHECKPOINT); + } + } + + g_assert_not_reached(); +} + bool replay_next_event_is(int event) { bool res = false; @@ -226,6 +327,15 @@ bool replay_has_event(void) return res; } +G_NORETURN void replay_sync_error(const char *error) +{ + error_report("%s (insn total %"PRId64"/%d left, event %d is %s)", error, + replay_state.current_icount, replay_state.instruction_count, + replay_state.current_event, + replay_event_name(replay_state.data_kind)); + abort(); +} + static void replay_enable(const char *fname, int mode) { const char *fmode = NULL; @@ -258,6 +368,7 @@ static void replay_enable(const char *fname, int mode) replay_state.data_kind = -1; replay_state.instruction_count = 0; replay_state.current_icount = 0; + replay_state.current_event = 0; replay_state.has_unread_data = 0; /* skip file header for RECORD and check it for PLAY */ @@ -338,6 +449,27 @@ void replay_start(void) replay_enable_events(); } +/* + * For none/record the answer is yes. + */ +bool replay_can_wait(void) +{ + if (replay_mode == REPLAY_MODE_PLAY) { + /* + * For playback we shouldn't ever be at a point we wait. If + * the instruction count has reached zero and we have an + * unconsumed event we should go around again and consume it. 
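replay_event_name() above resolves an event id by checking the simple events first and then folding the ranged groups (async, shutdown, clock, checkpoint) back onto their base value. The same range-folding idea in a few lines of Python; the numeric ids below are made-up placeholders, the real values come from enum ReplayEvents:

# Table-driven event naming in the spirit of replay_event_name().
# Ids here are placeholders, not the real ReplayEvents values.
SIMPLE = {0: "EVENT_INSTRUCTION", 1: "EVENT_INTERRUPT", 2: "EVENT_EXCEPTION"}
RANGES = [
    (3, 9, "ASYNC_EVENT", ["BH", "BH_ONESHOT", "INPUT", "INPUT_SYNC",
                           "CHAR_READ", "BLOCK", "NET"]),
    (28, 29, "CLOCK", ["HOST", "VIRTUAL_RT"]),
]

def event_name(event):
    if event in SIMPLE:
        return SIMPLE[event]
    for base, last, prefix, names in RANGES:
        if base <= event <= last:
            return prefix + "_" + names[event - base]
    raise ValueError("unknown replay event %d" % event)

print(event_name(4))    # ASYNC_EVENT_BH_ONESHOT (with these placeholder ids)
print(event_name(29))   # CLOCK_VIRTUAL_RT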
+ */ + if (replay_state.instruction_count == 0 && replay_state.has_unread_data) { + return false; + } else { + replay_sync_error("Playback shouldn't have to iowait"); + } + } + return true; +} + + void replay_finish(void) { if (replay_mode == REPLAY_MODE_NONE) { diff --git a/roms/edk2 b/roms/edk2 index 819cfc6b42a68790a23509e4fcc58ceb70e1965e..b8a3eec88cc74bbfe7fb389d026cc7d1d8a989c8 160000 --- a/roms/edk2 +++ b/roms/edk2 @@ -1 +1 @@ -Subproject commit 819cfc6b42a68790a23509e4fcc58ceb70e1965e +Subproject commit b8a3eec88cc74bbfe7fb389d026cc7d1d8a989c8 diff --git a/roms/edk2-build.config b/roms/edk2-build.config index bab6a9caebfa354e5f589aad3406f9e53e9e439a..0d367dbdb7755a6644e46cf358a1dd01cd40df0b 100644 --- a/roms/edk2-build.config +++ b/roms/edk2-build.config @@ -22,9 +22,15 @@ SMM_REQUIRE = TRUE [opts.armvirt.silent] DEBUG_PRINT_ERROR_LEVEL = 0x80000000 -[pcds.nx.broken.grub] +[pcds.nx.strict] +PcdDxeNxMemoryProtectionPolicy = 0xC000000000007FD5 +PcdUninstallMemAttrProtocol = FALSE + +[pcds.nx.broken.shim.grub] # grub.efi uses EfiLoaderData for code PcdDxeNxMemoryProtectionPolicy = 0xC000000000007FD1 +# shim.efi has broken MemAttr code +PcdUninstallMemAttrProtocol = TRUE [pcds.workaround.202308] PcdFirstTimeWakeUpAPsBySipi = FALSE @@ -95,7 +101,7 @@ conf = ArmVirtPkg/ArmVirtQemu.dsc arch = ARM opts = common armvirt.silent -pcds = nx.broken.grub +pcds = nx.broken.shim.grub plat = ArmVirtQemu-ARM dest = ../pc-bios cpy1 = FV/QEMU_EFI.fd edk2-arm-code.fd @@ -112,7 +118,7 @@ conf = ArmVirtPkg/ArmVirtQemu.dsc arch = AARCH64 opts = common armvirt.silent -pcds = nx.broken.grub +pcds = nx.broken.shim.grub plat = ArmVirtQemu-AARCH64 dest = ../pc-bios cpy1 = FV/QEMU_EFI.fd edk2-aarch64-code.fd diff --git a/roms/opensbi b/roms/opensbi index 057eb10b6d523540012e6947d5c9f63e95244e94..a2b255b88918715173942f2c5e1f97ac9e90c877 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 057eb10b6d523540012e6947d5c9f63e95244e94 +Subproject commit a2b255b88918715173942f2c5e1f97ac9e90c877 diff --git a/roms/seabios-hppa b/roms/seabios-hppa index 4c6ecda618f2066707f50c53f31419244fd7f77a..e4eac85880e8677f96d8b9e94de9f2eec9c0751f 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit 4c6ecda618f2066707f50c53f31419244fd7f77a +Subproject commit e4eac85880e8677f96d8b9e94de9f2eec9c0751f diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py index de506cb8bf17a01bc8ba00af53ac26daa3187a2e..a39dfb8766b8af9392655c865980a106d4cc525a 100755 --- a/scripts/analyze-migration.py +++ b/scripts/analyze-migration.py @@ -263,6 +263,34 @@ def getDict(self): return "" +class S390StorageAttributes(object): + STATTR_FLAG_EOS = 0x01 + STATTR_FLAG_MORE = 0x02 + STATTR_FLAG_ERROR = 0x04 + STATTR_FLAG_DONE = 0x08 + + def __init__(self, file, version_id, device, section_key): + if version_id != 0: + raise Exception("Unknown storage_attributes version %d" % version_id) + + self.file = file + self.section_key = section_key + + def read(self): + while True: + addr_flags = self.file.read64() + flags = addr_flags & 0xfff + if (flags & (self.STATTR_FLAG_DONE | self.STATTR_FLAG_EOS)): + return + if (flags & self.STATTR_FLAG_ERROR): + raise Exception("Error in migration stream") + count = self.file.read64() + self.file.readvar(count) + + def getDict(self): + return "" + + class ConfigurationSection(object): def __init__(self, file, desc): self.file = file @@ -544,8 +572,11 @@ class MigrationDump(object): QEMU_VM_SECTION_FOOTER= 0x7e def __init__(self, filename): - 
self.section_classes = { ( 'ram', 0 ) : [ RamSection, None ], - ( 'spapr/htab', 0) : ( HTABSection, None ) } + self.section_classes = { + ( 'ram', 0 ) : [ RamSection, None ], + ( 's390-storage_attributes', 0 ) : [ S390StorageAttributes, None], + ( 'spapr/htab', 0) : ( HTABSection, None ) + } self.filename = filename self.vmsd_desc = None diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py index a38e5833fb3362776500f92c277ac860d6499253..dbbde99e39eb7a0189e01bae6c0b5d1bc903efe5 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -92,8 +92,6 @@ def __init__(self, wrapper_type: str, return_type: str, name: str, f"{self.name}") self.target_name = f'{subsystem}_{subname}' - self.ctx = self.gen_ctx() - self.get_result = 's->ret = ' self.ret = 'return s.ret;' self.co_ret = 'return ' @@ -167,7 +165,7 @@ def create_mixed_wrapper(func: FuncDecl) -> str: {func.co_ret}{name}({ func.gen_list('{name}') }); }} else {{ {struct_name} s = {{ - .poll_state.ctx = {func.ctx}, + .poll_state.ctx = qemu_get_current_aio_context(), .poll_state.in_progress = true, { func.gen_block(' .{name} = {name},') } @@ -191,7 +189,7 @@ def create_co_wrapper(func: FuncDecl) -> str: {func.return_type} {func.name}({ func.gen_list('{decl}') }) {{ {struct_name} s = {{ - .poll_state.ctx = {func.ctx}, + .poll_state.ctx = qemu_get_current_aio_context(), .poll_state.in_progress = true, { func.gen_block(' .{name} = {name},') } @@ -261,8 +259,8 @@ def gen_no_co_wrapper(func: FuncDecl) -> str: graph_lock=' bdrv_graph_rdlock_main_loop();' graph_unlock=' bdrv_graph_rdunlock_main_loop();' elif func.graph_wrlock: - graph_lock=' bdrv_graph_wrlock(NULL);' - graph_unlock=' bdrv_graph_wrunlock(NULL);' + graph_lock=' bdrv_graph_wrlock();' + graph_unlock=' bdrv_graph_wrunlock();' return f"""\ /* @@ -278,12 +276,9 @@ def gen_no_co_wrapper(func: FuncDecl) -> str: static void {name}_bh(void *opaque) {{ {struct_name} *s = opaque; - AioContext *ctx = {func.gen_ctx('s->')}; {graph_lock} - aio_context_acquire(ctx); {func.get_result}{name}({ func.gen_list('s->{name}') }); - aio_context_release(ctx); {graph_unlock} aio_co_wake(s->co); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6e4100d2a41c8cef5d5955910977a9d1222966d9..70268950741278cf86d69b163f85cfe1577b5745 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -35,6 +35,9 @@ my $summary_file = 0; my $root; my %debug; my $help = 0; +my $codespell = 0; +my $codespellfile = "/usr/share/codespell/dictionary.txt"; +my $user_codespellfile = ""; sub help { my ($exitcode) = @_; @@ -66,6 +69,9 @@ Options: is all off) --test-only=WORD report only warnings/errors containing WORD literally + --codespell Use the codespell dictionary for spelling/typos + (default: $codespellfile) + --codespellfile Use this codespell dictionary --color[=WHEN] Use colors 'always', 'never', or only when output is a terminal ('auto'). Default is 'auto'. -h, --help, --version display this help and exit @@ -85,28 +91,50 @@ foreach (@ARGV) { } GetOptions( - 'q|quiet+' => \$quiet, - 'tree!' => \$tree, - 'signoff!' => \$chk_signoff, - 'patch!' => \$chk_patch, - 'branch!' => \$chk_branch, - 'emacs!' => \$emacs, - 'terse!' => \$terse, - 'f|file!' => \$file, - 'strict!' => \$no_warnings, - 'root=s' => \$root, - 'summary!' => \$summary, - 'mailback!' => \$mailback, - 'summary-file!' 
=> \$summary_file, - - 'debug=s' => \%debug, - 'test-only=s' => \$tst_only, - 'color=s' => \$color, - 'no-color' => sub { $color = 'never'; }, - 'h|help' => \$help, - 'version' => \$help + 'q|quiet+' => \$quiet, + 'tree!' => \$tree, + 'signoff!' => \$chk_signoff, + 'patch!' => \$chk_patch, + 'branch!' => \$chk_branch, + 'emacs!' => \$emacs, + 'terse!' => \$terse, + 'f|file!' => \$file, + 'strict!' => \$no_warnings, + 'root=s' => \$root, + 'summary!' => \$summary, + 'mailback!' => \$mailback, + 'summary-file!' => \$summary_file, + 'debug=s' => \%debug, + 'test-only=s' => \$tst_only, + 'codespell!' => \$codespell, + 'codespellfile=s' => \$user_codespellfile, + 'color=s' => \$color, + 'no-color' => sub { $color = 'never'; }, + 'h|help' => \$help, + 'version' => \$help ) or help(1); +if ($user_codespellfile) { + # Use the user provided codespell file unconditionally + $codespellfile = $user_codespellfile; +} elsif (!(-f $codespellfile)) { + # If /usr/share/codespell/dictionary.txt is not present, try to find it + # under codespell's install directory: /data/dictionary.txt + if (($codespell || $help) && which("python3") ne "") { + my $python_codespell_dict = << "EOF"; + +import os.path as op +import codespell_lib +codespell_dir = op.dirname(codespell_lib.__file__) +codespell_file = op.join(codespell_dir, 'data', 'dictionary.txt') +print(codespell_file, end='') +EOF + + my $codespell_dict = `python3 -c "$python_codespell_dict" 2> /dev/null`; + $codespellfile = $codespell_dict if (-f $codespell_dict); + } +} + help(0) if ($help); my $exit = 0; @@ -337,6 +365,36 @@ our @typeList = ( qr{guintptr}, ); +# Load common spelling mistakes and build regular expression list. +my $misspellings; +my %spelling_fix; + +if ($codespell) { + if (open(my $spelling, '<', $codespellfile)) { + while (<$spelling>) { + my $line = $_; + + $line =~ s/\s*\n?$//g; + $line =~ s/^\s*//g; + + next if ($line =~ m/^\s*#/); + next if ($line =~ m/^\s*$/); + next if ($line =~ m/, disabled/i); + + $line =~ s/,.*$//; + + my ($suspect, $fix) = split(/->/, $line); + + $spelling_fix{$suspect} = $fix; + } + close($spelling); + } else { + warn "No codespell typos will be found - file '$codespellfile': $!\n"; + } +} + +$misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix; + # This can be modified by sub possible. Since it can be empty, be careful # about regexes that always match, because they can cause infinite loops. our @modifierList = ( @@ -477,6 +535,18 @@ sub top_of_kernel_tree { return 1; } +sub which { + my ($bin) = @_; + + foreach my $path (split(/:/, $ENV{PATH})) { + if (-e "$path/$bin") { + return "$path/$bin"; + } + } + + return ""; +} + sub expand_tabs { my ($str) = @_; @@ -1585,6 +1655,21 @@ sub process { WARN("8-bit UTF-8 used in possible commit log\n" . $herecurr); } +# Check for various typo / spelling mistakes + if (defined($misspellings) && + ($in_commit_log || $line =~ /^(?:\+|Subject:)/i)) { + while ($rawline =~ /(?:^|[^\w\-'`])($misspellings)(?:[^\w\-'`]|$)/gi) { + my $typo = $1; + my $blank = copy_spacing($rawline); + my $ptr = substr($blank, 0, $-[1]) . "^" x length($typo); + my $hereptr = "$hereline$ptr\n"; + my $typo_fix = $spelling_fix{lc($typo)}; + $typo_fix = ucfirst($typo_fix) if ($typo =~ /^[A-Z]/); + $typo_fix = uc($typo_fix) if ($typo =~ /^[A-Z]+$/); + WARN("'$typo' may be misspelled - perhaps '$typo_fix'?\n" . 
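The codespell support added to checkpatch.pl here (the warning statement completes just below) boils down to parsing "typo->fix" lines from the dictionary and matching them as one big case-insensitive alternation. A rough Python equivalent for checking a blob of text by hand; the dictionary path is the same default the script assumes:

import re

# Parse a codespell dictionary the way the checkpatch.pl hunk does:
# skip comments, blanks and ", disabled" entries, keep the first fix.
def load_codespell(path="/usr/share/codespell/dictionary.txt"):
    fixes = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#") or ", disabled" in line.lower():
                continue
            typo, fix = line.split("->", 1)
            fixes[typo] = fix.split(",")[0]
    return fixes

def report_typos(text, fixes):
    alternation = "|".join(map(re.escape, sorted(fixes)))
    pattern = re.compile(r"(?:^|[^\w\-'`])(" + alternation + r")(?:[^\w\-'`]|$)",
                         re.IGNORECASE)
    for m in pattern.finditer(text):
        typo = m.group(1)
        fix = fixes.get(typo.lower(), typo)
        print("'%s' may be misspelled - perhaps '%s'?" % (typo, fix))

# report_typos("we recieve the packet", load_codespell())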
$hereptr); + } + } + # ignore non-hunk lines and lines being removed next if (!$hunk_line || $line =~ /^-/); diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py index 179dd54871824ffc08f9c0c16a8179e687fd01e9..eb01a05ddbd97f668b0a6b3ed0289e91ca4385d1 100644 --- a/scripts/mtest2make.py +++ b/scripts/mtest2make.py @@ -27,7 +27,8 @@ def names(self, base): .speed.slow = $(foreach s,$(sort $(filter-out %-thorough, $1)), --suite $s) .speed.thorough = $(foreach s,$(sort $1), --suite $s) -.mtestargs = --no-rebuild -t 0 +TIMEOUT_MULTIPLIER = 1 +.mtestargs = --no-rebuild -t $(TIMEOUT_MULTIPLIER) ifneq ($(SPEED), quick) .mtestargs += --setup $(SPEED) endif diff --git a/scripts/replay-dump.py b/scripts/replay-dump.py index b89dc29555a3480ab5afdfbb22550987d56dbb19..d668193e793bb6bd5b35dbd10cd575dc5de9c46f 100755 --- a/scripts/replay-dump.py +++ b/scripts/replay-dump.py @@ -21,6 +21,7 @@ import argparse import struct from collections import namedtuple +from os import path # This mirrors some of the global replay state which some of the # stream loading refers to. Some decoders may read the next event so @@ -82,6 +83,12 @@ def read_qword(fin): "Read a 64 bit word" return struct.unpack('>Q', fin.read(8))[0] +def read_array(fin): + "Read a sized array" + size = read_dword(fin) + data = fin.read(size) + return data + # Generic decoder structure Decoder = namedtuple("Decoder", "eid name fn") @@ -115,6 +122,11 @@ def decode_unimp(eid, name, _unused_dumpfile): print("%s not handled - will now stop" % (name)) return False +def decode_plain(eid, name, _unused_dumpfile): + "Plain events without additional data" + print_event(eid, name, "no data") + return True + # Checkpoint decoder def swallow_async_qword(eid, name, dumpfile): "Swallow a qword of data without looking at it" @@ -145,10 +157,19 @@ def decode_async(eid, name, dumpfile): return call_decode(async_decode_table, async_event_kind, dumpfile) +total_insns = 0 def decode_instruction(eid, name, dumpfile): + global total_insns ins_diff = read_dword(dumpfile) - print_event(eid, name, "0x%x" % (ins_diff)) + total_insns += ins_diff + print_event(eid, name, "+ %d -> %d" % (ins_diff, total_insns)) + return True + +def decode_char_write(eid, name, dumpfile): + res = read_dword(dumpfile) + offset = read_dword(dumpfile) + print_event(eid, name, "%d -> %d" % (offset, res)) return True def decode_audio_out(eid, name, dumpfile): @@ -189,14 +210,19 @@ def decode_clock(eid, name, dumpfile): print_event(eid, name, "0x%x" % (clock_data)) return True +def decode_random(eid, name, dumpfile): + ret = read_dword(dumpfile) + data = read_array(dumpfile) + print_event(eid, "%d bytes of random data" % len(data)) + return True # pre-MTTCG merge v5_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), - Decoder(2, "EVENT_EXCEPTION", decode_unimp), + Decoder(2, "EVENT_EXCEPTION", decode_plain), Decoder(3, "EVENT_ASYNC", decode_async), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), - Decoder(5, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(8, "EVENT_CLOCK_HOST", decode_clock), @@ -215,10 +241,10 @@ def decode_clock(eid, name, dumpfile): # post-MTTCG merge, AUDIO support added v6_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), Decoder(1, "EVENT_INTERRUPT", decode_interrupt), - Decoder(2, "EVENT_EXCEPTION", decode_unimp), + Decoder(2, 
"EVENT_EXCEPTION", decode_plain), Decoder(3, "EVENT_ASYNC", decode_async), Decoder(4, "EVENT_SHUTDOWN", decode_unimp), - Decoder(5, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(5, "EVENT_CHAR_WRITE", decode_char_write), Decoder(6, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(7, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(8, "EVENT_AUDIO_OUT", decode_audio_out), @@ -250,7 +276,7 @@ def decode_clock(eid, name, dumpfile): Decoder(10, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp), Decoder(11, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp), Decoder(12, "EVENT_SHUTDOWN___MAX", decode_unimp), - Decoder(13, "EVENT_CHAR_WRITE", decode_unimp), + Decoder(13, "EVENT_CHAR_WRITE", decode_char_write), Decoder(14, "EVENT_CHAR_READ_ALL", decode_unimp), Decoder(15, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), Decoder(16, "EVENT_AUDIO_OUT", decode_audio_out), @@ -268,6 +294,48 @@ def decode_clock(eid, name, dumpfile): Decoder(28, "EVENT_CP_RESET", decode_checkpoint), ] +v12_event_table = [Decoder(0, "EVENT_INSTRUCTION", decode_instruction), + Decoder(1, "EVENT_INTERRUPT", decode_interrupt), + Decoder(2, "EVENT_EXCEPTION", decode_plain), + Decoder(3, "EVENT_ASYNC", decode_async), + Decoder(4, "EVENT_ASYNC", decode_async), + Decoder(5, "EVENT_ASYNC", decode_async), + Decoder(6, "EVENT_ASYNC", decode_async), + Decoder(6, "EVENT_ASYNC", decode_async), + Decoder(8, "EVENT_ASYNC", decode_async), + Decoder(9, "EVENT_ASYNC", decode_async), + Decoder(10, "EVENT_ASYNC", decode_async), + Decoder(11, "EVENT_SHUTDOWN", decode_unimp), + Decoder(12, "EVENT_SHUTDOWN_HOST_ERR", decode_unimp), + Decoder(13, "EVENT_SHUTDOWN_HOST_QMP_QUIT", decode_unimp), + Decoder(14, "EVENT_SHUTDOWN_HOST_QMP_RESET", decode_unimp), + Decoder(14, "EVENT_SHUTDOWN_HOST_SIGNAL", decode_unimp), + Decoder(15, "EVENT_SHUTDOWN_HOST_UI", decode_unimp), + Decoder(16, "EVENT_SHUTDOWN_GUEST_SHUTDOWN", decode_unimp), + Decoder(17, "EVENT_SHUTDOWN_GUEST_RESET", decode_unimp), + Decoder(18, "EVENT_SHUTDOWN_GUEST_PANIC", decode_unimp), + Decoder(19, "EVENT_SHUTDOWN_GUEST_SUBSYSTEM_RESET", decode_unimp), + Decoder(20, "EVENT_SHUTDOWN_GUEST_SNAPSHOT_LOAD", decode_unimp), + Decoder(21, "EVENT_SHUTDOWN___MAX", decode_unimp), + Decoder(22, "EVENT_CHAR_WRITE", decode_char_write), + Decoder(23, "EVENT_CHAR_READ_ALL", decode_unimp), + Decoder(24, "EVENT_CHAR_READ_ALL_ERROR", decode_unimp), + Decoder(25, "EVENT_AUDIO_IN", decode_unimp), + Decoder(26, "EVENT_AUDIO_OUT", decode_audio_out), + Decoder(27, "EVENT_RANDOM", decode_random), + Decoder(28, "EVENT_CLOCK_HOST", decode_clock), + Decoder(29, "EVENT_CLOCK_VIRTUAL_RT", decode_clock), + Decoder(30, "EVENT_CP_CLOCK_WARP_START", decode_checkpoint), + Decoder(31, "EVENT_CP_CLOCK_WARP_ACCOUNT", decode_checkpoint), + Decoder(32, "EVENT_CP_RESET_REQUESTED", decode_checkpoint), + Decoder(33, "EVENT_CP_SUSPEND_REQUESTED", decode_checkpoint), + Decoder(34, "EVENT_CP_CLOCK_VIRTUAL", decode_checkpoint), + Decoder(35, "EVENT_CP_CLOCK_HOST", decode_checkpoint), + Decoder(36, "EVENT_CP_CLOCK_VIRTUAL_RT", decode_checkpoint), + Decoder(37, "EVENT_CP_INIT", decode_checkpoint_init), + Decoder(38, "EVENT_CP_RESET", decode_checkpoint), +] + def parse_arguments(): "Grab arguments for script" parser = argparse.ArgumentParser() @@ -278,14 +346,18 @@ def parse_arguments(): def decode_file(filename): "Decode a record/replay dump" dumpfile = open(filename, "rb") - + dumpsize = path.getsize(filename) # read and throwaway the header version = read_dword(dumpfile) junk = read_qword(dumpfile) + # see REPLAY_VERSION print("HEADER: version 0x%x" 
% (version)) - if version == 0xe02007: + if version == 0xe0200c: + event_decode_table = v12_event_table + replay_state.checkpoint_start = 30 + elif version == 0xe02007: event_decode_table = v7_event_table replay_state.checkpoint_start = 12 elif version == 0xe02006: @@ -299,8 +371,13 @@ def decode_file(filename): decode_ok = True while decode_ok: event = read_event(dumpfile) - decode_ok = call_decode(event_decode_table, event, dumpfile) + decode_ok = call_decode(event_decode_table, event, + dumpfile) + except Exception as inst: + print(f"error {inst}") + finally: + print(f"Reached {dumpfile.tell()} of {dumpsize} bytes") dumpfile.close() if __name__ == "__main__": diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index 34295c0fe55b72bbf4db013c4e96262fa1ebfeac..a0006eec6fd1661cd6abc389bdd5cc45e259ebfb 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -156,6 +156,9 @@ for arch in $ARCHLIST; do cp_portable "$tmpdir/bootparam.h" \ "$output/include/standard-headers/asm-$arch" fi + if [ $arch = riscv ]; then + cp "$tmpdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/" + fi done rm -rf "$output/linux-headers/linux" diff --git a/scsi/meson.build b/scsi/meson.build index 53f3a1f71693c94d980b819a2e7ddb8887973128..cdb91e11b0e6c439df30851bc20dbf915f7c63bc 100644 --- a/scsi/meson.build +++ b/scsi/meson.build @@ -1,4 +1,6 @@ block_ss.add(files('utils.c')) -block_ss.add(when: 'CONFIG_LINUX', - if_true: files('pr-manager.c', 'pr-manager-helper.c'), - if_false: files('pr-manager-stub.c')) +if host_os == 'linux' + block_ss.add(files('pr-manager.c', 'pr-manager-helper.c')) +else + block_ss.add(files('pr-manager-stub.c')) +endif diff --git a/semihosting/console.c b/semihosting/console.c index 5d61e8207e26a7703dac45968bfbc0fb548b4b1c..60102bbab6657035d41334d27e0f4742e7af5332 100644 --- a/semihosting/console.c +++ b/semihosting/console.c @@ -43,7 +43,7 @@ static SemihostingConsole console; static int console_can_read(void *opaque) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return (int)fifo8_num_free(&c->fifo); } @@ -58,7 +58,7 @@ static void console_wake_up(gpointer data, gpointer user_data) static void console_read(void *opaque, const uint8_t *buf, int size) { SemihostingConsole *c = opaque; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); while (size-- && !fifo8_is_full(&c->fifo)) { fifo8_push(&c->fifo, *buf++); } @@ -70,7 +70,7 @@ bool qemu_semihosting_console_ready(void) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); return !fifo8_is_empty(&c->fifo); } @@ -78,7 +78,7 @@ void qemu_semihosting_console_block_until_ready(CPUState *cs) { SemihostingConsole *c = &console; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* Block if the fifo is completely empty. 
*/ if (fifo8_is_empty(&c->fifo)) { diff --git a/storage-daemon/meson.build b/storage-daemon/meson.build index 5e90cd32b40a5ee1db9acfc0cb7cf958c8b4d5f8..46267b63e72b1eab85acff02952d4499aa418805 100644 --- a/storage-daemon/meson.build +++ b/storage-daemon/meson.build @@ -5,7 +5,7 @@ qsd_ss.add(blockdev, chardev, qmp, qom, qemuutil, gnutls) subdir('qapi') if have_tools - qsd_ss = qsd_ss.apply(config_targetos, strict: false) + qsd_ss = qsd_ss.apply({}) qsd = executable('qemu-storage-daemon', qsd_ss.sources(), dependencies: qsd_ss.dependencies(), diff --git a/stubs/iothread-lock.c b/stubs/iothread-lock.c index 5b45b7fc8b905701f7b30fd769032dcc36f2b7e0..d7890e5581c5c29b69d18e9e27f509e453c739e5 100644 --- a/stubs/iothread-lock.c +++ b/stubs/iothread-lock.c @@ -1,15 +1,15 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { return false; } -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { } diff --git a/system/cpu-throttle.c b/system/cpu-throttle.c index d9bb30a223d84e4b460dbdfaeec5da010659cd4e..c951a6c65e1996bd26eb57c605a2e0f3a1b1c020 100644 --- a/system/cpu-throttle.c +++ b/system/cpu-throttle.c @@ -54,12 +54,12 @@ static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque) endtime_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + sleeptime_ns; while (sleeptime_ns > 0 && !cpu->stop) { if (sleeptime_ns > SCALE_MS) { - qemu_cond_timedwait_iothread(cpu->halt_cond, + qemu_cond_timedwait_bql(cpu->halt_cond, sleeptime_ns / SCALE_MS); } else { - qemu_mutex_unlock_iothread(); + bql_unlock(); g_usleep(sleeptime_ns / SCALE_US); - qemu_mutex_lock_iothread(); + bql_lock(); } sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } diff --git a/system/cpu-timers.c b/system/cpu-timers.c index 7452d97b673162b718e00599bdb36183e975bb18..bdf3a41dcba3f1e3830a900e6f662a480e3014ad 100644 --- a/system/cpu-timers.c +++ b/system/cpu-timers.c @@ -165,7 +165,7 @@ static const VMStateDescription icount_vmstate_warp_timer = { .version_id = 1, .minimum_version_id = 1, .needed = warp_timer_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(vm_clock_warp_start, TimersState), VMSTATE_TIMER_PTR(icount_warp_timer, TimersState), VMSTATE_END_OF_LIST() @@ -177,7 +177,7 @@ static const VMStateDescription icount_vmstate_adjust_timers = { .version_id = 1, .minimum_version_id = 1, .needed = adjust_timers_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_TIMER_PTR(icount_rt_timer, TimersState), VMSTATE_TIMER_PTR(icount_vm_timer, TimersState), VMSTATE_END_OF_LIST() @@ -189,7 +189,7 @@ static const VMStateDescription icount_vmstate_shift = { .version_id = 2, .minimum_version_id = 2, .needed = icount_shift_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(icount_time_shift, TimersState), VMSTATE_INT64(last_delta, TimersState), VMSTATE_END_OF_LIST() @@ -204,12 +204,12 @@ static const VMStateDescription icount_vmstate_timers = { .version_id = 1, .minimum_version_id = 1, .needed = icount_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(qemu_icount_bias, TimersState), VMSTATE_INT64(qemu_icount, TimersState), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { 
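The icount/timers hunks above are part of a tree-wide switch to const-qualified VMState tables. A minimal sketch of the resulting shape, using a hypothetical DemoTimerState so that the two compound-literal casts stand out; only those casts mirror the real change:

    /* Hypothetical device state; illustrative only. */
    typedef struct DemoTimerState {
        int64_t count;
        int64_t limit;
    } DemoTimerState;

    static const VMStateDescription vmstate_demo_timer = {
        .name = "demo-timer",
        .version_id = 1,
        .minimum_version_id = 1,
        .fields = (const VMStateField[]) {           /* was (VMStateField[]) */
            VMSTATE_INT64(count, DemoTimerState),
            VMSTATE_INT64(limit, DemoTimerState),
            VMSTATE_END_OF_LIST()
        },
        .subsections = (const VMStateDescription * const []) {
            NULL                         /* was (const VMStateDescription * []) */
        }
    };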
&icount_vmstate_warp_timer, &icount_vmstate_adjust_timers, &icount_vmstate_shift, @@ -221,13 +221,13 @@ static const VMStateDescription vmstate_timers = { .name = "timer", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(cpu_ticks_offset, TimersState), VMSTATE_UNUSED(8), VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &icount_vmstate_timers, NULL } diff --git a/system/cpus.c b/system/cpus.c index a444a747f0164ee9a998ef4b25b49a2efc4607a8..68d161d96b746e1e24e357e4a89b485bfdb09f84 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -65,7 +65,8 @@ #endif /* CONFIG_LINUX */ -static QemuMutex qemu_global_mutex; +/* The Big QEMU Lock (BQL) */ +static QemuMutex bql; /* * The chosen accelerator is supposed to register this. @@ -259,14 +260,33 @@ void cpu_interrupt(CPUState *cpu, int mask) } } +/* + * True if the vm was previously suspended, and has not been woken or reset. + */ +static int vm_was_suspended; + +void vm_set_suspended(bool suspended) +{ + vm_was_suspended = suspended; +} + +bool vm_get_suspended(void) +{ + return vm_was_suspended; +} + static int do_vm_stop(RunState state, bool send_stop) { int ret = 0; + RunState oldstate = runstate_get(); - if (runstate_is_running()) { + if (runstate_is_live(oldstate)) { + vm_was_suspended = (oldstate == RUN_STATE_SUSPENDED); runstate_set(state); cpu_disable_ticks(); - pause_all_vcpus(); + if (oldstate == RUN_STATE_RUNNING) { + pause_all_vcpus(); + } vm_state_notify(0, state); if (send_stop) { qapi_event_send_stop(); @@ -389,14 +409,14 @@ void qemu_init_cpu_loop(void) qemu_init_sigbus(); qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_pause_cond); - qemu_mutex_init(&qemu_global_mutex); + qemu_mutex_init(&bql); qemu_thread_get_self(&io_thread); } void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) { - do_run_on_cpu(cpu, func, data, &qemu_global_mutex); + do_run_on_cpu(cpu, func, data, &bql); } static void qemu_cpu_stop(CPUState *cpu, bool exit) @@ -428,7 +448,7 @@ void qemu_wait_io_event(CPUState *cpu) slept = true; qemu_plugin_vcpu_idle_cb(cpu); } - qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex); + qemu_cond_wait(cpu->halt_cond, &bql); } if (slept) { qemu_plugin_vcpu_resume_cb(cpu); @@ -481,46 +501,46 @@ bool qemu_in_vcpu_thread(void) return current_cpu && qemu_cpu_is_self(current_cpu); } -QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked) +QEMU_DEFINE_STATIC_CO_TLS(bool, bql_locked) -bool qemu_mutex_iothread_locked(void) +bool bql_locked(void) { - return get_iothread_locked(); + return get_bql_locked(); } bool qemu_in_main_thread(void) { - return qemu_mutex_iothread_locked(); + return bql_locked(); } /* * The BQL is taken from so many places that it is worth profiling the * callers directly, instead of funneling them all through a single function. 
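The rename from the old iothread-lock API to the BQL names introduced above is mechanical; a minimal sketch of the calling pattern from a hypothetical helper, mirroring how prepare_mmio_access() uses it later in this patch:

    /* Hypothetical helper: take the BQL only if the caller does not
     * already hold it, using the renamed API. */
    static void demo_touch_bql_protected_state(void)
    {
        bool release = false;

        if (!bql_locked()) {        /* was qemu_mutex_iothread_locked() */
            bql_lock();             /* was qemu_mutex_lock_iothread() */
            release = true;
        }

        /* ... access state guarded by the Big QEMU Lock ... */

        if (release) {
            bql_unlock();           /* was qemu_mutex_unlock_iothread() */
        }
    }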
*/ -void qemu_mutex_lock_iothread_impl(const char *file, int line) +void bql_lock_impl(const char *file, int line) { - QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func); + QemuMutexLockFunc bql_lock_fn = qatomic_read(&bql_mutex_lock_func); - g_assert(!qemu_mutex_iothread_locked()); - bql_lock(&qemu_global_mutex, file, line); - set_iothread_locked(true); + g_assert(!bql_locked()); + bql_lock_fn(&bql, file, line); + set_bql_locked(true); } -void qemu_mutex_unlock_iothread(void) +void bql_unlock(void) { - g_assert(qemu_mutex_iothread_locked()); - set_iothread_locked(false); - qemu_mutex_unlock(&qemu_global_mutex); + g_assert(bql_locked()); + set_bql_locked(false); + qemu_mutex_unlock(&bql); } -void qemu_cond_wait_iothread(QemuCond *cond) +void qemu_cond_wait_bql(QemuCond *cond) { - qemu_cond_wait(cond, &qemu_global_mutex); + qemu_cond_wait(cond, &bql); } -void qemu_cond_timedwait_iothread(QemuCond *cond, int ms) +void qemu_cond_timedwait_bql(QemuCond *cond, int ms) { - qemu_cond_timedwait(cond, &qemu_global_mutex, ms); + qemu_cond_timedwait(cond, &bql, ms); } /* signal CPU creation */ @@ -571,15 +591,15 @@ void pause_all_vcpus(void) replay_mutex_unlock(); while (!all_vcpus_paused()) { - qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_pause_cond, &bql); CPU_FOREACH(cpu) { qemu_cpu_kick(cpu); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpu_resume(CPUState *cpu) @@ -608,9 +628,9 @@ void cpu_remove_sync(CPUState *cpu) cpu->stop = true; cpu->unplug = true; qemu_cpu_kick(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(cpu->thread); - qemu_mutex_lock_iothread(); + bql_lock(); } void cpus_register_accel(const AccelOpsClass *ops) @@ -649,7 +669,7 @@ void qemu_init_vcpu(CPUState *cpu) cpus_accel->create_vcpu_thread(cpu); while (!cpu->created) { - qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex); + qemu_cond_wait(&qemu_cpu_cond, &bql); } } @@ -679,11 +699,13 @@ int vm_stop(RunState state) /** * Prepare for (re)starting the VM. - * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already - * running or in case of an error condition), 0 otherwise. + * Returns 0 if the vCPUs should be restarted, -1 on an error condition, + * and 1 otherwise. */ int vm_prepare_start(bool step_pending) { + int ret = vm_was_suspended ? 1 : 0; + RunState state = vm_was_suspended ? 
RUN_STATE_SUSPENDED : RUN_STATE_RUNNING; RunState requested; qemu_vmstop_requested(&requested); @@ -714,9 +736,10 @@ int vm_prepare_start(bool step_pending) qapi_event_send_resume(); cpu_enable_ticks(); - runstate_set(RUN_STATE_RUNNING); - vm_state_notify(1, RUN_STATE_RUNNING); - return 0; + runstate_set(state); + vm_state_notify(1, state); + vm_was_suspended = false; + return ret; } void vm_start(void) @@ -726,11 +749,20 @@ void vm_start(void) } } +void vm_resume(RunState state) +{ + if (runstate_is_live(state)) { + vm_start(); + } else { + runstate_set(state); + } +} + /* does a state transition even if the VM is already stopped, current state is forgotten forever */ int vm_stop_force_state(RunState state) { - if (runstate_is_running()) { + if (runstate_is_live(runstate_get())) { return vm_stop(state); } else { int ret; diff --git a/system/dirtylimit.c b/system/dirtylimit.c index 495c7a7082ff190ec12a0d13611bac260a1d0d83..b5607eb8c272a4b5456f3f903c6396dd307381f6 100644 --- a/system/dirtylimit.c +++ b/system/dirtylimit.c @@ -148,9 +148,9 @@ void vcpu_dirty_rate_stat_stop(void) { qatomic_set(&vcpu_dirty_rate_stat->running, 0); dirtylimit_state_unlock(); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_join(&vcpu_dirty_rate_stat->thread); - qemu_mutex_lock_iothread(); + bql_lock(); dirtylimit_state_lock(); } diff --git a/system/dma-helpers.c b/system/dma-helpers.c index 36211acc7eaeb2a9ac16375daf5f3f59bd64c329..9b221cf94e259fdfa8c487dcad10f566f2854826 100644 --- a/system/dma-helpers.c +++ b/system/dma-helpers.c @@ -119,13 +119,15 @@ static void dma_blk_cb(void *opaque, int ret) trace_dma_blk_cb(dbs, ret); - aio_context_acquire(ctx); + /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */ + assert(ctx == qemu_get_current_aio_context()); + dbs->acb = NULL; dbs->offset += dbs->iov.size; if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) { dma_complete(dbs, ret); - goto out; + return; } dma_blk_unmap(dbs); @@ -168,7 +170,7 @@ static void dma_blk_cb(void *opaque, int ret) trace_dma_map_wait(dbs); dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs); cpu_register_map_client(dbs->bh); - goto out; + return; } if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) { @@ -179,8 +181,6 @@ static void dma_blk_cb(void *opaque, int ret) dbs->acb = dbs->io_func(dbs->offset, &dbs->iov, dma_blk_cb, dbs, dbs->io_func_opaque); assert(dbs->acb); -out: - aio_context_release(ctx); } static void dma_aio_cancel(BlockAIOCB *acb) diff --git a/system/memory.c b/system/memory.c index 798b6c0a171b71db8acb9aea503ea21ac26a07fe..a229a79988fce2aa3cb77e3a130db4c694e8cd49 100644 --- a/system/memory.c +++ b/system/memory.c @@ -1119,7 +1119,7 @@ void memory_region_transaction_commit(void) AddressSpace *as; assert(memory_region_transaction_depth); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); --memory_region_transaction_depth; if (!memory_region_transaction_depth) { @@ -1546,16 +1546,17 @@ void memory_region_init_io(MemoryRegion *mr, mr->terminates = true; } -void memory_region_init_ram_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { - memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); + return memory_region_init_ram_flags_nomigrate(mr, owner, name, + size, 0, errp); } -void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, +bool memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1572,10 +1573,12 @@ void 
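The new return-value contract of vm_prepare_start() is easiest to see from the caller's side; a hedged sketch of how a caller would consume it (the wrapper name demo_cont is hypothetical, the handling mirrors what vm_start() does):

    /* Hypothetical caller illustrating the updated contract:
     *   0  -> resume the vCPUs (normal "running" case)
     *   1  -> state was restored as suspended; do not kick the vCPUs
     *  -1  -> error condition; start nothing
     */
    static void demo_cont(void)
    {
        int ret = vm_prepare_start(false);

        if (ret < 0) {
            return;
        }
        if (ret == 0) {
            resume_all_vcpus();
        }
        /* ret == 1: the runstate is already RUN_STATE_SUSPENDED here */
    }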
memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } -void memory_region_init_resizeable_ram(MemoryRegion *mr, +bool memory_region_init_resizeable_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1596,11 +1599,13 @@ void memory_region_init_resizeable_ram(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } #ifdef CONFIG_POSIX -void memory_region_init_ram_from_file(MemoryRegion *mr, +bool memory_region_init_ram_from_file(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1623,10 +1628,12 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } -void memory_region_init_ram_from_fd(MemoryRegion *mr, +bool memory_region_init_ram_from_fd(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, @@ -1647,7 +1654,9 @@ void memory_region_init_ram_from_fd(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } #endif @@ -1698,17 +1707,22 @@ void memory_region_init_alias(MemoryRegion *mr, mr->alias_offset = offset; } -void memory_region_init_rom_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_nomigrate(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { - memory_region_init_ram_flags_nomigrate(mr, owner, name, size, 0, errp); + if (!memory_region_init_ram_flags_nomigrate(mr, owner, name, + size, 0, errp)) { + return false; + } mr->readonly = true; + + return true; } -void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, +bool memory_region_init_rom_device_nomigrate(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -1729,7 +1743,9 @@ void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, mr->size = int128_zero(); object_unparent(OBJECT(mr)); error_propagate(errp, err); + return false; } + return true; } void memory_region_init_iommu(void *_iommu_mr, @@ -3562,19 +3578,16 @@ void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled) } } -void memory_region_init_ram(MemoryRegion *mr, +bool memory_region_init_ram(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_ram_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_ram_nomigrate(mr, owner, name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. * We only want the owner here for the purposes of defining a @@ -3584,21 +3597,20 @@ void memory_region_init_ram(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } -void memory_region_init_rom(MemoryRegion *mr, +bool memory_region_init_rom(MemoryRegion *mr, Object *owner, const char *name, uint64_t size, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_rom_nomigrate(mr, owner, name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_rom_nomigrate(mr, owner, name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. 
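Because the memory_region_init_*() helpers above now report failure through their bool return value, with errp filled in directly, callers can drop the local Error * and error_propagate() dance; a minimal sketch of the resulting realize-time pattern (the device type, region name and size are hypothetical):

    /* Hypothetical realize function using the new bool return value. */
    static void demo_device_realize(DeviceState *dev, Error **errp)
    {
        DemoDeviceState *s = DEMO_DEVICE(dev);    /* hypothetical QOM type */

        if (!memory_region_init_ram(&s->ram, OBJECT(dev), "demo.ram",
                                    64 * KiB, errp)) {
            return;   /* errp is already set; nothing to propagate */
        }
        sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->ram);
    }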
* We only want the owner here for the purposes of defining a @@ -3608,9 +3620,11 @@ void memory_region_init_rom(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } -void memory_region_init_rom_device(MemoryRegion *mr, +bool memory_region_init_rom_device(MemoryRegion *mr, Object *owner, const MemoryRegionOps *ops, void *opaque, @@ -3619,13 +3633,10 @@ void memory_region_init_rom_device(MemoryRegion *mr, Error **errp) { DeviceState *owner_dev; - Error *err = NULL; - memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, - name, size, &err); - if (err) { - error_propagate(errp, err); - return; + if (!memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, + name, size, errp)) { + return false; } /* This will assert if owner is neither NULL nor a DeviceState. * We only want the owner here for the purposes of defining a @@ -3635,6 +3646,8 @@ void memory_region_init_rom_device(MemoryRegion *mr, */ owner_dev = DEVICE(owner); vmstate_register_ram(mr, owner_dev); + + return true; } /* diff --git a/system/meson.build b/system/meson.build index 3a64dd89de1f1c79201df75ccbc633c519ca9b4c..25e2117250580507088168e32452665595625fc2 100644 --- a/system/meson.build +++ b/system/meson.build @@ -33,4 +33,6 @@ endif system_ss.add(when: seccomp, if_true: files('qemu-seccomp.c')) system_ss.add(when: fdt, if_true: files('device_tree.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files('async-teardown.c')) +if host_os == 'linux' + system_ss.add(files('async-teardown.c')) +endif diff --git a/system/physmem.c b/system/physmem.c index a63853a7bc9d5c4fc2ee06f5fe56222c36490fe0..cc68a7976378967b1d13d0792c21322497514434 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -799,7 +799,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr) abort(); found: - /* It is safe to write mru_block outside the iothread lock. This + /* It is safe to write mru_block outside the BQL. This * is what happens: * * mru_block = xxx @@ -1597,7 +1597,7 @@ int qemu_ram_get_fd(RAMBlock *rb) return rb->fd; } -/* Called with iothread lock held. */ +/* Called with the BQL held. */ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) { RAMBlock *block; @@ -1625,7 +1625,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev) } } -/* Called with iothread lock held. */ +/* Called with the BQL held. 
*/ void qemu_ram_unset_idstr(RAMBlock *block) { /* FIXME: arch_init.c assumes that this is not called throughout @@ -2639,8 +2639,8 @@ bool prepare_mmio_access(MemoryRegion *mr) { bool release_lock = false; - if (!qemu_mutex_iothread_locked()) { - qemu_mutex_lock_iothread(); + if (!bql_locked()) { + bql_lock(); release_lock = true; } if (mr->flush_coalesced_mmio) { @@ -2721,7 +2721,7 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } @@ -2799,7 +2799,7 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, } if (release_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); release_lock = false; } diff --git a/system/qtest.c b/system/qtest.c index 7964f0b2488b86930fbb4f7fb42f7a353e041bfd..6da58b3874ee369f8fc671326353ea1e36fe3252 100644 --- a/system/qtest.c +++ b/system/qtest.c @@ -21,6 +21,7 @@ #include "exec/tswap.h" #include "hw/qdev-core.h" #include "hw/irq.h" +#include "hw/core/cpu.h" #include "qemu/accel.h" #include "sysemu/cpu-timers.h" #include "qemu/config-file.h" diff --git a/system/runstate.c b/system/runstate.c index ea9d6c2a32a45541a87cecaba569583f60624ea2..fb07b7b71ad5542904011e117c5089e44903a610 100644 --- a/system/runstate.c +++ b/system/runstate.c @@ -77,6 +77,7 @@ typedef struct { static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PRELAUNCH, RUN_STATE_INMIGRATE }, + { RUN_STATE_PRELAUNCH, RUN_STATE_SUSPENDED }, { RUN_STATE_DEBUG, RUN_STATE_RUNNING }, { RUN_STATE_DEBUG, RUN_STATE_FINISH_MIGRATE }, @@ -108,6 +109,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PAUSED, RUN_STATE_POSTMIGRATE }, { RUN_STATE_PAUSED, RUN_STATE_PRELAUNCH }, { RUN_STATE_PAUSED, RUN_STATE_COLO}, + { RUN_STATE_PAUSED, RUN_STATE_SUSPENDED}, { RUN_STATE_POSTMIGRATE, RUN_STATE_RUNNING }, { RUN_STATE_POSTMIGRATE, RUN_STATE_FINISH_MIGRATE }, @@ -131,6 +133,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RESTORE_VM, RUN_STATE_RUNNING }, { RUN_STATE_RESTORE_VM, RUN_STATE_PRELAUNCH }, + { RUN_STATE_RESTORE_VM, RUN_STATE_SUSPENDED }, { RUN_STATE_COLO, RUN_STATE_RUNNING }, { RUN_STATE_COLO, RUN_STATE_PRELAUNCH }, @@ -149,6 +152,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_RUNNING, RUN_STATE_COLO}, { RUN_STATE_SAVE_VM, RUN_STATE_RUNNING }, + { RUN_STATE_SAVE_VM, RUN_STATE_SUSPENDED }, { RUN_STATE_SHUTDOWN, RUN_STATE_PAUSED }, { RUN_STATE_SHUTDOWN, RUN_STATE_FINISH_MIGRATE }, @@ -161,6 +165,10 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_SUSPENDED, RUN_STATE_FINISH_MIGRATE }, { RUN_STATE_SUSPENDED, RUN_STATE_PRELAUNCH }, { RUN_STATE_SUSPENDED, RUN_STATE_COLO}, + { RUN_STATE_SUSPENDED, RUN_STATE_PAUSED}, + { RUN_STATE_SUSPENDED, RUN_STATE_SAVE_VM }, + { RUN_STATE_SUSPENDED, RUN_STATE_RESTORE_VM }, + { RUN_STATE_SUSPENDED, RUN_STATE_SHUTDOWN }, { RUN_STATE_WATCHDOG, RUN_STATE_RUNNING }, { RUN_STATE_WATCHDOG, RUN_STATE_FINISH_MIGRATE }, @@ -502,6 +510,7 @@ void qemu_system_reset(ShutdownCause reason) qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); } cpu_synchronize_all_post_reset(); + vm_set_suspended(false); } /* @@ -810,7 +819,7 @@ void qemu_init_subsystems(void) qemu_init_cpu_list(); qemu_init_cpu_loop(); - qemu_mutex_lock_iothread(); + bql_lock(); atexit(qemu_run_exit_notifiers); diff --git a/system/vl.c b/system/vl.c index 2bcd9efb9a64396520576ae5ceaf0523c991a4ca..53850a1dafee576264c4221fedb1de56fb108916 100644 --- 
a/system/vl.c +++ b/system/vl.c @@ -2426,6 +2426,10 @@ static void qemu_validate_options(const QDict *machine_opts) } } + if (loadvm && incoming) { + error_report("'incoming' and 'loadvm' options are mutually exclusive"); + exit(EXIT_FAILURE); + } if (loadvm && preconfig_requested) { error_report("'preconfig' and 'loadvm' options are " "mutually exclusive"); @@ -2706,7 +2710,9 @@ void qmp_x_exit_preconfig(Error **errp) qemu_machine_creation_done(); if (loadvm) { + RunState state = autostart ? RUN_STATE_RUNNING : runstate_get(); load_snapshot(loadvm, NULL, false, NULL, &error_fatal); + load_snapshot_resume(state); } if (replay_mode != REPLAY_MODE_NONE) { replay_vmstate_init(); @@ -3733,7 +3739,7 @@ void qemu_init(int argc, char **argv) migration_object_init(); /* parse features once if machine provides default cpu_type */ - current_machine->cpu_type = machine_class->default_cpu_type; + current_machine->cpu_type = machine_class_default_cpu_type(machine_class); if (cpu_option) { current_machine->cpu_type = parse_cpu_option(cpu_option); } diff --git a/system/watchpoint.c b/system/watchpoint.c index ba5ad13352c355f5c8de5d25d36efb7fb09b95c2..b76007ebf6b62855f420b72c8ae36c972d74ec04 100644 --- a/system/watchpoint.c +++ b/system/watchpoint.c @@ -155,9 +155,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, * Now raise the debug interrupt so that it will * trigger after the current instruction. */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 39cf841b3eee5d5a2bfbe83aeafaced7dca9be64..b8ed29e3432342105823449d2abe9b9061852f65 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -87,23 +87,6 @@ static void alpha_cpu_realizefn(DeviceState *dev, Error **errp) acc->parent_realize(dev, errp); } -static void alpha_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - - qemu_printf(" %s\n", object_class_get_name(oc)); -} - -void alpha_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_ALPHA_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, alpha_cpu_list_entry, NULL); - g_slist_free(list); -} - /* Models */ typedef struct AlphaCPUAlias { const char *alias; @@ -142,12 +125,6 @@ static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_free(typename); - /* TODO: remove match everything nonsense */ - if (!oc || object_class_is_abstract(oc)) { - /* Default to ev67; no reason not to emulate insns by default. 
*/ - oc = object_class_by_name(ALPHA_CPU_TYPE_NAME("ev67")); - } - return oc; } diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index d672e911ddd3d6432f3218b4e030bec6f8d328c8..ce806587ca1c49cb0faee0041de9c6376384a6e9 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -292,8 +292,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -#define cpu_list alpha_cpu_list - #include "exec/cpu-all.h" enum { @@ -441,7 +439,6 @@ void alpha_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU -void alpha_cpu_list(void); G_NORETURN void dynamic_excp(CPUAlphaState *, uintptr_t, int, int); G_NORETURN void arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); diff --git a/target/alpha/machine.c b/target/alpha/machine.c index 2b7c8148ff58a0ddb53557b4b25e554c8f23dabf..f09834f635d21aeef45b838f16e5cc0a6b344220 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -24,7 +24,7 @@ static const VMStateInfo vmstate_fpcr = { .put = put_fpcr, }; -static VMStateField vmstate_env_fields[] = { +static const VMStateField vmstate_env_fields[] = { VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31), VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31), /* Save the architecture value of the fpcr, not the internally @@ -73,7 +73,7 @@ static const VMStateDescription vmstate_env = { .fields = vmstate_env_fields, }; -static VMStateField vmstate_cpu_fields[] = { +static const VMStateField vmstate_cpu_fields[] = { VMSTATE_CPU(), VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState), VMSTATE_END_OF_LIST() diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index c078849403c7751e303a620d15f63da549f75919..8850381565971212dbd7dbdfc8fcdc72168ff0d1 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -88,7 +88,7 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, g_free(info); /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -99,7 +99,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, ARMCPU *target_cpu; struct CpuOnInfo *info; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 "\n", cpuid, target_el, target_aa64 ? 
"aarch64" : "aarch32", entry, @@ -196,7 +196,7 @@ static void arm_set_cpu_on_and_reset_async_work(CPUState *target_cpu_state, target_cpu_state->halted = 0; /* Finally set the power status */ - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_ON; } @@ -205,7 +205,7 @@ int arm_set_cpu_on_and_reset(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); /* Retrieve the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); @@ -247,7 +247,7 @@ static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, { ARMCPU *target_cpu = ARM_CPU(target_cpu_state); - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); target_cpu->power_state = PSCI_OFF; target_cpu_state->halted = 1; target_cpu_state->exception_index = EXCP_HLT; @@ -258,7 +258,7 @@ int arm_set_cpu_off(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); @@ -294,7 +294,7 @@ int arm_reset_cpu(uint64_t cpuid) CPUState *target_cpu_state; ARMCPU *target_cpu; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); DPRINTF("cpu %" PRId64 "\n", cpuid); diff --git a/target/arm/arm-qmp-cmds.c b/target/arm/arm-qmp-cmds.c index b53d5efe13d0d1276ccfcbc9f30df57139714f48..2250cd7ddfa368c16bea8e09e9986391f5e53cb4 100644 --- a/target/arm/arm-qmp-cmds.c +++ b/target/arm/arm-qmp-cmds.c @@ -237,8 +237,7 @@ static void arm_cpu_add_definition(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_ARM_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index f1293d16c07a86812fb179e0e4e753b291b81596..b6fdd0f3eb434b316dffadca7f048b1ef5fcfa2d 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -118,6 +118,11 @@ enum { * ARM pseudocode function CheckSMEAccess(). */ ARM_CP_SME = 1 << 19, + /* + * Flag: one of the four EL2 registers which redirect to the + * equivalent EL1 register when FEAT_NV2 is enabled. + */ + ARM_CP_NV2_REDIRECT = 1 << 20, }; /* @@ -821,6 +826,11 @@ typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); #define CP_ANY 0xff +/* Flags in the high bits of nv2_redirect_offset */ +#define NV2_REDIR_NV1 0x4000 /* Only redirect when HCR_EL2.NV1 == 1 */ +#define NV2_REDIR_NO_NV1 0x8000 /* Only redirect when HCR_EL2.NV1 == 0 */ +#define NV2_REDIR_FLAG_MASK 0xc000 + /* Definition of an ARM coprocessor register */ struct ARMCPRegInfo { /* Name of register (useful mainly for debugging, need not be unique) */ @@ -862,6 +872,13 @@ struct ARMCPRegInfo { * value encodes both the trap register and bit within it. */ FGTBit fgt; + + /* + * Offset from VNCR_EL2 when FEAT_NV2 redirects access to memory; + * may include an NV2_REDIR_* flag. + */ + uint32_t nv2_redirect_offset; + /* * The opaque pointer passed to define_arm_cp_regs_with_opaque() when * this register was defined: can be used to hand data through to the @@ -937,7 +954,7 @@ struct ARMCPRegInfo { CPResetFn *resetfn; /* - * "Original" writefn and readfn. + * "Original" readfn, writefn, accessfn. 
* For ARMv8.1-VHE register aliases, we overwrite the read/write * accessor functions of various EL1/EL0 to perform the runtime * check for which sysreg should actually be modified, and then @@ -948,6 +965,7 @@ struct ARMCPRegInfo { */ CPReadFn *orig_readfn; CPWriteFn *orig_writefn; + CPAccessFn *orig_accessfn; }; /* @@ -1079,4 +1097,38 @@ void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu); CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool); +/** + * arm_cpreg_trap_in_nv: Return true if cpreg traps in nested virtualization + * + * Return true if this cpreg is one which should be trapped to EL2 if + * it is executed at EL1 when nested virtualization is enabled via HCR_EL2.NV. + */ +static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) +{ + /* + * The Arm ARM defines the registers to be trapped in terms of + * their names (I_TZTZL). However the underlying principle is "if + * it would UNDEF at EL1 but work at EL2 then it should trap", and + * the way the encoding of sysregs and system instructions is done + * means that the right set of registers is exactly those where + * the opc1 field is 4 or 5. (You can see this also in the assert + * we do that the opc1 field and the permissions mask line up in + * define_one_arm_cp_reg_with_opaque().) + * Checking the opc1 field is easier for us and avoids the problem + * that we do not consistently use the right architectural names + * for all sysregs, since we treat the name field as largely for debug. + * + * However we do this check, it is going to be at least potentially + * fragile to future new sysregs, but this seems the least likely + * to break. + * + * In particular, note that the released sysreg XML defines that + * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV + * trapping rule, so we will need to add an ARM_CP_* flag to indicate + * "register does not trap on NV" to handle those if/when we implement + * FEAT_MEC. + */ + return ri->opc1 == 4 || ri->opc1 == 5; +} + #endif /* TARGET_ARM_CPREGS_H */ diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 954d3582685c44eb156809f013a520603d721d08..7a590c824cf5366bf5e35dae876d7e60dd604b67 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -839,6 +839,16 @@ static inline bool isar_feature_aa64_e0pd(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, E0PD) != 0; } +static inline bool isar_feature_aa64_nv(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) != 0; +} + +static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, NV) >= 2; +} + static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && diff --git a/target/arm/cpu.c b/target/arm/cpu.c index efb22a87f9eda26e212b6a7895d35d67461c6c08..826ce842c09ba540d85a16a48d71712568efcd22 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -1059,6 +1059,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) uint32_t psr = pstate_read(env); int i, j; int el = arm_current_el(env); + uint64_t hcr = arm_hcr_el2_eff(env); const char *ns_status; bool sve; @@ -1096,6 +1097,10 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) if (cpu_isar_feature(aa64_bti, cpu)) { qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); } + qemu_fprintf(f, "%s%s%s", + (hcr & HCR_NV) ? " NV" : "", + (hcr & HCR_NV1) ? 
" NV1" : "", + (hcr & HCR_NV2) ? " NV2" : ""); if (!(flags & CPU_DUMP_FPU)) { qemu_fprintf(f, "\n"); return; @@ -1686,7 +1691,7 @@ void arm_cpu_post_init(Object *obj) } if (kvm_enabled()) { - kvm_arm_add_vcpu_properties(obj); + kvm_arm_add_vcpu_properties(cpu); } #ifndef CONFIG_USER_ONLY @@ -2238,9 +2243,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */ cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0); - /* FEAT_NV (Nested Virtualization) */ - cpu->isar.id_aa64mmfr2 = - FIELD_DP64(cpu->isar.id_aa64mmfr2, ID_AA64MMFR2, NV, 0); } /* MPU can be configured out of a PMSA CPU either by setting has-mpu @@ -2411,9 +2413,7 @@ static ObjectClass *arm_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU)) { - return NULL; - } + return oc; } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index a0282e0d2817e513db8eb5a159155f52258e6d52..ec276fcd57cd2d7100636e7efb87565f955de995 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -120,12 +120,12 @@ enum { #define TARGET_INSN_START_EXTRA_WORDS 2 /* The 2nd extra word holding syndrome info for data aborts does not use - * the upper 6 bits nor the lower 14 bits. We mask and shift it down to + * the upper 6 bits nor the lower 13 bits. We mask and shift it down to * help the sleb128 encoder do a better job. * When restoring the CPU state, we shift it back up. */ #define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1) -#define ARM_INSN_START_WORD2_SHIFT 14 +#define ARM_INSN_START_WORD2_SHIFT 13 /* We currently assume float and double are IEEE single and double precision respectively. @@ -547,6 +547,9 @@ typedef struct CPUArchState { uint64_t gpccr_el3; uint64_t gptbr_el3; uint64_t mfar_el3; + + /* NV2 register */ + uint64_t vncr_el2; } cp15; struct { @@ -2737,7 +2740,6 @@ static inline bool access_secure_reg(CPUARMState *env) (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \ (_val)) -void arm_cpu_list(void); uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx, uint32_t cur_el, bool secure); @@ -2840,8 +2842,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU -#define cpu_list arm_cpu_list - /* ARM has the following "translation regimes" (as the ARM ARM calls them): * * If EL3 is 64-bit: @@ -3235,17 +3235,26 @@ FIELD(TBFLAG_A64, PSTATE_ZA, 23, 1) FIELD(TBFLAG_A64, SVL, 24, 4) /* Indicates that SME Streaming mode is active, and SMCR_ELx.FA64 is not. */ FIELD(TBFLAG_A64, SME_TRAP_NONSTREAMING, 28, 1) -FIELD(TBFLAG_A64, FGT_ERET, 29, 1) +FIELD(TBFLAG_A64, TRAP_ERET, 29, 1) FIELD(TBFLAG_A64, NAA, 30, 1) FIELD(TBFLAG_A64, ATA0, 31, 1) +FIELD(TBFLAG_A64, NV, 32, 1) +FIELD(TBFLAG_A64, NV1, 33, 1) +FIELD(TBFLAG_A64, NV2, 34, 1) +/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */ +FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) +/* Set if FEAT_NV2 RAM accesses are big-endian */ +FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) /* - * Helpers for using the above. + * Helpers for using the above. Note that only the A64 accessors use + * FIELD_DP64() and FIELD_EX64(), because in the other cases the flags + * word either is or might be 32 bits only. 
*/ #define DP_TBFLAG_ANY(DST, WHICH, VAL) \ (DST.flags = FIELD_DP32(DST.flags, TBFLAG_ANY, WHICH, VAL)) #define DP_TBFLAG_A64(DST, WHICH, VAL) \ - (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A64, WHICH, VAL)) + (DST.flags2 = FIELD_DP64(DST.flags2, TBFLAG_A64, WHICH, VAL)) #define DP_TBFLAG_A32(DST, WHICH, VAL) \ (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_A32, WHICH, VAL)) #define DP_TBFLAG_M32(DST, WHICH, VAL) \ @@ -3254,7 +3263,7 @@ FIELD(TBFLAG_A64, ATA0, 31, 1) (DST.flags2 = FIELD_DP32(DST.flags2, TBFLAG_AM32, WHICH, VAL)) #define EX_TBFLAG_ANY(IN, WHICH) FIELD_EX32(IN.flags, TBFLAG_ANY, WHICH) -#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A64, WHICH) +#define EX_TBFLAG_A64(IN, WHICH) FIELD_EX64(IN.flags2, TBFLAG_A64, WHICH) #define EX_TBFLAG_A32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_A32, WHICH) #define EX_TBFLAG_M32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_M32, WHICH) #define EX_TBFLAG_AM32(IN, WHICH) FIELD_EX32(IN.flags2, TBFLAG_AM32, WHICH) diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 1e9c6c85aece12172665e13d9d0f1122250b1dc6..8e30a7993ea1506a6e609d0008726dbf777d8743 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -66,7 +66,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) */ if (kvm_enabled()) { if (kvm_arm_sve_supported()) { - cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu)); + cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu); vq_supported = cpu->sve_vq.supported; } else { assert(!cpu_isar_feature(aa64_sve, cpu)); diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index cbfba532f5046f7f2022598d9766392c3040cbf0..7d856acddf2137e10602e279be1f5555ca49a956 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -844,6 +844,16 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult access_dbgvcr32(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* MCDR_EL3.TDMA doesn't apply for FEAT_NV traps */ + if (arm_current_el(env) == 2 && (env->cp15.mdcr_el3 & MDCR_TDA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + /* * Check for traps to Debug Comms Channel registers. If FEAT_FGT * is implemented then these are controlled by MDCR_EL2.TDCC for @@ -950,6 +960,7 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_RW, .accessfn = access_tda, .fgt = FGT_MDSCR_EL1, + .nv2_redirect_offset = 0x158, .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), .resetvalue = 0 }, /* @@ -1026,14 +1037,6 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_RW, .accessfn = access_tda, .type = ARM_CP_NOP }, - /* - * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor - * to save and restore a 32-bit guest's DBGVCR) - */ - { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, - .access = PL2_RW, .accessfn = access_tda, - .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, /* * Dummy MDCCINT_EL1, since we don't implement the Debug Communications * Channel but Linux may try to access this register. 
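With TBFLAG_A64 fields such as NV2 now sitting above bit 31 (bits 32-36), the A64 accessors have to use the 64-bit registerfield helpers; a minimal sketch of what that buys, assuming the FIELD(TBFLAG_A64, ...) definitions and a 64-bit flags2 word are in scope as the change implies:

    /* Hypothetical helper: a field at bit 34 can only round-trip through
     * the 64-bit FIELD_DP64()/FIELD_EX64() forms; the 32-bit helpers
     * cannot represent fields above bit 31. */
    static bool demo_roundtrip_nv2_flag(void)
    {
        uint64_t flags2 = 0;

        flags2 = FIELD_DP64(flags2, TBFLAG_A64, NV2, 1);   /* bit 34 */
        return FIELD_EX64(flags2, TBFLAG_A64, NV2) == 1;
    }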
The 32-bit @@ -1062,6 +1065,18 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo debug_aa32_el1_reginfo[] = { + /* + * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor + * to save and restore a 32-bit guest's DBGVCR) + */ + { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0, + .access = PL2_RW, .accessfn = access_dbgvcr32, + .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP }, +}; + static const ARMCPRegInfo debug_lpae_cp_reginfo[] = { /* 64 bit access versions of the (dummy) debug registers */ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0, @@ -1207,6 +1222,9 @@ void define_debug_regs(ARMCPU *cpu) assert(ctx_cmps <= brps); define_arm_cp_regs(cpu, debug_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, debug_aa32_el1_reginfo); + } if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); diff --git a/target/arm/helper.c b/target/arm/helper.c index 2746d3fdac884ab96a7aa07e9ed6d62195cf7630..dc8f14f43316085ca427f02724b6a07fa1cab67e 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -263,6 +263,18 @@ void init_cpreg_list(ARMCPU *cpu) g_list_free(keys); } +static bool arm_pan_enabled(CPUARMState *env) +{ + if (is_a64(env)) { + if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) { + return false; + } + return env->pstate & PSTATE_PAN; + } else { + return env->uncached_cpsr & CPSR_PAN; + } +} + /* * Some registers are not accessible from AArch32 EL3 if SCR.NS == 0. */ @@ -635,6 +647,7 @@ static const ARMCPRegInfo cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_CONTEXTIDR_EL1, + .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1, .secure = ARM_CP_SECSTATE_NS, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, }, @@ -871,6 +884,7 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .fgt = FGT_CPACR_EL1, + .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, }; @@ -1475,6 +1489,22 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, pmu_op_finish(env); } +static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + uint64_t pmcr = env->cp15.c9_pmcr; + + /* + * If EL2 is implemented and enabled for the current security state, reads + * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN. 
+ */ + if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) { + pmcr &= ~PMCRN_MASK; + pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT; + } + + return pmcr; +} + static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -2222,11 +2252,13 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR0_EL1, + .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR1_EL1, + .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, /* * MAIR can just read-as-written because we don't implement caches @@ -2236,6 +2268,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_MAIR_EL1, + .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), .resetvalue = 0 }, { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, @@ -3158,6 +3191,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_ptimer_access, + .nv2_redirect_offset = 0x180 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .resetvalue = 0, .readfn = gt_phys_redir_ctl_read, .raw_readfn = raw_read, @@ -3175,6 +3209,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO, .access = PL0_RW, .accessfn = gt_vtimer_access, + .nv2_redirect_offset = 0x170 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .resetvalue = 0, .readfn = gt_virt_redir_ctl_read, .raw_readfn = raw_read, @@ -3254,6 +3289,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, + .nv2_redirect_offset = 0x178 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), .resetvalue = 0, .accessfn = gt_ptimer_access, .readfn = gt_phys_redir_cval_read, .raw_readfn = raw_read, @@ -3271,6 +3307,7 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2, .access = PL0_RW, .type = ARM_CP_IO, + .nv2_redirect_offset = 0x168 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .resetvalue = 0, .accessfn = gt_vtimer_access, .readfn = gt_virt_redir_cval_read, .raw_readfn = raw_read, @@ -3308,6 +3345,11 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { static CPAccessResult e2h_access(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { + if (arm_current_el(env) == 1) { + /* This must be a FEAT_NV access */ + /* TODO: FEAT_ECV will need to check CNTHCTL_EL2 here */ + return CP_ACCESS_OK; + } if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { return CP_ACCESS_TRAP; } @@ -3593,7 +3635,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */ /* fall through */ case 1: - if (ri->crm == 9 && (env->uncached_cpsr & CPSR_PAN)) { + if (ri->crm == 9 && arm_pan_enabled(env)) { mmu_idx = ARMMMUIdx_Stage1_E1_PAN; } else { 
mmu_idx = ARMMMUIdx_Stage1_E1; @@ -3687,6 +3729,15 @@ static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri, return at_e012_access(env, ri, isread); } +static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) { + return CP_ACCESS_TRAP_EL2; + } + return at_e012_access(env, ri, isread); +} + static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { @@ -3700,7 +3751,7 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, case 0: switch (ri->opc1) { case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */ - if (ri->crm == 9 && (env->pstate & PSTATE_PAN)) { + if (ri->crm == 9 && arm_pan_enabled(env)) { mmu_idx = regime_e20 ? ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN; } else { @@ -4236,6 +4287,7 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_FAR_EL1, + .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), .resetvalue = 0, }, }; @@ -4245,11 +4297,13 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_ESR_EL1, + .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR0_EL1, + .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), offsetof(CPUARMState, cp15.ttbr0_ns) } }, @@ -4257,6 +4311,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR1_EL1, + .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) } }, @@ -4264,6 +4319,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, + .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, .writefn = vmsa_tcr_el12_write, .raw_writefn = raw_write, .resetvalue = 0, @@ -4503,6 +4559,7 @@ static const ARMCPRegInfo lpae_cp_reginfo[] = { .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AMAIR_EL1, + .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, .type = ARM_CP_CONST, .resetvalue = 0 }, /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, @@ -5325,6 +5382,19 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, } } +static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2); + + if (hcr_nv == (HCR_NV | HCR_NV1)) { + return CP_ACCESS_TRAP_EL2; + } + } + return CP_ACCESS_OK; +} + #ifdef CONFIG_USER_ONLY /* * `IC IVAU` is handled to improve compatibility with JITs that dual-map their @@ -5552,22 +5622,22 @@ static const 
ARMCPRegInfo v8_cp_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1R, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1W, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E0R, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E0W, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, @@ -5673,12 +5743,14 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, - .access = PL1_RW, + .access = PL1_RW, .accessfn = access_nv1, + .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, - .access = PL1_RW, + .access = PL1_RW, .accessfn = access_nv1, + .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, /* * We rely on the access checks not allowing the guest to write to the @@ -5692,26 +5764,13 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetof(CPUARMState, sp_el[0]) }, { .name = "SP_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0, + .nv2_redirect_offset = 0x240, .access = PL2_RW, .type = ARM_CP_ALIAS | ARM_CP_EL3_NO_EL2_KEEP, .fieldoffset = offsetof(CPUARMState, sp_el[1]) }, { .name = "SPSel", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0, .type = ARM_CP_NO_RAW, .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write }, - { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, - .access = PL2_RW, - .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, - { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .writefn = dacr_write, .raw_writefn = raw_write, - .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, - { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, - .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, - .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0, @@ -5746,6 +5805,24 @@ 
static const ARMCPRegInfo v8_cp_reginfo[] = { .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) }, }; +/* These are present only when EL1 supports AArch32 */ +static const ARMCPRegInfo v8_aa32_el1_reginfo[] = { + { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0, + .access = PL2_RW, + .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) }, + { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .writefn = dacr_write, .raw_writefn = raw_write, + .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) }, + { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1, + .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP, + .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) }, +}; + static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) { ARMCPU *cpu = env_archcpu(env); @@ -5795,6 +5872,12 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) if (cpu_isar_feature(aa64_rme, cpu)) { valid_mask |= HCR_GPF; } + if (cpu_isar_feature(aa64_nv, cpu)) { + valid_mask |= HCR_NV | HCR_NV1 | HCR_AT; + } + if (cpu_isar_feature(aa64_nv2, cpu)) { + valid_mask |= HCR_NV2; + } } if (cpu_isar_feature(any_evt, cpu)) { @@ -5813,9 +5896,10 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * HCR_DC disables stage1 and enables stage2 translation * HCR_DCT enables tagging on (disabled) stage1 translation * HCR_FWB changes the interpretation of stage2 descriptor bits + * HCR_NV and HCR_NV1 affect interpretation of descriptor bits */ if ((env->cp15.hcr_el2 ^ value) & - (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) { + (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB | HCR_NV | HCR_NV1)) { tlb_flush(CPU(cpu)); } env->cp15.hcr_el2 = value; @@ -5824,14 +5908,14 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) * Updates to VI and VF require us to update the status of * virtual interrupts, which are the logical OR of these bits * and the state of the input lines from the GIC. (This requires - * that we have the iothread lock, which is done by marking the + * that we have the BQL, which is done by marking the * reginfo structs as ARM_CP_IO.) * Note that if a write to HCR pends a VIRQ or VFIQ it is never * possible for it to be taken immediately, because VIRQ and * VFIQ are masked unless running at EL0 or EL1, and HCR * can only be written at EL2. 
*/ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_cpu_update_virq(cpu); arm_cpu_update_vfiq(cpu); arm_cpu_update_vserr(cpu); @@ -5981,7 +6065,7 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { - if (arm_current_el(env) < 3 + if (arm_current_el(env) == 2 && arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { return CP_ACCESS_TRAP_EL3; @@ -5993,6 +6077,7 @@ static const ARMCPRegInfo hcrx_el2_reginfo = { .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2, .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen, + .nv2_redirect_offset = 0xa0, .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2), }; @@ -6059,6 +6144,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .type = ARM_CP_IO, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2), + .nv2_redirect_offset = 0x78, .writefn = hcr_write, .raw_writefn = raw_write }, { .name = "HCR", .state = ARM_CP_STATE_AA32, .type = ARM_CP_ALIAS | ARM_CP_IO, @@ -6069,14 +6155,16 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 7, .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) }, { .name = "FAR_EL2", .state = ARM_CP_STATE_BOTH, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) }, { .name = "HIFAR", .state = ARM_CP_STATE_AA32, @@ -6085,7 +6173,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .access = PL2_RW, .fieldoffset = offsetofhigh32(CPUARMState, cp15.far_el[2]) }, { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, - .type = ARM_CP_ALIAS, + .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, @@ -6141,6 +6229,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2, .access = PL2_RW, + .nv2_redirect_offset = 0x40, /* no .writefn needed as this can't cause an ASID change */ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) }, { .name = "VTTBR", .state = ARM_CP_STATE_AA32, @@ -6152,6 +6241,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0, .access = PL2_RW, .writefn = vttbr_write, .raw_writefn = raw_write, + .nv2_redirect_offset = 0x20, .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) }, { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0, @@ -6160,6 +6250,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2, .access = PL2_RW, .resetvalue = 0, + .nv2_redirect_offset = 0x90, .fieldoffset = 
offsetof(CPUARMState, cp15.tpidr_el[2]) }, { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0, @@ -6255,6 +6346,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3, .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0, .writefn = gt_cntvoff_write, + .nv2_redirect_offset = 0x60, .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) }, { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14, .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO, @@ -6293,6 +6385,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH, .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3, .access = PL2_RW, + .nv2_redirect_offset = 0x80, .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) }, }; @@ -6318,10 +6411,12 @@ static const ARMCPRegInfo el2_sec_cp_reginfo[] = { { .name = "VSTTBR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 0, .access = PL2_RW, .accessfn = sel2_access, + .nv2_redirect_offset = 0x30, .fieldoffset = offsetof(CPUARMState, cp15.vsttbr_el2) }, { .name = "VSTCR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 6, .opc2 = 2, .access = PL2_RW, .accessfn = sel2_access, + .nv2_redirect_offset = 0x48, .fieldoffset = offsetof(CPUARMState, cp15.vstcr_el2) }, }; @@ -6489,6 +6584,42 @@ static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, writefn(env, ri, value); } +static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri) +{ + /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ + return ri->orig_readfn(env, ri->opaque); +} + +static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ + return ri->orig_writefn(env, ri->opaque, value); +} + +static CPAccessResult el2_e2h_e12_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + /* + * This must be a FEAT_NV access (will either trap or redirect + * to memory). None of the registers with _EL12 aliases want to + * apply their trap controls for this kind of access, so don't + * call the orig_accessfn or do the "UNDEF when E2H is 0" check. + */ + return CP_ACCESS_OK; + } + /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ + if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { + return CP_ACCESS_TRAP_UNCATEGORIZED; + } + if (ri->orig_accessfn) { + return ri->orig_accessfn(env, ri->opaque, isread); + } + return CP_ACCESS_OK; +} + static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) { struct E2HAlias { @@ -6588,6 +6719,41 @@ static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) new_reg->type |= ARM_CP_ALIAS; /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. 
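 * (Editor's note, illustrative only, not part of the original patch:
 * the op-field assignments added just below re-derive crn/crm/opc0/opc1/opc2
 * from new_key rather than copying them from the source register, because
 * the architectural _EL12 encodings differ from the _EL1 ones in op1.
 * For example SCTLR_EL1 is op0=3, op1=0, crn=1, crm=0, op2=0, while the
 * SCTLR_EL12 alias keeps the same op0/crn/crm/op2 but uses op1=5.)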
*/ new_reg->access &= PL2_RW | PL3_RW; + /* The new_reg op fields are as per new_key, not the target reg */ + new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) + >> CP_REG_ARM64_SYSREG_CRN_SHIFT; + new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) + >> CP_REG_ARM64_SYSREG_CRM_SHIFT; + new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) + >> CP_REG_ARM64_SYSREG_OP0_SHIFT; + new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) + >> CP_REG_ARM64_SYSREG_OP1_SHIFT; + new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) + >> CP_REG_ARM64_SYSREG_OP2_SHIFT; + new_reg->opaque = src_reg; + new_reg->orig_readfn = src_reg->readfn ?: raw_read; + new_reg->orig_writefn = src_reg->writefn ?: raw_write; + new_reg->orig_accessfn = src_reg->accessfn; + if (!new_reg->raw_readfn) { + new_reg->raw_readfn = raw_read; + } + if (!new_reg->raw_writefn) { + new_reg->raw_writefn = raw_write; + } + new_reg->readfn = el2_e2h_e12_read; + new_reg->writefn = el2_e2h_e12_write; + new_reg->accessfn = el2_e2h_e12_access; + + /* + * If the _EL1 register is redirected to memory by FEAT_NV2, + * then it shares the offset with the _EL12 register, + * and which one is redirected depends on HCR_EL2.NV1. + */ + if (new_reg->nv2_redirect_offset) { + assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); + new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; + new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; + } ok = g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)a->new_key, new_reg); @@ -6721,9 +6887,11 @@ static const ARMCPRegInfo minimal_ras_reginfo[] = { .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "VDISR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 1, .opc2 = 1, + .nv2_redirect_offset = 0x500, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vdisr_el2) }, { .name = "VSESR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 3, + .nv2_redirect_offset = 0x508, .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.vsesr_el2) }, }; @@ -6895,6 +7063,7 @@ static void zcr_write(CPUARMState *env, const ARMCPRegInfo *ri, static const ARMCPRegInfo zcr_reginfo[] = { { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, + .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1, .access = PL1_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), .writefn = zcr_write, .raw_writefn = raw_write }, @@ -6931,10 +7100,21 @@ static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } -static CPAccessResult access_esm(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult access_smprimap(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* If EL1 this is a FEAT_NV access and CPTR_EL3.ESM doesn't apply */ + if (arm_current_el(env) == 2 + && arm_feature(env, ARM_FEATURE_EL3) + && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_smpri(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) { - /* TODO: FEAT_FGT for SMPRI_EL1 but not SMPRIMAP_EL2 */ if (arm_current_el(env) < 3 && arm_feature(env, ARM_FEATURE_EL3) && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, ESM)) { @@ -7025,6 +7205,7 @@ static const ARMCPRegInfo sme_reginfo[] = { .writefn = svcr_write, .raw_writefn = raw_write }, { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, + 
.nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1, .access = PL1_RW, .type = ARM_CP_SME, .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), .writefn = smcr_write, .raw_writefn = raw_write }, @@ -7053,12 +7234,13 @@ static const ARMCPRegInfo sme_reginfo[] = { */ { .name = "SMPRI_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 4, - .access = PL1_RW, .accessfn = access_esm, + .access = PL1_RW, .accessfn = access_smpri, .fgt = FGT_NSMPRI_EL1, .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "SMPRIMAP_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 5, - .access = PL2_RW, .accessfn = access_esm, + .nv2_redirect_offset = 0x1f8, + .access = PL2_RW, .accessfn = access_smprimap, .type = ARM_CP_CONST, .resetvalue = 0 }, }; @@ -7154,8 +7336,9 @@ static void define_pmu_regs(ARMCPU *cpu) .fgt = FGT_PMCR_EL0, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, .writefn = pmcr_write, - .raw_writefn = raw_write, + .accessfn = pmreg_access, + .readfn = pmcr_read, .raw_readfn = raw_read, + .writefn = pmcr_write, .raw_writefn = raw_write, }; ARMCPRegInfo pmcr64 = { .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, @@ -7165,6 +7348,7 @@ static void define_pmu_regs(ARMCPU *cpu) .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), .resetvalue = cpu->isar.reset_pmcr_el0, + .readfn = pmcr_read, .raw_readfn = raw_read, .writefn = pmcr_write, .raw_writefn = raw_write, }; @@ -7645,6 +7829,7 @@ static const ARMCPRegInfo rndr_reginfo[] = { static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, uint64_t value) { +#ifdef CONFIG_TCG ARMCPU *cpu = env_archcpu(env); /* CTR_EL0 System register -> DminLine, bits [19:16] */ uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF); @@ -7669,6 +7854,10 @@ static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, } #endif /*CONFIG_USER_ONLY*/ } +#else + /* Handled by hardware accelerator. */ + g_assert_not_reached(); +#endif /* CONFIG_TCG */ } static const ARMCPRegInfo dcpop_reg[] = { @@ -7701,7 +7890,46 @@ static CPAccessResult access_mte(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { int el = arm_current_el(env); + if (el < 2 && arm_is_el2_enabled(env)) { + uint64_t hcr = arm_hcr_el2_eff(env); + if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { + return CP_ACCESS_TRAP_EL2; + } + } + if (el < 3 && + arm_feature(env, ARM_FEATURE_EL3) && + !(env->cp15.scr_el3 & SCR_ATA)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_tfsr_el1(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult nv1 = access_nv1(env, ri, isread); + + if (nv1 != CP_ACCESS_OK) { + return nv1; + } + return access_mte(env, ri, isread); +} + +static CPAccessResult access_tfsr_el2(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* + * TFSR_EL2: similar to generic access_mte(), but we need to + * account for FEAT_NV. At EL1 this must be a FEAT_NV access; + * if NV2 is enabled then we will redirect this to TFSR_EL1 + * after doing the HCR and SCR ATA traps; otherwise this will + * be a trap to EL2 and the HCR/SCR traps do not apply. 
+ */ + int el = arm_current_el(env); + if (el == 1 && (arm_hcr_el2_eff(env) & HCR_NV2)) { + return CP_ACCESS_OK; + } if (el < 2 && arm_is_el2_enabled(env)) { uint64_t hcr = arm_hcr_el2_eff(env); if (!(hcr & HCR_ATA) && (!(hcr & HCR_E2H) || !(hcr & HCR_TGE))) { @@ -7733,11 +7961,13 @@ static const ARMCPRegInfo mte_reginfo[] = { .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[0]) }, { .name = "TFSR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, - .access = PL1_RW, .accessfn = access_mte, + .access = PL1_RW, .accessfn = access_tfsr_el1, + .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, + .type = ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 6, .opc2 = 0, - .access = PL2_RW, .accessfn = access_mte, + .access = PL2_RW, .accessfn = access_tfsr_el2, .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[2]) }, { .name = "TFSR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 6, .opc2 = 0, @@ -7885,6 +8115,18 @@ static CPAccessResult access_scxtnum(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult access_scxtnum_el1(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult nv1 = access_nv1(env, ri, isread); + + if (nv1 != CP_ACCESS_OK) { + return nv1; + } + return access_scxtnum(env, ri, isread); +} + static const ARMCPRegInfo scxtnum_reginfo[] = { { .name = "SCXTNUM_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 13, .crm = 0, .opc2 = 7, @@ -7893,8 +8135,9 @@ static const ARMCPRegInfo scxtnum_reginfo[] = { .fieldoffset = offsetof(CPUARMState, scxtnum_el[0]) }, { .name = "SCXTNUM_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 7, - .access = PL1_RW, .accessfn = access_scxtnum, + .access = PL1_RW, .accessfn = access_scxtnum_el1, .fgt = FGT_SCXTNUM_EL1, + .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1, .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, @@ -7919,25 +8162,53 @@ static CPAccessResult access_fgt(CPUARMState *env, const ARMCPRegInfo *ri, static const ARMCPRegInfo fgt_reginfo[] = { { .name = "HFGRTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 4, + .nv2_redirect_offset = 0x1b8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HFGRTR]) }, { .name = "HFGWTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 5, + .nv2_redirect_offset = 0x1c0, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HFGWTR]) }, { .name = "HDFGRTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 4, + .nv2_redirect_offset = 0x1d0, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_read[FGTREG_HDFGRTR]) }, { .name = "HDFGWTR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 1, .opc2 = 5, + .nv2_redirect_offset = 0x1d8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, cp15.fgt_write[FGTREG_HDFGWTR]) }, { .name = "HFGITR_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 6, + .nv2_redirect_offset = 0x1c8, .access = PL2_RW, .accessfn = access_fgt, .fieldoffset = offsetof(CPUARMState, 
cp15.fgt_exec[FGTREG_HFGITR]) }, }; + +static void vncr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Clear the RES0 bottom 12 bits; this means at runtime we can guarantee + * that VNCR_EL2 + offset is 64-bit aligned. We don't need to do anything + * about the RESS bits at the top -- we choose the "generate an EL2 + * translation abort on use" CONSTRAINED UNPREDICTABLE option (i.e. let + * the ptw.c code detect the resulting invalid address). + */ + env->cp15.vncr_el2 = value & ~0xfffULL; +} + +static const ARMCPRegInfo nv2_reginfo[] = { + { .name = "VNCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 2, .opc2 = 0, + .access = PL2_RW, + .writefn = vncr_write, + .nv2_redirect_offset = 0xb0, + .fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) }, +}; + #endif /* TARGET_AARCH64 */ static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri, @@ -8098,12 +8369,14 @@ static const ARMCPRegInfo vhe_reginfo[] = { .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, + .nv2_redirect_offset = 0x180 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl), .writefn = gt_phys_ctl_write, .raw_writefn = raw_write }, { .name = "CNTV_CTL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 1, .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL2_RW, .accessfn = e2h_access, + .nv2_redirect_offset = 0x170 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl), .writefn = gt_virt_ctl_write, .raw_writefn = raw_write }, { .name = "CNTP_TVAL_EL02", .state = ARM_CP_STATE_AA64, @@ -8120,11 +8393,13 @@ static const ARMCPRegInfo vhe_reginfo[] = { .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 2, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval), + .nv2_redirect_offset = 0x178 | NV2_REDIR_NO_NV1, .access = PL2_RW, .accessfn = e2h_access, .writefn = gt_phys_cval_write, .raw_writefn = raw_write }, { .name = "CNTV_CVAL_EL02", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 5, .crn = 14, .crm = 3, .opc2 = 2, .type = ARM_CP_IO | ARM_CP_ALIAS, + .nv2_redirect_offset = 0x168 | NV2_REDIR_NO_NV1, .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval), .access = PL2_RW, .accessfn = e2h_access, .writefn = gt_virt_cval_write, .raw_writefn = raw_write }, @@ -8137,12 +8412,12 @@ static const ARMCPRegInfo ats1e1_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1RP, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1, .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .fgt = FGT_ATS1E1WP, - .accessfn = at_e012_access, .writefn = ats_write64 }, + .accessfn = at_s1e01_access, .writefn = ats_write64 }, }; static const ARMCPRegInfo ats1cp_reginfo[] = { @@ -8716,6 +8991,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) } define_arm_cp_regs(cpu, v8_idregs); define_arm_cp_regs(cpu, v8_cp_reginfo); + if (cpu_isar_feature(aa64_aa32_el1, cpu)) { + define_arm_cp_regs(cpu, v8_aa32_el1_reginfo); + } for (i = 4; i < 16; i++) { /* @@ -8763,6 +9041,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0, .access = 
PL2_RW, .resetvalue = cpu->midr, .type = ARM_CP_EL3_NO_EL2_C_NZ, + .nv2_redirect_offset = 0x88, .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) }, { .name = "VMPIDR", .state = ARM_CP_STATE_AA32, .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, @@ -8774,6 +9053,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5, .access = PL2_RW, .resetvalue = vmpidr_def, .type = ARM_CP_EL3_NO_EL2_C_NZ, + .nv2_redirect_offset = 0x50, .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) }, }; /* @@ -9203,6 +9483,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1, .access = PL1_RW, .accessfn = access_tacr, + .nv2_redirect_offset = 0x118, .type = ARM_CP_CONST, .resetvalue = cpu->reset_auxcr }, { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1, @@ -9272,7 +9553,9 @@ void register_cp_regs_for_features(ARMCPU *cpu) { .name = "VBAR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .writefn = vbar_write, + .accessfn = access_nv1, .fgt = FGT_VBAR_EL1, + .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), offsetof(CPUARMState, cp15.vbar_ns) }, .resetvalue = 0 }, @@ -9287,6 +9570,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_SCTLR_EL1, + .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), offsetof(CPUARMState, cp15.sctlr_ns) }, .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, @@ -9417,6 +9701,10 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, rme_mte_reginfo); } } + + if (cpu_isar_feature(aa64_nv2, cpu)) { + define_arm_cp_regs(cpu, nv2_reginfo); + } #endif if (cpu_isar_feature(any_predinv, cpu)) { @@ -9438,52 +9726,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) #endif } -/* Sort alphabetically by type name, except for "any". */ -static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) { - return -1; - } else { - return strcmp(name_a, name_b); - } -} - -static void arm_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CPUClass *cc = CPU_CLASS(oc); - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU)); - if (cc->deprecation_note) { - qemu_printf(" %s (deprecated)\n", name); - } else { - qemu_printf(" %s\n", name); - } - g_free(name); -} - -void arm_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_ARM_CPU, false); - list = g_slist_sort(list, arm_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, arm_cpu_list_entry, NULL); - g_slist_free(list); -} - /* * Private utility function for define_one_arm_cp_reg_with_opaque(): * add a single reginfo struct to the hash table. 
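/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * the patch. It summarises how the .nv2_redirect_offset annotations added
 * in the hunks above are intended to be read: the low bits give the offset
 * of the register's backing slot in the FEAT_NV2 page (VNCR_EL2 + offset),
 * and the NV2_REDIR_NV1 / NV2_REDIR_NO_NV1 flag bits say whether that
 * redirection applies while HCR_EL2.NV1 is 1 or 0, so an _EL1 register and
 * its _EL12 alias can share a single slot. The helper name
 * nv2_redirect_applies() is hypothetical.
 */
static bool nv2_redirect_applies(uint64_t hcr, const ARMCPRegInfo *ri)
{
    if (!ri->nv2_redirect_offset) {
        /* Register was given no memory slot: never redirected */
        return false;
    }
    if (ri->nv2_redirect_offset & NV2_REDIR_NV1) {
        /* _EL1 encoding: redirected only while HCR_EL2.NV1 == 1 */
        return hcr & HCR_NV1;
    }
    if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) {
        /* _EL12 alias encoding: redirected only while HCR_EL2.NV1 == 0 */
        return !(hcr & HCR_NV1);
    }
    /* Plain EL2 register: redirected whenever NV2 redirection is in force */
    return true;
}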
@@ -10135,61 +10377,6 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask, } } -/* Sign/zero extend */ -uint32_t HELPER(sxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(int8_t)x; - res |= (uint32_t)(int8_t)(x >> 16) << 16; - return res; -} - -static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) -{ - /* - * Take a division-by-zero exception if necessary; otherwise return - * to get the usual non-trapping division behaviour (result of 0) - */ - if (arm_feature(env, ARM_FEATURE_M) - && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { - raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); - } -} - -uint32_t HELPER(uxtb16)(uint32_t x) -{ - uint32_t res; - res = (uint16_t)(uint8_t)x; - res |= (uint32_t)(uint8_t)(x >> 16) << 16; - return res; -} - -int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - if (num == INT_MIN && den == -1) { - return INT_MIN; - } - return num / den; -} - -uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) -{ - if (den == 0) { - handle_possible_div0_trap(env, GETPC()); - return 0; - } - return num / den; -} - -uint32_t HELPER(rbit)(uint32_t x) -{ - return revbit32(x); -} - #ifdef CONFIG_USER_ONLY static void switch_mode(CPUARMState *env, int mode) @@ -11205,6 +11392,20 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) old_mode = pstate_read(env); aarch64_save_sp(env, arm_current_el(env)); env->elr_el[new_el] = env->pc; + + if (cur_el == 1 && new_el == 1) { + uint64_t hcr = arm_hcr_el2_eff(env); + if ((hcr & (HCR_NV | HCR_NV1 | HCR_NV2)) == HCR_NV || + (hcr & (HCR_NV | HCR_NV2)) == (HCR_NV | HCR_NV2)) { + /* + * FEAT_NV, FEAT_NV2 may need to report EL2 in the SPSR + * by setting M[3:2] to 0b10. + * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN) + * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM) + */ + old_mode = deposit32(old_mode, 2, 2, 2); + } + } } else { old_mode = cpsr_read_for_spsr_elx(env); env->elr_el[new_el] = env->regs[15]; @@ -11215,6 +11416,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) } env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; + qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode); qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", env->elr_el[new_el]); @@ -11344,7 +11546,7 @@ void arm_cpu_do_interrupt(CPUState *cs) * BQL needs to be held for any modification of * cs->interrupt_request. 
*/ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); arm_call_pre_el_change_hook(cpu); @@ -12058,15 +12260,6 @@ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) } #endif -static bool arm_pan_enabled(CPUARMState *env) -{ - if (is_a64(env)) { - return env->pstate & PSTATE_PAN; - } else { - return env->uncached_cpsr & CPSR_PAN; - } -} - ARMMMUIdx arm_mmu_idx_el(CPUARMState *env, int el) { ARMMMUIdx idx; diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 757e13b0f904e7390d431158342d9350457e583e..a537a5bc94c082d2b3bbb58fd3d636a6549ad36c 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -36,7 +36,7 @@ #define MDSCR_EL1_SS_SHIFT 0 #define MDSCR_EL1_MDE_SHIFT 15 -static uint16_t dbgbcr_regs[] = { +static const uint16_t dbgbcr_regs[] = { HV_SYS_REG_DBGBCR0_EL1, HV_SYS_REG_DBGBCR1_EL1, HV_SYS_REG_DBGBCR2_EL1, @@ -54,7 +54,8 @@ static uint16_t dbgbcr_regs[] = { HV_SYS_REG_DBGBCR14_EL1, HV_SYS_REG_DBGBCR15_EL1, }; -static uint16_t dbgbvr_regs[] = { + +static const uint16_t dbgbvr_regs[] = { HV_SYS_REG_DBGBVR0_EL1, HV_SYS_REG_DBGBVR1_EL1, HV_SYS_REG_DBGBVR2_EL1, @@ -72,7 +73,8 @@ static uint16_t dbgbvr_regs[] = { HV_SYS_REG_DBGBVR14_EL1, HV_SYS_REG_DBGBVR15_EL1, }; -static uint16_t dbgwcr_regs[] = { + +static const uint16_t dbgwcr_regs[] = { HV_SYS_REG_DBGWCR0_EL1, HV_SYS_REG_DBGWCR1_EL1, HV_SYS_REG_DBGWCR2_EL1, @@ -90,7 +92,8 @@ static uint16_t dbgwcr_regs[] = { HV_SYS_REG_DBGWCR14_EL1, HV_SYS_REG_DBGWCR15_EL1, }; -static uint16_t dbgwvr_regs[] = { + +static const uint16_t dbgwvr_regs[] = { HV_SYS_REG_DBGWVR0_EL1, HV_SYS_REG_DBGWVR1_EL1, HV_SYS_REG_DBGWVR2_EL1, @@ -1718,9 +1721,9 @@ static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts) * sleeping. */ qatomic_set_mb(&cpu->thread_kicked, false); - qemu_mutex_unlock_iothread(); + bql_unlock(); pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask); - qemu_mutex_lock_iothread(); + bql_lock(); } static void hvf_wfi(CPUState *cpu) @@ -1821,7 +1824,7 @@ int hvf_vcpu_exec(CPUState *cpu) flush_cpu_state(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); assert_hvf_ok(hv_vcpu_run(cpu->accel->fd)); /* handle VMEXIT */ @@ -1830,7 +1833,7 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t ec = syn_get_ec(syndrome); ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); switch (exit_reason) { case HV_EXIT_REASON_EXCEPTION: /* This is the main one, handle below. */ @@ -2010,7 +2013,7 @@ static const VMStateDescription vmstate_hvf_vtimer = { .name = "hvf-vtimer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(vtimer_val, HVFVTimer), VMSTATE_END_OF_LIST() }, diff --git a/target/arm/internals.h b/target/arm/internals.h index 143d57c0fe46a04d22c8b1fcfa1629b5f7322855..71d6c70bf380f9d638334f3077e6cf05f0f63c20 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -940,7 +940,7 @@ static inline const char *aarch32_mode_name(uint32_t psr) * * Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following * a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit. - * Must be called with the iothread lock held. + * Must be called with the BQL held. */ void arm_cpu_update_virq(ARMCPU *cpu); @@ -949,7 +949,7 @@ void arm_cpu_update_virq(ARMCPU *cpu); * * Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following * a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit. - * Must be called with the iothread lock held. + * Must be called with the BQL held. 
*/ void arm_cpu_update_vfiq(ARMCPU *cpu); diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 7903e2ddde1b70bbd0db7f27992afe86763899d5..8f52b211f9ac917e2e80f11f7c3136be2a3e65bb 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -2,6 +2,8 @@ * ARM implementation of KVM hooks * * Copyright Christoffer Dall 2009-2010 + * Copyright Mian-M. Hamayun 2013, Virtual Open Systems + * Copyright Alex Bennée 2014, Linaro * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -19,6 +21,7 @@ #include "qom/object.h" #include "qapi/error.h" #include "sysemu/sysemu.h" +#include "sysemu/runstate.h" #include "sysemu/kvm.h" #include "sysemu/kvm_int.h" #include "kvm_arm.h" @@ -28,10 +31,13 @@ #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "exec/address-spaces.h" +#include "exec/gdbstub.h" #include "hw/boards.h" #include "hw/irq.h" #include "qapi/visitor.h" #include "qemu/log.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ghes.h" const KVMCapabilityInfo kvm_arch_required_capabilities[] = { KVM_CAP_LAST_INFO @@ -41,28 +47,54 @@ static bool cap_has_mp_state; static bool cap_has_inject_serror_esr; static bool cap_has_inject_ext_dabt; +/** + * ARMHostCPUFeatures: information about the host CPU (identified + * by asking the host kernel) + */ +typedef struct ARMHostCPUFeatures { + ARMISARegisters isar; + uint64_t features; + uint32_t target; + const char *dtb_compatible; +} ARMHostCPUFeatures; + static ARMHostCPUFeatures arm_host_cpu_features; -int kvm_arm_vcpu_init(CPUState *cs) +/** + * kvm_arm_vcpu_init: + * @cpu: ARMCPU + * + * Initialize (or reinitialize) the VCPU by invoking the + * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature + * bitmask specified in the CPUState. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_init(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); struct kvm_vcpu_init init; init.target = cpu->kvm_target; memcpy(init.features, cpu->kvm_init_features, sizeof(init.features)); - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init); } -int kvm_arm_vcpu_finalize(CPUState *cs, int feature) -{ - return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature); -} - -void kvm_arm_init_serror_injection(CPUState *cs) +/** + * kvm_arm_vcpu_finalize: + * @cpu: ARMCPU + * @feature: feature to finalize + * + * Finalizes the configuration of the specified VCPU feature by + * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring + * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of + * KVM's API documentation. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature) { - cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state, - KVM_CAP_ARM_INJECT_SERROR_ESR); + return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature); } bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, @@ -167,6 +199,260 @@ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray) } } +static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +{ + uint64_t ret; + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; + int err; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + err = ioctl(fd, KVM_GET_ONE_REG, &idreg); + if (err < 0) { + return -1; + } + *pret = ret; + return 0; +} + +static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) +{ + struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; + + assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); + return ioctl(fd, KVM_GET_ONE_REG, &idreg); +} + +static bool kvm_arm_pauth_supported(void) +{ + return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && + kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); +} + +static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) +{ + /* Identify the feature bits corresponding to the host CPU, and + * fill out the ARMHostCPUClass fields accordingly. To do this + * we have to create a scratch VM, create a single CPU inside it, + * and then query that CPU for the relevant ID registers. + */ + int fdarray[3]; + bool sve_supported; + bool pmu_supported = false; + uint64_t features = 0; + int err; + + /* Old kernels may not know about the PREFERRED_TARGET ioctl: however + * we know these will only support creating one kind of guest CPU, + * which is its preferred CPU type. Fortunately these old kernels + * support only a very limited number of CPUs. + */ + static const uint32_t cpus_to_try[] = { + KVM_ARM_TARGET_AEM_V8, + KVM_ARM_TARGET_FOUNDATION_V8, + KVM_ARM_TARGET_CORTEX_A57, + QEMU_KVM_ARM_TARGET_NONE + }; + /* + * target = -1 informs kvm_arm_create_scratch_host_vcpu() + * to use the preferred target + */ + struct kvm_vcpu_init init = { .target = -1, }; + + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + + /* + * Ask for Pointer Authentication if supported, so that we get + * the unsanitized field values for AA64ISAR1_EL1. + */ + if (kvm_arm_pauth_supported()) { + init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + if (kvm_arm_pmu_supported()) { + init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + pmu_supported = true; + } + + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { + return false; + } + + ahcf->target = init.target; + ahcf->dtb_compatible = "arm,arm-v8"; + + err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + ARM64_SYS_REG(3, 0, 0, 4, 0)); + if (unlikely(err < 0)) { + /* + * Before v4.15, the kernel only exposed a limited number of system + * registers, not including any of the interesting AArch64 ID regs. + * For the most part we could leave these fields as zero with minimal + * effect, since this does not affect the values seen by the guest. + * + * However, it could cause problems down the line for QEMU, + * so provide a minimal v8.0 default. + * + * ??? Could read MIDR and use knowledge from cpu64.c. + * ??? 
Could map a page of memory into our temp guest and + * run the tiniest of hand-crafted kernels to extract + * the values seen by the guest. + * ??? Either of these sounds like too much effort just + * to work around running a modern host kernel. + */ + ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + err = 0; + } else { + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, + ARM64_SYS_REG(3, 0, 0, 4, 5)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + ARM64_SYS_REG(3, 0, 0, 5, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + ARM64_SYS_REG(3, 0, 0, 6, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + ARM64_SYS_REG(3, 0, 0, 6, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, + ARM64_SYS_REG(3, 0, 0, 6, 2)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + ARM64_SYS_REG(3, 0, 0, 7, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + ARM64_SYS_REG(3, 0, 0, 7, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, + ARM64_SYS_REG(3, 0, 0, 7, 2)); + + /* + * Note that if AArch32 support is not present in the host, + * the AArch32 sysregs are present to be read, but will + * return UNKNOWN values. This is neither better nor worse + * than skipping the reads and leaving 0, as we must avoid + * considering the values in every case. + */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, + ARM64_SYS_REG(3, 0, 0, 1, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, + ARM64_SYS_REG(3, 0, 0, 1, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM64_SYS_REG(3, 0, 0, 1, 7)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + ARM64_SYS_REG(3, 0, 0, 2, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + ARM64_SYS_REG(3, 0, 0, 2, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + ARM64_SYS_REG(3, 0, 0, 2, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + ARM64_SYS_REG(3, 0, 0, 2, 3)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + ARM64_SYS_REG(3, 0, 0, 2, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + ARM64_SYS_REG(3, 0, 0, 2, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM64_SYS_REG(3, 0, 0, 2, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + ARM64_SYS_REG(3, 0, 0, 2, 7)); + + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + ARM64_SYS_REG(3, 0, 0, 3, 0)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + ARM64_SYS_REG(3, 0, 0, 3, 1)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + ARM64_SYS_REG(3, 0, 0, 3, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + ARM64_SYS_REG(3, 0, 0, 3, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, + ARM64_SYS_REG(3, 0, 0, 3, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, + ARM64_SYS_REG(3, 0, 0, 3, 6)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide 
an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. + */ + if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.dbgdidr = dbgdidr; + } + + if (pmu_supported) { + /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, + ARM64_SYS_REG(3, 3, 9, 12, 0)); + } + + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } + } + + kvm_arm_destroy_scratch_host_vcpu(fdarray); + + if (err < 0) { + return false; + } + + /* + * We can assume any KVM supporting CPU is at least a v8 + * with VFPv4+Neon; this in turn implies most of the other + * feature bits. + */ + features |= 1ULL << ARM_FEATURE_V8; + features |= 1ULL << ARM_FEATURE_NEON; + features |= 1ULL << ARM_FEATURE_AARCH64; + features |= 1ULL << ARM_FEATURE_PMU; + features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; + + ahcf->features = features; + + return true; +} + void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) { CPUARMState *env = &cpu->env; @@ -210,10 +496,10 @@ static void kvm_steal_time_set(Object *obj, bool value, Error **errp) } /* KVM VCPU properties should be prefixed with "kvm-". 
*/ -void kvm_arm_add_vcpu_properties(Object *obj) +void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(obj); CPUARMState *env = &cpu->env; + Object *obj = OBJECT(cpu); if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) { cpu->kvm_adjvtime = true; @@ -271,6 +557,10 @@ int kvm_arch_init(MachineState *ms, KVMState *s) cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + /* Check whether user space can specify guest syndrome value */ + cap_has_inject_serror_esr = + kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR); + if (ms->smp.cpus > 256 && !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) { error_report("Using more than 256 vcpus requires a host kernel " @@ -308,7 +598,13 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } - kvm_arm_init_debug(s); + max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); + hw_watchpoints = g_array_sized_new(true, true, + sizeof(HWWatchpoint), max_hw_wps); + + max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); + hw_breakpoints = g_array_sized_new(true, true, + sizeof(HWBreakpoint), max_hw_bps); return ret; } @@ -469,11 +765,36 @@ static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx) return &cpu->cpreg_values[res - cpu->cpreg_indexes]; } -/* Initialize the ARMCPU cpreg list according to the kernel's +/** + * kvm_arm_reg_syncs_via_cpreg_list: + * @regidx: KVM register index + * + * Return true if this KVM register should be synchronized via the + * cpreg list of arbitrary system registers, false if it is synchronized + * by hand using code in kvm_arch_get/put_registers(). + */ +static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) +{ + switch (regidx & KVM_REG_ARM_COPROC_MASK) { + case KVM_REG_ARM_CORE: + case KVM_REG_ARM64_SVE: + return false; + default: + return true; + } +} + +/** + * kvm_arm_init_cpreg_list: + * @cpu: ARMCPU + * + * Initialize the ARMCPU cpreg list according to the kernel's * definition of what CPU registers it knows about (and throw away * the previous TCG-created cpreg list). + * + * Returns: 0 if success, else < 0 error code */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu) +static int kvm_arm_init_cpreg_list(ARMCPU *cpu) { struct kvm_reg_list rl; struct kvm_reg_list *rlp; @@ -546,6 +867,28 @@ out: return ret; } +/** + * kvm_arm_cpreg_level: + * @regidx: KVM register index + * + * Return the level of this coprocessor/system register. Return value is + * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. + */ +static int kvm_arm_cpreg_level(uint64_t regidx) +{ + /* + * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE. + * If a register should be written less often, you must add it here + * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. + */ + switch (regidx) { + case KVM_REG_ARM_TIMER_CNT: + case KVM_REG_ARM_PTIMER_CNT: + return KVM_PUT_FULL_STATE; + } + return KVM_PUT_RUNTIME_STATE; +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -638,7 +981,7 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* Re-init VCPU so that all registers are set to * their respective reset values. 
*/ - ret = kvm_arm_vcpu_init(CPU(cpu)); + ret = kvm_arm_vcpu_init(cpu); if (ret < 0) { fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret)); abort(); @@ -660,54 +1003,50 @@ void kvm_arm_reset_vcpu(ARMCPU *cpu) /* * Update KVM's MP_STATE based on what QEMU thinks it is */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { .mp_state = (cpu->power_state == PSCI_OFF) ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; - int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); - if (ret) { - fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - return -1; - } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); } - return 0; } /* * Sync the KVM MP_STATE into QEMU */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) +static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state); if (ret) { - fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n", - __func__, ret, strerror(-ret)); - abort(); + return ret; } cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? PSCI_OFF : PSCI_ON; } - return 0; } -void kvm_arm_get_virtual_time(CPUState *cs) +/** + * kvm_arm_get_virtual_time: + * @cpu: ARMCPU + * + * Gets the VCPU's virtual counter and stores it in the KVM CPU state. + */ +static void kvm_arm_get_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (cpu->kvm_vtime_dirty) { return; } - ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to get KVM_REG_ARM_TIMER_CNT"); abort(); @@ -716,16 +1055,21 @@ void kvm_arm_get_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = true; } -void kvm_arm_put_virtual_time(CPUState *cs) +/** + * kvm_arm_put_virtual_time: + * @cpu: ARMCPU + * + * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. + */ +static void kvm_arm_put_virtual_time(ARMCPU *cpu) { - ARMCPU *cpu = ARM_CPU(cs); int ret; if (!cpu->kvm_vtime_dirty) { return; } - ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); + ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime); if (ret) { error_report("Failed to set KVM_REG_ARM_TIMER_CNT"); abort(); @@ -734,7 +1078,15 @@ void kvm_arm_put_virtual_time(CPUState *cs) cpu->kvm_vtime_dirty = false; } -int kvm_put_vcpu_events(ARMCPU *cpu) +/** + * kvm_put_vcpu_events: + * @cpu: ARMCPU + * + * Put VCPU related state to kvm. + * + * Returns: 0 if success else < 0 error code + */ +static int kvm_put_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -763,7 +1115,15 @@ int kvm_put_vcpu_events(ARMCPU *cpu) return ret; } -int kvm_get_vcpu_events(ARMCPU *cpu) +/** + * kvm_get_vcpu_events: + * @cpu: ARMCPU + * + * Get VCPU related state from kvm. 
+ * + * Returns: 0 if success else < 0 error code + */ +static int kvm_get_vcpu_events(ARMCPU *cpu) { CPUARMState *env = &cpu->env; struct kvm_vcpu_events events; @@ -787,6 +1147,63 @@ int kvm_get_vcpu_events(ARMCPU *cpu) return 0; } +#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) +#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) + +/* + * ESR_EL1 + * ISS encoding + * AARCH64: DFSC, bits [5:0] + * AARCH32: + * TTBCR.EAE == 0 + * FS[4] - DFSR[10] + * FS[3:0] - DFSR[3:0] + * TTBCR.EAE == 1 + * FS, bits [5:0] + */ +#define ESR_DFSC(aarch64, lpae, v) \ + ((aarch64 || (lpae)) ? ((v) & 0x3F) \ + : (((v) >> 6) | ((v) & 0x1F))) + +#define ESR_DFSC_EXTABT(aarch64, lpae) \ + ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) + +/** + * kvm_arm_verify_ext_dabt_pending: + * @cpu: ARMCPU + * + * Verify the fault status code wrt the Ext DABT injection + * + * Returns: true if the fault status code is as expected, false otherwise + */ +static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + uint64_t dfsr_val; + + if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { + CPUARMState *env = &cpu->env; + int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); + int lpae = 0; + + if (!aarch64_mode) { + uint64_t ttbcr; + + if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { + lpae = arm_feature(env, ARM_FEATURE_LPAE) + && (ttbcr & TTBCR_EAE); + } + } + /* + * The verification here is based on the DFSC bits + * of the ESR_EL1 reg only + */ + return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == + ESR_DFSC_EXTABT(aarch64_mode, lpae)); + } + return false; +} + void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) { ARMCPU *cpu = ARM_CPU(cs); @@ -801,7 +1218,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) * an IMPLEMENTATION DEFINED exception (for 32-bit EL1) */ if (!arm_feature(env, ARM_FEATURE_AARCH64) && - unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) { + unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) { error_report("Data abort exception with no valid ISS generated by " "guest memory access. 
KVM unable to emulate faulting " @@ -833,7 +1250,7 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) if (run->s.regs.device_irq_level != cpu->device_irq_level) { switched_level = cpu->device_irq_level ^ run->s.regs.device_irq_level; - qemu_mutex_lock_iothread(); + bql_lock(); if (switched_level & KVM_ARM_DEV_EL1_VTIMER) { qemu_set_irq(cpu->gt_timer_outputs[GTIMER_VIRT], @@ -862,41 +1279,39 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) /* We also mark unknown levels as processed to not waste cycles */ cpu->device_irq_level = run->s.regs.device_irq_level; - qemu_mutex_unlock_iothread(); + bql_unlock(); } return MEMTXATTRS_UNSPECIFIED; } -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) +static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state) { - CPUState *cs = opaque; - ARMCPU *cpu = ARM_CPU(cs); + ARMCPU *cpu = opaque; if (running) { if (cpu->kvm_adjvtime) { - kvm_arm_put_virtual_time(cs); + kvm_arm_put_virtual_time(cpu); } } else { if (cpu->kvm_adjvtime) { - kvm_arm_get_virtual_time(cs); + kvm_arm_get_virtual_time(cpu); } } } /** * kvm_arm_handle_dabt_nisv: - * @cs: CPUState + * @cpu: ARMCPU * @esr_iss: ISS encoding (limited) for the exception from Data Abort * ISV bit set to '0b0' -> no valid instruction syndrome * @fault_ipa: faulting address for the synchronous data abort * * Returns: 0 if the exception has been handled, < 0 otherwise */ -static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, +static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss, uint64_t fault_ipa) { - ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; /* * Request KVM to inject the external data abort into the guest @@ -912,7 +1327,7 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, */ events.exception.ext_dabt_pending = 1; /* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */ - if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) { + if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) { env->ext_dabt_raised = 1; return 0; } @@ -925,19 +1340,97 @@ static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss, return -1; } +/** + * kvm_arm_handle_debug: + * @cpu: ARMCPU + * @debug_exit: debug part of the KVM exit structure + * + * Returns: TRUE if the debug exception was handled. + * + * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register + * + * To minimise translating between kernel and user-space the kernel + * ABI just provides user-space with the full exception syndrome + * register value to be decoded in QEMU. + */ +static bool kvm_arm_handle_debug(ARMCPU *cpu, + struct kvm_debug_exit_arch *debug_exit) +{ + int hsr_ec = syn_get_ec(debug_exit->hsr); + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + switch (hsr_ec) { + case EC_SOFTWARESTEP: + if (cs->singlestep_enabled) { + return true; + } else { + /* + * The kernel should have suppressed the guest's ability to + * single step at this point so something has gone wrong. 
+ */ + error_report("%s: guest single-step while debugging unsupported" + " (%"PRIx64", %"PRIx32")", + __func__, env->pc, debug_exit->hsr); + return false; + } + break; + case EC_AA64_BKPT: + if (kvm_find_sw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_BREAKPOINT: + if (find_hw_breakpoint(cs, env->pc)) { + return true; + } + break; + case EC_WATCHPOINT: + { + CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); + if (wp) { + cs->watchpoint_hit = wp; + return true; + } + break; + } + default: + error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", + __func__, debug_exit->hsr, env->pc); + } + + /* If we are not handling the debug exception it must belong to + * the guest. Let's re-use the existing TCG interrupt code to set + * everything up properly. + */ + cs->exception_index = EXCP_BKPT; + env->exception.syndrome = debug_exit->hsr; + env->exception.vaddress = debug_exit->far; + env->exception.target_el = 1; + bql_lock(); + arm_cpu_do_interrupt(cs); + bql_unlock(); + + return false; +} + int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) { + ARMCPU *cpu = ARM_CPU(cs); int ret = 0; switch (run->exit_reason) { case KVM_EXIT_DEBUG: - if (kvm_arm_handle_debug(cs, &run->debug.arch)) { + if (kvm_arm_handle_debug(cpu, &run->debug.arch)) { ret = EXCP_DEBUG; } /* otherwise return to guest */ break; case KVM_EXIT_ARM_NISV: /* External DABT with no valid iss to decode */ - ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss, + ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss, run->arm_nisv.fault_ipa); break; default: @@ -958,12 +1451,47 @@ int kvm_arch_process_async_events(CPUState *cs) return 0; } +/** + * kvm_arm_hw_debug_active: + * @cpu: ARMCPU + * + * Return: TRUE if any hardware breakpoints in use. + */ +static bool kvm_arm_hw_debug_active(ARMCPU *cpu) +{ + return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); +} + +/** + * kvm_arm_copy_hw_debug_data: + * @ptr: kvm_guest_debug_arch structure + * + * Copy the architecture specific debug registers into the + * kvm_guest_debug ioctl structure. + */ +static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr) +{ + int i; + memset(ptr, 0, sizeof(struct kvm_guest_debug_arch)); + + for (i = 0; i < max_hw_wps; i++) { + HWWatchpoint *wp = get_hw_wp(i); + ptr->dbg_wcr[i] = wp->wcr; + ptr->dbg_wvr[i] = wp->wvr; + } + for (i = 0; i < max_hw_bps; i++) { + HWBreakpoint *bp = get_hw_bp(i); + ptr->dbg_bcr[i] = bp->bcr; + ptr->dbg_bvr[i] = bp->bvr; + } +} + void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) { if (kvm_sw_breakpoints_active(cs)) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; } - if (kvm_arm_hw_debug_active(cs)) { + if (kvm_arm_hw_debug_active(ARM_CPU(cs))) { dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW; kvm_arm_copy_hw_debug_data(&dbg->arch); } @@ -1117,3 +1645,782 @@ void kvm_arch_accel_class_init(ObjectClass *oc) object_class_property_set_description(oc, "eager-split-size", "Eager Page Split chunk size for hugepages. 
(default: 0, disabled)"); } + +int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return insert_hw_breakpoint(addr); + break; + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return insert_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) +{ + switch (type) { + case GDB_BREAKPOINT_HW: + return delete_hw_breakpoint(addr); + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_ACCESS: + return delete_hw_watchpoint(addr, len, type); + default: + return -ENOSYS; + } +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ + if (cur_hw_wps > 0) { + g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); + } + if (cur_hw_bps > 0) { + g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); + } +} + +static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr, + const char *name) +{ + int err; + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr); + if (err != 0) { + error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); + return false; + } + + return true; +} + +void kvm_arm_pmu_init(ARMCPU *cpu) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to init PMU"); + abort(); + } +} + +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .addr = (intptr_t)&irq, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + }; + + if (!cpu->has_pmu) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) { + error_report("failed to set irq for PMU"); + abort(); + } +} + +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PVTIME_CTRL, + .attr = KVM_ARM_VCPU_PVTIME_IPA, + .addr = (uint64_t)&ipa, + }; + + if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) { + return; + } + if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) { + error_report("failed to init PVTIME IPA"); + abort(); + } +} + +void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) +{ + bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); + + if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { + if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_steal_time = ON_OFF_AUTO_OFF; + } else { + cpu->kvm_steal_time = ON_OFF_AUTO_ON; + } + } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { + if (!has_steal_time) { + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "on this host"); + return; + } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + /* + * DEN0057A chapter 2 says "This specification only covers + * systems in which the Execution state of the hypervisor + * as well as EL1 of virtual machines is AArch64.". And, + * to ensure that, the smc/hvc calls are only specified as + * smc64/hvc64. 
+ */ + error_setg(errp, "'kvm-steal-time' cannot be enabled " + "for AArch32 guests"); + return; + } + } +} + +bool kvm_arm_aarch32_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); +} + +bool kvm_arm_sve_supported(void) +{ + return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); +} + +QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); + +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) +{ + /* Only call this function if kvm_arm_sve_supported() returns true. */ + static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; + static bool probed; + uint32_t vq = 0; + int i; + + /* + * KVM ensures all host CPUs support the same set of vector lengths. + * So we only need to create the scratch VCPUs once and then cache + * the results. + */ + if (!probed) { + struct kvm_vcpu_init init = { + .target = -1, + .features[0] = (1 << KVM_ARM_VCPU_SVE), + }; + struct kvm_one_reg reg = { + .id = KVM_REG_ARM64_SVE_VLS, + .addr = (uint64_t)&vls[0], + }; + int fdarray[3], ret; + + probed = true; + + if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { + error_report("failed to create scratch VCPU with SVE enabled"); + abort(); + } + ret = ioctl(fdarray[2], KVM_GET_ONE_REG, ®); + kvm_arm_destroy_scratch_host_vcpu(fdarray); + if (ret) { + error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", + strerror(errno)); + abort(); + } + + for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { + if (vls[i]) { + vq = 64 - clz64(vls[i]) + i * 64; + break; + } + } + if (vq > ARM_MAX_VQ) { + warn_report("KVM supports vector lengths larger than " + "QEMU can enable"); + vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); + } + } + + return vls[0]; +} + +static int kvm_arm_sve_set_vls(ARMCPU *cpu) +{ + uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + + return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]); +} + +#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 + +int kvm_arch_init_vcpu(CPUState *cs) +{ + int ret; + uint64_t mpidr; + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t psciver; + + if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || + !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { + error_report("KVM is not supported for this guest CPU type"); + return -EINVAL; + } + + qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu); + + /* Determine init features for this CPU */ + memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); + if (cs->start_powered_off) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; + } + if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { + cpu->psci_version = QEMU_PSCI_VERSION_0_2; + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; + } + if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; + } + if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { + cpu->has_pmu = false; + } + if (cpu->has_pmu) { + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; + } else { + env->features &= ~(1ULL << ARM_FEATURE_PMU); + } + if (cpu_isar_feature(aa64_sve, cpu)) { + assert(kvm_arm_sve_supported()); + cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + if (cpu_isar_feature(aa64_pauth, cpu)) { + cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | + 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); + } + + /* Do KVM_ARM_VCPU_INIT ioctl */ + ret = kvm_arm_vcpu_init(cpu); + if (ret) { + return ret; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arm_sve_set_vls(cpu); + 
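+        /*
+         * KVM's API requires the set of SVE vector lengths to be written
+         * before the SVE feature is finalized just below.
+         */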
if (ret) { + return ret; + } + ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE); + if (ret) { + return ret; + } + } + + /* + * KVM reports the exact PSCI version it is implementing via a + * special sysreg. If it is present, use its contents to determine + * what to report to the guest in the dtb (it is the PSCI version, + * in the same 15-bits major 16-bits minor format that PSCI_VERSION + * returns). + */ + if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { + cpu->psci_version = psciver; + } + + /* + * When KVM is in use, PSCI is emulated in-kernel and not by qemu. + * Currently KVM has its own idea about MPIDR assignment, so we + * override our defaults with what we get from KVM. + */ + ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); + if (ret) { + return ret; + } + cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; + + return kvm_arm_init_cpreg_list(cpu); +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +/* Callers must hold the iothread mutex lock */ +static void kvm_inject_arm_sea(CPUState *c) +{ + ARMCPU *cpu = ARM_CPU(c); + CPUARMState *env = &cpu->env; + uint32_t esr; + bool same_el; + + c->exception_index = EXCP_DATA_ABORT; + env->exception.target_el = 1; + + /* + * Set the DFSC to synchronous external abort and set FnV to not valid, + * this will tell guest the FAR_ELx is UNKNOWN for this abort. + */ + same_el = arm_current_el(env) == env->exception.target_el; + esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); + + env->exception.syndrome = esr; + + arm_cpu_do_interrupt(c); +} + +#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ + KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) + +static int kvm_arch_put_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); +#if HOST_BIG_ENDIAN + uint64_t fp_val[2] = { q[1], q[0] }; + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), + fp_val); +#else + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); +#endif + if (ret) { + return ret; + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. 
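+ * (Note: a single 2048-bit ZREG slice already covers the architectural
+ * maximum SVE vector length, so slice index 0 is always sufficient here.)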
+ */ +static int kvm_arch_put_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t tmp[ARM_MAX_VQ * 2]; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + } + + r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], + DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + + return 0; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + uint64_t val; + uint32_t fpr; + int i, ret; + unsigned int el; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + /* If we are in AArch32 mode then we need to copy the AArch32 regs to the + * AArch64 registers before pushing them out to 64-bit KVM. + */ + if (!is_a64(env)) { + aarch64_sync_32_to_64(env); + } + + for (i = 0; i < 31; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_save_sp(env, 1); + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read(env); + } + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Saved Program State Registers + * + * Before we restore from the banked_spsr[] array we need to + * ensure that any modifications to env->spsr are correctly + * reflected in the banks. 
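+     * (In QEMU, env->spsr holds the SPSR of the current AArch32 mode, so it
+     * is copied back into banked_spsr[] before the loop below writes the
+     * banks out to KVM.)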
+ */ + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->banked_spsr[i] = env->spsr; + } + + /* KVM 0-4 map to QEMU banks 1-5 */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_put_sve(cs); + } else { + ret = kvm_arch_put_fpsimd(cs); + } + if (ret) { + return ret; + } + + fpr = vfp_get_fpsr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + + fpr = vfp_get_fpcr(env); + ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + + write_cpustate_to_list(cpu, true); + + if (!write_list_to_kvmstate(cpu, level)) { + return -EINVAL; + } + + /* + * Setting VCPU events should be triggered after syncing the registers + * to avoid overwriting potential changes made by KVM upon calling + * KVM_SET_VCPU_EVENTS ioctl + */ + ret = kvm_put_vcpu_events(cpu); + if (ret) { + return ret; + } + + return kvm_arm_sync_mpstate_to_kvm(cpu); +} + +static int kvm_arch_get_fpsimd(CPUState *cs) +{ + CPUARMState *env = &ARM_CPU(cs)->env; + int i, ret; + + for (i = 0; i < 32; i++) { + uint64_t *q = aa64_vfp_qreg(env, i); + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); + if (ret) { + return ret; + } else { +#if HOST_BIG_ENDIAN + uint64_t t; + t = q[0], q[0] = q[1], q[1] = t; +#endif + } + } + + return 0; +} + +/* + * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits + * and PREGS and the FFR have a slice size of 256 bits. However we simply hard + * code the slice index to zero for now as it's unlikely we'll need more than + * one slice for quite some time. + */ +static int kvm_arch_get_sve(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + uint64_t *r; + int n, ret; + + for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { + r = &env->vfp.zregs[n].d[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, cpu->sve_max_vq * 2); + } + + for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { + r = &env->vfp.pregs[n].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + } + + r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; + ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); + if (ret) { + return ret; + } + sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); + + return 0; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + uint64_t val; + unsigned int el; + uint32_t fpr; + int i, ret; + + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + for (i = 0; i < 31; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), + &env->xregs[i]); + if (ret) { + return ret; + } + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); + if (ret) { + return ret; + } + + env->aarch64 = ((val & PSTATE_nRW) == 0); + if (is_a64(env)) { + pstate_write(env, val); + } else { + cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); + } + + /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the + * QEMU side we keep the current SP in xregs[31] as well. + */ + aarch64_restore_sp(env, 1); + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); + if (ret) { + return ret; + } + + /* If we are in AArch32 mode then we need to sync the AArch32 regs with the + * incoming AArch64 regs received from 64-bit KVM. + * We must perform this after all of the registers have been acquired from + * the kernel. + */ + if (!is_a64(env)) { + aarch64_sync_64_to_32(env); + } + + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); + if (ret) { + return ret; + } + + /* Fetch the SPSR registers + * + * KVM SPSRs 0-4 map to QEMU banks 1-5 + */ + for (i = 0; i < KVM_NR_SPSR; i++) { + ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), + &env->banked_spsr[i + 1]); + if (ret) { + return ret; + } + } + + el = arm_current_el(env); + if (el > 0 && !is_a64(env)) { + i = bank_number(env->uncached_cpsr & CPSR_M); + env->spsr = env->banked_spsr[i]; + } + + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arch_get_sve(cs); + } else { + ret = kvm_arch_get_fpsimd(cs); + } + if (ret) { + return ret; + } + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpsr(env, fpr); + + ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); + if (ret) { + return ret; + } + vfp_set_fpcr(env, fpr); + + ret = kvm_get_vcpu_events(cpu); + if (ret) { + return ret; + } + + if (!write_kvmstate_to_list(cpu)) { + return -EINVAL; + } + /* Note that it's OK to have registers which aren't in CPUState, + * so we can ignore a failure return here. + */ + write_list_to_cpustate(cpu); + + ret = kvm_arm_sync_mpstate_to_qemu(cpu); + + /* TODO: other registers */ + return ret; +} + +void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) +{ + ram_addr_t ram_addr; + hwaddr paddr; + + assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); + + if (acpi_ghes_present() && addr) { + ram_addr = qemu_ram_addr_from_host(addr); + if (ram_addr != RAM_ADDR_INVALID && + kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { + kvm_hwpoison_page_add(ram_addr); + /* + * If this is a BUS_MCEERR_AR, we know we have been called + * synchronously from the vCPU thread, so we can easily + * synchronize the state and inject an error. + * + * TODO: we currently don't tell the guest at all about + * BUS_MCEERR_AO. In that case we might either be being + * called synchronously from the vCPU thread, or a bit + * later from the main thread, so doing the injection of + * the error would be more complicated. 
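+             * (BUS_MCEERR_AR means "action required": the poisoned page was
+             * accessed synchronously by this vCPU. BUS_MCEERR_AO means
+             * "action optional": the error was detected asynchronously, e.g.
+             * by memory scrubbing.)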
+ */ + if (code == BUS_MCEERR_AR) { + kvm_cpu_synchronize_state(c); + if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { + kvm_inject_arm_sea(c); + } else { + error_report("failed to record the error"); + abort(); + } + } + return; + } + if (code == BUS_MCEERR_AO) { + error_report("Hardware memory error at addr %p for memory used by " + "QEMU itself instead of guest system!", addr); + } + } + + if (code == BUS_MCEERR_AR) { + error_report("Hardware memory error!"); + exit(1); + } +} + +/* C6.6.29 BRK instruction */ +static const uint32_t brk_insn = 0xd4200000; + +int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} + +int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static uint32_t brk; + + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || + brk != brk_insn || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { + return -EINVAL; + } + return 0; +} diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c deleted file mode 100644 index 3c175c93a7a8f29a7a157c9bf059db601f546b7f..0000000000000000000000000000000000000000 --- a/target/arm/kvm64.c +++ /dev/null @@ -1,1290 +0,0 @@ -/* - * ARM implementation of KVM hooks, 64 bit specific code - * - * Copyright Mian-M. Hamayun 2013, Virtual Open Systems - * Copyright Alex Bennée 2014, Linaro - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include -#include - -#include -#include - -#include "qapi/error.h" -#include "cpu.h" -#include "qemu/timer.h" -#include "qemu/error-report.h" -#include "qemu/host-utils.h" -#include "qemu/main-loop.h" -#include "exec/gdbstub.h" -#include "sysemu/runstate.h" -#include "sysemu/kvm.h" -#include "sysemu/kvm_int.h" -#include "kvm_arm.h" -#include "internals.h" -#include "cpu-features.h" -#include "hw/acpi/acpi.h" -#include "hw/acpi/ghes.h" - -static bool have_guest_debug; - -void kvm_arm_init_debug(KVMState *s) -{ - have_guest_debug = kvm_check_extension(s, - KVM_CAP_SET_GUEST_DEBUG); - - max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS); - hw_watchpoints = g_array_sized_new(true, true, - sizeof(HWWatchpoint), max_hw_wps); - - max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS); - hw_breakpoints = g_array_sized_new(true, true, - sizeof(HWBreakpoint), max_hw_bps); - return; -} - -int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return insert_hw_breakpoint(addr); - break; - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return insert_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - -int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type) -{ - switch (type) { - case GDB_BREAKPOINT_HW: - return delete_hw_breakpoint(addr); - case GDB_WATCHPOINT_READ: - case GDB_WATCHPOINT_WRITE: - case GDB_WATCHPOINT_ACCESS: - return delete_hw_watchpoint(addr, len, type); - default: - return -ENOSYS; - } -} - - -void kvm_arch_remove_all_hw_breakpoints(void) -{ - if (cur_hw_wps > 0) { - g_array_remove_range(hw_watchpoints, 0, cur_hw_wps); - } - if (cur_hw_bps > 0) { - g_array_remove_range(hw_breakpoints, 0, cur_hw_bps); - } -} - -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch 
*ptr) -{ - int i; - memset(ptr, 0, sizeof(struct kvm_guest_debug_arch)); - - for (i = 0; i < max_hw_wps; i++) { - HWWatchpoint *wp = get_hw_wp(i); - ptr->dbg_wcr[i] = wp->wcr; - ptr->dbg_wvr[i] = wp->wvr; - } - for (i = 0; i < max_hw_bps; i++) { - HWBreakpoint *bp = get_hw_bp(i); - ptr->dbg_bcr[i] = bp->bcr; - ptr->dbg_bvr[i] = bp->bvr; - } -} - -bool kvm_arm_hw_debug_active(CPUState *cs) -{ - return ((cur_hw_wps > 0) || (cur_hw_bps > 0)); -} - -static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr, - const char *name) -{ - int err; - - err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr); - if (err != 0) { - error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err)); - return false; - } - - return true; -} - -void kvm_arm_pmu_init(CPUState *cs) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_INIT, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to init PMU"); - abort(); - } -} - -void kvm_arm_pmu_set_irq(CPUState *cs, int irq) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .addr = (intptr_t)&irq, - .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - }; - - if (!ARM_CPU(cs)->has_pmu) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) { - error_report("failed to set irq for PMU"); - abort(); - } -} - -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) -{ - struct kvm_device_attr attr = { - .group = KVM_ARM_VCPU_PVTIME_CTRL, - .attr = KVM_ARM_VCPU_PVTIME_IPA, - .addr = (uint64_t)&ipa, - }; - - if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) { - return; - } - if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) { - error_report("failed to init PVTIME IPA"); - abort(); - } -} - -static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) -{ - uint64_t ret; - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; - int err; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - err = ioctl(fd, KVM_GET_ONE_REG, &idreg); - if (err < 0) { - return -1; - } - *pret = ret; - return 0; -} - -static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id) -{ - struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret }; - - assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64); - return ioctl(fd, KVM_GET_ONE_REG, &idreg); -} - -static bool kvm_arm_pauth_supported(void) -{ - return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) && - kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC)); -} - -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) -{ - /* Identify the feature bits corresponding to the host CPU, and - * fill out the ARMHostCPUClass fields accordingly. To do this - * we have to create a scratch VM, create a single CPU inside it, - * and then query that CPU for the relevant ID registers. - */ - int fdarray[3]; - bool sve_supported; - bool pmu_supported = false; - uint64_t features = 0; - int err; - - /* Old kernels may not know about the PREFERRED_TARGET ioctl: however - * we know these will only support creating one kind of guest CPU, - * which is its preferred CPU type. Fortunately these old kernels - * support only a very limited number of CPUs. 
- */ - static const uint32_t cpus_to_try[] = { - KVM_ARM_TARGET_AEM_V8, - KVM_ARM_TARGET_FOUNDATION_V8, - KVM_ARM_TARGET_CORTEX_A57, - QEMU_KVM_ARM_TARGET_NONE - }; - /* - * target = -1 informs kvm_arm_create_scratch_host_vcpu() - * to use the preferred target - */ - struct kvm_vcpu_init init = { .target = -1, }; - - /* - * Ask for SVE if supported, so that we can query ID_AA64ZFR0, - * which is otherwise RAZ. - */ - sve_supported = kvm_arm_sve_supported(); - if (sve_supported) { - init.features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - - /* - * Ask for Pointer Authentication if supported, so that we get - * the unsanitized field values for AA64ISAR1_EL1. - */ - if (kvm_arm_pauth_supported()) { - init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - if (kvm_arm_pmu_supported()) { - init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - pmu_supported = true; - } - - if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { - return false; - } - - ahcf->target = init.target; - ahcf->dtb_compatible = "arm,arm-v8"; - - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, - ARM64_SYS_REG(3, 0, 0, 4, 0)); - if (unlikely(err < 0)) { - /* - * Before v4.15, the kernel only exposed a limited number of system - * registers, not including any of the interesting AArch64 ID regs. - * For the most part we could leave these fields as zero with minimal - * effect, since this does not affect the values seen by the guest. - * - * However, it could cause problems down the line for QEMU, - * so provide a minimal v8.0 default. - * - * ??? Could read MIDR and use knowledge from cpu64.c. - * ??? Could map a page of memory into our temp guest and - * run the tiniest of hand-crafted kernels to extract - * the values seen by the guest. - * ??? Either of these sounds like too much effort just - * to work around running a modern host kernel. - */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ - err = 0; - } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, - ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0, - ARM64_SYS_REG(3, 0, 0, 4, 5)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, - ARM64_SYS_REG(3, 0, 0, 5, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, - ARM64_SYS_REG(3, 0, 0, 5, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, - ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, - ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2, - ARM64_SYS_REG(3, 0, 0, 6, 2)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, - ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, - ARM64_SYS_REG(3, 0, 0, 7, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, - ARM64_SYS_REG(3, 0, 0, 7, 2)); - - /* - * Note that if AArch32 support is not present in the host, - * the AArch32 sysregs are present to be read, but will - * return UNKNOWN values. This is neither better nor worse - * than skipping the reads and leaving 0, as we must avoid - * considering the values in every case. 
- */ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, - ARM64_SYS_REG(3, 0, 0, 1, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, - ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, - ARM64_SYS_REG(3, 0, 0, 1, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, - ARM64_SYS_REG(3, 0, 0, 1, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, - ARM64_SYS_REG(3, 0, 0, 1, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, - ARM64_SYS_REG(3, 0, 0, 1, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, - ARM64_SYS_REG(3, 0, 0, 1, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, - ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, - ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, - ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, - ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, - ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, - ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, - ARM64_SYS_REG(3, 0, 0, 2, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, - ARM64_SYS_REG(3, 0, 0, 2, 7)); - - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, - ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, - ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, - ARM64_SYS_REG(3, 0, 0, 3, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, - ARM64_SYS_REG(3, 0, 0, 3, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1, - ARM64_SYS_REG(3, 0, 0, 3, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5, - ARM64_SYS_REG(3, 0, 0, 3, 6)); - - /* - * DBGDIDR is a bit complicated because the kernel doesn't - * provide an accessor for it in 64-bit mode, which is what this - * scratch VM is in, and there's no architected "64-bit sysreg - * which reads the same as the 32-bit register" the way there is - * for other ID registers. Instead we synthesize a value from the - * AArch64 ID_AA64DFR0, the same way the kernel code in - * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. - * We only do this if the CPU supports AArch32 at EL1. 
- */ - if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { - int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); - int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); - int ctx_cmps = - FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); - int version = 6; /* ARMv8 debug architecture */ - bool has_el3 = - !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); - uint32_t dbgdidr = 0; - - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); - dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); - dbgdidr |= (1 << 15); /* RES1 bit */ - ahcf->isar.dbgdidr = dbgdidr; - } - - if (pmu_supported) { - /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0, - ARM64_SYS_REG(3, 3, 9, 12, 0)); - } - - if (sve_supported) { - /* - * There is a range of kernels between kernel commit 73433762fcae - * and f81cb2c3ad41 which have a bug where the kernel doesn't - * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has - * enabled SVE support, which resulted in an error rather than RAZ. - * So only read the register if we set KVM_ARM_VCPU_SVE above. - */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); - } - } - - kvm_arm_destroy_scratch_host_vcpu(fdarray); - - if (err < 0) { - return false; - } - - /* - * We can assume any KVM supporting CPU is at least a v8 - * with VFPv4+Neon; this in turn implies most of the other - * feature bits. - */ - features |= 1ULL << ARM_FEATURE_V8; - features |= 1ULL << ARM_FEATURE_NEON; - features |= 1ULL << ARM_FEATURE_AARCH64; - features |= 1ULL << ARM_FEATURE_PMU; - features |= 1ULL << ARM_FEATURE_GENERIC_TIMER; - - ahcf->features = features; - - return true; -} - -void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) -{ - bool has_steal_time = kvm_arm_steal_time_supported(); - - if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) { - if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_steal_time = ON_OFF_AUTO_OFF; - } else { - cpu->kvm_steal_time = ON_OFF_AUTO_ON; - } - } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) { - if (!has_steal_time) { - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "on this host"); - return; - } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - /* - * DEN0057A chapter 2 says "This specification only covers - * systems in which the Execution state of the hypervisor - * as well as EL1 of virtual machines is AArch64.". And, - * to ensure that, the smc/hvc calls are only specified as - * smc64/hvc64. - */ - error_setg(errp, "'kvm-steal-time' cannot be enabled " - "for AArch32 guests"); - return; - } - } -} - -bool kvm_arm_aarch32_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT); -} - -bool kvm_arm_sve_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE); -} - -bool kvm_arm_steal_time_supported(void) -{ - return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME); -} - -QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1); - -uint32_t kvm_arm_sve_get_vls(CPUState *cs) -{ - /* Only call this function if kvm_arm_sve_supported() returns true. 
*/ - static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS]; - static bool probed; - uint32_t vq = 0; - int i; - - /* - * KVM ensures all host CPUs support the same set of vector lengths. - * So we only need to create the scratch VCPUs once and then cache - * the results. - */ - if (!probed) { - struct kvm_vcpu_init init = { - .target = -1, - .features[0] = (1 << KVM_ARM_VCPU_SVE), - }; - struct kvm_one_reg reg = { - .id = KVM_REG_ARM64_SVE_VLS, - .addr = (uint64_t)&vls[0], - }; - int fdarray[3], ret; - - probed = true; - - if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) { - error_report("failed to create scratch VCPU with SVE enabled"); - abort(); - } - ret = ioctl(fdarray[2], KVM_GET_ONE_REG, ®); - kvm_arm_destroy_scratch_host_vcpu(fdarray); - if (ret) { - error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s", - strerror(errno)); - abort(); - } - - for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) { - if (vls[i]) { - vq = 64 - clz64(vls[i]) + i * 64; - break; - } - } - if (vq > ARM_MAX_VQ) { - warn_report("KVM supports vector lengths larger than " - "QEMU can enable"); - vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ); - } - } - - return vls[0]; -} - -static int kvm_arm_sve_set_vls(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map }; - - assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); - - return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]); -} - -#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 - -int kvm_arch_init_vcpu(CPUState *cs) -{ - int ret; - uint64_t mpidr; - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t psciver; - - if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE || - !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) { - error_report("KVM is not supported for this guest CPU type"); - return -EINVAL; - } - - qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs); - - /* Determine init features for this CPU */ - memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features)); - if (cs->start_powered_off) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF; - } - if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) { - cpu->psci_version = QEMU_PSCI_VERSION_0_2; - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2; - } - if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT; - } - if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) { - cpu->has_pmu = false; - } - if (cpu->has_pmu) { - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3; - } else { - env->features &= ~(1ULL << ARM_FEATURE_PMU); - } - if (cpu_isar_feature(aa64_sve, cpu)) { - assert(kvm_arm_sve_supported()); - cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE; - } - if (cpu_isar_feature(aa64_pauth, cpu)) { - cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS | - 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC); - } - - /* Do KVM_ARM_VCPU_INIT ioctl */ - ret = kvm_arm_vcpu_init(cs); - if (ret) { - return ret; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arm_sve_set_vls(cs); - if (ret) { - return ret; - } - ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE); - if (ret) { - return ret; - } - } - - /* - * KVM reports the exact PSCI version it is implementing via a - * special sysreg. If it is present, use its contents to determine - * what to report to the guest in the dtb (it is the PSCI version, - * in the same 15-bits major 16-bits minor format that PSCI_VERSION - * returns). 
- */ - if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) { - cpu->psci_version = psciver; - } - - /* - * When KVM is in use, PSCI is emulated in-kernel and not by qemu. - * Currently KVM has its own idea about MPIDR assignment, so we - * override our defaults with what we get from KVM. - */ - ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr); - if (ret) { - return ret; - } - cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK; - - /* Check whether user space can specify guest syndrome value */ - kvm_arm_init_serror_injection(cs); - - return kvm_arm_init_cpreg_list(cpu); -} - -int kvm_arch_destroy_vcpu(CPUState *cs) -{ - return 0; -} - -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx) -{ - /* Return true if the regidx is a register we should synchronize - * via the cpreg_tuples array (ie is not a core or sve reg that - * we sync by hand in kvm_arch_get/put_registers()) - */ - switch (regidx & KVM_REG_ARM_COPROC_MASK) { - case KVM_REG_ARM_CORE: - case KVM_REG_ARM64_SVE: - return false; - default: - return true; - } -} - -typedef struct CPRegStateLevel { - uint64_t regidx; - int level; -} CPRegStateLevel; - -/* All system registers not listed in the following table are assumed to be - * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less - * often, you must add it to this table with a state of either - * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE. - */ -static const CPRegStateLevel non_runtime_cpregs[] = { - { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE }, - { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE }, -}; - -int kvm_arm_cpreg_level(uint64_t regidx) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) { - const CPRegStateLevel *l = &non_runtime_cpregs[i]; - if (l->regidx == regidx) { - return l->level; - } - } - - return KVM_PUT_RUNTIME_STATE; -} - -/* Callers must hold the iothread mutex lock */ -static void kvm_inject_arm_sea(CPUState *c) -{ - ARMCPU *cpu = ARM_CPU(c); - CPUARMState *env = &cpu->env; - uint32_t esr; - bool same_el; - - c->exception_index = EXCP_DATA_ABORT; - env->exception.target_el = 1; - - /* - * Set the DFSC to synchronous external abort and set FnV to not valid, - * this will tell guest the FAR_ELx is UNKNOWN for this abort. - */ - same_el = arm_current_el(env) == env->exception.target_el; - esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10); - - env->exception.syndrome = esr; - - arm_cpu_do_interrupt(c); -} - -#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \ - KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x)) - -static int kvm_arch_put_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); -#if HOST_BIG_ENDIAN - uint64_t fp_val[2] = { q[1], q[0] }; - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), - fp_val); -#else - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); -#endif - if (ret) { - return ret; - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. 
- */ -static int kvm_arch_put_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t tmp[ARM_MAX_VQ * 2]; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - } - - r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0], - DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - - return 0; -} - -int kvm_arch_put_registers(CPUState *cs, int level) -{ - uint64_t val; - uint32_t fpr; - int i, ret; - unsigned int el; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* If we are in AArch32 mode then we need to copy the AArch32 regs to the - * AArch64 registers before pushing them out to 64-bit KVM. - */ - if (!is_a64(env)) { - aarch64_sync_32_to_64(env); - } - - for (i = 0; i < 31; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_save_sp(env, 1); - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */ - if (is_a64(env)) { - val = pstate_read(env); - } else { - val = cpsr_read(env); - } - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Saved Program State Registers - * - * Before we restore from the banked_spsr[] array we need to - * ensure that any modifications to env->spsr are correctly - * reflected in the banks. 
- */ - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->banked_spsr[i] = env->spsr; - } - - /* KVM 0-4 map to QEMU banks 1-5 */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_put_sve(cs); - } else { - ret = kvm_arch_put_fpsimd(cs); - } - if (ret) { - return ret; - } - - fpr = vfp_get_fpsr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - - fpr = vfp_get_fpcr(env); - ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - - write_cpustate_to_list(cpu, true); - - if (!write_list_to_kvmstate(cpu, level)) { - return -EINVAL; - } - - /* - * Setting VCPU events should be triggered after syncing the registers - * to avoid overwriting potential changes made by KVM upon calling - * KVM_SET_VCPU_EVENTS ioctl - */ - ret = kvm_put_vcpu_events(cpu); - if (ret) { - return ret; - } - - kvm_arm_sync_mpstate_to_kvm(cpu); - - return ret; -} - -static int kvm_arch_get_fpsimd(CPUState *cs) -{ - CPUARMState *env = &ARM_CPU(cs)->env; - int i, ret; - - for (i = 0; i < 32; i++) { - uint64_t *q = aa64_vfp_qreg(env, i); - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q); - if (ret) { - return ret; - } else { -#if HOST_BIG_ENDIAN - uint64_t t; - t = q[0], q[0] = q[1], q[1] = t; -#endif - } - } - - return 0; -} - -/* - * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits - * and PREGS and the FFR have a slice size of 256 bits. However we simply hard - * code the slice index to zero for now as it's unlikely we'll need more than - * one slice for quite some time. - */ -static int kvm_arch_get_sve(CPUState *cs) -{ - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - uint64_t *r; - int n, ret; - - for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) { - r = &env->vfp.zregs[n].d[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, cpu->sve_max_vq * 2); - } - - for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) { - r = &env->vfp.pregs[n].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - } - - r = &env->vfp.pregs[FFR_PRED_NUM].p[0]; - ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r); - if (ret) { - return ret; - } - sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8)); - - return 0; -} - -int kvm_arch_get_registers(CPUState *cs) -{ - uint64_t val; - unsigned int el; - uint32_t fpr; - int i, ret; - - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - for (i = 0; i < 31; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]), - &env->xregs[i]); - if (ret) { - return ret; - } - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]); - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val); - if (ret) { - return ret; - } - - env->aarch64 = ((val & PSTATE_nRW) == 0); - if (is_a64(env)) { - pstate_write(env, val); - } else { - cpsr_write(env, val, 0xffffffff, CPSRWriteRaw); - } - - /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. 
On the - * QEMU side we keep the current SP in xregs[31] as well. - */ - aarch64_restore_sp(env, 1); - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc); - if (ret) { - return ret; - } - - /* If we are in AArch32 mode then we need to sync the AArch32 regs with the - * incoming AArch64 regs received from 64-bit KVM. - * We must perform this after all of the registers have been acquired from - * the kernel. - */ - if (!is_a64(env)) { - aarch64_sync_64_to_32(env); - } - - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]); - if (ret) { - return ret; - } - - /* Fetch the SPSR registers - * - * KVM SPSRs 0-4 map to QEMU banks 1-5 - */ - for (i = 0; i < KVM_NR_SPSR; i++) { - ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]), - &env->banked_spsr[i + 1]); - if (ret) { - return ret; - } - } - - el = arm_current_el(env); - if (el > 0 && !is_a64(env)) { - i = bank_number(env->uncached_cpsr & CPSR_M); - env->spsr = env->banked_spsr[i]; - } - - if (cpu_isar_feature(aa64_sve, cpu)) { - ret = kvm_arch_get_sve(cs); - } else { - ret = kvm_arch_get_fpsimd(cs); - } - if (ret) { - return ret; - } - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpsr(env, fpr); - - ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr); - if (ret) { - return ret; - } - vfp_set_fpcr(env, fpr); - - ret = kvm_get_vcpu_events(cpu); - if (ret) { - return ret; - } - - if (!write_kvmstate_to_list(cpu)) { - return -EINVAL; - } - /* Note that it's OK to have registers which aren't in CPUState, - * so we can ignore a failure return here. - */ - write_list_to_cpustate(cpu); - - kvm_arm_sync_mpstate_to_qemu(cpu); - - /* TODO: other registers */ - return ret; -} - -void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) -{ - ram_addr_t ram_addr; - hwaddr paddr; - - assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); - - if (acpi_ghes_present() && addr) { - ram_addr = qemu_ram_addr_from_host(addr); - if (ram_addr != RAM_ADDR_INVALID && - kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { - kvm_hwpoison_page_add(ram_addr); - /* - * If this is a BUS_MCEERR_AR, we know we have been called - * synchronously from the vCPU thread, so we can easily - * synchronize the state and inject an error. - * - * TODO: we currently don't tell the guest at all about - * BUS_MCEERR_AO. In that case we might either be being - * called synchronously from the vCPU thread, or a bit - * later from the main thread, so doing the injection of - * the error would be more complicated. 
- */ - if (code == BUS_MCEERR_AR) { - kvm_cpu_synchronize_state(c); - if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { - kvm_inject_arm_sea(c); - } else { - error_report("failed to record the error"); - abort(); - } - } - return; - } - if (code == BUS_MCEERR_AO) { - error_report("Hardware memory error at addr %p for memory used by " - "QEMU itself instead of guest system!", addr); - } - } - - if (code == BUS_MCEERR_AR) { - error_report("Hardware memory error!"); - exit(1); - } -} - -/* C6.6.29 BRK instruction */ -static const uint32_t brk_insn = 0xd4200000; - -int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) -{ - static uint32_t brk; - - if (have_guest_debug) { - if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || - brk != brk_insn || - cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { - return -EINVAL; - } - return 0; - } else { - error_report("guest debug not supported on this kernel"); - return -EINVAL; - } -} - -/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register - * - * To minimise translating between kernel and user-space the kernel - * ABI just provides user-space with the full exception syndrome - * register value to be decoded in QEMU. - */ - -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) -{ - int hsr_ec = syn_get_ec(debug_exit->hsr); - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - - /* Ensure PC is synchronised */ - kvm_cpu_synchronize_state(cs); - - switch (hsr_ec) { - case EC_SOFTWARESTEP: - if (cs->singlestep_enabled) { - return true; - } else { - /* - * The kernel should have suppressed the guest's ability to - * single step at this point so something has gone wrong. - */ - error_report("%s: guest single-step while debugging unsupported" - " (%"PRIx64", %"PRIx32")", - __func__, env->pc, debug_exit->hsr); - return false; - } - break; - case EC_AA64_BKPT: - if (kvm_find_sw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_BREAKPOINT: - if (find_hw_breakpoint(cs, env->pc)) { - return true; - } - break; - case EC_WATCHPOINT: - { - CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far); - if (wp) { - cs->watchpoint_hit = wp; - return true; - } - break; - } - default: - error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")", - __func__, debug_exit->hsr, env->pc); - } - - /* If we are not handling the debug exception it must belong to - * the guest. Let's re-use the existing TCG interrupt code to set - * everything up properly. - */ - cs->exception_index = EXCP_BKPT; - env->exception.syndrome = debug_exit->hsr; - env->exception.vaddress = debug_exit->far; - env->exception.target_el = 1; - qemu_mutex_lock_iothread(); - arm_cpu_do_interrupt(cs); - qemu_mutex_unlock_iothread(); - - return false; -} - -#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0) -#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2) - -/* - * ESR_EL1 - * ISS encoding - * AARCH64: DFSC, bits [5:0] - * AARCH32: - * TTBCR.EAE == 0 - * FS[4] - DFSR[10] - * FS[3:0] - DFSR[3:0] - * TTBCR.EAE == 1 - * FS, bits [5:0] - */ -#define ESR_DFSC(aarch64, lpae, v) \ - ((aarch64 || (lpae)) ? 
((v) & 0x3F) \ - : (((v) >> 6) | ((v) & 0x1F))) - -#define ESR_DFSC_EXTABT(aarch64, lpae) \ - ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8) - -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs) -{ - uint64_t dfsr_val; - - if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) { - ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; - int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64); - int lpae = 0; - - if (!aarch64_mode) { - uint64_t ttbcr; - - if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) { - lpae = arm_feature(env, ARM_FEATURE_LPAE) - && (ttbcr & TTBCR_EAE); - } - } - /* - * The verification here is based on the DFSC bits - * of the ESR_EL1 reg only - */ - return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) == - ESR_DFSC_EXTABT(aarch64_mode, lpae)); - } - return false; -} diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index 051a0da41c4929c178134f2b2a67622e84ef42b5..cfaa0d9bc71a91dd9cc5b2072f80a93395c94f50 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -12,46 +12,10 @@ #define QEMU_KVM_ARM_H #include "sysemu/kvm.h" -#include "exec/memory.h" -#include "qemu/error-report.h" #define KVM_ARM_VGIC_V2 (1 << 0) #define KVM_ARM_VGIC_V3 (1 << 1) -/** - * kvm_arm_init_debug() - initialize guest debug capabilities - * @s: KVMState - * - * Should be called only once before using guest debug capabilities. - */ -void kvm_arm_init_debug(KVMState *s); - -/** - * kvm_arm_vcpu_init: - * @cs: CPUState - * - * Initialize (or reinitialize) the VCPU by invoking the - * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature - * bitmask specified in the CPUState. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_init(CPUState *cs); - -/** - * kvm_arm_vcpu_finalize: - * @cs: CPUState - * @feature: feature to finalize - * - * Finalizes the configuration of the specified VCPU feature by - * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring - * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of - * KVM's API documentation. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_arm_vcpu_finalize(CPUState *cs, int feature); - /** * kvm_arm_register_device: * @mr: memory region for this device @@ -73,37 +37,6 @@ int kvm_arm_vcpu_finalize(CPUState *cs, int feature); void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, uint64_t attr, int dev_fd, uint64_t addr_ormask); -/** - * kvm_arm_init_cpreg_list: - * @cpu: ARMCPU - * - * Initialize the ARMCPU cpreg list according to the kernel's - * definition of what CPU registers it knows about (and throw away - * the previous TCG-created cpreg list). - * - * Returns: 0 if success, else < 0 error code - */ -int kvm_arm_init_cpreg_list(ARMCPU *cpu); - -/** - * kvm_arm_reg_syncs_via_cpreg_list: - * @regidx: KVM register index - * - * Return true if this KVM register should be synchronized via the - * cpreg list of arbitrary system registers, false if it is synchronized - * by hand using code in kvm_arch_get/put_registers(). - */ -bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx); - -/** - * kvm_arm_cpreg_level: - * @regidx: KVM register index - * - * Return the level of this coprocessor/system register. Return value is - * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE. 
- */ -int kvm_arm_cpreg_level(uint64_t regidx); - /** * write_list_to_kvmstate: * @cpu: ARMCPU @@ -163,34 +96,6 @@ void kvm_arm_cpu_post_load(ARMCPU *cpu); */ void kvm_arm_reset_vcpu(ARMCPU *cpu); -/** - * kvm_arm_init_serror_injection: - * @cs: CPUState - * - * Check whether KVM can set guest SError syndrome. - */ -void kvm_arm_init_serror_injection(CPUState *cs); - -/** - * kvm_get_vcpu_events: - * @cpu: ARMCPU - * - * Get VCPU related state from kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_get_vcpu_events(ARMCPU *cpu); - -/** - * kvm_put_vcpu_events: - * @cpu: ARMCPU - * - * Put VCPU related state to kvm. - * - * Returns: 0 if success else < 0 error code - */ -int kvm_put_vcpu_events(ARMCPU *cpu); - #ifdef CONFIG_KVM /** * kvm_arm_create_scratch_host_vcpu: @@ -222,37 +127,15 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try, */ void kvm_arm_destroy_scratch_host_vcpu(int *fdarray); -/** - * ARMHostCPUFeatures: information about the host CPU (identified - * by asking the host kernel) - */ -typedef struct ARMHostCPUFeatures { - ARMISARegisters isar; - uint64_t features; - uint32_t target; - const char *dtb_compatible; -} ARMHostCPUFeatures; - -/** - * kvm_arm_get_host_cpu_features: - * @ahcf: ARMHostCPUClass to fill in - * - * Probe the capabilities of the host kernel's preferred CPU and fill - * in the ARMHostCPUClass struct accordingly. - * - * Returns true on success and false otherwise. - */ -bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf); - /** * kvm_arm_sve_get_vls: - * @cs: CPUState + * @cpu: ARMCPU * * Get all the SVE vector lengths supported by the KVM host, setting * the bits corresponding to their length in quadwords minus one * (vq - 1) up to ARM_MAX_VQ. Return the resulting map. */ -uint32_t kvm_arm_sve_get_vls(CPUState *cs); +uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu); /** * kvm_arm_set_cpu_features_from_host: @@ -265,12 +148,12 @@ void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu); /** * kvm_arm_add_vcpu_properties: - * @obj: The CPU object to add the properties to + * @cpu: The CPU object to add the properties to * * Add all KVM specific CPU properties to the CPU object. These * are the CPU properties with "kvm-" prefixed names. */ -void kvm_arm_add_vcpu_properties(Object *obj); +void kvm_arm_add_vcpu_properties(ARMCPU *cpu); /** * kvm_arm_steal_time_finalize: @@ -282,14 +165,6 @@ void kvm_arm_add_vcpu_properties(Object *obj); */ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); -/** - * kvm_arm_steal_time_supported: - * - * Returns: true if KVM can enable steal time reporting - * and false otherwise. - */ -bool kvm_arm_steal_time_supported(void); - /** * kvm_arm_aarch32_supported: * @@ -323,57 +198,19 @@ bool kvm_arm_sve_supported(void); */ int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa); -/** - * kvm_arm_sync_mpstate_to_kvm: - * @cpu: ARMCPU - * - * If supported set the KVM MP_STATE based on QEMU's model. - * - * Returns 0 on success and -1 on failure. - */ -int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu); - -/** - * kvm_arm_sync_mpstate_to_qemu: - * @cpu: ARMCPU - * - * If supported get the MP_STATE from KVM and store in QEMU's model. - * - * Returns 0 on success and aborts on failure. - */ -int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu); - -/** - * kvm_arm_get_virtual_time: - * @cs: CPUState - * - * Gets the VCPU's virtual counter and stores it in the KVM CPU state. 
- */ -void kvm_arm_get_virtual_time(CPUState *cs); - -/** - * kvm_arm_put_virtual_time: - * @cs: CPUState - * - * Sets the VCPU's virtual counter to the value stored in the KVM CPU state. - */ -void kvm_arm_put_virtual_time(CPUState *cs); - -void kvm_arm_vm_state_change(void *opaque, bool running, RunState state); - int kvm_arm_vgic_probe(void); -void kvm_arm_pmu_set_irq(CPUState *cs, int irq); -void kvm_arm_pmu_init(CPUState *cs); +void kvm_arm_pmu_init(ARMCPU *cpu); +void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq); /** * kvm_arm_pvtime_init: - * @cs: CPUState + * @cpu: ARMCPU * @ipa: Per-vcpu guest physical base address of the pvtime structures * * Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa. */ -void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); +void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa); int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); @@ -398,11 +235,6 @@ static inline bool kvm_arm_sve_supported(void) return false; } -static inline bool kvm_arm_steal_time_supported(void) -{ - return false; -} - /* * These functions should never actually be called without KVM support. */ @@ -411,7 +243,7 @@ static inline void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu) g_assert_not_reached(); } -static inline void kvm_arm_add_vcpu_properties(Object *obj) +static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu) { g_assert_not_reached(); } @@ -426,17 +258,17 @@ static inline int kvm_arm_vgic_probe(void) g_assert_not_reached(); } -static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq) +static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq) { g_assert_not_reached(); } -static inline void kvm_arm_pmu_init(CPUState *cs) +static inline void kvm_arm_pmu_init(ARMCPU *cpu) { g_assert_not_reached(); } -static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) +static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa) { g_assert_not_reached(); } @@ -446,48 +278,11 @@ static inline void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp) g_assert_not_reached(); } -static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) +static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu) { g_assert_not_reached(); } #endif -/** - * kvm_arm_handle_debug: - * @cs: CPUState - * @debug_exit: debug part of the KVM exit structure - * - * Returns: TRUE if the debug exception was handled. - */ -bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); - -/** - * kvm_arm_hw_debug_active: - * @cs: CPU State - * - * Return: TRUE if any hardware breakpoints in use. - */ -bool kvm_arm_hw_debug_active(CPUState *cs); - -/** - * kvm_arm_copy_hw_debug_data: - * @ptr: kvm_guest_debug_arch structure - * - * Copy the architecture specific debug registers into the - * kvm_guest_debug ioctl structure. 
- */ -struct kvm_guest_debug_arch; -void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr); - -/** - * kvm_arm_verify_ext_dabt_pending: - * @cs: CPUState - * - * Verify the fault status code wrt the Ext DABT injection - * - * Returns: true if the fault status code is as expected, false otherwise - */ -bool kvm_arm_verify_ext_dabt_pending(CPUState *cs); - #endif diff --git a/target/arm/machine.c b/target/arm/machine.c index 9e20b411895e0c05d82a6d9a2c8f5184f346489d..542be14bec2c9a27eeb2894b501698e33f9f52eb 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -49,7 +49,7 @@ static const VMStateDescription vmstate_vfp = { .version_id = 3, .minimum_version_id = 3, .needed = vfp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* For compatibility, store Qn out of Zn here. */ VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2), VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2), @@ -115,7 +115,7 @@ static const VMStateDescription vmstate_iwmmxt = { .version_id = 1, .minimum_version_id = 1, .needed = iwmmxt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16), VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16), VMSTATE_END_OF_LIST() @@ -140,7 +140,7 @@ static const VMStateDescription vmstate_zreg_hi_reg = { .name = "cpu/sve/zreg_hi", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2), VMSTATE_END_OF_LIST() } @@ -150,7 +150,7 @@ static const VMStateDescription vmstate_preg_reg = { .name = "cpu/sve/preg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8), VMSTATE_END_OF_LIST() } @@ -161,7 +161,7 @@ static const VMStateDescription vmstate_sve = { .version_id = 1, .minimum_version_id = 1, .needed = sve_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0, vmstate_zreg_hi_reg, ARMVectorReg), VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0, @@ -174,7 +174,7 @@ static const VMStateDescription vmstate_vreg = { .name = "vreg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(d, ARMVectorReg, ARM_MAX_VQ * 2), VMSTATE_END_OF_LIST() } @@ -196,7 +196,7 @@ static const VMStateDescription vmstate_za = { .version_id = 1, .minimum_version_id = 1, .needed = za_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.zarray, ARMCPU, ARM_MAX_VQ * 16, 0, vmstate_vreg, ARMVectorReg), VMSTATE_END_OF_LIST() @@ -217,7 +217,7 @@ static const VMStateDescription vmstate_serror = { .version_id = 1, .minimum_version_id = 1, .needed = serror_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.serror.pending, ARMCPU), VMSTATE_UINT8(env.serror.has_esr, ARMCPU), VMSTATE_UINT64(env.serror.esr, ARMCPU), @@ -235,7 +235,7 @@ static const VMStateDescription vmstate_irq_line_state = { .version_id = 1, .minimum_version_id = 1, .needed = irq_line_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.irq_line_state, ARMCPU), VMSTATE_END_OF_LIST() } @@ -254,7 +254,7 @@ static const VMStateDescription vmstate_m_faultmask_primask = { .version_id = 1, .minimum_version_id = 1, .needed = 
m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() @@ -289,7 +289,7 @@ static const VMStateDescription vmstate_m_csselr = { .version_id = 1, .minimum_version_id = 1, .needed = m_csselr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS), VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate), VMSTATE_END_OF_LIST() @@ -301,7 +301,7 @@ static const VMStateDescription vmstate_m_scr = { .version_id = 1, .minimum_version_id = 1, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() } @@ -312,7 +312,7 @@ static const VMStateDescription vmstate_m_other_sp = { .version_id = 1, .minimum_version_id = 1, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.other_sp, ARMCPU), VMSTATE_END_OF_LIST() } @@ -331,7 +331,7 @@ static const VMStateDescription vmstate_m_v8m = { .version_id = 1, .minimum_version_id = 1, .needed = m_v8m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS), VMSTATE_END_OF_LIST() @@ -343,7 +343,7 @@ static const VMStateDescription vmstate_m_fp = { .version_id = 1, .minimum_version_id = 1, .needed = vfp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.v7m.fpcar, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.fpccr, ARMCPU, M_REG_NUM_BANKS), VMSTATE_UINT32_ARRAY(env.v7m.fpdscr, ARMCPU, M_REG_NUM_BANKS), @@ -365,7 +365,7 @@ static const VMStateDescription vmstate_m_mve = { .version_id = 1, .minimum_version_id = 1, .needed = mve_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.vpr, ARMCPU), VMSTATE_UINT32(env.v7m.ltpsize, ARMCPU), VMSTATE_END_OF_LIST() @@ -377,7 +377,7 @@ static const VMStateDescription vmstate_m = { .version_id = 4, .minimum_version_id = 4, .needed = m_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU), VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU), @@ -391,7 +391,7 @@ static const VMStateDescription vmstate_m = { VMSTATE_INT32(env.v7m.exception, ARMCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_m_faultmask_primask, &vmstate_m_csselr, &vmstate_m_scr, @@ -416,7 +416,7 @@ static const VMStateDescription vmstate_thumb2ee = { .version_id = 1, .minimum_version_id = 1, .needed = thumb2ee_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.teecr, ARMCPU), VMSTATE_UINT32(env.teehbr, ARMCPU), VMSTATE_END_OF_LIST() @@ -445,7 +445,7 @@ static const VMStateDescription vmstate_pmsav7 = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav7_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0, @@ -474,7 +474,7 @@ static const VMStateDescription vmstate_pmsav7_rnr = { 
.version_id = 1, .minimum_version_id = 1, .needed = pmsav7_rnr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() } @@ -504,7 +504,7 @@ static const VMStateDescription vmstate_pmsav8r = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav8r_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(env.pmsav8.hprbar, ARMCPU, pmsav8r_hdregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav8.hprlar, ARMCPU, @@ -518,7 +518,7 @@ static const VMStateDescription vmstate_pmsav8 = { .version_id = 1, .minimum_version_id = 1, .needed = pmsav8_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion, 0, vmstate_info_uint32, uint32_t), VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion, @@ -527,7 +527,7 @@ static const VMStateDescription vmstate_pmsav8 = { VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmsav8r, NULL } @@ -560,7 +560,7 @@ static const VMStateDescription vmstate_m_security = { .version_id = 1, .minimum_version_id = 1, .needed = m_security_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.v7m.secure, ARMCPU), VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU), VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU), @@ -888,7 +888,7 @@ const VMStateDescription vmstate_arm_cpu = { .post_save = cpu_post_save, .pre_load = cpu_pre_load, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16), VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32), VMSTATE_UINT64(env.pc, ARMCPU), @@ -937,7 +937,7 @@ const VMStateDescription vmstate_arm_cpu = { }, VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vfp, &vmstate_iwmmxt, &vmstate_m, diff --git a/target/arm/meson.build b/target/arm/meson.build index 5d04a8e94f2ebace7dd56ee3c92923b4e1b8d55d..46b5a21eb3140029e3568e9de85b9a075dcecbc6 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -8,7 +8,7 @@ arm_ss.add(files( )) arm_ss.add(zlib) -arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) +arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c')) arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( @@ -28,7 +28,7 @@ arm_system_ss.add(files( subdir('hvf') -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('tcg') else arm_ss.add(files('tcg-stubs.c')) diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 1762b058aecfc0e61e58a50160d0e2bef92787f4..2d4fa8dbcaf99a7301626725831952971b1415ea 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -772,9 +772,9 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, #if !TCG_OVERSIZED_GUEST # error "Unexpected configuration" #endif - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (ptw->out_be) { cur_val = ldq_be_p(host); @@ -788,7 +788,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t 
old_val, } } if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif @@ -1581,6 +1581,12 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, } } +static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw) +{ + uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); + return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); +} + /** * get_phys_addr_lpae: perform one stage of page table walk, LPAE format * @@ -1989,6 +1995,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, xn = extract64(attrs, 54, 1); pxn = extract64(attrs, 53, 1); + if (el == 1 && nv_nv1_enabled(env, ptw)) { + /* + * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page + * descriptor bit 54 holds PXN, 53 is RES0, and the effective value + * of UXN is 0. Similarly for bits 59 and 60 in table descriptors + * (which we have already folded into bits 53 and 54 of attrs). + * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. + * Similarly, APTable[0] from the table descriptor is treated as 0; + * we already folded this into AP[1] and squashing that to 0 does + * the right thing. + */ + pxn = xn; + xn = 0; + ap &= ~1; + } /* * Note that we modified ptw->in_space earlier for NSTable, but * result->f.attrs retains a copy of the original security space. diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index 95454b5b3bb01be2800f0933da43635c9f6fcde5..1a49767479fef03cbf295bc24d4e86f3ba13f761 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -86,6 +86,9 @@ typedef enum { #define ARM_EL_IL (1 << ARM_EL_IL_SHIFT) #define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT) +/* In the Data Abort syndrome */ +#define ARM_EL_VNCR (1 << 13) + static inline uint32_t syn_get_ec(uint32_t syn) { return syn >> ARM_EL_EC_SHIFT; } @@ -256,13 +259,12 @@ static inline uint32_t syn_bxjtrap(int cv, int cond, int rm) (cv << 24) | (cond << 20) | rm; } -static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc, +static inline uint32_t syn_gpc(int s2ptw, int ind, int gpcsc, int vncr, int cm, int s1ptw, int wnr, int fsc) { - /* TODO: FEAT_NV2 adds VNCR */ return (EC_GPC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (s2ptw << 21) - | (ind << 20) | (gpcsc << 14) | (cm << 8) | (s1ptw << 7) - | (wnr << 6) | fsc; + | (ind << 20) | (gpcsc << 14) | (vncr << 13) | (cm << 8) + | (s1ptw << 7) | (wnr << 6) | fsc; } static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc) @@ -295,6 +297,16 @@ static inline uint32_t syn_data_abort_with_iss(int same_el, | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc; } +/* + * Faults due to FEAT_NV2 VNCR_EL2-based accesses report as same-EL + * Data Aborts with the VNCR bit set. + */ +static inline uint32_t syn_data_abort_vncr(int ea, int wnr, int fsc) +{ + return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (1 << ARM_EL_EC_SHIFT) + | ARM_EL_IL | ARM_EL_VNCR | (wnr << 6) | fsc; +} + static inline uint32_t syn_swstep(int same_el, int isv, int ex) { return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT) diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index fcda99e1583d5e4916b7b1fb4a61cdd017b12ed6..5fba2c0f04084b59480ce7637181b1c856aac687 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -1105,6 +1105,16 @@ void aarch64_max_tcg_initfn(Object *obj) u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0); cpu->clidr = u; + /* + * Set CTR_EL0.DIC and IDC to tell the guest it doesn't need to + * do any cache maintenance for data-to-instruction or + * instruction-to-data coherence. (Our cache ops are nops.) 
+ */ + t = cpu->ctr; + t = FIELD_DP64(t, CTR_EL0, IDC, 1); + t = FIELD_DP64(t, CTR_EL0, DIC, 1); + cpu->ctr = t; + t = cpu->isar.id_aa64isar0; t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ @@ -1194,6 +1204,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); /* FEAT_UAO */ t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */ t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */ + t = FIELD_DP64(t, ID_AA64MMFR2, NV, 2); /* FEAT_NV2 */ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */ t = FIELD_DP64(t, ID_AA64MMFR2, AT, 1); /* FEAT_LSE2 */ t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */ diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 8ad84623d37d93a3d56c3f42e770be342b705ff0..198b975f207c008d427a9c4ee7c82ea46aa8b72f 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -809,9 +809,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!return_to_aa64) { env->aarch64 = false; @@ -876,9 +876,9 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) */ aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index a6ebd7571a32f8448de12382d2cbba98561d379a..8e5d35d92275ec59c18c9186562d817f6ec232e8 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -169,6 +169,7 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, CPUARMTBFlags flags = {}; ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); uint64_t tcr = regime_tcr(env, mmu_idx); + uint64_t hcr = arm_hcr_el2_eff(env); uint64_t sctlr; int tbii, tbid; @@ -260,8 +261,10 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, switch (mmu_idx) { case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: - /* TODO: ARMv8.3-NV */ - DP_TBFLAG_A64(flags, UNPRIV, 1); + /* FEAT_NV: NV,NV1 == 1,1 means we don't do UNPRIV accesses */ + if ((hcr & (HCR_NV | HCR_NV1)) != (HCR_NV | HCR_NV1)) { + DP_TBFLAG_A64(flags, UNPRIV, 1); + } break; case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: @@ -285,13 +288,34 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, if (arm_fgt_active(env, el)) { DP_TBFLAG_ANY(flags, FGT_ACTIVE, 1); if (FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], HFGITR_EL2, ERET)) { - DP_TBFLAG_A64(flags, FGT_ERET, 1); + DP_TBFLAG_A64(flags, TRAP_ERET, 1); } if (fgt_svc(env, el)) { DP_TBFLAG_ANY(flags, FGT_SVC, 1); } } + /* + * ERET can also be trapped for FEAT_NV. arm_hcr_el2_eff() takes care + * of "is EL2 enabled" and the NV bit can only be set if FEAT_NV is present. 
+ */ + if (el == 1 && (hcr & HCR_NV)) { + DP_TBFLAG_A64(flags, TRAP_ERET, 1); + DP_TBFLAG_A64(flags, NV, 1); + if (hcr & HCR_NV1) { + DP_TBFLAG_A64(flags, NV1, 1); + } + if (hcr & HCR_NV2) { + DP_TBFLAG_A64(flags, NV2, 1); + if (hcr & HCR_E2H) { + DP_TBFLAG_A64(flags, NV2_MEM_E20, 1); + } + if (env->cp15.sctlr_el[2] & SCTLR_EE) { + DP_TBFLAG_A64(flags, NV2_MEM_BE, 1); + } + } + } + if (cpu_isar_feature(aa64_mte, env_archcpu(env))) { /* * Set MTE_ACTIVE if any access may be Checked, and leave clear diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index a26adb75aa247359ccf8aae40c5aff1aa7cdecba..d1f1e02acc11a8f1964c47e218173c649c51e75a 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -373,8 +373,8 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) bool ts = is_secure && (env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_TS_MASK); bool take_exception; - /* Take the iothread lock as we are going to touch the NVIC */ - qemu_mutex_lock_iothread(); + /* Take the BQL as we are going to touch the NVIC */ + bql_lock(); /* Check the background context had access to the FPU */ if (!v7m_cpacr_pass(env, is_secure, is_priv)) { @@ -428,7 +428,7 @@ void HELPER(v7m_preserve_fp_state)(CPUARMState *env) take_exception = !stacked_ok && armv7m_nvic_can_take_pending_exception(env->nvic); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (take_exception) { raise_exception_ra(env, EXCP_LAZYFP, 0, 1, GETPC()); diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index ea08936a852b17d363ff58f4262edb57cbe3b376..b5ac26061c785969e98a6f49f8c99caa910de632 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -121,6 +121,61 @@ void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue) } } +/* Sign/zero extend */ +uint32_t HELPER(sxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(int8_t)x; + res |= (uint32_t)(int8_t)(x >> 16) << 16; + return res; +} + +static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra) +{ + /* + * Take a division-by-zero exception if necessary; otherwise return + * to get the usual non-trapping division behaviour (result of 0) + */ + if (arm_feature(env, ARM_FEATURE_M) + && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) { + raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra); + } +} + +uint32_t HELPER(uxtb16)(uint32_t x) +{ + uint32_t res; + res = (uint16_t)(uint8_t)x; + res |= (uint32_t)(uint8_t)(x >> 16) << 16; + return res; +} + +int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + if (num == INT_MIN && den == -1) { + return INT_MIN; + } + return num / den; +} + +uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den) +{ + if (den == 0) { + handle_possible_div0_trap(env, GETPC()); + return 0; + } + return num / den; +} + +uint32_t HELPER(rbit)(uint32_t x) +{ + return revbit32(x); +} + uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b) { uint32_t res = a + b; @@ -427,9 +482,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) { uint32_t mask; - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_pre_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); cpsr_write(env, val, mask, CPSRWriteExceptionReturn); @@ -442,9 +497,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) env->regs[15] &= (env->thumb ? 
~1 : ~3); arm_rebuild_hflags(env); - qemu_mutex_lock_iothread(); + bql_lock(); arm_call_el_change_hook(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Access to user mode registers from privileged modes. */ @@ -803,9 +858,9 @@ void HELPER(set_cp_reg)(CPUARMState *env, const void *rip, uint32_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -817,9 +872,9 @@ uint32_t HELPER(get_cp_reg)(CPUARMState *env, const void *rip) uint32_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } @@ -832,9 +887,9 @@ void HELPER(set_cp_reg64)(CPUARMState *env, const void *rip, uint64_t value) const ARMCPRegInfo *ri = rip; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); ri->writefn(env, ri, value); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { ri->writefn(env, ri, value); } @@ -846,9 +901,9 @@ uint64_t HELPER(get_cp_reg64)(CPUARMState *env, const void *rip) uint64_t res; if (ri->type & ARM_CP_IO) { - qemu_mutex_lock_iothread(); + bql_lock(); res = ri->readfn(env, ri); - qemu_mutex_unlock_iothread(); + bql_unlock(); } else { res = ri->readfn(env, ri); } @@ -930,7 +985,14 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) * * Conduit SMC, valid call Trap to EL2 PSCI Call * Conduit SMC, inval call Trap to EL2 Undef insn - * Conduit not SMC Undef insn Undef insn + * Conduit not SMC Undef or trap[1] Undef insn + * + * [1] In this case: + * - if HCR_EL2.NV == 1 we must trap to EL2 + * - if HCR_EL2.NV == 0 then newer architecture revisions permit + * AArch64 (but not AArch32) to trap to EL2 as an IMPDEF choice + * - otherwise we must UNDEF + * We take the IMPDEF choice to always UNDEF if HCR_EL2.NV == 0. */ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state. @@ -944,9 +1006,12 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) : smd_flag && !secure; if (!arm_feature(env, ARM_FEATURE_EL3) && + !(arm_hcr_el2_eff(env) & HCR_NV) && cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) { - /* If we have no EL3 then SMC always UNDEFs and can't be - * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3 + /* + * If we have no EL3 then traditionally SMC always UNDEFs and can't be + * trapped to EL2. For nested virtualization, SMC can be trapped to + * the outer hypervisor. 
PSCI-via-SMC is a sort of ersatz EL3 * firmware within QEMU, and we want an EL2 guest to be able * to forbid its EL1 from making PSCI calls into QEMU's * "firmware" via HCR.TSC, so for these purposes treat diff --git a/target/arm/tcg/psci.c b/target/arm/tcg/psci.c index 6c1239bb9685a00ef7d9ad883ba4c1a0e87ada8b..9080a91d9c696847373bf87b13297e1e057cf9e3 100644 --- a/target/arm/tcg/psci.c +++ b/target/arm/tcg/psci.c @@ -107,7 +107,7 @@ void arm_handle_psci_call(ARMCPU *cpu) } target_cpu = ARM_CPU(target_cpu_state); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); ret = target_cpu->power_state; break; default: diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 4fdd85359e15b005170d97aecbd441290ad58e44..dd5de74ffb792e234f04cd8b488785bb5329b114 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -50,7 +50,15 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn, * ST64BV, or ST64BV0 insns report syndrome info even for stage-1 * faults and regardless of the target EL. */ - if (!(template_syn & ARM_EL_ISV) || target_el != 2 + if (template_syn & ARM_EL_VNCR) { + /* + * FEAT_NV2 faults on accesses via VNCR_EL2 are a special case: + * they are always reported as "same EL", even though we are going + * from EL1 to EL2. + */ + assert(!fi->stage2); + syn = syn_data_abort_vncr(fi->ea, is_write, fsc); + } else if (!(template_syn & ARM_EL_ISV) || target_el != 2 || fi->s1ptw || !fi->stage2) { syn = syn_data_abort_no_iss(same_el, 0, fi->ea, 0, fi->s1ptw, is_write, fsc); @@ -169,6 +177,20 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, int current_el = arm_current_el(env); bool same_el; uint32_t syn, exc, fsr, fsc; + /* + * We know this must be a data or insn abort, and that + * env->exception.syndrome contains the template syndrome set + * up at translate time. So we can check only the VNCR bit + * (and indeed syndrome does not have the EC field in it, + * because we masked that out in disas_set_insn_syndrome()) + */ + bool is_vncr = (mmu_idx != MMU_INST_FETCH) && + (env->exception.syndrome & ARM_EL_VNCR); + + if (is_vncr) { + /* FEAT_NV2 faults on accesses via VNCR_EL2 go to EL2 */ + target_el = 2; + } if (report_as_gpc_exception(cpu, current_el, fi)) { target_el = 3; @@ -177,7 +199,8 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, syn = syn_gpc(fi->stage2 && fi->type == ARMFault_GPCFOnWalk, access_type == MMU_INST_FETCH, - encode_gpcsc(fi), 0, fi->s1ptw, + encode_gpcsc(fi), is_vncr, + 0, fi->s1ptw, access_type == MMU_DATA_STORE, fsc); env->cp15.mfar_el3 = fi->paddr; diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index a2e49c39f9f3caf17b1bc1fdbba6018121962777..27335e8540793af0831ee6bc0da9edef29839d94 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" +#include "exec/exec-all.h" #include "translate.h" #include "translate-a64.h" #include "qemu/log.h" @@ -1605,7 +1606,7 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) if (s->current_el == 0) { return false; } - if (s->fgt_eret) { + if (s->trap_eret) { gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(0), 2); return true; } @@ -1632,7 +1633,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a) return false; } /* The FGT trap takes precedence over an auth trap. */ - if (s->fgt_eret) { + if (s->trap_eret) { gen_exception_insn_el(s, 0, EXCP_UDEF, syn_erettrap(a->m ? 
3 : 2), 2); return true; } @@ -2131,16 +2132,19 @@ static void handle_sys(DisasContext *s, bool isread, crn, crm, op0, op1, op2); const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); bool need_exit_tb = false; + bool nv_trap_to_el2 = false; + bool nv_redirect_reg = false; + bool skip_fp_access_checks = false; + bool nv2_mem_redirect = false; TCGv_ptr tcg_ri = NULL; TCGv_i64 tcg_rt; - uint32_t syndrome; + uint32_t syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); if (crn == 11 || crn == 15) { /* * Check for TIDCP trap, which must take precedence over * the UNDEF for "no such register" etc. */ - syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); switch (s->current_el) { case 0: if (dc_isar_feature(aa64_tidcp1, s)) { @@ -2164,17 +2168,65 @@ static void handle_sys(DisasContext *s, bool isread, return; } + if (s->nv2 && ri->nv2_redirect_offset) { + /* + * Some registers always redirect to memory; some only do so if + * HCR_EL2.NV1 is 0, and some only if NV1 is 1 (these come in + * pairs which share an offset; see the table in R_CSRPQ). + */ + if (ri->nv2_redirect_offset & NV2_REDIR_NV1) { + nv2_mem_redirect = s->nv1; + } else if (ri->nv2_redirect_offset & NV2_REDIR_NO_NV1) { + nv2_mem_redirect = !s->nv1; + } else { + nv2_mem_redirect = true; + } + } + /* Check access permissions */ if (!cp_access_ok(s->current_el, ri, isread)) { - gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); - return; + /* + * FEAT_NV/NV2 handling does not do the usual FP access checks + * for registers only accessible at EL2 (though it *does* do them + * for registers accessible at EL1). + */ + skip_fp_access_checks = true; + if (s->nv2 && (ri->type & ARM_CP_NV2_REDIRECT)) { + /* + * This is one of the few EL2 registers which should redirect + * to the equivalent EL1 register. We do that after running + * the EL2 register's accessfn. + */ + nv_redirect_reg = true; + assert(!nv2_mem_redirect); + } else if (nv2_mem_redirect) { + /* + * NV2 redirect-to-memory takes precedence over trap to EL2 or + * UNDEF to EL1. + */ + } else if (s->nv && arm_cpreg_traps_in_nv(ri)) { + /* + * This register / instruction exists and is an EL2 register, so + * we must trap to EL2 if accessed in nested virtualization EL1 + * instead of UNDEFing. We'll do that after the usual access checks. + * (This makes a difference only for a couple of registers like + * VSTTBR_EL2 where the "UNDEF if NonSecure" should take priority + * over the trap-to-EL2. Most trapped-by-FEAT_NV registers have + * an accessfn which does nothing when called from EL1, because + * the trap-to-EL3 controls which would apply to that register + * at EL2 don't take priority over the FEAT_NV trap-to-EL2.) + */ + nv_trap_to_el2 = true; + } else { + gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); + return; + } } if (ri->accessfn || (ri->fgt && s->fgt_active)) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. 
*/ - syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread); gen_a64_update_pc(s, 0); tcg_ri = tcg_temp_new_ptr(); gen_helper_access_check_cp_reg(tcg_ri, tcg_env, @@ -2189,6 +2241,78 @@ static void handle_sys(DisasContext *s, bool isread, gen_a64_update_pc(s, 0); } + if (!skip_fp_access_checks) { + if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { + return; + } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { + return; + } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { + return; + } + } + + if (nv_trap_to_el2) { + gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); + return; + } + + if (nv_redirect_reg) { + /* + * FEAT_NV2 redirection of an EL2 register to an EL1 register. + * Conveniently in all cases the encoding of the EL1 register is + * identical to the EL2 register except that opc1 is 0. + * Get the reginfo for the EL1 register to use for the actual access. + * We don't use the EL1 register's access function, and + * fine-grained-traps on EL1 also do not apply here. + */ + key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, + crn, crm, op0, 0, op2); + ri = get_arm_cp_reginfo(s->cp_regs, key); + assert(ri); + assert(cp_access_ok(s->current_el, ri, isread)); + /* + * We might not have done an update_pc earlier, so check we don't + * need it. We could support this in future if necessary. + */ + assert(!(ri->type & ARM_CP_RAISES_EXC)); + } + + if (nv2_mem_redirect) { + /* + * This system register is being redirected into an EL2 memory access. + * This means it is not an IO operation, doesn't change hflags, + * and need not end the TB, because it has no side effects. + * + * The access is 64-bit single copy atomic, guaranteed aligned because + * of the definition of VNCR_EL2. Its endianness depends on + * SCTLR_EL2.EE, not on the data endianness of EL1. + * It is done under either the EL2 translation regime or the EL2&0 + * translation regime, depending on HCR_EL2.E2H. It behaves as if + * PSTATE.PAN is 0. + */ + TCGv_i64 ptr = tcg_temp_new_i64(); + MemOp mop = MO_64 | MO_ALIGN | MO_ATOM_IFALIGN; + ARMMMUIdx armmemidx = s->nv2_mem_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2; + int memidx = arm_to_core_mmu_idx(armmemidx); + uint32_t syn; + + mop |= (s->nv2_mem_be ? MO_BE : MO_LE); + + tcg_gen_ld_i64(ptr, tcg_env, offsetof(CPUARMState, cp15.vncr_el2)); + tcg_gen_addi_i64(ptr, ptr, + (ri->nv2_redirect_offset & ~NV2_REDIR_FLAG_MASK)); + tcg_rt = cpu_reg(s, rt); + + syn = syn_data_abort_vncr(0, !isread, 0); + disas_set_insn_syndrome(s, syn); + if (isread) { + tcg_gen_qemu_ld_i64(tcg_rt, ptr, memidx, mop); + } else { + tcg_gen_qemu_st_i64(tcg_rt, ptr, memidx, mop); + } + return; + } + /* Handle special cases first */ switch (ri->type & ARM_CP_SPECIAL_MASK) { case 0: @@ -2204,12 +2328,17 @@ static void handle_sys(DisasContext *s, bool isread, } return; case ARM_CP_CURRENTEL: - /* Reads as current EL value from pstate, which is + { + /* + * Reads as current EL value from pstate, which is * guaranteed to be constant by the tb flags. + * For nested virt we should report EL2. */ + int el = s->nv ? 2 : s->current_el; tcg_rt = cpu_reg(s, rt); - tcg_gen_movi_i64(tcg_rt, s->current_el << 2); + tcg_gen_movi_i64(tcg_rt, el << 2); return; + } case ARM_CP_DC_ZVA: /* Writes clear the aligned block of memory which rt points into. 
*/ if (s->mte_active[0]) { @@ -2267,13 +2396,6 @@ static void handle_sys(DisasContext *s, bool isread, default: g_assert_not_reached(); } - if ((ri->type & ARM_CP_FPU) && !fp_access_check_only(s)) { - return; - } else if ((ri->type & ARM_CP_SVE) && !sve_access_check(s)) { - return; - } else if ((ri->type & ARM_CP_SME) && !sme_access_check(s)) { - return; - } if (ri->type & ARM_CP_IO) { /* I/O operations must end the TB here (whether read or write) */ @@ -13979,7 +14101,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->pstate_il = EX_TBFLAG_ANY(tb_flags, PSTATE__IL); dc->fgt_active = EX_TBFLAG_ANY(tb_flags, FGT_ACTIVE); dc->fgt_svc = EX_TBFLAG_ANY(tb_flags, FGT_SVC); - dc->fgt_eret = EX_TBFLAG_A64(tb_flags, FGT_ERET); + dc->trap_eret = EX_TBFLAG_A64(tb_flags, TRAP_ERET); dc->sve_excp_el = EX_TBFLAG_A64(tb_flags, SVEEXC_EL); dc->sme_excp_el = EX_TBFLAG_A64(tb_flags, SMEEXC_EL); dc->vl = (EX_TBFLAG_A64(tb_flags, VL) + 1) * 16; @@ -13996,6 +14118,11 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); dc->naa = EX_TBFLAG_A64(tb_flags, NAA); + dc->nv = EX_TBFLAG_A64(tb_flags, NV); + dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1); + dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2); + dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); + dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index 3c3bb3431ada27cecb6d5159e32c0bf0600f01d9..93be745cf33103f4acb0519fe4be050ee4d6b575 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -138,12 +138,22 @@ typedef struct DisasContext { bool mve_no_pred; /* True if fine-grained traps are active */ bool fgt_active; - /* True if fine-grained trap on ERET is enabled */ - bool fgt_eret; /* True if fine-grained trap on SVC is enabled */ bool fgt_svc; + /* True if a trap on ERET is enabled (FGT or NV) */ + bool trap_eret; /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */ bool naa; + /* True if FEAT_NV HCR_EL2.NV is enabled */ + bool nv; + /* True if NV enabled and HCR_EL2.NV1 is set */ + bool nv1; + /* True if NV enabled and HCR_EL2.NV2 is set */ + bool nv2; + /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */ + bool nv2_mem_e20; + /* True if NV2 enabled and NV2 RAM accesses are big-endian */ + bool nv2_mem_be; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. @@ -159,6 +169,8 @@ typedef struct DisasContext { int c15_cpar; /* TCG op of the current insn_start. 
*/ TCGOp *insn_start; + /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */ + uint32_t nv2_redirect_offset; } DisasContext; typedef struct DisasCompare { diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 999c010dedb8f3f946d25bf1fc50a7447ae59b9b..f5cbdc4a8c04b52e85ff3fdce276f22c26226a0e 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -160,13 +160,7 @@ static Property avr_cpu_properties[] = { static ObjectClass *avr_cpu_class_by_name(const char *cpu_model) { - ObjectClass *oc; - - oc = object_class_by_name(cpu_model); - if (object_class_dynamic_cast(oc, TYPE_AVR_CPU) == NULL) { - oc = NULL; - } - return oc; + return object_class_by_name(cpu_model); } static void avr_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -368,21 +362,6 @@ typedef struct AVRCPUInfo { } AVRCPUInfo; -static void avr_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - - qemu_printf("%s\n", typename); -} - -void avr_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_AVR_CPU, false); - g_slist_foreach(list, avr_cpu_list_entry, NULL); - g_slist_free(list); -} - #define DEFINE_AVR_CPU_TYPE(model, initfn) \ { \ .parent = TYPE_AVR_CPU, \ diff --git a/target/avr/cpu.h b/target/avr/cpu.h index 7960c5c57a8a539d295cd691daf0eeb3d8657a73..7d5dd42575d7e860cd35f5c64722a6c9fc679a00 100644 --- a/target/avr/cpu.h +++ b/target/avr/cpu.h @@ -184,7 +184,6 @@ static inline void set_avr_feature(CPUAVRState *env, int feature) env->features |= (1U << feature); } -#define cpu_list avr_cpu_list #define cpu_mmu_index avr_cpu_mmu_index static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch) @@ -194,7 +193,6 @@ static inline int avr_cpu_mmu_index(CPUAVRState *env, bool ifetch) void avr_cpu_tcg_init(void); -void avr_cpu_list(void); int cpu_avr_exec(CPUState *cpu); enum { diff --git a/target/avr/machine.c b/target/avr/machine.c index 16f7a3e031d743d5c1c2909ad7e2201f7e8639f4..4402862fb962d9384a79f9e6792727dc1e12bc33 100644 --- a/target/avr/machine.c +++ b/target/avr/machine.c @@ -100,7 +100,7 @@ const VMStateDescription vms_avr_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.pc_w, AVRCPU), VMSTATE_UINT32(env.sp, AVRCPU), VMSTATE_UINT32(env.skip, AVRCPU), diff --git a/target/cris/cpu.c b/target/cris/cpu.c index 675b73ac04f1a0f79863806c697b5036a7899e75..9ba08e8b0caa84e77086c984bb2d2618660134db 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -95,48 +95,8 @@ static ObjectClass *cris_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(CRIS_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_CRIS_CPU)) { - oc = NULL; - } - return oc; -} - -/* Sort alphabetically by VR. 
*/ -static gint cris_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - CRISCPUClass *ccc_a = CRIS_CPU_CLASS(a); - CRISCPUClass *ccc_b = CRIS_CPU_CLASS(b); - - /* */ - if (ccc_a->vr > ccc_b->vr) { - return 1; - } else if (ccc_a->vr < ccc_b->vr) { - return -1; - } else { - return 0; - } -} -static void cris_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename = object_class_get_name(oc); - char *name; - - name = g_strndup(typename, strlen(typename) - strlen(CRIS_CPU_TYPE_SUFFIX)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void cris_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_CRIS_CPU, false); - list = g_slist_sort(list, cris_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, cris_cpu_list_entry, NULL); - g_slist_free(list); + return oc; } static void cris_cpu_realizefn(DeviceState *dev, Error **errp) diff --git a/target/cris/cpu.h b/target/cris/cpu.h index 1be7f90319cf4f193173ad85d86834e69e444bb3..d830dcac5b79c97b37edd21b344c23ffa7fbb655 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -287,7 +287,4 @@ static inline void cpu_get_tb_cpu_state(CPUCRISState *env, vaddr *pc, | X_FLAG | PFIX_FLAG)); } -#define cpu_list cris_cpu_list -void cris_cpu_list(void); - #endif diff --git a/target/cris/machine.c b/target/cris/machine.c index f370f33486dad968a16daabbf941522ee272cbb4..7b9bde872aa49ea92487ac4a2cb9ddcc09d7d295 100644 --- a/target/cris/machine.c +++ b/target/cris/machine.c @@ -26,7 +26,7 @@ static const VMStateDescription vmstate_tlbset = { .name = "cpu/tlbset", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(lo, TLBSet), VMSTATE_UINT32(hi, TLBSet), VMSTATE_END_OF_LIST() @@ -37,7 +37,7 @@ static const VMStateDescription vmstate_cris_env = { .name = "env", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16), VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16), VMSTATE_UINT32(pc, CPUCRISState), @@ -85,7 +85,7 @@ const VMStateDescription vmstate_cris_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU(), VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState), VMSTATE_END_OF_LIST() diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 9d1ffc3b4bb6f7961e384e8c960840963c79cd7e..c0cd739e15e8e0959c744c3208b104cd0ad6754e 100644 --- a/target/hexagon/cpu.c +++ b/target/hexagon/cpu.c @@ -32,26 +32,6 @@ static void hexagon_v69_cpu_init(Object *obj) { } static void hexagon_v71_cpu_init(Object *obj) { } static void hexagon_v73_cpu_init(Object *obj) { } -static void hexagon_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - char *name = g_strdup(object_class_get_name(oc)); - if (g_str_has_suffix(name, HEXAGON_CPU_TYPE_SUFFIX)) { - name[strlen(name) - strlen(HEXAGON_CPU_TYPE_SUFFIX)] = '\0'; - } - qemu_printf(" %s\n", name); - g_free(name); -} - -void hexagon_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_HEXAGON_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, hexagon_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; @@ -63,9 +43,7 @@ static ObjectClass *hexagon_cpu_class_by_name(const char *cpu_model) oc = 
object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_HEXAGON_CPU)) { - return NULL; - } + return oc; } diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index 7d16083c6aa16e84de4814a0354139c664bc4e63..5c11ae344513a21797a60f94e5a60f3b671bdb4d 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -37,9 +37,6 @@ #define CPU_RESOLVING_TYPE TYPE_HEXAGON_CPU -void hexagon_cpu_list(void); -#define cpu_list hexagon_cpu_list - #define MMU_USER_IDX 0 typedef struct { diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 04de1689d7ed23075b4f3e27bf62b45fd78a5eb4..14e17fa9aab36f81891fb774f6d823cce8024fbd 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -110,11 +110,7 @@ void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, CPUHPPAState *env = &cpu->env; cs->exception_index = EXCP_UNALIGN; - if (env->psw & PSW_Q) { - /* ??? Needs tweaking for hppa64. */ - env->cr[CR_IOR] = addr; - env->cr[CR_ISR] = addr >> 32; - } + hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); cpu_loop_exit_restore(cs, retaddr); } @@ -160,38 +156,8 @@ static void hppa_cpu_initfn(Object *obj) static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model) { g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL); - ObjectClass *oc = object_class_by_name(typename); - - if (oc && - !object_class_is_abstract(oc) && - object_class_dynamic_cast(oc, TYPE_HPPA_CPU)) { - return oc; - } - return NULL; -} - -static void hppa_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - CPUClass *cc = CPU_CLASS(oc); - const char *tname = object_class_get_name(oc); - g_autofree char *name = g_strndup(tname, strchr(tname, '-') - tname); - - if (cc->deprecation_note) { - qemu_printf(" %s (deprecated)\n", name); - } else { - qemu_printf(" %s\n", name); - } -} - -void hppa_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_HPPA_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, hppa_cpu_list_entry, NULL); - g_slist_free(list); + return object_class_by_name(typename); } #ifndef CONFIG_USER_ONLY diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 8be45c69c99ab53e5da3d9d6be304e082a14e2e5..6a153405d27b968ef3359d8321c598a37538d6fb 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -385,6 +385,7 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); #ifndef CONFIG_USER_ONLY void hppa_ptlbe(CPUHPPAState *env); hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); +void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled); bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); @@ -402,7 +403,4 @@ G_NORETURN void hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra); #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU -#define cpu_list hppa_cpu_list -void hppa_cpu_list(void); - #endif /* HPPA_CPU_H */ diff --git a/target/hppa/int_helper.c b/target/hppa/int_helper.c index 98e9d688f64070e7b93974c649a1a62df5ed8d23..efe638b36ed9022a386acca15f215e4c516e445b 100644 --- a/target/hppa/int_helper.c +++ b/target/hppa/int_helper.c @@ -84,17 +84,17 @@ void hppa_cpu_alarm_timer(void *opaque) void HELPER(write_eirr)(CPUHPPAState *env, target_ulong val) { env->cr[CR_EIRR] &= ~val; - qemu_mutex_lock_iothread(); + bql_lock(); eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(write_eiem)(CPUHPPAState *env, target_ulong val) { 
env->cr[CR_EIEM] = val; - qemu_mutex_lock_iothread(); + bql_lock(); eval_interrupt(env_archcpu(env)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void hppa_cpu_do_interrupt(CPUState *cs) diff --git a/target/hppa/machine.c b/target/hppa/machine.c index 15cbc5e6d037afe531a76217ab254f758e9bd5ef..211bfcf640712faee15f47cf9c2ef60205aa9b9e 100644 --- a/target/hppa/machine.c +++ b/target/hppa/machine.c @@ -201,7 +201,7 @@ static const VMStateField vmstate_env_fields[] = { VMSTATE_END_OF_LIST() }; -static const VMStateDescription *vmstate_env_subsections[] = { +static const VMStateDescription * const vmstate_env_subsections[] = { &vmstate_tlb, NULL }; diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index 08abd1a9f90ef3036ccd4d6e6efbb96ee2e3c27e..4fcc612754bd90673afef7036830d23955846333 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -55,8 +55,14 @@ hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr) /* I/O address space */ addr = (int32_t)addr; } else { - /* PDC address space */ - addr &= MAKE_64BIT_MASK(0, 24); + /* + * PDC address space: + * Figures H-10 and H-11 of the parisc2.0 spec do not specify + * where to map into the 64-bit PDC address space. + * We map with an offset which equals the 32-bit address, which + * is what can be seen on physical machines too. + */ + addr = (uint32_t)addr; addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4); } return addr; @@ -299,14 +305,8 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return excp == EXCP_DTLB_MISS ? -1 : phys; } -G_NORETURN static void -raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr, - vaddr addr, bool mmu_disabled) +void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled) { - CPUState *cs = env_cpu(env); - - cs->exception_index = excp; - if (env->psw & PSW_Q) { /* * For pa1.x, the offset and space never overlap, and so we @@ -333,16 +333,23 @@ raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr, */ uint64_t b; - cpu_restore_state(cs, retaddr); - - b = env->gr[env->unwind_breg]; + b = env->unwind_breg ? env->gr[env->unwind_breg] : 0; b >>= (env->psw & PSW_W ? 62 : 30); env->cr[CR_IOR] |= b << 62; - - cpu_loop_exit(cs); } } } +} + +G_NORETURN static void +raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr, + vaddr addr, bool mmu_disabled) +{ + CPUState *cs = env_cpu(env); + + cs->exception_index = excp; + hppa_set_ior_and_isr(env, addr, mmu_disabled); + cpu_loop_exit_restore(cs, retaddr); } diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c index 7f607c3afd6718f7fde4c6fef58ca4e75b4690dc..ce15469465e4a7f8db9719c1ea9152a79fa6be8d 100644 --- a/target/hppa/op_helper.c +++ b/target/hppa/op_helper.c @@ -351,11 +351,7 @@ target_ulong HELPER(probe)(CPUHPPAState *env, target_ulong addr, excp = hppa_get_physical_address(env, addr, mmu_idx, 0, &phys, &prot, NULL); if (excp >= 0) { - if (env->psw & PSW_Q) { - /* ??? Needs tweaking for hppa64. 
*/ - env->cr[CR_IOR] = addr; - env->cr[CR_ISR] = addr >> 32; - } + hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx)); if (excp == EXCP_DTLB_MISS) { excp = EXCP_NA_DTLB_MISS; } diff --git a/target/i386/cpu.c b/target/i386/cpu.c index cd16cb893daf8d2a2be94a91f5ad47de787a6080..2524881ce2457671acfc2a636e7f76eb95fe097e 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -24,7 +24,6 @@ #include "qemu/hw-version.h" #include "cpu.h" #include "tcg/helper-tcg.h" -#include "sysemu/reset.h" #include "sysemu/hvf.h" #include "hvf/hvf-i386.h" #include "kvm/kvm_i386.h" @@ -37,6 +36,7 @@ #include "hw/qdev-properties.h" #include "hw/i386/topology.h" #ifndef CONFIG_USER_ONLY +#include "sysemu/reset.h" #include "qapi/qapi-commands-machine-target.h" #include "exec/address-spaces.h" #include "hw/boards.h" @@ -738,7 +738,7 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_0_EDX_FEATURES (CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_KERNEL_FEATURES) #define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ - CPUID_7_1_EAX_FSRC) + CPUID_7_1_EAX_FSRC | CPUID_7_1_EAX_CMPCCXADD) #define TCG_7_1_EDX_FEATURES 0 #define TCG_7_2_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 @@ -1744,8 +1744,7 @@ static char *x86_cpu_class_get_model_name(X86CPUClass *cc) { const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); - return g_strndup(class_name, - strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); + return cpu_model_from_type(class_name); } typedef struct X86CPUVersionDefinition { diff --git a/target/i386/cpu.h b/target/i386/cpu.h index ef987f344cff2409b7746b12f654ed7e3fb560dd..7f0786e8b98f7ae6ec3fc2a1b7c73dfcbce9a4a1 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -1285,6 +1285,7 @@ typedef enum { CC_OP_NB, } CCOp; +QEMU_BUILD_BUG_ON(CC_OP_NB >= 128); typedef struct SegmentCache { uint32_t selector; @@ -2344,13 +2345,13 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, uint64_t status, uint64_t mcg_status, uint64_t addr, uint64_t misc, int flags); -uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); +uint32_t cpu_cc_compute_all(CPUX86State *env1); static inline uint32_t cpu_compute_eflags(CPUX86State *env) { uint32_t eflags = env->eflags; if (tcg_enabled()) { - eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); + eflags |= cpu_cc_compute_all(env) | (env->df & DF_MASK); } return eflags; } diff --git a/target/i386/hvf/README.md b/target/i386/hvf/README.md index 2d33477aca505e08eb899698c8d036273035d2c9..64a8935237c8c209eab6a2f2905f342bc30c67a2 100644 --- a/target/i386/hvf/README.md +++ b/target/i386/hvf/README.md @@ -4,4 +4,4 @@ These sources (and ../hvf-all.c) are adapted from Veertu Inc's vdhh (Veertu Desk 1. Adapt to our current QEMU's `CPUState` structure and `address_space_rw` API; many struct members have been moved around (emulated x86 state, xsave_buf) due to historical differences + QEMU needing to handle more emulation targets. 2. Removal of `apic_page` and hyperv-related functionality. -3. More relaxed use of `qemu_mutex_lock_iothread`. +3. More relaxed use of `bql_lock`. 
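For reference, the rename applied throughout these hunks maps the old iothread-lock helpers one-for-one onto the new BQL names: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become bql_lock()/bql_unlock(), qemu_mutex_iothread_locked() becomes bql_locked(), and QEMU_IOTHREAD_LOCK_GUARD() becomes BQL_LOCK_GUARD(). Below is a minimal sketch of the resulting idiom; it is not part of the patch, the helper names touch_* are hypothetical, and it assumes the declarations remain available via "qemu/main-loop.h".

#include "qemu/osdep.h"
#include "qemu/main-loop.h"

/*
 * Take the BQL only if the caller does not already hold it, the same
 * pattern used by arm_casq_ptw() in the ptw.c hunk above.
 */
static void touch_bql_protected_state(void)   /* hypothetical example */
{
    bool locked = bql_locked();

    if (!locked) {
        bql_lock();
    }
    /* ... access state that must be protected by the BQL ... */
    if (!locked) {
        bql_unlock();
    }
}

/*
 * Scoped variant: BQL_LOCK_GUARD() releases the lock automatically at
 * the end of the enclosing block (cf. xen_set_shared_info() below).
 */
static void touch_bql_protected_state_scoped(void)   /* hypothetical example */
{
    BQL_LOCK_GUARD();
    /* ... BQL held for the rest of this function ... */
}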
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 20b9ca3ef5135afd3d5b0e959568a503d1c72603..11ffdd4c69fdb61ab072e9364d6007c19ce88c39 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -429,9 +429,9 @@ int hvf_vcpu_exec(CPUState *cpu) } vmx_update_tpr(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) { - qemu_mutex_lock_iothread(); + bql_lock(); return EXCP_HLT; } @@ -450,7 +450,7 @@ int hvf_vcpu_exec(CPUState *cpu) rip = rreg(cpu->accel->fd, HV_X86_RIP); env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS); - qemu_mutex_lock_iothread(); + bql_lock(); update_apic_tpr(cpu); current_cpu = cpu; diff --git a/target/i386/kvm/hyperv.c b/target/i386/kvm/hyperv.c index e3ac978648b81cc31fa9f2bd1ba22c3d1ad9a764..6825c89af374ab7319a1bc10724068fffe16e306 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -45,9 +45,9 @@ void hyperv_x86_synic_update(X86CPU *cpu) static void async_synic_update(CPUState *cs, run_on_cpu_data data) { - qemu_mutex_lock_iothread(); + bql_lock(); hyperv_x86_synic_update(X86_CPU(cs)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 4ce80555b45c14fe8f683c1ee6c5bde5db216a9b..76a66246eb728daf3162c85bee75a1403b64d246 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -4713,9 +4713,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) /* Inject NMI */ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected NMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_NMI); if (ret < 0) { @@ -4724,9 +4724,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } } if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; - qemu_mutex_unlock_iothread(); + bql_unlock(); DPRINTF("injected SMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_SMI); if (ret < 0) { @@ -4737,7 +4737,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } if (!kvm_pic_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } /* Force the VCPU out of its inner loop to process any INIT requests @@ -4790,7 +4790,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) DPRINTF("setting tpr\n"); run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -4838,12 +4838,12 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) /* We need to protect the apic state against concurrent accesses from * different threads in case the userspace irqchip is used. 
*/ if (!kvm_irqchip_in_kernel()) { - qemu_mutex_lock_iothread(); + bql_lock(); } cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); if (!kvm_irqchip_in_kernel()) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return cpu_get_mem_attrs(env); } @@ -5277,17 +5277,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) switch (run->exit_reason) { case KVM_EXIT_HLT: DPRINTF("handle_hlt\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_halt(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_SET_TPR: ret = 0; break; case KVM_EXIT_TPR_ACCESS: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_tpr_access(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_FAIL_ENTRY: code = run->fail_entry.hardware_entry_failure_reason; @@ -5313,9 +5313,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; case KVM_EXIT_DEBUG: DPRINTF("kvm_exit_debug\n"); - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_handle_debug(cpu, &run->debug.arch); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case KVM_EXIT_HYPERV: ret = kvm_hv_handle_exit(cpu, &run->hyperv); diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index c0631f9cf439fd580756f716501a31305b778ce0..fc2c2321acd1443752145dc7bb1be87e18fec590 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -403,7 +403,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) /* If the evtchn_upcall_pending flag is cleared, turn the GSI off. */ if (!vi->evtchn_upcall_pending) { - qemu_mutex_lock_iothread(); + bql_lock(); /* * Check again now we have the lock, because it may have been * asserted in the interim. And we don't want to take the lock @@ -413,7 +413,7 @@ void kvm_xen_maybe_deassert_callback(CPUState *cs) X86_CPU(cs)->env.xen_callback_asserted = false; xen_evtchn_set_callback_level(0); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -581,7 +581,7 @@ static int xen_set_shared_info(uint64_t gfn) uint64_t gpa = gfn << TARGET_PAGE_BITS; int i, err; - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); /* * The xen_overlay device tells KVM about it too, since it had to @@ -773,9 +773,9 @@ static bool handle_set_param(struct kvm_xen_exit *exit, X86CPU *cpu, switch (hp.index) { case HVM_PARAM_CALLBACK_IRQ: - qemu_mutex_lock_iothread(); + bql_lock(); err = xen_evtchn_set_callback_param(hp.value); - qemu_mutex_unlock_iothread(); + bql_unlock(); xen_set_long_mode(exit->u.hcall.longmode); break; default: @@ -1408,7 +1408,7 @@ int kvm_xen_soft_reset(void) CPUState *cpu; int err; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); trace_kvm_xen_soft_reset(); @@ -1481,9 +1481,9 @@ static int schedop_shutdown(CPUState *cs, uint64_t arg) break; case SHUTDOWN_soft_reset: - qemu_mutex_lock_iothread(); + bql_lock(); ret = kvm_xen_soft_reset(); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; default: diff --git a/target/i386/machine.c b/target/i386/machine.c index a1041ef828cb2670e913738a44853b7eeadd1e7e..c3ae32081470c23e44f3a1270d7db87bc4277cf5 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -18,7 +18,7 @@ static const VMStateDescription vmstate_segment = { .name = "segment", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(selector, SegmentCache), VMSTATE_UINTTL(base, SegmentCache), VMSTATE_UINT32(limit, SegmentCache), @@ -43,7 +43,7 @@ static const VMStateDescription 
vmstate_xmm_reg = { .name = "xmm_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg), VMSTATE_END_OF_LIST() @@ -59,7 +59,7 @@ static const VMStateDescription vmstate_ymmh_reg = { .name = "ymmh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(2), ZMMReg), VMSTATE_UINT64(ZMM_Q(3), ZMMReg), VMSTATE_END_OF_LIST() @@ -74,7 +74,7 @@ static const VMStateDescription vmstate_zmmh_reg = { .name = "zmmh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(4), ZMMReg), VMSTATE_UINT64(ZMM_Q(5), ZMMReg), VMSTATE_UINT64(ZMM_Q(6), ZMMReg), @@ -92,7 +92,7 @@ static const VMStateDescription vmstate_hi16_zmm_reg = { .name = "hi16_zmm_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(ZMM_Q(0), ZMMReg), VMSTATE_UINT64(ZMM_Q(1), ZMMReg), VMSTATE_UINT64(ZMM_Q(2), ZMMReg), @@ -114,7 +114,7 @@ static const VMStateDescription vmstate_bnd_regs = { .name = "bnd_regs", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(lb, BNDReg), VMSTATE_UINT64(ub, BNDReg), VMSTATE_END_OF_LIST() @@ -128,7 +128,7 @@ static const VMStateDescription vmstate_mtrr_var = { .name = "mtrr_var", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(base, MTRRVar), VMSTATE_UINT64(mask, MTRRVar), VMSTATE_END_OF_LIST() @@ -142,7 +142,7 @@ static const VMStateDescription vmstate_lbr_records_var = { .name = "lbr_records_var", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(from, LBREntry), VMSTATE_UINT64(to, LBREntry), VMSTATE_UINT64(info, LBREntry), @@ -201,7 +201,7 @@ static const VMStateDescription vmstate_fpreg_tmp = { .name = "fpreg_tmp", .post_load = fpreg_post_load, .pre_save = fpreg_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp), VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp), VMSTATE_END_OF_LIST() @@ -210,7 +210,7 @@ static const VMStateDescription vmstate_fpreg_tmp = { static const VMStateDescription vmstate_fpreg = { .name = "fpreg", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp), VMSTATE_END_OF_LIST() } @@ -453,7 +453,7 @@ static const VMStateDescription vmstate_exception_info = { .version_id = 1, .minimum_version_id = 1, .needed = exception_info_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.exception_pending, X86CPU), VMSTATE_UINT8(env.exception_injected, X86CPU), VMSTATE_UINT8(env.exception_has_payload, X86CPU), @@ -475,7 +475,7 @@ static const VMStateDescription vmstate_steal_time_msr = { .version_id = 1, .minimum_version_id = 1, .needed = steal_time_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.steal_time_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -486,7 +486,7 @@ static const VMStateDescription vmstate_async_pf_msr = { .version_id = 1, .minimum_version_id = 1, .needed = async_pf_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.async_pf_en_msr, 
X86CPU), VMSTATE_END_OF_LIST() } @@ -497,7 +497,7 @@ static const VMStateDescription vmstate_async_pf_int_msr = { .version_id = 1, .minimum_version_id = 1, .needed = async_pf_int_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.async_pf_int_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -508,7 +508,7 @@ static const VMStateDescription vmstate_pv_eoi_msr = { .version_id = 1, .minimum_version_id = 1, .needed = pv_eoi_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -519,7 +519,7 @@ static const VMStateDescription vmstate_poll_control_msr = { .version_id = 1, .minimum_version_id = 1, .needed = poll_control_msr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.poll_control_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -538,7 +538,7 @@ static const VMStateDescription vmstate_fpop_ip_dp = { .version_id = 1, .minimum_version_id = 1, .needed = fpop_ip_dp_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16(env.fpop, X86CPU), VMSTATE_UINT64(env.fpip, X86CPU), VMSTATE_UINT64(env.fpdp, X86CPU), @@ -559,7 +559,7 @@ static const VMStateDescription vmstate_msr_tsc_adjust = { .version_id = 1, .minimum_version_id = 1, .needed = tsc_adjust_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.tsc_adjust, X86CPU), VMSTATE_END_OF_LIST() } @@ -578,7 +578,7 @@ static const VMStateDescription vmstate_msr_smi_count = { .version_id = 1, .minimum_version_id = 1, .needed = msr_smi_count_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_smi_count, X86CPU), VMSTATE_END_OF_LIST() } @@ -597,7 +597,7 @@ static const VMStateDescription vmstate_msr_tscdeadline = { .version_id = 1, .minimum_version_id = 1, .needed = tscdeadline_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.tsc_deadline, X86CPU), VMSTATE_END_OF_LIST() } @@ -624,7 +624,7 @@ static const VMStateDescription vmstate_msr_ia32_misc_enable = { .version_id = 1, .minimum_version_id = 1, .needed = misc_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU), VMSTATE_END_OF_LIST() } @@ -635,7 +635,7 @@ static const VMStateDescription vmstate_msr_ia32_feature_control = { .version_id = 1, .minimum_version_id = 1, .needed = feature_control_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU), VMSTATE_END_OF_LIST() } @@ -670,7 +670,7 @@ static const VMStateDescription vmstate_msr_architectural_pmu = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_ctrl, X86CPU), VMSTATE_UINT64(env.msr_global_status, X86CPU), @@ -706,7 +706,7 @@ static const VMStateDescription vmstate_mpx = { .version_id = 1, .minimum_version_id = 1, .needed = mpx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4), VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU), VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU), @@ -728,7 +728,7 @@ static const VMStateDescription vmstate_msr_hyperv_hypercall = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_hypercall_enable_needed, - 
.fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU), VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU), VMSTATE_END_OF_LIST() @@ -748,7 +748,7 @@ static const VMStateDescription vmstate_msr_hyperv_vapic = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_vapic_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_vapic, X86CPU), VMSTATE_END_OF_LIST() } @@ -767,7 +767,7 @@ static const VMStateDescription vmstate_msr_hyperv_time = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_time_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_tsc, X86CPU), VMSTATE_END_OF_LIST() } @@ -792,7 +792,7 @@ static const VMStateDescription vmstate_msr_hyperv_crash = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_crash_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS), VMSTATE_END_OF_LIST() } @@ -815,7 +815,7 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_runtime_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_runtime, X86CPU), VMSTATE_END_OF_LIST() } @@ -855,7 +855,7 @@ static const VMStateDescription vmstate_msr_hyperv_synic = { .minimum_version_id = 1, .needed = hyperv_synic_enable_needed, .post_load = hyperv_synic_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU), VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU), @@ -883,7 +883,7 @@ static const VMStateDescription vmstate_msr_hyperv_stimer = { .version_id = 1, .minimum_version_id = 1, .needed = hyperv_stimer_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU, HV_STIMER_COUNT), VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT), @@ -926,7 +926,7 @@ static const VMStateDescription vmstate_msr_hyperv_reenlightenment = { .minimum_version_id = 1, .needed = hyperv_reenlightenment_enable_needed, .post_load = hyperv_reenlightenment_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU), VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU), VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU), @@ -970,7 +970,7 @@ static const VMStateDescription vmstate_avx512 = { .version_id = 1, .minimum_version_id = 1, .needed = avx512_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS), VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0), #ifdef TARGET_X86_64 @@ -993,7 +993,7 @@ static const VMStateDescription vmstate_xss = { .version_id = 1, .minimum_version_id = 1, .needed = xss_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.xss, X86CPU), VMSTATE_END_OF_LIST() } @@ -1012,7 +1012,7 @@ static const VMStateDescription vmstate_umwait = { .version_id = 1, .minimum_version_id = 1, .needed = umwait_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.umwait, X86CPU), VMSTATE_END_OF_LIST() } @@ -1031,7 +1031,7 @@ static const VMStateDescription 
vmstate_pkru = { .version_id = 1, .minimum_version_id = 1, .needed = pkru_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32(env.pkru, X86CPU), VMSTATE_END_OF_LIST() } @@ -1050,7 +1050,7 @@ static const VMStateDescription vmstate_pkrs = { .version_id = 1, .minimum_version_id = 1, .needed = pkrs_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32(env.pkrs, X86CPU), VMSTATE_END_OF_LIST() } @@ -1070,7 +1070,7 @@ static const VMStateDescription vmstate_tsc_khz = { .version_id = 1, .minimum_version_id = 1, .needed = tsc_khz_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(env.tsc_khz, X86CPU), VMSTATE_END_OF_LIST() } @@ -1090,7 +1090,7 @@ static const VMStateDescription vmstate_vmx_vmcs12 = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_vmcs12_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12, struct kvm_nested_state, KVM_STATE_NESTED_VMX_VMCS_SIZE), @@ -1110,7 +1110,7 @@ static const VMStateDescription vmstate_vmx_shadow_vmcs12 = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_shadow_vmcs12_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12, struct kvm_nested_state, KVM_STATE_NESTED_VMX_VMCS_SIZE), @@ -1131,13 +1131,13 @@ static const VMStateDescription vmstate_vmx_nested_state = { .version_id = 1, .minimum_version_id = 1, .needed = vmx_nested_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state), VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state), VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vmx_vmcs12, &vmstate_vmx_shadow_vmcs12, NULL, @@ -1162,7 +1162,7 @@ static const VMStateDescription vmstate_svm_nested_state = { .version_id = 1, .minimum_version_id = 1, .needed = svm_nested_state_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state), VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12, struct kvm_nested_state, @@ -1232,13 +1232,13 @@ static const VMStateDescription vmstate_kvm_nested_state = { .name = "cpu/kvm_nested_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_U16(flags, struct kvm_nested_state), VMSTATE_U16(format, struct kvm_nested_state), VMSTATE_U32(size, struct kvm_nested_state), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_vmx_nested_state, &vmstate_svm_nested_state, NULL @@ -1251,7 +1251,7 @@ static const VMStateDescription vmstate_nested_state = { .minimum_version_id = 1, .needed = nested_state_needed, .post_load = nested_state_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU, vmstate_kvm_nested_state, struct kvm_nested_state), @@ -1269,7 +1269,7 @@ static const VMStateDescription vmstate_xen_vcpu = { .version_id = 1, .minimum_version_id = 1, .needed = xen_vcpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU), VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU), 
VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU), @@ -1295,7 +1295,7 @@ static const VMStateDescription vmstate_mcg_ext_ctl = { .version_id = 1, .minimum_version_id = 1, .needed = mcg_ext_ctl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1314,7 +1314,7 @@ static const VMStateDescription vmstate_spec_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = spec_ctrl_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.spec_ctrl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1334,7 +1334,7 @@ static const VMStateDescription amd_tsc_scale_msr_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = amd_tsc_scale_msr_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU), VMSTATE_END_OF_LIST() } @@ -1367,7 +1367,7 @@ static const VMStateDescription vmstate_msr_intel_pt = { .version_id = 1, .minimum_version_id = 1, .needed = intel_pt_enable_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU), VMSTATE_UINT64(env.msr_rtit_status, X86CPU), VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU), @@ -1391,7 +1391,7 @@ static const VMStateDescription vmstate_msr_virt_ssbd = { .version_id = 1, .minimum_version_id = 1, .needed = virt_ssbd_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.virt_ssbd, X86CPU), VMSTATE_END_OF_LIST() } @@ -1410,7 +1410,7 @@ static const VMStateDescription vmstate_svm_npt = { .version_id = 1, .minimum_version_id = 1, .needed = svm_npt_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT64(env.nested_cr3, X86CPU), VMSTATE_UINT32(env.nested_pg_mode, X86CPU), VMSTATE_END_OF_LIST() @@ -1430,7 +1430,7 @@ static const VMStateDescription vmstate_svm_guest = { .version_id = 1, .minimum_version_id = 1, .needed = svm_guest_needed, - .fields = (VMStateField[]){ + .fields = (const VMStateField[]){ VMSTATE_UINT32(env.int_ctl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1450,7 +1450,7 @@ static const VMStateDescription vmstate_efer32 = { .version_id = 1, .minimum_version_id = 1, .needed = intel_efer32_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.efer, X86CPU), VMSTATE_END_OF_LIST() } @@ -1470,7 +1470,7 @@ static const VMStateDescription vmstate_msr_tsx_ctrl = { .version_id = 1, .minimum_version_id = 1, .needed = msr_tsx_ctrl_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.tsx_ctrl, X86CPU), VMSTATE_END_OF_LIST() } @@ -1489,7 +1489,7 @@ static const VMStateDescription vmstate_msr_intel_sgx = { .version_id = 1, .minimum_version_id = 1, .needed = intel_sgx_msrs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4), VMSTATE_END_OF_LIST() } @@ -1517,7 +1517,7 @@ static const VMStateDescription vmstate_pdptrs = { .minimum_version_id = 1, .needed = pdptrs_needed, .post_load = pdptrs_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.pdptrs, X86CPU, 4), VMSTATE_END_OF_LIST() } @@ -1536,7 +1536,7 @@ static const VMStateDescription vmstate_msr_xfd = { .version_id = 1, .minimum_version_id = 1, .needed = xfd_msrs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_xfd, X86CPU), 
VMSTATE_UINT64(env.msr_xfd_err, X86CPU), VMSTATE_END_OF_LIST() @@ -1557,7 +1557,7 @@ static const VMStateDescription vmstate_amx_xtile = { .version_id = 1, .minimum_version_id = 1, .needed = amx_xtile_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), VMSTATE_END_OF_LIST() @@ -1578,7 +1578,7 @@ static const VMStateDescription vmstate_arch_lbr = { .version_id = 1, .minimum_version_id = 1, .needed = arch_lbr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.msr_lbr_ctl, X86CPU), VMSTATE_UINT64(env.msr_lbr_depth, X86CPU), VMSTATE_LBR_VARS(env.lbr_records, X86CPU, ARCH_LBR_NR_ENTRIES, 1), @@ -1599,7 +1599,7 @@ static const VMStateDescription vmstate_triple_fault = { .version_id = 1, .minimum_version_id = 1, .needed = triple_fault_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8(env.triple_fault_pending, X86CPU), VMSTATE_END_OF_LIST() } @@ -1611,7 +1611,7 @@ const VMStateDescription vmstate_x86_cpu = { .minimum_version_id = 11, .pre_save = cpu_pre_save, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS), VMSTATE_UINTTL(env.eip, X86CPU), VMSTATE_UINTTL(env.eflags, X86CPU), @@ -1699,7 +1699,7 @@ const VMStateDescription vmstate_x86_cpu = { VMSTATE_END_OF_LIST() /* The above list is not sorted /wrt version numbers, watch out! */ }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_exception_info, &vmstate_async_pf_msr, &vmstate_async_pf_int_msr, diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 6c46101ac1aecafb6e92c64f3c909e6392b64b10..6b2bfd9b9c308fc65368e9a263b7e43035f692b6 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -25,7 +25,7 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -48,14 +48,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) } } while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_iothread(cpu->halt_cond); + qemu_cond_wait_bql(cpu->halt_cond); } qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); nvmm_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 7d752bc5e000d10465f0e5af3d1c05063275e510..cfdca91123c6615722b0ce5a529dc51aa2e71aee 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -399,7 +399,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) uint8_t tpr; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); tpr = cpu_get_apic_tpr(x86_cpu->apic_state); if (tpr != qcpu->tpr) { @@ -462,7 +462,7 @@ nvmm_vcpu_pre_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* @@ -485,9 +485,9 @@ nvmm_vcpu_post_run(CPUState *cpu, struct nvmm_vcpu_exit *exit) tpr = exit->exitstate.cr8; if (qcpu->tpr != tpr) { qcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, qcpu->tpr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -648,7 +648,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, CPUX86State *env 
= cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && @@ -658,7 +658,7 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -721,7 +721,7 @@ nvmm_vcpu_loop(CPUState *cpu) return 0; } - qemu_mutex_unlock_iothread(); + bql_unlock(); cpu_exec_start(cpu); /* @@ -806,16 +806,16 @@ nvmm_vcpu_loop(CPUState *cpu) error_report("NVMM: Unexpected VM exit code 0x%lx [hw=0x%lx]", exit->reason, exit->u.inv.hwcode); nvmm_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); ret = -1; break; } } while (ret == 0); cpu_exec_end(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qatomic_set(&cpu->exit_request, false); diff --git a/target/i386/tcg/cc_helper.c b/target/i386/tcg/cc_helper.c index c310bd842f14c9037089754a921e6dd2950e74da..f76e9cb8cfba41ea2847b369ac0afa06646bce03 100644 --- a/target/i386/tcg/cc_helper.c +++ b/target/i386/tcg/cc_helper.c @@ -220,9 +220,9 @@ target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1, } } -uint32_t cpu_cc_compute_all(CPUX86State *env, int op) +uint32_t cpu_cc_compute_all(CPUX86State *env) { - return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op); + return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, CC_OP); } target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1, @@ -335,7 +335,7 @@ target_ulong helper_read_eflags(CPUX86State *env) { uint32_t eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); eflags |= (env->df & DF_MASK); eflags |= env->eflags & ~(VM_MASK | RF_MASK); return eflags; diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index 2bdbb1bba0f3dd0106e30ae79641bd13e188ea88..426c459412028925de8a0dd65d6556708890c77d 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -26,6 +26,13 @@ * size (X86_SIZE_*) codes used in the manual. There are a few differences * though. * + * Operand sizes + * ------------- + * + * The manual lists d64 ("cannot encode 32-bit size in 64-bit mode") and f64 + * ("cannot encode 16-bit or 32-bit size in 64-bit mode") as modifiers of the + * "v" or "z" sizes. The decoder simply makes them separate operand sizes. + * * Vector operands * --------------- * @@ -44,6 +51,11 @@ * if the difference is expressed via prefixes. Individual instructions * are separated by prefix in the generator functions. * + * There is a custom size "xh" used to address half of a SSE/AVX operand. + * This points to a 64-bit operand for SSE operations, 128-bit operand + * for 256-bit AVX operands, etc. It is used for conversion operations + * such as VCVTPH2PS or VCVTSS2SD. + * * There are a couple cases in which instructions (e.g. MOVD) write the * whole XMM or MM register but are established incorrectly in the manual * as "d" or "q". These have to be fixed for the decoder to work correctly. 
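/*
 * Illustrative sketch (not part of this patch) of the "xh" half-vector
 * operand size described in the comment above, assuming the vector_len()
 * convention used elsewhere in the series (16 bytes for SSE/VEX.L=0,
 * 32 bytes for VEX.L=1).  The helper names are made up for illustration.
 */
#include <assert.h>

static int sketch_vector_len(int vex_l)
{
    return vex_l ? 32 : 16;                 /* mirrors vector_len() in emit.c.inc */
}

static int sketch_xh_len(int vex_l)
{
    return sketch_vector_len(vex_l) / 2;    /* "xh" addresses half the operand */
}

int main(void)
{
    assert(sketch_xh_len(0) == 8);          /* SSE: 64-bit half */
    assert(sketch_xh_len(1) == 16);         /* 256-bit AVX: 128-bit half */
    return 0;
}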
@@ -139,10 +151,13 @@ #define cpuid(feat) .cpuid = X86_FEAT_##feat, #define xchg .special = X86_SPECIAL_Locked, +#define lock .special = X86_SPECIAL_HasLock, #define mmx .special = X86_SPECIAL_MMX, -#define zext0 .special = X86_SPECIAL_ZExtOp0, -#define zext2 .special = X86_SPECIAL_ZExtOp2, +#define op0_Rd .special = X86_SPECIAL_Op0_Rd, +#define op2_Ry .special = X86_SPECIAL_Op2_Ry, #define avx_movx .special = X86_SPECIAL_AVXExtMov, +#define sextT0 .special = X86_SPECIAL_SExtT0, +#define zextT0 .special = X86_SPECIAL_ZExtT0, #define vex1 .vex_class = 1, #define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar, @@ -523,6 +538,28 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = { [0xdd] = X86_OP_ENTRY3(VAESENCLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), [0xde] = X86_OP_ENTRY3(VAESDEC, V,x, H,x, W,x, vex4 cpuid(AES) p_66), [0xdf] = X86_OP_ENTRY3(VAESDECLAST, V,x, H,x, W,x, vex4 cpuid(AES) p_66), + + /* + * REG selects srcdest2 operand, VEX.vvvv selects src3. VEX class not found + * in manual, assumed to be 13 from the VEX.L0 constraint. + */ + [0xe0] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe1] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe2] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe3] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe4] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe5] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe6] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe7] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + + [0xe8] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xe9] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xea] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xeb] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xec] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xed] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xee] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), + [0xef] = X86_OP_ENTRY3(CMPccXADD, M,y, G,y, B,y, vex13 xchg chk(o64) cpuid(CMPCCXADD) p_66), }; /* five rows for no prefix, 66, F3, F2, 66+F2 */ @@ -558,8 +595,8 @@ static const X86OpEntry opcodes_0F38_F0toFF[16][5] = { [5] = { X86_OP_ENTRY3(BZHI, G,y, E,y, B,y, vex13 cpuid(BMI1)), {}, - X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 cpuid(BMI2)), - X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 cpuid(BMI2)), + X86_OP_ENTRY3(PEXT, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)), + X86_OP_ENTRY3(PDEP, G,y, B,y, E,y, vex13 zextT0 cpuid(BMI2)), {}, }, [6] = { @@ -570,10 +607,10 @@ static const X86OpEntry opcodes_0F38_F0toFF[16][5] = { {}, }, [7] = { - X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(BEXTR, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI1)), X86_OP_ENTRY3(SHLX, G,y, E,y, B,y, vex13 cpuid(BMI1)), - X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 cpuid(BMI1)), - X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 cpuid(BMI1)), + X86_OP_ENTRY3(SARX, G,y, E,y, B,y, vex13 sextT0 cpuid(BMI1)), + X86_OP_ENTRY3(SHRX, G,y, E,y, B,y, vex13 zextT0 cpuid(BMI1)), 
{}, }, }; @@ -619,13 +656,13 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x05] = X86_OP_ENTRY3(VPERMILPD_i, V,x, W,x, I,b, vex6 chk(W0) cpuid(AVX) p_66), [0x06] = X86_OP_ENTRY4(VPERM2x128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), - [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), - [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) zext0 p_66), + [0x14] = X86_OP_ENTRY3(PEXTRB, E,b, V,dq, I,b, vex5 cpuid(SSE41) op0_Rd p_66), + [0x15] = X86_OP_ENTRY3(PEXTRW, E,w, V,dq, I,b, vex5 cpuid(SSE41) op0_Rd p_66), [0x16] = X86_OP_ENTRY3(PEXTR, E,y, V,dq, I,b, vex5 cpuid(SSE41) p_66), [0x17] = X86_OP_ENTRY3(VEXTRACTPS, E,d, V,dq, I,b, vex5 cpuid(SSE41) p_66), [0x1d] = X86_OP_ENTRY3(VCVTPS2PH, W,xh, V,x, I,b, vex11 chk(W0) cpuid(F16C) p_66), - [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) zext2 p_66), + [0x20] = X86_OP_ENTRY4(PINSRB, V,dq, H,dq, E,b, vex5 cpuid(SSE41) op2_Ry p_66), [0x21] = X86_OP_GROUP0(VINSERTPS), [0x22] = X86_OP_ENTRY4(PINSR, V,dq, H,dq, E,y, vex5 cpuid(SSE41) p_66), @@ -1091,10 +1128,6 @@ static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod { int modrm = get_modrm(s, env); if ((modrm >> 6) == 3) { - if (s->prefix & PREFIX_LOCK) { - decode->e.gen = gen_illegal; - return 0xff; - } op->n = (modrm & 7); if (type != X86_TYPE_Q && type != X86_TYPE_N) { op->n |= REX_B(s); @@ -1201,6 +1234,8 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, case X86_TYPE_None: /* Implicit or absent */ case X86_TYPE_A: /* Implicit */ case X86_TYPE_F: /* EFLAGS/RFLAGS */ + case X86_TYPE_X: /* string source */ + case X86_TYPE_Y: /* string destination */ break; case X86_TYPE_B: /* VEX.vvvv selects a GPR */ @@ -1316,43 +1351,15 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode, } case X86_TYPE_I: /* Immediate */ - op->unit = X86_OP_IMM; - decode->immediate = insn_get_signed(env, s, op->ot); - break; - case X86_TYPE_J: /* Relative offset for a jump */ op->unit = X86_OP_IMM; decode->immediate = insn_get_signed(env, s, op->ot); - decode->immediate += s->pc - s->cs_base; - if (s->dflag == MO_16) { - decode->immediate &= 0xffff; - } else if (!CODE64(s)) { - decode->immediate &= 0xffffffffu; - } break; case X86_TYPE_L: /* The upper 4 bits of the immediate select a 128-bit register */ op->n = insn_get(env, s, op->ot) >> 4; break; - case X86_TYPE_X: /* string source */ - op->n = -1; - decode->mem = (AddressParts) { - .def_seg = R_DS, - .base = R_ESI, - .index = -1, - }; - break; - - case X86_TYPE_Y: /* string destination */ - op->n = -1; - decode->mem = (AddressParts) { - .def_seg = R_ES, - .base = R_EDI, - .index = -1, - }; - break; - case X86_TYPE_2op: *op = decode->op[0]; break; @@ -1518,6 +1525,9 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid) return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2); case X86_FEAT_SHA_NI: return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI); + + case X86_FEAT_CMPCCXADD: + return (s->cpuid_7_1_eax_features & CPUID_7_1_EAX_CMPCCXADD); } g_assert_not_reached(); } @@ -1677,6 +1687,7 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) bool first = true; X86DecodedInsn decode; X86DecodeFunc decode_func = decode_root; + uint8_t cc_live; s->has_modrm = false; @@ -1830,6 +1841,7 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) } memset(&decode, 0, sizeof(decode)); + decode.cc_op = -1; decode.b = b; if (!decode_insn(s, env, decode_func, &decode)) { goto 
illegal_op; @@ -1869,19 +1881,22 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) if (decode.op[0].has_ea) { s->prefix |= PREFIX_LOCK; } + decode.e.special = X86_SPECIAL_HasLock; + /* fallthrough */ + case X86_SPECIAL_HasLock: break; - case X86_SPECIAL_ZExtOp0: + case X86_SPECIAL_Op0_Rd: assert(decode.op[0].unit == X86_OP_INT); if (!decode.op[0].has_ea) { decode.op[0].ot = MO_32; } break; - case X86_SPECIAL_ZExtOp2: + case X86_SPECIAL_Op2_Ry: assert(decode.op[2].unit == X86_OP_INT); if (!decode.op[2].has_ea) { - decode.op[2].ot = MO_32; + decode.op[2].ot = s->dflag == MO_16 ? MO_32 : s->dflag; } break; @@ -1893,10 +1908,22 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) } break; + case X86_SPECIAL_SExtT0: + case X86_SPECIAL_ZExtT0: + /* Handled in gen_load. */ + assert(decode.op[1].unit == X86_OP_INT); + break; + default: break; } + if (s->prefix & PREFIX_LOCK) { + if (decode.e.special != X86_SPECIAL_HasLock || !decode.op[0].has_ea) { + goto illegal_op; + } + } + if (!validate_vex(s, &decode)) { return; } @@ -1940,9 +1967,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) gen_load_ea(s, &decode.mem, decode.e.vex_class == 12); } if (s->prefix & PREFIX_LOCK) { - if (decode.op[0].unit != X86_OP_INT || !decode.op[0].has_ea) { - goto illegal_op; - } gen_load(s, &decode, 2, s->T1); decode.e.gen(s, env, &decode); } else { @@ -1956,6 +1980,38 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b) decode.e.gen(s, env, &decode); gen_writeback(s, &decode, 0, s->T0); } + + /* + * Write back flags after last memory access. Some newer ALU instructions, as + * well as SSE instructions, write flags in the gen_* function, but that can + * cause incorrect tracking of CC_OP for instructions that write to both memory + * and flags. + */ + if (decode.cc_op != -1) { + if (decode.cc_dst) { + tcg_gen_mov_tl(cpu_cc_dst, decode.cc_dst); + } + if (decode.cc_src) { + tcg_gen_mov_tl(cpu_cc_src, decode.cc_src); + } + if (decode.cc_src2) { + tcg_gen_mov_tl(cpu_cc_src2, decode.cc_src2); + } + if (decode.cc_op == CC_OP_DYNAMIC) { + tcg_gen_mov_i32(cpu_cc_op, decode.cc_op_dynamic); + } + set_cc_op(s, decode.cc_op); + cc_live = cc_op_live[decode.cc_op]; + } else { + cc_live = 0; + } + if (decode.cc_op != CC_OP_DYNAMIC) { + assert(!decode.cc_op_dynamic); + assert(!!decode.cc_dst == !!(cc_live & USES_CC_DST)); + assert(!!decode.cc_src == !!(cc_live & USES_CC_SRC)); + assert(!!decode.cc_src2 == !!(cc_live & USES_CC_SRC2)); + } + return; gp_fault: gen_exception_gpf(s); diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h index e6c904a31929cce614b1000f6d777084291c6d14..15e6bfef4b19321d108effce99226ea830c0e749 100644 --- a/target/i386/tcg/decode-new.h +++ b/target/i386/tcg/decode-new.h @@ -104,6 +104,7 @@ typedef enum X86CPUIDFeature { X86_FEAT_AVX2, X86_FEAT_BMI1, X86_FEAT_BMI2, + X86_FEAT_CMPCCXADD, X86_FEAT_F16C, X86_FEAT_FMA, X86_FEAT_MOVBE, @@ -158,15 +159,27 @@ typedef enum X86InsnCheck { typedef enum X86InsnSpecial { X86_SPECIAL_None, + /* Accepts LOCK prefix; LOCKed operations do not load or writeback operand 0 */ + X86_SPECIAL_HasLock, + /* Always locked if it has a memory operand (XCHG) */ X86_SPECIAL_Locked, /* - * Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw - * in the manual. + * Rd/Mb or Rd/Mw in the manual: register operand 0 is treated as 32 bits + * (and writeback zero-extends it to 64 bits if applicable). 
PREFIX_DATA + * does not trigger 16-bit writeback and, as a side effect, high-byte + * registers are never used. + */ + X86_SPECIAL_Op0_Rd, + + /* + * Ry/Mb in the manual (PINSRB). However, the high bits are never used by + * the instruction in either the register or memory cases; the *real* effect + * of this modifier is that high-byte registers are never used, even without + * a REX prefix. Therefore, PINSRW does not need it despite having Ry/Mw. */ - X86_SPECIAL_ZExtOp0, - X86_SPECIAL_ZExtOp2, + X86_SPECIAL_Op2_Ry, /* * Register operand 2 is extended to full width, while a memory operand @@ -179,6 +192,10 @@ typedef enum X86InsnSpecial { * become P/P/Q/N, and size "x" becomes "q". */ X86_SPECIAL_MMX, + + /* When loaded into s->T0, register operand 1 is zero/sign extended. */ + X86_SPECIAL_SExtT0, + X86_SPECIAL_ZExtT0, } X86InsnSpecial; /* @@ -267,6 +284,10 @@ struct X86DecodedInsn { target_ulong immediate; AddressParts mem; + TCGv cc_dst, cc_src, cc_src2; + TCGv_i32 cc_op_dynamic; + int8_t cc_op; + uint8_t b; }; diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc index 82da5488d475a7afce1db31931da637641f7aa08..6bcf88ecd719281f9f5594524d20bf918810c7ca 100644 --- a/target/i386/tcg/emit.c.inc +++ b/target/i386/tcg/emit.c.inc @@ -55,11 +55,6 @@ static void gen_NM_exception(DisasContext *s) gen_exception(s, EXCP07_PREX); } -static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) -{ - gen_illegal_opcode(s); -} - static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib) { TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib); @@ -237,9 +232,30 @@ static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v) break; case X86_OP_INT: if (op->has_ea) { - gen_op_ld_v(s, op->ot, v, s->A0); + if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) { + gen_op_ld_v(s, op->ot | MO_SIGN, v, s->A0); + } else { + gen_op_ld_v(s, op->ot, v, s->A0); + } + + } else if (op->ot == MO_8 && byte_reg_is_xH(s, op->n)) { + if (v == s->T0 && decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_sextract_tl(v, cpu_regs[op->n - 4], 8, 8); + } else { + tcg_gen_extract_tl(v, cpu_regs[op->n - 4], 8, 8); + } + + } else if (op->ot < MO_TL && v == s->T0 && + (decode->e.special == X86_SPECIAL_SExtT0 || + decode->e.special == X86_SPECIAL_ZExtT0)) { + if (decode->e.special == X86_SPECIAL_SExtT0) { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot | MO_SIGN); + } else { + tcg_gen_ext_tl(v, cpu_regs[op->n], op->ot); + } + } else { - gen_op_mov_v_reg(s, op->ot, v, op->n); + tcg_gen_mov_tl(v, cpu_regs[op->n]); } break; case X86_OP_IMM: @@ -323,6 +339,19 @@ static inline int vector_len(DisasContext *s, X86DecodedInsn *decode) return s->vex_l ? 32 : 16; } +static void prepare_update1_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op) +{ + decode->cc_dst = s->T0; + decode->cc_op = op; +} + +static void prepare_update2_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op) +{ + decode->cc_src = s->T1; + decode->cc_dst = s->T0; + decode->cc_op = op; +} + static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs) { MemOp ot = decode->op[0].ot; @@ -1011,6 +1040,7 @@ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod VSIB_AVX(VPGATHERD, vpgatherd) VSIB_AVX(VPGATHERQ, vpgatherq) +/* ADCX/ADOX do not have memory operands and can use set_cc_op. 
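+ * Instructions that can also write to memory instead record their flag
+ * outputs in decode->cc_dst/cc_src/cc_op, so that disas_insn_new() commits
+ * the flags only after the last memory access (see the comment added there).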
*/ static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op) { int opposite_cc_op; @@ -1073,8 +1103,7 @@ static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) MemOp ot = decode->op[0].ot; tcg_gen_andc_tl(s->T0, s->T1, s->T0); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); + prepare_update1_cc(decode, s, CC_OP_LOGICB + ot); } static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1089,9 +1118,6 @@ static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) * Shifts larger than operand size get zeros. */ tcg_gen_ext8u_tl(s->A0, s->T1); - if (TARGET_LONG_BITS == 64 && ot == MO_32) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } tcg_gen_shr_tl(s->T0, s->T0, s->A0); tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero); @@ -1105,10 +1131,10 @@ static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_movcond_tl(TCG_COND_LEU, s->T1, s->A0, bound, s->T1, zero); tcg_gen_andc_tl(s->T0, s->T0, s->T1); - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_LOGICB + ot); + prepare_update1_cc(decode, s, CC_OP_LOGICB + ot); } +/* BLSI do not have memory operands and can use set_cc_op. */ static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1120,6 +1146,7 @@ static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) set_cc_op(s, CC_OP_BMILGB + ot); } +/* BLSMSK do not have memory operands and can use set_cc_op. */ static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1131,6 +1158,7 @@ static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode set_cc_op(s, CC_OP_BMILGB + ot); } +/* BLSR do not have memory operands and can use set_cc_op. */ static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; @@ -1151,18 +1179,119 @@ static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_ext8u_tl(s->T1, s->T1); + tcg_gen_shl_tl(s->A0, mone, s->T1); + tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); + tcg_gen_andc_tl(s->T0, s->T0, s->A0); /* * Note that since we're using BMILG (in order to get O * cleared) we need to store the inverse into C. 
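 * For BZHI this means CF must end up set exactly when the start position
 * in T1 exceeds the operand-size bound, i.e. when the LEU comparison
 * below is false.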
*/ - tcg_gen_setcond_tl(TCG_COND_LEU, cpu_cc_src, s->T1, bound); + tcg_gen_setcond_tl(TCG_COND_LEU, s->T1, s->T1, bound); + prepare_update2_cc(decode, s, CC_OP_BMILGB + ot); +} + +static void gen_CMPccXADD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) +{ + TCGLabel *label_top = gen_new_label(); + TCGLabel *label_bottom = gen_new_label(); + TCGv oldv = tcg_temp_new(); + TCGv newv = tcg_temp_new(); + TCGv cmpv = tcg_temp_new(); + TCGCond cond; + + TCGv cmp_lhs, cmp_rhs; + MemOp ot, ot_full; + + int jcc_op = (decode->b >> 1) & 7; + static const TCGCond cond_table[8] = { + [JCC_O] = TCG_COND_LT, /* test sign bit by comparing against 0 */ + [JCC_B] = TCG_COND_LTU, + [JCC_Z] = TCG_COND_EQ, + [JCC_BE] = TCG_COND_LEU, + [JCC_S] = TCG_COND_LT, /* test sign bit by comparing against 0 */ + [JCC_P] = TCG_COND_EQ, /* even parity - tests low bit of popcount */ + [JCC_L] = TCG_COND_LT, + [JCC_LE] = TCG_COND_LE, + }; - tcg_gen_shl_tl(s->A0, mone, s->T1); - tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->T1, bound, s->A0, zero); - tcg_gen_andc_tl(s->T0, s->T0, s->A0); + cond = cond_table[jcc_op]; + if (decode->b & 1) { + cond = tcg_invert_cond(cond); + } - gen_op_update1_cc(s); - set_cc_op(s, CC_OP_BMILGB + ot); + ot = decode->op[0].ot; + ot_full = ot | MO_LE; + if (jcc_op >= JCC_S) { + /* + * Sign-extend values before subtracting for S, P (zero/sign extension + * does not matter there) L, LE and their inverses. + */ + ot_full |= MO_SIGN; + } + + /* + * cmpv will be moved to cc_src *after* cpu_regs[] is written back, so use + * tcg_gen_ext_tl instead of gen_ext_tl. + */ + tcg_gen_ext_tl(cmpv, cpu_regs[decode->op[1].n], ot_full); + + /* + * Cmpxchg loop starts here. + * - s->T1: addition operand (from decoder) + * - s->A0: dest address (from decoder) + * - s->cc_srcT: memory operand (lhs for comparison) + * - cmpv: rhs for comparison + */ + gen_set_label(label_top); + gen_op_ld_v(s, ot_full, s->cc_srcT, s->A0); + tcg_gen_sub_tl(s->T0, s->cc_srcT, cmpv); + + /* Compute the comparison result by hand, to avoid clobbering cc_*. */ + switch (jcc_op) { + case JCC_O: + /* (src1 ^ src2) & (src1 ^ dst). newv is only used here for a moment */ + tcg_gen_xor_tl(newv, s->cc_srcT, s->T0); + tcg_gen_xor_tl(s->tmp0, s->cc_srcT, cmpv); + tcg_gen_and_tl(s->tmp0, s->tmp0, newv); + tcg_gen_sextract_tl(s->tmp0, s->tmp0, 0, 8 << ot); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + case JCC_P: + tcg_gen_ext8u_tl(s->tmp0, s->T0); + tcg_gen_ctpop_tl(s->tmp0, s->tmp0); + tcg_gen_andi_tl(s->tmp0, s->tmp0, 1); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + case JCC_S: + tcg_gen_sextract_tl(s->tmp0, s->T0, 0, 8 << ot); + cmp_lhs = s->tmp0, cmp_rhs = tcg_constant_tl(0); + break; + + default: + cmp_lhs = s->cc_srcT, cmp_rhs = cmpv; + break; + } + + /* Compute new value: if condition does not hold, just store back s->cc_srcT */ + tcg_gen_add_tl(newv, s->cc_srcT, s->T1); + tcg_gen_movcond_tl(cond, newv, cmp_lhs, cmp_rhs, newv, s->cc_srcT); + tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, s->cc_srcT, newv, s->mem_index, ot_full); + + /* Exit unconditionally if cmpxchg succeeded. */ + tcg_gen_brcond_tl(TCG_COND_EQ, oldv, s->cc_srcT, label_bottom); + + /* Try again if there was actually a store to make. */ + tcg_gen_brcond_tl(cond, cmp_lhs, cmp_rhs, label_top); + gen_set_label(label_bottom); + + /* Store old value to registers only after a successful store. 
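+ * Whether or not the addition was performed, the register destination of
+ * CMPccXADD always receives the original memory operand, held in s->cc_srcT.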
*/ + gen_writeback(s, decode, 1, s->cc_srcT); + + decode->cc_dst = s->T0; + decode->cc_src = cmpv; + decode->cc_op = CC_OP_SUBB + ot; } static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1242,9 +1371,7 @@ static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod static void gen_MASKMOV(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - tcg_gen_mov_tl(s->A0, cpu_regs[R_EDI]); - gen_extu(s->aflag, s->A0); - gen_add_A0_ds_seg(s); + gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_DS, s->override); if (s->prefix & PREFIX_DATA) { gen_helper_maskmov_xmm(tcg_env, OP_PTR1, OP_PTR2, s->A0); @@ -1355,7 +1482,8 @@ static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) /* low part of result in VEX.vvvv, high in MODRM */ switch (ot) { - default: + case MO_32: +#ifdef TARGET_X86_64 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1); tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32, @@ -1363,13 +1491,15 @@ static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32); tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32); break; -#ifdef TARGET_X86_64 + case MO_64: - tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1); - break; #endif - } + tcg_gen_mulu2_tl(cpu_regs[s->vex_v], s->T0, s->T0, s->T1); + break; + default: + g_assert_not_reached(); + } } static void gen_PALIGNR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) @@ -1432,19 +1562,11 @@ static void gen_PCMPISTRM(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - MemOp ot = decode->op[1].ot; - if (ot < MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } gen_helper_pdep(s->T0, s->T0, s->T1); } static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { - MemOp ot = decode->op[1].ot; - if (ot < MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } gen_helper_pext(s->T0, s->T0, s->T1); } @@ -1772,14 +1894,24 @@ static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) { MemOp ot = decode->op[0].ot; - int b = decode->immediate; + int mask = ot == MO_64 ? 63 : 31; + int b = decode->immediate & mask; - if (ot == MO_64) { - tcg_gen_rotri_tl(s->T0, s->T0, b & 63); - } else { + switch (ot) { + case MO_32: +#ifdef TARGET_X86_64 tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0); - tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31); + tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b); tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32); + break; + + case MO_64: +#endif + tcg_gen_rotri_tl(s->T0, s->T0, b); + break; + + default: + g_assert_not_reached(); } } @@ -1790,9 +1922,6 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) mask = ot == MO_64 ? 63 : 31; tcg_gen_andi_tl(s->T1, s->T1, mask); - if (ot != MO_64) { - tcg_gen_ext32s_tl(s->T0, s->T0); - } tcg_gen_sar_tl(s->T0, s->T0, s->T1); } @@ -1867,9 +1996,6 @@ static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) mask = ot == MO_64 ? 
63 : 31; tcg_gen_andi_tl(s->T1, s->T1, mask); - if (ot != MO_64) { - tcg_gen_ext32u_tl(s->T0, s->T0); - } tcg_gen_shr_tl(s->T0, s->T0, s->T1); } diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c index 7c3c8dc7fe84b3144437fef25ecd115c5af5ca23..65e37ae2a0c6a86b90d2846d7f47622fb53ccc0f 100644 --- a/target/i386/tcg/excp_helper.c +++ b/target/i386/tcg/excp_helper.c @@ -28,7 +28,7 @@ G_NORETURN void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) { - raise_interrupt(env, intno, 1, 0, next_eip_addend); + raise_interrupt(env, intno, next_eip_addend); } G_NORETURN void helper_raise_exception(CPUX86State *env, int exception_index) @@ -112,10 +112,9 @@ void raise_interrupt2(CPUX86State *env, int intno, /* shortcuts to generate exceptions */ -G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int is_int, - int error_code, int next_eip_addend) +G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int next_eip_addend) { - raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0); + raise_interrupt2(env, intno, 1, 0, next_eip_addend, 0); } G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, diff --git a/target/i386/tcg/fpu_helper.c b/target/i386/tcg/fpu_helper.c index 4430d3d380ca0e54eee7d04bbbc6817148fb37b7..4b965a5d6c4e62b2d63902836b29e2fde6b3ccef 100644 --- a/target/i386/tcg/fpu_helper.c +++ b/target/i386/tcg/fpu_helper.c @@ -484,9 +484,8 @@ void helper_fcomi_ST0_FT0(CPUX86State *env) FloatRelation ret; ret = floatx80_compare(ST0, FT0, &env->fp_status); - eflags = cpu_cc_compute_all(env, CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; + eflags = cpu_cc_compute_all(env) & ~(CC_Z | CC_P | CC_C); + CC_SRC = eflags | fcomi_ccval[ret + 1]; merge_exception_flags(env, old_flags); } @@ -497,9 +496,8 @@ void helper_fucomi_ST0_FT0(CPUX86State *env) FloatRelation ret; ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status); - eflags = cpu_cc_compute_all(env, CC_OP); - eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1]; - CC_SRC = eflags; + eflags = cpu_cc_compute_all(env) & ~(CC_Z | CC_P | CC_C); + CC_SRC = eflags | fcomi_ccval[ret + 1]; merge_exception_flags(env, old_flags); } diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index cd1723389ada92436dcf649d319aa8e1975aedff..ce34b737bb00829ba43bdffcda546f19faf996ba 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -65,8 +65,7 @@ G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index, int error_code); G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index, int error_code, uintptr_t retaddr); -G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int, - int error_code, int next_eip_addend); +G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int next_eip_addend); G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr, MMUAccessType access_type, uintptr_t retaddr); diff --git a/target/i386/tcg/int_helper.c b/target/i386/tcg/int_helper.c index 05418f181f114373646a6ba38295946c3f9a87e5..ab85dc5540056151fc160bba0e919fba2c9be0fe 100644 --- a/target/i386/tcg/int_helper.c +++ b/target/i386/tcg/int_helper.c @@ -190,7 +190,7 @@ void helper_aaa(CPUX86State *env) int al, ah, af; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; @@ -214,7 +214,7 @@ void 
helper_aas(CPUX86State *env) int al, ah, af; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; ah = (env->regs[R_EAX] >> 8) & 0xff; @@ -237,7 +237,7 @@ void helper_daa(CPUX86State *env) int old_al, al, af, cf; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); cf = eflags & CC_C; af = eflags & CC_A; old_al = al = env->regs[R_EAX] & 0xff; @@ -264,7 +264,7 @@ void helper_das(CPUX86State *env) int al, al1, af, cf; int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); cf = eflags & CC_C; af = eflags & CC_A; al = env->regs[R_EAX] & 0xff; diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index babff061864f27e7a7dd35e95bc81412e1704e90..b0f0f7b893b306a8886beae6ddc7f6db528f7c3a 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -41,9 +41,9 @@ void helper_into(CPUX86State *env, int next_eip_addend) { int eflags; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if (eflags & CC_O) { - raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend); + raise_interrupt(env, EXCP04_INTO, next_eip_addend); } } diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index eb29a1fd4e7225c7e7aace73dcf2a914e8395389..34ccabd8ce3ce20cca0d6ddf0eb00cc30a01473d 100644 --- a/target/i386/tcg/seg_helper.c +++ b/target/i386/tcg/seg_helper.c @@ -2230,7 +2230,7 @@ target_ulong helper_lsl(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl, type; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2277,7 +2277,7 @@ target_ulong helper_lar(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl, type; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2326,7 +2326,7 @@ void helper_verr(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } @@ -2364,7 +2364,7 @@ void helper_verw(CPUX86State *env, target_ulong selector1) int rpl, dpl, cpl; selector = selector1 & 0xffff; - eflags = cpu_cc_compute_all(env, CC_OP); + eflags = cpu_cc_compute_all(env); if ((selector & 0xfffc) == 0) { goto fail; } diff --git a/target/i386/tcg/sysemu/fpu_helper.c b/target/i386/tcg/sysemu/fpu_helper.c index 93506cdd94e0bc17bd1a1bbc6beaf75acc337149..e0305ba23450e977ebea05bfd305aa655fb60e24 100644 --- a/target/i386/tcg/sysemu/fpu_helper.c +++ b/target/i386/tcg/sysemu/fpu_helper.c @@ -32,9 +32,9 @@ void x86_register_ferr_irq(qemu_irq irq) void fpu_check_raise_ferr_irq(CPUX86State *env) { if (ferr_irq && !(env->hflags2 & HF2_IGNNE_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); qemu_irq_raise(ferr_irq); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } @@ -49,7 +49,7 @@ void cpu_set_ignne(void) { CPUX86State *env = &X86_CPU(first_cpu)->env; - assert(qemu_mutex_iothread_locked()); + assert(bql_locked()); env->hflags2 |= HF2_IGNNE_MASK; /* diff --git a/target/i386/tcg/sysemu/misc_helper.c b/target/i386/tcg/sysemu/misc_helper.c index e1528b7f80bec6b81e36f34eff18705dfb4ffd2d..1ddfc9fe094b18a5f10c40b0680d8a31a016f5ec 100644 --- a/target/i386/tcg/sysemu/misc_helper.c +++ 
b/target/i386/tcg/sysemu/misc_helper.c @@ -118,9 +118,9 @@ void helper_write_crN(CPUX86State *env, int reg, target_ulong t0) break; case 8: if (!(env->hflags2 & HF2_VINTR_MASK)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } env->int_ctl = (env->int_ctl & ~V_TPR_MASK) | (t0 & V_TPR_MASK); diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 037bc47e7c27ca01e955aa92e9c3e660cd9a9a77..e1eb82a5c688a1bfa3a23155ffc5f1d7bb50579d 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -122,6 +122,7 @@ typedef struct DisasContext { int cpuid_ext3_features; int cpuid_7_0_ebx_features; int cpuid_7_0_ecx_features; + int cpuid_7_1_eax_features; int cpuid_xsave_features; /* TCG local temps */ @@ -522,9 +523,9 @@ void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val) gen_op_mov_reg_v(s, size, reg, s->tmp0); } -static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg) +static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val) { - tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0); + tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val); gen_op_mov_reg_v(s, size, reg, s->tmp0); } @@ -635,17 +636,17 @@ static TCGv eip_cur_tl(DisasContext *s) } } -/* Compute SEG:REG into A0. SEG is selected from the override segment +/* Compute SEG:REG into DEST. SEG is selected from the override segment (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to indicate no override. */ -static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, - int def_seg, int ovr_seg) +static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0, + int def_seg, int ovr_seg) { switch (aflag) { #ifdef TARGET_X86_64 case MO_64: if (ovr_seg < 0) { - tcg_gen_mov_tl(s->A0, a0); + tcg_gen_mov_tl(dest, a0); return; } break; @@ -656,14 +657,14 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, ovr_seg = def_seg; } if (ovr_seg < 0) { - tcg_gen_ext32u_tl(s->A0, a0); + tcg_gen_ext32u_tl(dest, a0); return; } break; case MO_16: /* 16 bit address */ - tcg_gen_ext16u_tl(s->A0, a0); - a0 = s->A0; + tcg_gen_ext16u_tl(dest, a0); + a0 = dest; if (ovr_seg < 0) { if (ADDSEG(s)) { ovr_seg = def_seg; @@ -680,17 +681,23 @@ static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, TCGv seg = cpu_seg_base[ovr_seg]; if (aflag == MO_64) { - tcg_gen_add_tl(s->A0, a0, seg); + tcg_gen_add_tl(dest, a0, seg); } else if (CODE64(s)) { - tcg_gen_ext32u_tl(s->A0, a0); - tcg_gen_add_tl(s->A0, s->A0, seg); + tcg_gen_ext32u_tl(dest, a0); + tcg_gen_add_tl(dest, dest, seg); } else { - tcg_gen_add_tl(s->A0, a0, seg); - tcg_gen_ext32u_tl(s->A0, s->A0); + tcg_gen_add_tl(dest, a0, seg); + tcg_gen_ext32u_tl(dest, dest); } } } +static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0, + int def_seg, int ovr_seg) +{ + gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg); +} + static inline void gen_string_movl_A0_ESI(DisasContext *s) { gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override); @@ -701,10 +708,12 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s) gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1); } -static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot) +static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot) { - tcg_gen_ld32s_tl(s->T0, tcg_env, offsetof(CPUX86State, df)); - tcg_gen_shli_tl(s->T0, s->T0, ot); + TCGv dshift = tcg_temp_new(); + tcg_gen_ld32s_tl(dshift, 
tcg_env, offsetof(CPUX86State, df)); + tcg_gen_shli_tl(dshift, dshift, ot); + return dshift; }; static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) @@ -712,6 +721,9 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign) if (size == MO_TL) { return src; } + if (!dst) { + dst = tcg_temp_new(); + } tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0)); return dst; } @@ -728,9 +740,9 @@ static void gen_exts(MemOp ot, TCGv reg) static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1) { - tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]); - gen_extu(s->aflag, s->tmp0); - tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1); + TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false); + + tcg_gen_brcondi_tl(cond, tmp, 0, label1); } static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1) @@ -812,13 +824,16 @@ static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port, static void gen_movs(DisasContext *s, MemOp ot) { + TCGv dshift; + gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + + dshift = gen_compute_Dshift(s, ot); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } static void gen_op_update1_cc(DisasContext *s) @@ -851,22 +866,22 @@ static void gen_op_update_neg_cc(DisasContext *s) tcg_gen_movi_tl(s->cc_srcT, 0); } -/* compute all eflags to cc_src */ -static void gen_compute_eflags(DisasContext *s) +/* compute all eflags to reg */ +static void gen_mov_eflags(DisasContext *s, TCGv reg) { - TCGv zero, dst, src1, src2; + TCGv dst, src1, src2; + TCGv_i32 cc_op; int live, dead; if (s->cc_op == CC_OP_EFLAGS) { + tcg_gen_mov_tl(reg, cpu_cc_src); return; } if (s->cc_op == CC_OP_CLR) { - tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P); - set_cc_op(s, CC_OP_EFLAGS); + tcg_gen_movi_tl(reg, CC_Z | CC_P); return; } - zero = NULL; dst = cpu_cc_dst; src1 = cpu_cc_src; src2 = cpu_cc_src2; @@ -875,7 +890,7 @@ static void gen_compute_eflags(DisasContext *s) live = cc_op_live[s->cc_op] & ~USES_CC_SRCT; dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2); if (dead) { - zero = tcg_constant_tl(0); + TCGv zero = tcg_constant_tl(0); if (dead & USES_CC_DST) { dst = zero; } @@ -887,8 +902,18 @@ static void gen_compute_eflags(DisasContext *s) } } - gen_update_cc_op(s); - gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op); + if (s->cc_op != CC_OP_DYNAMIC) { + cc_op = tcg_constant_i32(s->cc_op); + } else { + cc_op = cpu_cc_op; + } + gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op); +} + +/* compute all eflags to cc_src */ +static void gen_compute_eflags(DisasContext *s) +{ + gen_mov_eflags(s, cpu_cc_src); set_cc_op(s, CC_OP_EFLAGS); } @@ -1020,6 +1045,9 @@ static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg) case CC_OP_CLR: case CC_OP_POPCNT: return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 }; + case CC_OP_MULB ... 
CC_OP_MULQ: + return (CCPrepare) { .cond = TCG_COND_NE, + .reg = cpu_cc_src, .mask = -1 }; default: gen_compute_eflags(s); return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src, @@ -1126,10 +1154,9 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) if (reg == cpu_cc_src) { reg = s->tmp0; } - tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(reg, reg, cpu_cc_src); + tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, - .mask = CC_S }; + .mask = CC_O }; break; default: case JCC_LE: @@ -1137,10 +1164,9 @@ static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg) if (reg == cpu_cc_src) { reg = s->tmp0; } - tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */ - tcg_gen_xor_tl(reg, reg, cpu_cc_src); + tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S); cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg, - .mask = CC_S | CC_Z }; + .mask = CC_O | CC_Z }; break; } break; @@ -1239,11 +1265,9 @@ static TCGLabel *gen_jz_ecx_string(DisasContext *s) static void gen_stos(DisasContext *s, MemOp ot) { - gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); gen_string_movl_A0_EDI(s); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); } static void gen_lods(DisasContext *s, MemOp ot) @@ -1251,28 +1275,33 @@ static void gen_lods(DisasContext *s, MemOp ot) gen_string_movl_A0_ESI(s); gen_op_ld_v(s, ot, s->T0, s->A0); gen_op_mov_reg_v(s, ot, R_EAX, s->T0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); } static void gen_scas(DisasContext *s, MemOp ot) { gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); - gen_op(s, OP_CMPL, ot, R_EAX); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + tcg_gen_mov_tl(cpu_cc_src, s->T1); + tcg_gen_mov_tl(s->cc_srcT, s->T0); + tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1); + set_cc_op(s, CC_OP_SUBB + ot); + + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); } static void gen_cmps(DisasContext *s, MemOp ot) { + TCGv dshift; + gen_string_movl_A0_EDI(s); gen_op_ld_v(s, ot, s->T1, s->A0); gen_string_movl_A0_ESI(s); gen_op(s, OP_CMPL, ot, OR_TMP0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + + dshift = gen_compute_Dshift(s, ot); + gen_op_add_reg(s, s->aflag, R_ESI, dshift); + gen_op_add_reg(s, s->aflag, R_EDI, dshift); } static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot) @@ -1300,8 +1329,7 @@ static void gen_ins(DisasContext *s, MemOp ot) tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); gen_helper_in_func(ot, s->T0, s->tmp2_i32); gen_op_st_v(s, ot, s->T0, s->A0); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_EDI); + gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot)); gen_bpt_io(s, s->tmp2_i32, ot); } @@ -1314,8 +1342,7 @@ static void gen_outs(DisasContext *s, MemOp ot) tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0); gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32); - gen_op_movl_T0_Dshift(s, ot); - gen_op_add_reg_T0(s, s->aflag, R_ESI); + gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot)); gen_bpt_io(s, s->tmp2_i32, ot); } @@ -2474,14 +2501,10 @@ static void gen_jcc(DisasContext *s, int b, int diff) gen_jmp_rel(s, s->dflag, diff, 0); } -static void gen_cmovcc1(CPUX86State 
*env, DisasContext *s, MemOp ot, int b, - int modrm, int reg) +static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src) { - CCPrepare cc; + CCPrepare cc = gen_prepare_cc(s, b, s->T1); - gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); - - cc = gen_prepare_cc(s, b, s->T1); if (cc.mask != -1) { TCGv t0 = tcg_temp_new(); tcg_gen_andi_tl(t0, cc.reg, cc.mask); @@ -2491,9 +2514,7 @@ static void gen_cmovcc1(CPUX86State *env, DisasContext *s, MemOp ot, int b, cc.reg2 = tcg_constant_tl(cc.imm); } - tcg_gen_movcond_tl(cc.cond, s->T0, cc.reg, cc.reg2, - s->T0, cpu_regs[reg]); - gen_op_mov_reg_v(s, ot, reg, s->T0); + tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest); } static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg) @@ -2560,7 +2581,7 @@ static void gen_push_v(DisasContext *s, TCGv val) if (!CODE64(s)) { if (ADDSEG(s)) { - new_esp = s->tmp4; + new_esp = tcg_temp_new(); tcg_gen_mov_tl(new_esp, s->A0); } gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1); @@ -2575,8 +2596,8 @@ static MemOp gen_pop_T0(DisasContext *s) { MemOp d_ot = mo_pushpop(s, s->dflag); - gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1); - gen_op_ld_v(s, d_ot, s->T0, s->A0); + gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1); + gen_op_ld_v(s, d_ot, s->T0, s->T0); return d_ot; } @@ -4182,7 +4203,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]); tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]); tcg_gen_add_tl(s->A0, s->A0, s->T0); - gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_op_ld_v(s, MO_8, s->T0, s->A0); gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0); @@ -4930,6 +4950,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0xaa: /* stosS */ case 0xab: ot = mo_b_d(b, dflag); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) { gen_repz_stos(s, ot); } else { @@ -4948,6 +4969,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) case 0xae: /* scasS */ case 0xaf: ot = mo_b_d(b, dflag); + gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX); if (prefixes & PREFIX_REPNZ) { gen_repz_scas(s, ot, 1); } else if (prefixes & PREFIX_REPZ) { @@ -5201,7 +5223,9 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) ot = dflag; modrm = x86_ldub_code(env, s); reg = ((modrm >> 3) & 7) | REX_R(s); - gen_cmovcc1(env, s, ot, b, modrm, reg); + gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0); + gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]); + gen_op_mov_reg_v(s, ot, reg, s->T0); break; /************************/ @@ -5834,7 +5858,6 @@ static bool disas_insn(DisasContext *s, CPUState *cpu) gen_update_cc_op(s); gen_update_eip_cur(s); tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]); - gen_extu(s->aflag, s->A0); gen_add_A0_ds_seg(s); gen_helper_monitor(tcg_env, s->A0); break; @@ -6941,6 +6964,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX]; + dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index 67cad867207233dbd22ae38a6075f2895bae532a..189ae0f14068304fe995542babdaf5d063dfd307 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -25,7 
+25,7 @@ static void *whpx_cpu_thread_fn(void *arg) rcu_register_thread(); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_thread_get_self(cpu->thread); cpu->thread_id = qemu_get_thread_id(); current_cpu = cpu; @@ -48,14 +48,14 @@ static void *whpx_cpu_thread_fn(void *arg) } } while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_iothread(cpu->halt_cond); + qemu_cond_wait_bql(cpu->halt_cond); } qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); whpx_destroy_vcpu(cpu); cpu_thread_signal_destroyed(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); rcu_unregister_thread(); return NULL; } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index d29ba916a0cc8b8d649bf24ae91f83be399a1f5b..a7262654acdee1f656393465327caa61d0f0de8c 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -1324,7 +1324,7 @@ static int whpx_first_vcpu_starting(CPUState *cpu) struct whpx_state *whpx = &whpx_global; HRESULT hr; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (!QTAILQ_EMPTY(&cpu->breakpoints) || (whpx->breakpoints.breakpoints && @@ -1442,7 +1442,7 @@ static int whpx_handle_halt(CPUState *cpu) CPUX86State *env = cpu_env(cpu); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { @@ -1450,7 +1450,7 @@ static int whpx_handle_halt(CPUState *cpu) cpu->halted = true; ret = 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } @@ -1472,7 +1472,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) memset(&new_int, 0, sizeof(new_int)); memset(reg_values, 0, sizeof(reg_values)); - qemu_mutex_lock_iothread(); + bql_lock(); /* Inject NMI */ if (!vcpu->interruption_pending && @@ -1563,7 +1563,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu) reg_count += 1; } - qemu_mutex_unlock_iothread(); + bql_unlock(); vcpu->ready_for_pic_interrupt = false; if (reg_count) { @@ -1590,9 +1590,9 @@ static void whpx_vcpu_post_run(CPUState *cpu) uint64_t tpr = vcpu->exit_ctx.VpContext.Cr8; if (vcpu->tpr != tpr) { vcpu->tpr = tpr; - qemu_mutex_lock_iothread(); + bql_lock(); cpu_set_apic_tpr(x86_cpu->apic_state, whpx_cr8_to_apic_tpr(vcpu->tpr)); - qemu_mutex_unlock_iothread(); + bql_unlock(); } vcpu->interruption_pending = @@ -1652,7 +1652,7 @@ static int whpx_vcpu_run(CPUState *cpu) WhpxStepMode exclusive_step_mode = WHPX_STEP_NONE; int ret; - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (whpx->running_cpus++ == 0) { /* Insert breakpoints into memory, update exception exit bitmap. 
*/ @@ -1690,7 +1690,7 @@ static int whpx_vcpu_run(CPUState *cpu) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (exclusive_step_mode != WHPX_STEP_NONE) { start_exclusive(); @@ -2028,9 +2028,9 @@ static int whpx_vcpu_run(CPUState *cpu) error_report("WHPX: Unexpected VP exit code %d", vcpu->exit_ctx.ExitReason); whpx_get_registers(cpu); - qemu_mutex_lock_iothread(); + bql_lock(); qemu_system_guest_panicked(cpu_get_crash_info(cpu)); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } @@ -2055,7 +2055,7 @@ static int whpx_vcpu_run(CPUState *cpu) cpu_exec_end(cpu); } - qemu_mutex_lock_iothread(); + bql_lock(); current_cpu = cpu; if (--whpx->running_cpus == 0) { diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index fc075952e6356060fe46b8530ea1267765d7a867..064540397db24d30c0448d0b3f6cc15f8654028f 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -11,15 +11,25 @@ #include "qapi/error.h" #include "qemu/module.h" #include "sysemu/qtest.h" -#include "exec/cpu_ldst.h" +#include "sysemu/tcg.h" +#include "sysemu/kvm.h" +#include "kvm/kvm_loongarch.h" #include "exec/exec-all.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "cpu-csr.h" +#ifndef CONFIG_USER_ONLY #include "sysemu/reset.h" -#include "tcg/tcg.h" +#endif #include "vec.h" +#ifdef CONFIG_KVM +#include +#endif +#ifdef CONFIG_TCG +#include "exec/cpu_ldst.h" +#include "tcg/tcg.h" +#endif const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -108,12 +118,15 @@ void loongarch_cpu_set_irq(void *opaque, int irq, int level) return; } - env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); - - if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + if (kvm_enabled()) { + kvm_loongarch_set_interrupt(cpu, irq, level); + } else if (tcg_enabled()) { + env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); + if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } } } @@ -138,7 +151,10 @@ static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) return (pending & status) != 0; } +#endif +#ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY static void loongarch_cpu_do_interrupt(CPUState *cs) { LoongArchCPU *cpu = LOONGARCH_CPU(cs); @@ -320,7 +336,6 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } #endif -#ifdef CONFIG_TCG static void loongarch_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -481,21 +496,6 @@ static void loongarch_max_initfn(Object *obj) loongarch_la464_initfn(obj); } -static void loongarch_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - - qemu_printf("%s\n", typename); -} - -void loongarch_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_LOONGARCH_CPU, false); - g_slist_foreach(list, loongarch_cpu_list_entry, NULL); - g_slist_free(list); -} - static void loongarch_cpu_reset_hold(Object *obj) { CPUState *cs = CPU(obj); @@ -531,10 +531,12 @@ static void loongarch_cpu_reset_hold(Object *obj) env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2)); env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0); + env->CSR_CPUID = cs->cpu_index; env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0); env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, 
CSR_LLBCTL, KLO, 0); env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0); + env->CSR_TID = cs->cpu_index; env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2); env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63); @@ -551,9 +553,14 @@ static void loongarch_cpu_reset_hold(Object *obj) #ifndef CONFIG_USER_ONLY env->pc = 0x1c000000; memset(env->tlb, 0, sizeof(env->tlb)); + if (kvm_enabled()) { + kvm_arch_reset_vcpu(env); + } #endif +#ifdef CONFIG_TCG restore_fp_status(env); +#endif cs->exception_index = -1; } @@ -582,47 +589,6 @@ static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp) lacc->parent_realize(dev, errp); } -#ifndef CONFIG_USER_ONLY -static void loongarch_qemu_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n", - __func__, addr); -} - -static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) -{ - switch (addr) { - case VERSION_REG: - return 0x11ULL; - case FEATURE_REG: - return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | - 1ULL << IOCSRF_CSRIPI; - case VENDOR_REG: - return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ - case CPUNAME_REG: - return 0x303030354133ULL; /* "3A5000" */ - case MISC_FUNC_REG: - return 1ULL << IOCSRM_EXTIOI_EN; - } - return 0ULL; -} - -static const MemoryRegionOps loongarch_qemu_ops = { - .read = loongarch_qemu_read, - .write = loongarch_qemu_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 8, - .max_access_size = 8, - }, -}; -#endif - static bool loongarch_get_lsx(Object *obj, Error **errp) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); @@ -693,17 +659,12 @@ static void loongarch_cpu_init(Object *obj) { #ifndef CONFIG_USER_ONLY LoongArchCPU *cpu = LOONGARCH_CPU(obj); - CPULoongArchState *env = &cpu->env; qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS); +#ifdef CONFIG_TCG timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL, &loongarch_constant_timer_cb, cpu); - memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL, - env, "iocsr", UINT64_MAX); - address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR"); - memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops, - NULL, "iocsr_misc", 0x428); - memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem); +#endif #endif } @@ -716,15 +677,9 @@ static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) g_autofree char *typename = g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); - if (!oc) { - return NULL; - } } - if (object_class_dynamic_cast(oc, TYPE_LOONGARCH_CPU)) { - return oc; - } - return NULL; + return oc; } void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -762,6 +717,8 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); + qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG); + qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL); /* fpr */ if (flags & CPU_DUMP_FPU) { @@ -795,7 +752,9 @@ static struct TCGCPUOps loongarch_tcg_ops = { #include "hw/core/sysemu-cpu-ops.h" static const struct SysemuCPUOps loongarch_sysemu_ops = { 
+#ifdef CONFIG_TCG .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, +#endif }; static int64_t loongarch_cpu_get_arch_id(CPUState *cs) diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 00d1fba597f8eb791d99bc937115515e58eba610..0fa5e0ca93ba8b8bd5360be9b4771314fedce859 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -319,6 +319,7 @@ typedef struct CPUArchState { uint64_t CSR_PWCH; uint64_t CSR_STLBPS; uint64_t CSR_RVACFG; + uint64_t CSR_CPUID; uint64_t CSR_PRCFG1; uint64_t CSR_PRCFG2; uint64_t CSR_PRCFG3; @@ -350,16 +351,14 @@ typedef struct CPUArchState { uint64_t CSR_DBG; uint64_t CSR_DERA; uint64_t CSR_DSAVE; - uint64_t CSR_CPUID; #ifndef CONFIG_USER_ONLY LoongArchTLB tlb[LOONGARCH_TLB_MAX]; - AddressSpace address_space_iocsr; - MemoryRegion system_iocsr; - MemoryRegion iocsr_mem; + AddressSpace *address_space_iocsr; bool load_elf; uint64_t elf_address; + uint32_t mp_state; /* Store ipistate to access from this struct */ DeviceState *ipistate; #endif @@ -380,6 +379,8 @@ struct ArchCPU { /* 'compatible' string for this CPU for Linux device trees */ const char *dtb_compatible; + /* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */ + uint64_t kvm_state_counter; }; /** @@ -466,10 +467,6 @@ static inline void cpu_get_tb_cpu_state(CPULoongArchState *env, vaddr *pc, *flags |= is_va32(env) * HW_FLAGS_VA32; } -void loongarch_cpu_list(void); - -#define cpu_list loongarch_cpu_list - #include "exec/cpu-all.h" #define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index c492863cc5defa89d0392de53355e2cc4d0f6922..0beb034748816bd7bf356ddeaf8e7b6bd1155c0a 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -31,8 +31,10 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, const char *loongarch_exception_name(int32_t exception); +#ifdef CONFIG_TCG int ieee_ex_to_loongarch(int xcpt); void restore_fp_status(CPULoongArchState *env); +#endif #ifndef CONFIG_USER_ONLY extern const VMStateDescription vmstate_loongarch_cpu; @@ -44,12 +46,13 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, uint64_t value); - +#ifdef CONFIG_TCG bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#endif #endif /* !CONFIG_USER_ONLY */ uint64_t read_fcc(CPULoongArchState *env); diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c new file mode 100644 index 0000000000000000000000000000000000000000..84bcdf5f86d7711aa370f6cc8030450c8dc71f56 --- /dev/null +++ b/target/loongarch/kvm/kvm.c @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch KVM + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include +#include + +#include "qemu/timer.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/kvm_int.h" +#include "hw/pci/pci.h" +#include "exec/memattrs.h" +#include "exec/address-spaces.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "migration/migration.h" +#include "sysemu/runstate.h" +#include "cpu-csr.h" +#include "kvm_loongarch.h" 
+#include "trace.h" + +static bool cap_has_mp_state; +const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_LAST_INFO +}; + +static int kvm_loongarch_get_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + /* Get the current register set as KVM sees it */ + ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); + if (ret < 0) { + trace_kvm_failed_get_regs_core(strerror(errno)); + return ret; + } + /* gpr[0] value is always 0 */ + env->gpr[0] = 0; + for (i = 1; i < 32; i++) { + env->gpr[i] = regs.gpr[i]; + } + + env->pc = regs.pc; + return ret; +} + +static int kvm_loongarch_put_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + /* Set the registers based on QEMU's view of things */ + for (i = 0; i < 32; i++) { + regs.gpr[i] = env->gpr[i]; + } + + regs.pc = env->pc; + ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); + if (ret < 0) { + trace_kvm_failed_put_regs_core(strerror(errno)); + } + + return ret; +} + +static int kvm_loongarch_get_csr(CPUState *cs) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + &env->CSR_PGDL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= 
kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + &env->CSR_SAVE[5]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + &env->CSR_DMW[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + + return ret; +} + +static int kvm_loongarch_put_csr(CPUState *cs) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= 
kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + &env->CSR_PGDL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + &env->CSR_SAVE[5]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + 
&env->CSR_DMW[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + /* + * The timer configuration must be written last, since writing it + * enables the guest timer. + */ + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + return ret; +} + +static int kvm_loongarch_get_regs_fp(CPUState *cs) +{ + int ret, i; + struct kvm_fpu fpu; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_get_fpu(strerror(errno)); + return ret; + } + + env->fcsr0 = fpu.fcsr; + for (i = 0; i < 32; i++) { + env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0]; + } + for (i = 0; i < 8; i++) { + env->cf[i] = fpu.fcc & 0xFF; + fpu.fcc = fpu.fcc >> 8; + } + + return ret; +} + +static int kvm_loongarch_put_regs_fp(CPUState *cs) +{ + int ret, i; + struct kvm_fpu fpu; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + fpu.fcsr = env->fcsr0; + fpu.fcc = 0; + for (i = 0; i < 32; i++) { + fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0]; + } + + for (i = 0; i < 8; i++) { + fpu.fcc |= env->cf[i] << (8 * i); + } + + ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_put_fpu(strerror(errno)); + } + + return ret; +} + +void kvm_arch_reset_vcpu(CPULoongArchState *env) +{ + env->mp_state = KVM_MP_STATE_RUNNABLE; +} + +static int kvm_loongarch_get_mpstate(CPUState *cs) +{ + int ret = 0; + struct kvm_mp_state mp_state; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_get_mpstate(strerror(errno)); + return ret; + } + env->mp_state = mp_state.mp_state; + } + + return ret; +} + +static int kvm_loongarch_put_mpstate(CPUState *cs) +{ + int ret = 0; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + struct kvm_mp_state mp_state = { + .mp_state = env->mp_state + }; + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_put_mpstate(strerror(errno)); + } + } + + return ret; +} + +static int kvm_loongarch_get_cpucfg(CPUState *cs) +{ + int i, ret = 0; + uint64_t val; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + for (i = 0; i < 21; i++) { + ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_get_cpucfg(strerror(errno)); + } + env->cpucfg[i] = (uint32_t)val; + } + return ret; +} + +static int kvm_loongarch_put_cpucfg(CPUState *cs) +{ + int i, ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + uint64_t val; + + for (i = 0; i < 21; i++) { + val = env->cpucfg[i]; + /* LSX, LASX and LBT are not supported by KVM yet */ + if (i == 2) { + val &= ~(BIT(R_CPUCFG2_LSX_SHIFT) | BIT(R_CPUCFG2_LASX_SHIFT)); + val &= ~(BIT(R_CPUCFG2_LBT_X86_SHIFT) | + BIT(R_CPUCFG2_LBT_ARM_SHIFT) | + BIT(R_CPUCFG2_LBT_MIPS_SHIFT)); + } + ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_put_cpucfg(strerror(errno)); + } + } + return ret; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + int ret; + + ret = kvm_loongarch_get_regs_core(cs); + if (ret) { + return ret; + } + + ret = 
kvm_loongarch_get_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_cpucfg(cs); + return ret; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + int ret; + + ret = kvm_loongarch_put_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_cpucfg(cs); + return ret; +} + +static void kvm_loongarch_vm_stage_change(void *opaque, bool running, + RunState state) +{ + int ret; + CPUState *cs = opaque; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + + if (running) { + ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_put_counter(strerror(errno)); + } + } else { + ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_get_counter(strerror(errno)); + } + } +} + +int kvm_arch_init_vcpu(CPUState *cs) +{ + qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs); + return 0; +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +unsigned long kvm_arch_vcpu_id(CPUState *cs) +{ + return cs->cpu_index; +} + +int kvm_arch_release_virq_post(int virq) +{ + return 0; +} + +int kvm_arch_msi_data_to_gsi(uint32_t data) +{ + abort(); +} + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) +{ + return 0; +} + +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev) +{ + return 0; +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ +} + +int kvm_arch_get_default_type(MachineState *ms) +{ + return 0; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ + cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + return 0; +} + +int kvm_arch_irqchip_create(KVMState *s) +{ + return 0; +} + +void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) +{ +} + +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +{ + return MEMTXATTRS_UNSPECIFIED; +} + +int kvm_arch_process_async_events(CPUState *cs) +{ + return cs->halted; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + return true; +} + +bool kvm_arch_cpu_check_are_resettable(void) +{ + return true; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + MemTxAttrs attrs = {}; + + attrs.requester_id = env_cpu(env)->cpu_index; + + trace_kvm_arch_handle_exit(run->exit_reason); + switch (run->exit_reason) { + case KVM_EXIT_LOONGARCH_IOCSR: + address_space_rw(env->address_space_iocsr, + run->iocsr_io.phys_addr, + attrs, + run->iocsr_io.data, + run->iocsr_io.len, + run->iocsr_io.is_write); + break; + default: + ret = -1; + warn_report("KVM: unknown exit reason %d", run->exit_reason); + break; + } + return ret; +} + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level) +{ + struct kvm_interrupt intr; + CPUState *cs = CPU(cpu); + + if (level) { + intr.irq = irq; + } else { + intr.irq = -irq; + } + + trace_kvm_set_intr(irq, level); + return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git 
a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h new file mode 100644 index 0000000000000000000000000000000000000000..d945b6bb822a776eba000f6c5d8a9a7cbb676f96 --- /dev/null +++ b/target/loongarch/kvm/kvm_loongarch.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch kvm interface + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "cpu.h" + +#ifndef QEMU_KVM_LOONGARCH_H +#define QEMU_KVM_LOONGARCH_H + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level); +void kvm_arch_reset_vcpu(CPULoongArchState *env); + +#endif diff --git a/target/loongarch/kvm/meson.build b/target/loongarch/kvm/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..2266de6ca97c3122bbf437a129fae97771010529 --- /dev/null +++ b/target/loongarch/kvm/meson.build @@ -0,0 +1 @@ +loongarch_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) diff --git a/target/loongarch/loongarch-qmp-cmds.c b/target/loongarch/loongarch-qmp-cmds.c index 645672ff593fb202216ac135ad14dfc148aa936c..ec33ce81f0eebe245838e5ccea9b9d12b6588135 100644 --- a/target/loongarch/loongarch-qmp-cmds.c +++ b/target/loongarch/loongarch-qmp-cmds.c @@ -22,8 +22,7 @@ static void loongarch_cpu_add_definition(gpointer data, gpointer user_data) CpuDefinitionInfo *info = g_new0(CpuDefinitionInfo, 1); const char *typename = object_class_get_name(oc); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_LOONGARCH_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c index 1c4e01d07695ac67e22fa77166d48dabd7959da2..c7029fb9b47f87539ff9038273645d227f12c2a0 100644 --- a/target/loongarch/machine.c +++ b/target/loongarch/machine.c @@ -14,7 +14,7 @@ static const VMStateDescription vmstate_fpu_reg = { .name = "fpu_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(UD(0), VReg), VMSTATE_END_OF_LIST() } @@ -36,7 +36,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FPU_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_UINT32(env.fcsr0, LoongArchCPU), VMSTATE_BOOL_ARRAY(env.cf, LoongArchCPU, 8), @@ -48,7 +48,7 @@ static const VMStateDescription vmstate_lsxh_reg = { .name = "lsxh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(UD(1), VReg), VMSTATE_END_OF_LIST() } @@ -70,7 +70,7 @@ static const VMStateDescription vmstate_lsx = { .version_id = 1, .minimum_version_id = 1, .needed = lsx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_LSXH_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_END_OF_LIST() }, @@ -80,7 +80,7 @@ static const VMStateDescription vmstate_lasxh_reg = { .name = "lasxh_reg", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(UD(2), VReg), VMSTATE_UINT64(UD(3), VReg), VMSTATE_END_OF_LIST() @@ -103,7 +103,7 @@ static const VMStateDescription vmstate_lasx = { .version_id = 1, .minimum_version_id = 1, .needed = lasx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_LASXH_REGS(env.fpr, LoongArchCPU, 0), VMSTATE_END_OF_LIST() }, @@ -114,7 +114,7 @@ 
const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", .version_id = 0, .minimum_version_id = 0, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tlb_misc, LoongArchTLB), VMSTATE_UINT64(tlb_entry0, LoongArchTLB), VMSTATE_UINT64(tlb_entry1, LoongArchTLB), @@ -127,7 +127,7 @@ const VMStateDescription vmstate_loongarch_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), VMSTATE_UINTTL(env.pc, LoongArchCPU), @@ -193,7 +193,7 @@ const VMStateDescription vmstate_loongarch_cpu = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_lsx, &vmstate_lasx, diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index 18e8191e2b67ea369ea43796c471e3d7a89a9d01..db310f60222a33885a7bff638c908e46264c7d53 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -3,31 +3,19 @@ gen = decodetree.process('insns.decode') loongarch_ss = ss.source_set() loongarch_ss.add(files( 'cpu.c', -)) -loongarch_tcg_ss = ss.source_set() -loongarch_tcg_ss.add(gen) -loongarch_tcg_ss.add(files( - 'fpu_helper.c', - 'op_helper.c', - 'translate.c', 'gdbstub.c', - 'vec_helper.c', )) -loongarch_tcg_ss.add(zlib) loongarch_system_ss = ss.source_set() loongarch_system_ss.add(files( 'loongarch-qmp-cmds.c', 'machine.c', - 'tlb_helper.c', - 'constant_timer.c', - 'csr_helper.c', - 'iocsr_helper.c', )) common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: [files('disas.c'), gen]) -loongarch_ss.add_all(when: 'CONFIG_TCG', if_true: [loongarch_tcg_ss]) +subdir('tcg') target_arch += {'loongarch': loongarch_ss} target_system_arch += {'loongarch': loongarch_system_ss} +subdir('kvm') diff --git a/target/loongarch/constant_timer.c b/target/loongarch/tcg/constant_timer.c similarity index 100% rename from target/loongarch/constant_timer.c rename to target/loongarch/tcg/constant_timer.c diff --git a/target/loongarch/csr_helper.c b/target/loongarch/tcg/csr_helper.c similarity index 96% rename from target/loongarch/csr_helper.c rename to target/loongarch/tcg/csr_helper.c index 55341551a5c7751224f4789e50c3abb31b509c27..15f94caefabc7722263fa46e948e21de37b4203c 100644 --- a/target/loongarch/csr_helper.c +++ b/target/loongarch/tcg/csr_helper.c @@ -89,9 +89,9 @@ target_ulong helper_csrwr_ticlr(CPULoongArchState *env, target_ulong val) int64_t old_v = 0; if (val & 0x1) { - qemu_mutex_lock_iothread(); + bql_lock(); loongarch_cpu_set_irq(cpu, IRQ_TIMER, 0); - qemu_mutex_unlock_iothread(); + bql_unlock(); } return old_v; } diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c similarity index 100% rename from target/loongarch/fpu_helper.c rename to target/loongarch/tcg/fpu_helper.c diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/tcg/insn_trans/trans_arith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_arith.c.inc rename to target/loongarch/tcg/insn_trans/trans_arith.c.inc diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_atomic.c.inc rename to target/loongarch/tcg/insn_trans/trans_atomic.c.inc diff --git a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/tcg/insn_trans/trans_bit.c.inc similarity index 100% rename from 
target/loongarch/insn_trans/trans_bit.c.inc rename to target/loongarch/tcg/insn_trans/trans_bit.c.inc diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/tcg/insn_trans/trans_branch.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_branch.c.inc rename to target/loongarch/tcg/insn_trans/trans_branch.c.inc diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_extra.c.inc rename to target/loongarch/tcg/insn_trans/trans_extra.c.inc diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/tcg/insn_trans/trans_farith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_farith.c.inc rename to target/loongarch/tcg/insn_trans/trans_farith.c.inc diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcmp.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcmp.c.inc diff --git a/target/loongarch/insn_trans/trans_fcnv.c.inc b/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcnv.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcnv.c.inc diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmemory.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmemory.c.inc diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/tcg/insn_trans/trans_fmov.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmov.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmov.c.inc diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/tcg/insn_trans/trans_memory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_memory.c.inc rename to target/loongarch/tcg/insn_trans/trans_memory.c.inc diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_privileged.c.inc rename to target/loongarch/tcg/insn_trans/trans_privileged.c.inc diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/tcg/insn_trans/trans_shift.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_shift.c.inc rename to target/loongarch/tcg/insn_trans/trans_shift.c.inc diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_vec.c.inc rename to target/loongarch/tcg/insn_trans/trans_vec.c.inc diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c similarity index 76% rename from target/loongarch/iocsr_helper.c rename to target/loongarch/tcg/iocsr_helper.c index 6cd01d5f0940bede6de45f6ed35442e08770e937..b6916f53d20ca133f0000e773685cb94240bafe2 100644 --- a/target/loongarch/iocsr_helper.c +++ b/target/loongarch/tcg/iocsr_helper.c @@ -17,52 +17,52 @@ uint64_t helper_iocsrrd_b(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldub(&env->address_space_iocsr, r_addr, + return address_space_ldub(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_h(CPULoongArchState *env, 
target_ulong r_addr) { - return address_space_lduw(&env->address_space_iocsr, r_addr, + return address_space_lduw(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_w(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldl(&env->address_space_iocsr, r_addr, + return address_space_ldl(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_d(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldq(&env->address_space_iocsr, r_addr, + return address_space_ldq(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_b(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stb(&env->address_space_iocsr, w_addr, + address_space_stb(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_h(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stw(&env->address_space_iocsr, w_addr, + address_space_stw(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_w(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stl(&env->address_space_iocsr, w_addr, + address_space_stl(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_d(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stq(&env->address_space_iocsr, w_addr, + address_space_stq(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } diff --git a/target/loongarch/tcg/meson.build b/target/loongarch/tcg/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..bdf34f9673b686136fce68ad6867d9c472d80e83 --- /dev/null +++ b/target/loongarch/tcg/meson.build @@ -0,0 +1,19 @@ +if 'CONFIG_TCG' not in config_all_accel + subdir_done() +endif + +loongarch_ss.add([zlib, gen]) + +loongarch_ss.add(files( + 'fpu_helper.c', + 'op_helper.c', + 'translate.c', + 'vec_helper.c', +)) + +loongarch_system_ss.add(files( + 'constant_timer.c', + 'csr_helper.c', + 'iocsr_helper.c', + 'tlb_helper.c', +)) diff --git a/target/loongarch/op_helper.c b/target/loongarch/tcg/op_helper.c similarity index 100% rename from target/loongarch/op_helper.c rename to target/loongarch/tcg/op_helper.c diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c similarity index 100% rename from target/loongarch/tlb_helper.c rename to target/loongarch/tcg/tlb_helper.c diff --git a/target/loongarch/translate.c b/target/loongarch/tcg/translate.c similarity index 100% rename from target/loongarch/translate.c rename to target/loongarch/tcg/translate.c diff --git a/target/loongarch/vec_helper.c b/target/loongarch/tcg/vec_helper.c similarity index 100% rename from target/loongarch/vec_helper.c rename to target/loongarch/tcg/vec_helper.c diff --git a/target/loongarch/trace-events b/target/loongarch/trace-events new file mode 100644 index 0000000000000000000000000000000000000000..dea11edc0f1164318642cbed4306ca23a29ed3a8 --- /dev/null +++ b/target/loongarch/trace-events @@ -0,0 +1,15 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +#kvm.c +kvm_failed_get_regs_core(const char *msg) "Failed to get core regs from KVM: %s" +kvm_failed_put_regs_core(const char *msg) "Failed to put core regs into KVM: %s" +kvm_failed_get_fpu(const char *msg) "Failed to get fpu from KVM: %s" +kvm_failed_put_fpu(const char *msg) "Failed to put fpu into KVM: %s" +kvm_failed_get_mpstate(const char *msg) "Failed to get mp_state from KVM: %s" +kvm_failed_put_mpstate(const char *msg) "Failed to put mp_state into KVM: %s" +kvm_failed_get_counter(const char *msg) "Failed to get counter from KVM: %s" +kvm_failed_put_counter(const char *msg) "Failed to put counter into KVM: %s" +kvm_failed_get_cpucfg(const char *msg) "Failed to get cpucfg from KVM: %s" +kvm_failed_put_cpucfg(const char *msg) "Failed to put cpucfg into KVM: %s" +kvm_arch_handle_exit(int num) "kvm arch handle exit, the reason number: %d" +kvm_set_intr(int irq, int level) "kvm set interrupt, irq num: %d, level: %d" diff --git a/target/loongarch/trace.h b/target/loongarch/trace.h new file mode 100644 index 0000000000000000000000000000000000000000..c2ecb78f0843e24884d8279aec544ecc0f07fec4 --- /dev/null +++ b/target/loongarch/trace.h @@ -0,0 +1 @@ +#include "trace/trace-target_loongarch.h" diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 11c7e0a790203ed71662e85d6cdb312cc46af3e2..1421e77c2c07a62711030a5461e393260decf119 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -111,9 +111,7 @@ static ObjectClass *m68k_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(M68K_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL) { - return NULL; - } + return oc; } @@ -381,7 +379,7 @@ static const VMStateDescription vmstate_freg_tmp = { .name = "freg_tmp", .post_load = freg_post_load, .pre_save = freg_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tmp_mant, m68k_FPReg_tmp), VMSTATE_UINT16(tmp_exp, m68k_FPReg_tmp), VMSTATE_END_OF_LIST() @@ -390,7 +388,7 @@ static const VMStateDescription vmstate_freg_tmp = { static const VMStateDescription vmstate_freg = { .name = "freg", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_WITH_TMP(FPReg, m68k_FPReg_tmp, vmstate_freg_tmp), VMSTATE_END_OF_LIST() } @@ -411,7 +409,7 @@ const VMStateDescription vmmstate_fpu = { .minimum_version_id = 1, .needed = fpu_needed, .post_load = fpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.fpcr, M68kCPU), VMSTATE_UINT32(env.fpsr, M68kCPU), VMSTATE_STRUCT_ARRAY(env.fregs, M68kCPU, 8, 0, vmstate_freg, FPReg), @@ -432,7 +430,7 @@ const VMStateDescription vmstate_cf_spregs = { .version_id = 1, .minimum_version_id = 1, .needed = cf_spregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.macc, M68kCPU, 4), VMSTATE_UINT32(env.macsr, M68kCPU), VMSTATE_UINT32(env.mac_mask, M68kCPU), @@ -454,7 +452,7 @@ const VMStateDescription vmstate_68040_mmu = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_68040_mmu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(env.mmu.ar, M68kCPU), VMSTATE_UINT32(env.mmu.ssw, M68kCPU), VMSTATE_UINT16(env.mmu.tcr, M68kCPU), @@ -479,7 +477,7 @@ const VMStateDescription vmstate_68040_spregs = { .version_id = 1, .minimum_version_id = 1, .needed = cpu_68040_spregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
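Each line in the new target/loongarch/trace-events file above becomes a generated trace_<name>() helper, declared through the new trace.h (which pulls in trace/trace-target_loongarch.h). Below is only a minimal sketch of how a LoongArch KVM error path would fire one of these points; the wrapper function and its error handling are assumptions for illustration, not code from this series.

/* Illustrative sketch only -- not part of the patch.  Assumes the
 * generated trace declarations pulled in by target/loongarch/trace.h. */
#include "qemu/osdep.h"
#include "trace.h"

static int report_get_regs_result(int ret)
{
    if (ret < 0) {
        /* KVM ioctls return a negative errno on failure */
        trace_kvm_failed_get_regs_core(strerror(-ret));
    }
    return ret;
}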
VMSTATE_UINT32(env.vbr, M68kCPU), VMSTATE_UINT32(env.cacr, M68kCPU), VMSTATE_UINT32(env.sfc, M68kCPU), @@ -492,7 +490,7 @@ static const VMStateDescription vmstate_m68k_cpu = { .name = "cpu", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32_ARRAY(env.dregs, M68kCPU, 8), VMSTATE_UINT32_ARRAY(env.aregs, M68kCPU, 8), VMSTATE_UINT32(env.pc, M68kCPU), @@ -509,7 +507,7 @@ static const VMStateDescription vmstate_m68k_cpu = { VMSTATE_INT32(env.pending_level, M68kCPU), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmmstate_fpu, &vmstate_cf_spregs, &vmstate_68040_mmu, diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h index 6cfc696d2ba81e06c367b70678eb8585931e8d20..d13427b0fe61bb08f9a1626d49be37fc44535db3 100644 --- a/target/m68k/cpu.h +++ b/target/m68k/cpu.h @@ -556,8 +556,6 @@ static inline bool m68k_feature(CPUM68KState *env, int feature) return (env->features & BIT_ULL(feature)) != 0; } -void m68k_cpu_list(void); - void register_m68k_insns (CPUM68KState *env); enum { @@ -576,8 +574,6 @@ enum { #define CPU_RESOLVING_TYPE TYPE_M68K_CPU -#define cpu_list m68k_cpu_list - /* MMU modes definitions */ #define MMU_KERNEL_IDX 0 #define MMU_USER_IDX 1 diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 0a1544cd68d9dc7cff8983283d07c83d773c32d6..14508dfa118766b229ab7c496c393d89fcf16448 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -29,46 +29,6 @@ #define SIGNBIT (1u << 31) -/* Sort alphabetically, except for "any". */ -static gint m68k_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_M68K_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_M68K_CPU) == 0) { - return -1; - } else { - return strcasecmp(name_a, name_b); - } -} - -static void m68k_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *c = data; - const char *typename; - char *name; - - typename = object_class_get_name(c); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_M68K_CPU)); - qemu_printf("%s\n", name); - g_free(name); -} - -void m68k_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_M68K_CPU, false); - list = g_slist_sort(list, m68k_cpu_list_compare); - g_slist_foreach(list, m68k_cpu_list_entry, NULL); - g_slist_free(list); -} - static int cf_fpu_gdb_get_reg(CPUM68KState *env, GByteArray *mem_buf, int n) { if (n < 8) { diff --git a/target/microblaze/machine.c b/target/microblaze/machine.c index d24def3992e2776e2e14ef9809d66b968bb81872..51705e4f5c9c0c61069c6a06769f372b9c13e43d 100644 --- a/target/microblaze/machine.c +++ b/target/microblaze/machine.c @@ -22,7 +22,7 @@ #include "migration/cpu.h" -static VMStateField vmstate_mmu_fields[] = { +static const VMStateField vmstate_mmu_fields[] = { VMSTATE_UINT64_2DARRAY(rams, MicroBlazeMMU, 2, TLB_ENTRIES), VMSTATE_UINT8_ARRAY(tids, MicroBlazeMMU, TLB_ENTRIES), VMSTATE_UINT32_ARRAY(regs, MicroBlazeMMU, 3), @@ -60,7 +60,7 @@ static const VMStateInfo vmstate_msr = { .put = put_msr, }; -static VMStateField vmstate_env_fields[] = { +static const VMStateField vmstate_env_fields[] = { VMSTATE_UINT32_ARRAY(regs, CPUMBState, 32), VMSTATE_UINT32(pc, CPUMBState), @@ -92,7 +92,7 @@ static const VMStateDescription 
vmstate_env = { .fields = vmstate_env_fields, }; -static VMStateField vmstate_cpu_fields[] = { +static const VMStateField vmstate_cpu_fields[] = { VMSTATE_CPU(), VMSTATE_STRUCT(env, MicroBlazeCPU, 1, vmstate_env, CPUMBState), VMSTATE_END_OF_LIST() diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index c0c389c59a15939e26bb172d1ca6d43958976308..fbf787d8ce1810706c80a5d4fabf8aca37fb1c85 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -1018,15 +1018,6 @@ const mips_def_t mips_defs[] = }; const int mips_defs_number = ARRAY_SIZE(mips_defs); -void mips_cpu_list(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(mips_defs); i++) { - qemu_printf("MIPS '%s'\n", mips_defs[i].name); - } -} - static void fpu_init (CPUMIPSState *env, const mips_def_t *def) { int i; diff --git a/target/mips/cpu.h b/target/mips/cpu.h index 52f13f0363d517718973e99bd5a475127a8fc9b5..1163a71f3cf1b199f285f582a4a60130fbdf8954 100644 --- a/target/mips/cpu.h +++ b/target/mips/cpu.h @@ -1235,10 +1235,6 @@ struct MIPSCPUClass { bool no_data_aborts; }; -void mips_cpu_list(void); - -#define cpu_list mips_cpu_list - void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env); uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env); diff --git a/target/mips/kvm.c b/target/mips/kvm.c index e22e24ed974e12fea4858884c9a2ad12c2c10a20..15d0cf9adbb389fb73e1d78ee0185c42dfd7d959 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -138,7 +138,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) int r; struct kvm_mips_interrupt intr; - qemu_mutex_lock_iothread(); + bql_lock(); if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && cpu_mips_io_interrupts_pending(cpu)) { @@ -151,7 +151,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) diff --git a/target/mips/meson.build b/target/mips/meson.build index e57ef24ecf4f556fe196bd7193c892e5d0399367..a26d1e1f792f990086c6da5c7a37509e6ce75420 100644 --- a/target/mips/meson.build +++ b/target/mips/meson.build @@ -12,7 +12,7 @@ if have_system subdir('sysemu') endif -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('tcg') endif diff --git a/target/mips/sysemu/machine.c b/target/mips/sysemu/machine.c index 80d37f9c2fc9b20aa2260ab0d2dcf4bb6064529d..218f4c3a673c05104a0ea60496578dcdb4203183 100644 --- a/target/mips/sysemu/machine.c +++ b/target/mips/sysemu/machine.c @@ -44,7 +44,7 @@ static int put_fpr(QEMUFile *f, void *pv, size_t size, return 0; } -const VMStateInfo vmstate_info_fpr = { +static const VMStateInfo vmstate_info_fpr = { .name = "fpr", .get = get_fpr, .put = put_fpr, @@ -56,21 +56,21 @@ const VMStateInfo vmstate_info_fpr = { #define VMSTATE_FPR_ARRAY(_f, _s, _n) \ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) -static VMStateField vmstate_fpu_fields[] = { +static const VMStateField vmstate_fpu_fields[] = { VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32), VMSTATE_UINT32(fcr0, CPUMIPSFPUContext), VMSTATE_UINT32(fcr31, CPUMIPSFPUContext), VMSTATE_END_OF_LIST() }; -const VMStateDescription vmstate_fpu = { +static const VMStateDescription vmstate_fpu = { .name = "cpu/fpu", .version_id = 1, .minimum_version_id = 1, .fields = vmstate_fpu_fields }; -const VMStateDescription vmstate_inactive_fpu = { +static const VMStateDescription vmstate_inactive_fpu = { .name = "cpu/inactive_fpu", .version_id = 1, .minimum_version_id = 1, @@ -79,7 +79,7 @@ const VMStateDescription vmstate_inactive_fpu = { /* TC state */ 
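The repeated ".fields = (VMStateField[])" to ".fields = (const VMStateField[])" changes across the m68k, microblaze and mips vmstate descriptions follow from the descriptions themselves now being fully const: qualifying the compound literals as well lets the whole field tables be emitted as read-only data. A standalone illustration of the C idiom, using simplified stand-in types rather than the QEMU structures:

/* Standalone illustration with stand-in types; shows a const-qualified
 * compound literal used to initialise a pointer-to-const member. */
#include <stdio.h>

typedef struct Field { const char *name; } Field;
typedef struct Description {
    const char *name;
    const Field *fields;              /* read-only field table */
} Description;

static const Description desc = {
    .name = "cpu",
    .fields = (const Field[]) {       /* literal qualified const too */
        { .name = "pc" },
        { .name = NULL },             /* end-of-list sentinel */
    },
};

int main(void)
{
    for (const Field *f = desc.fields; f->name; f++) {
        printf("%s\n", f->name);
    }
    return 0;
}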
-static VMStateField vmstate_tc_fields[] = { +static const VMStateField vmstate_tc_fields[] = { VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), #if defined(TARGET_MIPS64) VMSTATE_UINT64_ARRAY(gpr_hi, TCState, 32), @@ -103,14 +103,14 @@ static VMStateField vmstate_tc_fields[] = { VMSTATE_END_OF_LIST() }; -const VMStateDescription vmstate_tc = { +static const VMStateDescription vmstate_tc = { .name = "cpu/tc", .version_id = 2, .minimum_version_id = 2, .fields = vmstate_tc_fields }; -const VMStateDescription vmstate_inactive_tc = { +static const VMStateDescription vmstate_inactive_tc = { .name = "cpu/inactive_tc", .version_id = 2, .minimum_version_id = 2, @@ -119,11 +119,11 @@ const VMStateDescription vmstate_inactive_tc = { /* MVP state */ -const VMStateDescription vmstate_mvp = { +static const VMStateDescription vmstate_mvp = { .name = "cpu/mvp", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext), VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext), VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext), @@ -190,7 +190,7 @@ static int put_tlb(QEMUFile *f, void *pv, size_t size, return 0; } -const VMStateInfo vmstate_info_tlb = { +static const VMStateInfo vmstate_info_tlb = { .name = "tlb_entry", .get = get_tlb, .put = put_tlb, @@ -202,11 +202,11 @@ const VMStateInfo vmstate_info_tlb = { #define VMSTATE_TLB_ARRAY(_f, _s, _n) \ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0) -const VMStateDescription vmstate_tlb = { +static const VMStateDescription vmstate_tlb = { .name = "cpu/tlb", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext), VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext), VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX), @@ -221,7 +221,7 @@ const VMStateDescription vmstate_mips_cpu = { .version_id = 21, .minimum_version_id = 21, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* Active TC */ VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState), diff --git a/target/mips/sysemu/mips-qmp-cmds.c b/target/mips/sysemu/mips-qmp-cmds.c index 6db4626412cf84d4973c4fc607214572d34ad55d..7340ac70ba01f52673d06e23857b18f44e60355e 100644 --- a/target/mips/sysemu/mips-qmp-cmds.c +++ b/target/mips/sysemu/mips-qmp-cmds.c @@ -19,8 +19,7 @@ static void mips_cpu_add_definition(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_MIPS_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); QAPI_LIST_PREPEND(*cpu_list, info); diff --git a/target/mips/tcg/sysemu/cp0_helper.c b/target/mips/tcg/sysemu/cp0_helper.c index d3495487431c0d769941437f3437fdd1d510663a..cc545aed9ca903075e2a42bcaf4f9a2162e3b05f 100644 --- a/target/mips/tcg/sysemu/cp0_helper.c +++ b/target/mips/tcg/sysemu/cp0_helper.c @@ -59,9 +59,9 @@ static inline void mips_vpe_wake(MIPSCPU *c) * because there might be other conditions that state that c should * be sleeping. 
*/ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); - qemu_mutex_unlock_iothread(); + bql_unlock(); } static inline void mips_vpe_sleep(MIPSCPU *cpu) diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 1173260017af2f3de1a2116ed142cd65f60ba062..381ebe00d37ad68fffcc982ef57b56d74041d2d1 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -164,9 +164,7 @@ static ObjectClass *openrisc_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(OPENRISC_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc != NULL && !object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU)) { - return NULL; - } + return oc; } @@ -255,48 +253,6 @@ static void openrisc_cpu_class_init(ObjectClass *oc, void *data) cc->tcg_ops = &openrisc_tcg_ops; } -/* Sort alphabetically by type name, except for "any". */ -static gint openrisc_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - if (strcmp(name_a, "any-" TYPE_OPENRISC_CPU) == 0) { - return 1; - } else if (strcmp(name_b, "any-" TYPE_OPENRISC_CPU) == 0) { - return -1; - } else { - return strcmp(name_a, name_b); - } -} - -static void openrisc_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_OPENRISC_CPU)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void cpu_openrisc_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_OPENRISC_CPU, false); - list = g_slist_sort(list, openrisc_cpu_list_compare); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, openrisc_cpu_list_entry, NULL); - g_slist_free(list); -} - #define DEFINE_OPENRISC_CPU_TYPE(cpu_model, initfn) \ { \ .parent = TYPE_OPENRISC_CPU, \ diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index dedeb89f8e94a5488ce9e202e478d4f229299e45..b454014ddda50148257693170dc8cfea654e2219 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -299,15 +299,12 @@ struct ArchCPU { CPUOpenRISCState env; }; -void cpu_openrisc_list(void); void openrisc_cpu_dump_state(CPUState *cpu, FILE *f, int flags); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); int print_insn_or1k(bfd_vma addr, disassemble_info *info); -#define cpu_list cpu_openrisc_list - #ifndef CONFIG_USER_ONLY hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c index b7d738864012caba220c9a656d1abea9229aa672..3574e571cb252d3087cf29f86cff00f4c03364f0 100644 --- a/target/openrisc/machine.c +++ b/target/openrisc/machine.c @@ -25,7 +25,7 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(mr, OpenRISCTLBEntry), VMSTATE_UINTTL(tr, OpenRISCTLBEntry), VMSTATE_END_OF_LIST() @@ -36,7 +36,7 @@ static const VMStateDescription vmstate_cpu_tlb = { .name = "cpu_tlb", .version_id = 2, .minimum_version_id = 2, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_STRUCT_ARRAY(itlb, CPUOpenRISCTLBContext, TLB_SIZE, 0, vmstate_tlb_entry, OpenRISCTLBEntry), VMSTATE_STRUCT_ARRAY(dtlb, CPUOpenRISCTLBContext, TLB_SIZE, 0, @@ -71,7 +71,7 @@ static const VMStateDescription vmstate_env = { .name = "env", .version_id = 6, .minimum_version_id = 6, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32), VMSTATE_UINTTL(pc, CPUOpenRISCState), VMSTATE_UINTTL(ppc, CPUOpenRISCState), @@ -135,7 +135,7 @@ const VMStateDescription vmstate_openrisc_cpu = { .version_id = 1, .minimum_version_id = 1, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_CPU(), VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState), VMSTATE_END_OF_LIST() diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index 782a5751b750ce973a2b2cca0f5aa6b74754e3bb..77567afba47f773e4679a8d8fa452222fa116cb3 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -160,20 +160,20 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) break; case TO_SPR(9, 0): /* PICMR */ env->picmr = rb; - qemu_mutex_lock_iothread(); + bql_lock(); if (env->picsr & env->picmr) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case TO_SPR(9, 2): /* PICSR */ env->picsr &= ~rb; break; case TO_SPR(10, 0): /* TTMR */ { - qemu_mutex_lock_iothread(); + bql_lock(); if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) { switch (rb & TTMR_M) { case TIMER_NONE: @@ -198,15 +198,15 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; } cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); } break; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_set(cpu, rb); cpu_openrisc_timer_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; } #endif @@ -347,9 +347,9 @@ target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, return env->ttmr; case TO_SPR(10, 1): /* TTCR */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_openrisc_count_update(cpu); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cpu_openrisc_count_get(cpu); } #endif diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 40fe14a6c25924e684e7b1808ac85d817cb16005..344196a8ce3dfb954fc6631a02388b71dd883054 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -7036,8 +7036,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) return; } - name = g_strndup(typename, - strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); + name = cpu_model_from_type(typename); qemu_printf("PowerPC %-16s PVR %08x\n", name, pcc->pvr); for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { PowerPCCPUAlias *alias = &ppc_cpu_aliases[i]; diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index a42743a3e077633dc6511b73abc436087046be21..2ec6429e36a536b8ae4c5811d44674bc6b587586 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -2222,7 +2222,7 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) void ppc_maybe_interrupt(CPUPPCState *env) { CPUState *cs = env_cpu(env); - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (ppc_next_unmasked_interrupt(env)) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); @@ -3056,7 +3056,7 @@ void helper_msgsnd(target_ulong rb) return; } - 
qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3065,7 +3065,7 @@ void helper_msgsnd(target_ulong rb) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } /* Server Processor Control */ @@ -3093,7 +3093,7 @@ static void book3s_msgsnd_common(int pir, int irq) { CPUState *cs; - qemu_mutex_lock_iothread(); + bql_lock(); CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; @@ -3103,7 +3103,7 @@ static void book3s_msgsnd_common(int pir, int irq) ppc_set_irq(cpu, irq, 1); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); } void helper_book3s_msgsnd(target_ulong rb) @@ -3157,14 +3157,14 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) } /* Does iothread need to be locked for walking CPU list? */ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); if (ttir == thread_id) { ppc_set_irq(ccpu, PPC_INTERRUPT_DOORBELL, 1); - qemu_mutex_unlock_iothread(); + bql_unlock(); return; } } diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index f380342d4dd4d81ae4c390b9e2c17533064c6414..e0b2dcd02ece2bde6daa279411e692e8dc463707 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -244,7 +244,7 @@ void cpu_interrupt_exittb(CPUState *cs) * unless running with TCG. */ if (tcg_enabled()) { - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); } } diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 9b1abe2fc4116fa9470fea3c59a96c0e24692311..26fa9d057576856fa61651d9e5a2896230a539bb 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -1656,7 +1656,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) CPUPPCState *env = &cpu->env; int ret; - qemu_mutex_lock_iothread(); + bql_lock(); switch (run->exit_reason) { case KVM_EXIT_DCR: @@ -1715,7 +1715,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); return ret; } diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 68cbdffecd4f5042464473a5eb00f2326fab1c84..203fe28e014180bb07aa13eadac5fefcc8523e80 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -351,7 +351,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_UINTTL(env.fpscr, PowerPCCPU), VMSTATE_END_OF_LIST() @@ -392,7 +392,7 @@ static const VMStateDescription vmstate_altivec = { .version_id = 1, .minimum_version_id = 1, .needed = altivec_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32), /* * Save the architecture value of the vscr, not the internally @@ -425,7 +425,7 @@ static const VMStateDescription vmstate_vsx = { .version_id = 1, .minimum_version_id = 1, .needed = vsx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, @@ -445,7 +445,7 @@ static const VMStateDescription vmstate_tm = { .version_id = 1, .minimum_version_id = 1, .needed = tm_needed, - .fields = (VMStateField []) { + .fields = (const VMStateField []) { VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32), VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64), VMSTATE_UINT64(env.tm_cr, 
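The qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() and QEMU_IOTHREAD_LOCK_GUARD() conversions running through the mips, openrisc and ppc hunks are a mechanical rename of the Big QEMU Lock API to bql_lock(), bql_unlock(), bql_locked() and BQL_LOCK_GUARD(). A minimal sketch of the new spelling, assuming the declarations from "qemu/main-loop.h" and "hw/core/cpu.h"; not compilable outside the QEMU tree:

/* Minimal sketch of the renamed BQL API; the guard variant releases
 * the lock automatically at end of scope. */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/cpu.h"

static void raise_hard_irq(CPUState *cs)
{
    bool release = false;

    if (!bql_locked()) {
        bql_lock();                 /* was qemu_mutex_lock_iothread() */
        release = true;
    }
    cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    if (release) {
        bql_unlock();               /* was qemu_mutex_unlock_iothread() */
    }
}

static void raise_hard_irq_guarded(CPUState *cs)
{
    BQL_LOCK_GUARD();               /* was QEMU_IOTHREAD_LOCK_GUARD() */
    cpu_interrupt(cs, CPU_INTERRUPT_HARD);
}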
PowerPCCPU), @@ -479,7 +479,7 @@ static const VMStateDescription vmstate_sr = { .version_id = 1, .minimum_version_id = 1, .needed = sr_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32), VMSTATE_END_OF_LIST() }, @@ -553,7 +553,7 @@ static const VMStateDescription vmstate_slb = { .minimum_version_id = 1, .needed = slb_needed, .post_load = slb_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration), VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES), VMSTATE_END_OF_LIST() @@ -565,7 +565,7 @@ static const VMStateDescription vmstate_tlb6xx_entry = { .name = "cpu/tlb6xx_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(pte0, ppc6xx_tlb_t), VMSTATE_UINTTL(pte1, ppc6xx_tlb_t), VMSTATE_UINTTL(EPN, ppc6xx_tlb_t), @@ -586,7 +586,7 @@ static const VMStateDescription vmstate_tlb6xx = { .version_id = 1, .minimum_version_id = 1, .needed = tlb6xx_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU, env.nb_tlb, @@ -601,7 +601,7 @@ static const VMStateDescription vmstate_tlbemb_entry = { .name = "cpu/tlbemb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(RPN, ppcemb_tlb_t), VMSTATE_UINTTL(EPN, ppcemb_tlb_t), VMSTATE_UINTTL(PID, ppcemb_tlb_t), @@ -625,7 +625,7 @@ static const VMStateDescription vmstate_tlbemb = { .version_id = 1, .minimum_version_id = 1, .needed = tlbemb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU, env.nb_tlb, @@ -639,7 +639,7 @@ static const VMStateDescription vmstate_tlbmas_entry = { .name = "cpu/tlbmas_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(mas8, ppcmas_tlb_t), VMSTATE_UINT32(mas1, ppcmas_tlb_t), VMSTATE_UINT64(mas2, ppcmas_tlb_t), @@ -661,7 +661,7 @@ static const VMStateDescription vmstate_tlbmas = { .version_id = 1, .minimum_version_id = 1, .needed = tlbmas_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL), VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU, env.nb_tlb, @@ -684,7 +684,7 @@ static const VMStateDescription vmstate_compat = { .version_id = 1, .minimum_version_id = 1, .needed = compat_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(compat_pvr, PowerPCCPU), VMSTATE_END_OF_LIST() } @@ -700,7 +700,7 @@ static const VMStateDescription vmstate_reservation = { .version_id = 1, .minimum_version_id = 1, .needed = reservation_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU), VMSTATE_UINTTL(env.reserve_length, PowerPCCPU), VMSTATE_UINTTL(env.reserve_val, PowerPCCPU), @@ -717,7 +717,7 @@ const VMStateDescription vmstate_ppc_cpu = { .minimum_version_id = 5, .pre_save = cpu_pre_save, .post_load = cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */ /* User mode architected state */ @@ -748,7 
+748,7 @@ const VMStateDescription vmstate_ppc_cpu = { VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_altivec, &vmstate_vsx, diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index a05bdf78c982e4e038f08ecfc2fe9c6c034cc124..a9d41d28020fd4aa6424b5c6ec0d95dcb7b2b04c 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -238,7 +238,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) return dpdes; } - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); CPUPPCState *cenv = &ccpu->env; @@ -248,7 +248,7 @@ target_ulong helper_load_dpdes(CPUPPCState *env) dpdes |= (0x1 << thread_id); } } - qemu_mutex_unlock_iothread(); + bql_unlock(); return dpdes; } @@ -278,14 +278,14 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val) } /* Does iothread need to be locked for walking CPU list? */ - qemu_mutex_lock_iothread(); + bql_lock(); THREAD_SIBLING_FOREACH(cs, ccs) { PowerPCCPU *ccpu = POWERPC_CPU(ccs); uint32_t thread_id = ppc_cpu_tir(ccpu); ppc_set_irq(cpu, PPC_INTERRUPT_DOORBELL, val & (0x1 << thread_id)); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif /* defined(TARGET_PPC64) */ diff --git a/target/ppc/ppc-qmp-cmds.c b/target/ppc/ppc-qmp-cmds.c index f9acc210562ecfbb3cd07cd4240023175a02c07c..c0c137d9d78e64809273848b3f3ac98d1c906484 100644 --- a/target/ppc/ppc-qmp-cmds.c +++ b/target/ppc/ppc-qmp-cmds.c @@ -181,8 +181,7 @@ static void ppc_cpu_defs_entry(gpointer data, gpointer user_data) typename = object_class_get_name(oc); info = g_malloc0(sizeof(*info)); - info->name = g_strndup(typename, - strlen(typename) - strlen(POWERPC_CPU_TYPE_SUFFIX)); + info->name = cpu_model_from_type(typename); QAPI_LIST_PREPEND(*first, info); } diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c index 08a6b47ee0825699a3e6c8890657e5f232901f4f..f618ed292271ff382e0e8fb826f65c3e22837512 100644 --- a/target/ppc/timebase_helper.c +++ b/target/ppc/timebase_helper.c @@ -173,9 +173,9 @@ target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn) } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_read(env->dcr_env, (uint32_t)dcrn, &val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); @@ -196,9 +196,9 @@ void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val) POWERPC_EXCP_INVAL_INVAL, GETPC()); } else { int ret; - qemu_mutex_lock_iothread(); + bql_lock(); ret = ppc_dcr_write(env->dcr_env, (uint32_t)dcrn, (uint32_t)val); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (unlikely(ret != 0)) { qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n", (uint32_t)dcrn, (uint32_t)dcrn); diff --git a/target/riscv/cpu-qom.h b/target/riscv/cpu-qom.h index 91b3361decc0e654d374fa970474ae4713d83be5..9219c2fcc3105ff22bfd64d261f0610e33fb9d39 100644 --- a/target/riscv/cpu-qom.h +++ b/target/riscv/cpu-qom.h @@ -23,6 +23,8 @@ #define TYPE_RISCV_CPU "riscv-cpu" #define TYPE_RISCV_DYNAMIC_CPU "riscv-dynamic-cpu" +#define TYPE_RISCV_VENDOR_CPU "riscv-vendor-cpu" +#define TYPE_RISCV_BARE_CPU "riscv-bare-cpu" #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU #define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX) @@ -32,6 +34,9 @@ #define TYPE_RISCV_CPU_BASE32 
RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") #define TYPE_RISCV_CPU_BASE128 RISCV_CPU_TYPE_NAME("x-rv128") +#define TYPE_RISCV_CPU_RV64I RISCV_CPU_TYPE_NAME("rv64i") +#define TYPE_RISCV_CPU_RVA22U64 RISCV_CPU_TYPE_NAME("rva22u64") +#define TYPE_RISCV_CPU_RVA22S64 RISCV_CPU_TYPE_NAME("rva22s64") #define TYPE_RISCV_CPU_IBEX RISCV_CPU_TYPE_NAME("lowrisc-ibex") #define TYPE_RISCV_CPU_SHAKTI_C RISCV_CPU_TYPE_NAME("shakti-c") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 83c7c0cf07be15fe68ec512de670cccf9de56949..8cbfc7e781ad4c39ba9759d0d1e956eb3443a234 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -53,6 +53,11 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, #define BYTE(x) (x) #endif +bool riscv_cpu_is_32bit(RISCVCPU *cpu) +{ + return riscv_cpu_mxl(&cpu->env) == MXL_RV32; +} + #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \ {#_name, _min_ver, CPU_CFG_OFFSET(_prop)} @@ -78,6 +83,7 @@ const uint32_t misa_bits[] = {RVI, RVE, RVM, RVA, RVF, RVD, RVV, */ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_zicbom), + ISA_EXT_DATA_ENTRY(zicbop, PRIV_VERSION_1_12_0, ext_zicbop), ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_zicboz), ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicntr, PRIV_VERSION_1_12_0, ext_zicntr), @@ -87,6 +93,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), + ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin), @@ -370,6 +377,17 @@ static void set_satp_mode_max_supported(RISCVCPU *cpu, /* Set the satp mode to the max supported */ static void set_satp_mode_default_map(RISCVCPU *cpu) { + /* + * Bare CPUs do not default to the max available. + * Users must set a valid satp_mode in the command + * line. + */ + if (object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_BARE_CPU) != NULL) { + warn_report("No satp mode set. Defaulting to 'bare'"); + cpu->cfg.satp_mode.map = (1 << VM_1_10_MBARE); + return; + } + cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported; } #endif @@ -552,6 +570,28 @@ static void rv128_base_cpu_init(Object *obj) set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57); #endif } + +static void rv64i_bare_cpu_init(Object *obj) +{ + CPURISCVState *env = &RISCV_CPU(obj)->env; + riscv_cpu_set_misa(env, MXL_RV64, RVI); + + /* Remove the defaults from the parent class */ + RISCV_CPU(obj)->cfg.ext_zicntr = false; + RISCV_CPU(obj)->cfg.ext_zihpm = false; + + /* Set to QEMU's first supported priv version */ + env->priv_ver = PRIV_VERSION_1_10_0; + + /* + * Support all available satp_mode settings. The default + * value will be set to MBARE if the user doesn't set + * satp_mode manually (see set_satp_mode_default()). 
+ */ +#ifndef CONFIG_USER_ONLY + set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV64); +#endif +} #else static void rv32_base_cpu_init(Object *obj) { @@ -646,9 +686,7 @@ static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model) oc = object_class_by_name(typename); g_strfreev(cpuname); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU)) { - return NULL; - } + return oc; } @@ -659,8 +697,7 @@ char *riscv_cpu_get_name(RISCVCPU *cpu) g_assert(g_str_has_suffix(typename, RISCV_CPU_TYPE_SUFFIX)); - return g_strndup(typename, - strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX)); + return cpu_model_from_type(typename); } static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) @@ -894,6 +931,14 @@ static void riscv_cpu_reset_hold(Object *obj) /* mmte is supposed to have pm.current hardwired to 1 */ env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT); + /* + * Bits 10, 6, 2 and 12 of mideleg are read only 1 when the Hypervisor + * extension is enabled. + */ + if (riscv_has_ext(env, RVH)) { + env->mideleg |= HS_MODE_INTERRUPTS; + } + /* * Clear mseccfg and unlock all the PMP entries upon reset. * This is allowed as per the priv and smepmp specifications @@ -946,7 +991,7 @@ static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) #ifndef CONFIG_USER_ONLY static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp) { - bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32; + bool rv32 = riscv_cpu_is_32bit(cpu); uint8_t satp_mode_map_max, satp_mode_supported_max; /* The CPU wants the OS to decide which satp mode to use */ @@ -1022,6 +1067,14 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) { Error *local_err = NULL; +#ifndef CONFIG_USER_ONLY + riscv_cpu_satp_mode_finalize(cpu, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } +#endif + /* * KVM accel does not have a specialized finalize() * callback because its extensions are validated @@ -1034,14 +1087,6 @@ void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp) return; } } - -#ifndef CONFIG_USER_ONLY - riscv_cpu_satp_mode_finalize(cpu, &local_err); - if (local_err != NULL) { - error_propagate(errp, local_err); - return; - } -#endif } static void riscv_cpu_realize(DeviceState *dev, Error **errp) @@ -1300,6 +1345,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), + MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), MULTI_EXT_CFG_BOOL("zfa", ext_zfa, true), MULTI_EXT_CFG_BOOL("zfh", ext_zfh, false), @@ -1343,6 +1389,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zhinxmin", ext_zhinxmin, false), MULTI_EXT_CFG_BOOL("zicbom", ext_zicbom, true), + MULTI_EXT_CFG_BOOL("zicbop", ext_zicbop, true), MULTI_EXT_CFG_BOOL("zicboz", ext_zicboz, true), MULTI_EXT_CFG_BOOL("zmmul", ext_zmmul, false), @@ -1409,6 +1456,13 @@ const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[] = { DEFINE_PROP_END_OF_LIST(), }; +const RISCVCPUMultiExtConfig riscv_cpu_named_features[] = { + MULTI_EXT_CFG_BOOL("svade", svade, true), + MULTI_EXT_CFG_BOOL("zic64b", zic64b, true), + + DEFINE_PROP_END_OF_LIST(), +}; + /* Deprecated entries marked for future removal */ const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[] = { MULTI_EXT_CFG_BOOL("Zifencei", ext_zifencei, true), @@ -1477,11 +1531,79 @@ Property 
riscv_cpu_options[] = { DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64), + DEFINE_PROP_UINT16("cbop_blocksize", RISCVCPU, cfg.cbop_blocksize, 64), DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64), DEFINE_PROP_END_OF_LIST(), }; +/* + * RVA22U64 defines some 'named features' or 'synthetic extensions' + * that are cache related: Za64rs, Zic64b, Ziccif, Ziccrse, Ziccamoa + * and Zicclsm. We do not implement caching in QEMU so we'll consider + * all these named features as always enabled. + * + * There's no riscv,isa update for them (nor for zic64b, despite it + * having a cfg offset) at this moment. + */ +static RISCVCPUProfile RVA22U64 = { + .parent = NULL, + .name = "rva22u64", + .misa_ext = RVI | RVM | RVA | RVF | RVD | RVC | RVU, + .priv_spec = RISCV_PROFILE_ATTR_UNUSED, + .satp_mode = RISCV_PROFILE_ATTR_UNUSED, + .ext_offsets = { + CPU_CFG_OFFSET(ext_zicsr), CPU_CFG_OFFSET(ext_zihintpause), + CPU_CFG_OFFSET(ext_zba), CPU_CFG_OFFSET(ext_zbb), + CPU_CFG_OFFSET(ext_zbs), CPU_CFG_OFFSET(ext_zfhmin), + CPU_CFG_OFFSET(ext_zkt), CPU_CFG_OFFSET(ext_zicntr), + CPU_CFG_OFFSET(ext_zihpm), CPU_CFG_OFFSET(ext_zicbom), + CPU_CFG_OFFSET(ext_zicbop), CPU_CFG_OFFSET(ext_zicboz), + + /* mandatory named features for this profile */ + CPU_CFG_OFFSET(zic64b), + + RISCV_PROFILE_EXT_LIST_END + } +}; + +/* + * As with RVA22U64, RVA22S64 also defines 'named features'. + * + * Cache related features that we consider enabled since we don't + * implement cache: Ssccptr + * + * Other named features that we already implement: Sstvecd, Sstvala, + * Sscounterenw + * + * Named features that we need to enable: svade + * + * The remaining features/extensions comes from RVA22U64. + */ +static RISCVCPUProfile RVA22S64 = { + .parent = &RVA22U64, + .name = "rva22s64", + .misa_ext = RVS, + .priv_spec = PRIV_VERSION_1_12_0, + .satp_mode = VM_1_10_SV39, + .ext_offsets = { + /* rva22s64 exts */ + CPU_CFG_OFFSET(ext_zifencei), CPU_CFG_OFFSET(ext_svpbmt), + CPU_CFG_OFFSET(ext_svinval), + + /* rva22s64 named features */ + CPU_CFG_OFFSET(svade), + + RISCV_PROFILE_EXT_LIST_END + } +}; + +RISCVCPUProfile *riscv_profiles[] = { + &RVA22U64, + &RVA22S64, + NULL, +}; + static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true), @@ -1502,6 +1624,22 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +#if defined(TARGET_RISCV64) +static void rva22u64_profile_cpu_init(Object *obj) +{ + rv64i_bare_cpu_init(obj); + + RVA22U64.enabled = true; +} + +static void rva22s64_profile_cpu_init(Object *obj) +{ + rv64i_bare_cpu_init(obj); + + RVA22S64.enabled = true; +} +#endif + static const gchar *riscv_gdb_arch_name(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); @@ -1573,9 +1711,9 @@ static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name, static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - bool value = RISCV_CPU(obj)->cfg.mvendorid; + uint32_t value = RISCV_CPU(obj)->cfg.mvendorid; - visit_type_bool(v, name, &value, errp); + visit_type_uint32(v, name, &value, errp); } static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, @@ -1602,9 +1740,9 @@ static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name, static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - bool value = RISCV_CPU(obj)->cfg.mimpid; + uint64_t value = RISCV_CPU(obj)->cfg.mimpid; - 
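RVA22U64 and RVA22S64 above are the first users of the new RISCVCPUProfile machinery: misa_ext lists the base ISA bits the profile mandates, and ext_offsets lists CPU_CFG_OFFSET() entries, i.e. offsetof() positions of the bool flags inside RISCVCPUConfig, terminated by RISCV_PROFILE_EXT_LIST_END. The sketch below shows how such a table can be walked to switch the flags on; it is an illustration under those assumptions, not the finalisation code from this series.

/* Illustration only: enable every extension flag named by a profile's
 * ext_offsets table.  Assumes the QEMU-internal target/riscv headers. */
#include "qemu/osdep.h"
#include "cpu.h"

static void profile_apply(RISCVCPUConfig *cfg, const RISCVCPUProfile *profile)
{
    for (int i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) {
        bool *ext_enabled = (bool *)((char *)cfg + profile->ext_offsets[i]);

        *ext_enabled = true;
    }
}

With the two profile CPU types registered further down, "-cpu rva22s64" instantiates an rv64i bare CPU with the profile's mandatory extensions enabled instead of the usual rv64 defaults.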
visit_type_bool(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, @@ -1652,9 +1790,9 @@ static void cpu_set_marchid(Object *obj, Visitor *v, const char *name, static void cpu_get_marchid(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - bool value = RISCV_CPU(obj)->cfg.marchid; + uint64_t value = RISCV_CPU(obj)->cfg.marchid; - visit_type_bool(v, name, &value, errp); + visit_type_uint64(v, name, &value, errp); } static void riscv_cpu_class_init(ObjectClass *c, void *data) @@ -1735,35 +1873,6 @@ char *riscv_isa_string(RISCVCPU *cpu) return isa_str; } -static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b) -{ - ObjectClass *class_a = (ObjectClass *)a; - ObjectClass *class_b = (ObjectClass *)b; - const char *name_a, *name_b; - - name_a = object_class_get_name(class_a); - name_b = object_class_get_name(class_b); - return strcmp(name_a, name_b); -} - -static void riscv_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX); - - qemu_printf("%.*s\n", len, typename); -} - -void riscv_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list(TYPE_RISCV_CPU, false); - list = g_slist_sort(list, riscv_cpu_list_compare); - g_slist_foreach(list, riscv_cpu_list_entry, NULL); - g_slist_free(list); -} - #define DEFINE_CPU(type_name, initfn) \ { \ .name = type_name, \ @@ -1778,6 +1887,27 @@ void riscv_cpu_list(void) .instance_init = initfn \ } +#define DEFINE_VENDOR_CPU(type_name, initfn) \ + { \ + .name = type_name, \ + .parent = TYPE_RISCV_VENDOR_CPU, \ + .instance_init = initfn \ + } + +#define DEFINE_BARE_CPU(type_name, initfn) \ + { \ + .name = type_name, \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = initfn \ + } + +#define DEFINE_PROFILE_CPU(type_name, initfn) \ + { \ + .name = type_name, \ + .parent = TYPE_RISCV_BARE_CPU, \ + .instance_init = initfn \ + } + static const TypeInfo riscv_cpu_type_infos[] = { { .name = TYPE_RISCV_CPU, @@ -1795,22 +1925,35 @@ static const TypeInfo riscv_cpu_type_infos[] = { .parent = TYPE_RISCV_CPU, .abstract = true, }, + { + .name = TYPE_RISCV_VENDOR_CPU, + .parent = TYPE_RISCV_CPU, + .abstract = true, + }, + { + .name = TYPE_RISCV_BARE_CPU, + .parent = TYPE_RISCV_CPU, + .abstract = true, + }, DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_MAX, riscv_max_cpu_init), #if defined(TARGET_RISCV32) DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init), #elif defined(TARGET_RISCV64) DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), - 
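The cpu_get_mvendorid()/cpu_get_mimpid()/cpu_get_marchid() changes above fix a real bug: the getters staged the 32/64-bit IDs in a bool and visited them with visit_type_bool(), so any non-zero ID was reported back as 1. A standalone illustration of the truncation:

/* Standalone illustration of the getter bug fixed above: staging a
 * 64-bit ID in a bool collapses every non-zero value to 1. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t marchid = 0x8000000000000021ULL;  /* arbitrary example value */
    bool as_bool = marchid;        /* old pattern: degrades to 1 */
    uint64_t as_u64 = marchid;     /* new pattern: value preserved */

    printf("bool staging: %d, uint64_t staging: 0x%" PRIx64 "\n",
           as_bool, as_u64);
    return 0;
}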
DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init), + DEFINE_VENDOR_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init), DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init), + DEFINE_BARE_CPU(TYPE_RISCV_CPU_RV64I, rv64i_bare_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22U64, rva22u64_profile_cpu_init), + DEFINE_PROFILE_CPU(TYPE_RISCV_CPU_RVA22S64, rva22s64_profile_cpu_init), #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index d74b361be641ba9004f1e500b504fd5eb6350e4a..5f3955c38db42e46a3bb8814667551165743edc2 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -76,6 +76,22 @@ const char *riscv_get_misa_ext_description(uint32_t bit); #define CPU_CFG_OFFSET(_prop) offsetof(struct RISCVCPUConfig, _prop) +typedef struct riscv_cpu_profile { + struct riscv_cpu_profile *parent; + const char *name; + uint32_t misa_ext; + bool enabled; + bool user_set; + int priv_spec; + int satp_mode; + const int32_t ext_offsets[]; +} RISCVCPUProfile; + +#define RISCV_PROFILE_EXT_LIST_END -1 +#define RISCV_PROFILE_ATTR_UNUSED -1 + +extern RISCVCPUProfile *riscv_profiles[]; + /* Privileged specification version */ enum { PRIV_VERSION_1_10_0 = 0, @@ -490,9 +506,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); char *riscv_isa_string(RISCVCPU *cpu); -void riscv_cpu_list(void); -#define cpu_list riscv_cpu_list #define cpu_mmu_index riscv_cpu_mmu_index #ifndef CONFIG_USER_ONLY @@ -681,6 +695,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, uint64_t *cs_base, uint32_t *pflags); void riscv_cpu_update_mask(CPURISCVState *env); +bool riscv_cpu_is_32bit(RISCVCPU *cpu); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, @@ -767,6 +782,7 @@ typedef struct RISCVCPUMultiExtConfig { extern const RISCVCPUMultiExtConfig riscv_cpu_extensions[]; extern const RISCVCPUMultiExtConfig riscv_cpu_vendor_exts[]; extern const RISCVCPUMultiExtConfig riscv_cpu_experimental_exts[]; +extern const RISCVCPUMultiExtConfig riscv_cpu_named_features[]; extern const RISCVCPUMultiExtConfig riscv_cpu_deprecated_exts[]; extern Property riscv_cpu_options[]; diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index f4605fb190b98a0b8ebb69164c56be7c1b6bb797..780ae6ef17a30fd326c0131ae5093d400fe9307b 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -65,6 +65,7 @@ struct RISCVCPUConfig { bool ext_zicntr; bool ext_zicsr; bool ext_zicbom; + bool ext_zicbop; bool ext_zicboz; bool ext_zicond; bool ext_zihintntl; @@ -77,6 +78,7 @@ struct RISCVCPUConfig { bool ext_svnapot; bool ext_svpbmt; bool ext_zdinx; + bool ext_zacas; bool ext_zawrs; bool ext_zfa; bool ext_zfbfmin; @@ -115,6 +117,8 @@ struct RISCVCPUConfig { bool ext_smepmp; bool rvv_ta_all_1s; bool rvv_ma_all_1s; + bool svade; + bool zic64b; uint32_t mvendorid; uint64_t marchid; @@ -142,6 +146,7 @@ struct RISCVCPUConfig { uint16_t vlen; uint16_t elen; uint16_t cbom_blocksize; + uint16_t cbop_blocksize; uint16_t cboz_blocksize; bool mmu; bool pmp; diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index e7e23b34f455d4b9d01acdfe4808325c86d2169a..c7cc7eb423a064b5f8070bbbd6de3e8f4f1f75a3 100644 --- 
a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -655,7 +655,7 @@ void riscv_cpu_interrupt(CPURISCVState *env) uint64_t gein, vsgein = 0, vstip = 0, irqf = 0; CPUState *cs = env_cpu(env); - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); if (env->virt_enabled) { gein = get_field(env->hstatus, HSTATUS_VGEIN); @@ -681,7 +681,7 @@ uint64_t riscv_cpu_update_mip(CPURISCVState *env, uint64_t mask, uint64_t value) /* No need to update mip for VSTIP */ mask = ((mask == MIP_VSTIP) && env->vstime_irq) ? 0 : mask; - QEMU_IOTHREAD_LOCK_GUARD(); + BQL_LOCK_GUARD(); env->mip = (env->mip & ~mask) | (value & mask); @@ -1749,8 +1749,8 @@ void riscv_cpu_do_interrupt(CPUState *cs) * See if we need to adjust cause. Yes if its VS mode interrupt * no if hypervisor has delegated one of hs mode's interrupt */ - if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || - cause == IRQ_VS_EXT) { + if (async && (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT || + cause == IRQ_VS_EXT)) { cause = cause - 1; } write_gva = false; diff --git a/target/riscv/csr.c b/target/riscv/csr.c index fde7ce1a5336b5d744c6676edadfe64b1688a2df..674ea075a4ddbb67874713e1c65a2ce49c1f45dd 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -195,8 +195,11 @@ static RISCVException mctr(CPURISCVState *env, int csrno) if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) { /* Offset for RV32 mhpmcounternh counters */ - base_csrno += 0x80; + csrno -= 0x80; } + + g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31); + ctr_index = csrno - base_csrno; if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) { /* The PMU is not enabled or counter is out of range */ @@ -907,11 +910,11 @@ static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val) static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, bool upper_half, uint32_t ctr_idx) { - PMUCTRState counter = env->pmu_ctrs[ctr_idx]; - target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev : - counter.mhpmcounter_prev; - target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val : - counter.mhpmcounter_val; + PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; + target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev : + counter->mhpmcounter_prev; + target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val : + counter->mhpmcounter_val; if (get_field(env->mcountinhibit, BIT(ctr_idx))) { /* @@ -919,12 +922,12 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, * stop the icount counting. Just return the counter value written by * the supervisor to indicate that counter was not incremented. 
*/ - if (!counter.started) { + if (!counter->started) { *val = ctr_val; return RISCV_EXCP_NONE; } else { /* Mark that the counter has been stopped */ - counter.started = false; + counter->started = false; } } @@ -1328,11 +1331,14 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | - MSTATUS_TW | MSTATUS_VS; + MSTATUS_TW; if (riscv_has_ext(env, RVF)) { mask |= MSTATUS_FS; } + if (riscv_has_ext(env, RVV)) { + mask |= MSTATUS_VS; + } if (xl != MXL_RV32 || env->debugger) { if (riscv_has_ext(env, RVH)) { diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 33597fe2bb1bb2be5d2c1e8b96df0ccbad0d74ac..f22df04cfd1b887e7672509ac81a98e384fe7705 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -1004,3 +1004,9 @@ vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1 vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1 vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1 vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1 + +# *** RV32 Zacas Standard Extension *** +amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st +amocas_d 00101 . . ..... ..... 011 ..... 0101111 @atom_st +# *** RV64 Zacas Standard Extension *** +amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 78bd363310d40039547a0fbe1a1843ae4834a865..3871f0ea73d8d8dda0dd15f87f24030e7d06ead3 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -3631,19 +3631,19 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a) } /* - * Whole Vector Register Move Instructions ignore vtype and vl setting. - * Thus, we don't need to check vill bit. (Section 16.6) + * Whole Vector Register Move Instructions depend on vtype register(vsew). + * Thus, we need to check vill bit. (Section 16.6) */ #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ { \ if (require_rvv(s) && \ + vext_check_isa_ill(s) && \ QEMU_IS_ALIGNED(a->rd, LEN) && \ QEMU_IS_ALIGNED(a->rs2, LEN)) { \ uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ if (s->vstart_eq_zero) { \ - /* EEW = 8 */ \ - tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ + tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \ vreg_ofs(s, a->rs2), maxsz, maxsz); \ mark_vs_dirty(s); \ } else { \ diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc new file mode 100644 index 0000000000000000000000000000000000000000..5d274d4c08b2efa110cd8a90398772a0f83d261e --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzacas.c.inc @@ -0,0 +1,150 @@ +/* + * RISC-V translation routines for the RV64 Zacas Standard Extension. + * + * Copyright (c) 2020-2023 PLCT Lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
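Among the riscv hunks above, the riscv_pmu_read_ctr() change is a behaviour fix rather than a cleanup: the function copied PMUCTRState out of env->pmu_ctrs[] by value, so clearing counter.started only touched the local copy and the "counter stopped" state was never recorded. A standalone illustration of the by-value pitfall:

/* Standalone illustration: writing through a by-value copy of a struct
 * leaves the original array element unchanged. */
#include <stdbool.h>
#include <stdio.h>

typedef struct { bool started; } Counter;

static void stop_by_value(Counter c)    { c.started = false; /* lost */ }
static void stop_by_pointer(Counter *c) { c->started = false; }

int main(void)
{
    Counter ctrs[2] = { { true }, { true } };

    stop_by_value(ctrs[0]);     /* no effect outside the callee */
    stop_by_pointer(&ctrs[1]);  /* updates the element in place */

    printf("%d %d\n", ctrs[0].started, ctrs[1].started);  /* prints: 1 0 */
    return 0;
}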
+ */ + +#define REQUIRE_ZACAS(ctx) do { \ + if (!ctx->cfg_ptr->ext_zacas) { \ + return false; \ + } \ +} while (0) + +static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + TCGv dest = get_gpr(ctx, a->rd, EXT_NONE); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a) +{ + REQUIRE_ZACAS(ctx); + return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESL); +} + +static TCGv_i64 get_gpr_pair(DisasContext *ctx, int reg_num) +{ + TCGv_i64 t; + + assert(get_ol(ctx) == MXL_RV32); + + if (reg_num == 0) { + return tcg_constant_i64(0); + } + + t = tcg_temp_new_i64(); + tcg_gen_concat_tl_i64(t, cpu_gpr[reg_num], cpu_gpr[reg_num + 1]); + return t; +} + +static void gen_set_gpr_pair(DisasContext *ctx, int reg_num, TCGv_i64 t) +{ + assert(get_ol(ctx) == MXL_RV32); + + if (reg_num != 0) { +#ifdef TARGET_RISCV32 + tcg_gen_extr_i64_i32(cpu_gpr[reg_num], cpu_gpr[reg_num + 1], t); +#else + tcg_gen_ext32s_i64(cpu_gpr[reg_num], t); + tcg_gen_sari_i64(cpu_gpr[reg_num + 1], t, 32); +#endif + + if (get_xl_max(ctx) == MXL_RV128) { + tcg_gen_sari_tl(cpu_gprh[reg_num], cpu_gpr[reg_num], 63); + tcg_gen_sari_tl(cpu_gprh[reg_num + 1], cpu_gpr[reg_num + 1], 63); + } + } +} + +static bool gen_cmpxchg64(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + /* + * Encodings with odd numbered registers specified in rs2 and rd are + * reserved. + */ + if ((a->rs2 | a->rd) & 1) { + return false; + } + + TCGv_i64 dest = get_gpr_pair(ctx, a->rd); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv_i64 src2 = get_gpr_pair(ctx, a->rs2); + + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_i64(dest, src1, dest, src2, ctx->mem_idx, mop); + + gen_set_gpr_pair(ctx, a->rd, dest); + return true; +} + +static bool trans_amocas_d(DisasContext *ctx, arg_amocas_d *a) +{ + REQUIRE_ZACAS(ctx); + switch (get_ol(ctx)) { + case MXL_RV32: + return gen_cmpxchg64(ctx, a, MO_ALIGN | MO_TEUQ); + case MXL_RV64: + case MXL_RV128: + return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TEUQ); + default: + g_assert_not_reached(); + } +} + +static bool trans_amocas_q(DisasContext *ctx, arg_amocas_q *a) +{ + REQUIRE_ZACAS(ctx); + REQUIRE_64BIT(ctx); + + /* + * Encodings with odd numbered registers specified in rs2 and rd are + * reserved. + */ + if ((a->rs2 | a->rd) & 1) { + return false; + } + +#ifdef TARGET_RISCV64 + TCGv_i128 dest = tcg_temp_new_i128(); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv_i128 src2 = tcg_temp_new_i128(); + TCGv_i64 src2l = get_gpr(ctx, a->rs2, EXT_NONE); + TCGv_i64 src2h = get_gpr(ctx, a->rs2 == 0 ? 0 : a->rs2 + 1, EXT_NONE); + TCGv_i64 destl = get_gpr(ctx, a->rd, EXT_NONE); + TCGv_i64 desth = get_gpr(ctx, a->rd == 0 ? 
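gen_cmpxchg() above maps amocas.w straight onto tcg_gen_atomic_cmpxchg_tl(): rd supplies the compare value and receives the prior memory contents, rs2 supplies the swap value, and memory is only updated on a match. A standalone C11 model of that semantic, as an illustration only and not the translator itself:

/* Standalone model of amocas.w semantics: rd holds the expected value
 * and receives the old memory value; rs2 holds the swap value. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t amocas_w_model(_Atomic uint32_t *mem, uint32_t rd, uint32_t rs2)
{
    uint32_t expected = rd;

    /* On success *mem becomes rs2; either way 'expected' ends up holding
     * the value that was in memory, which the instruction writes to rd. */
    atomic_compare_exchange_strong(mem, &expected, rs2);
    return expected;
}

int main(void)
{
    _Atomic uint32_t mem = 5;
    uint32_t rd;

    rd = amocas_w_model(&mem, 5, 9);   /* match: mem becomes 9, rd = 5 */
    printf("rd=%u mem=%u\n", rd, (unsigned)mem);

    rd = amocas_w_model(&mem, 5, 7);   /* mismatch: mem stays 9, rd = 9 */
    printf("rd=%u mem=%u\n", rd, (unsigned)mem);
    return 0;
}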
0 : a->rd + 1, EXT_NONE); + + tcg_gen_concat_i64_i128(src2, src2l, src2h); + tcg_gen_concat_i64_i128(dest, destl, desth); + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_i128(dest, src1, dest, src2, ctx->mem_idx, + (MO_ALIGN | MO_TEUO)); + + tcg_gen_extr_i128_i64(destl, desth, dest); + + if (a->rd != 0) { + gen_set_gpr(ctx, a->rd, destl); + gen_set_gpr(ctx, a->rd + 1, desth); + } +#endif + + return true; +} diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc index 810d76665a6aed99d0b8ec0435bb63821d9da7f9..dbb6411239606bac1cb7ac58b5d687f34ee99907 100644 --- a/target/riscv/insn_trans/trans_xthead.c.inc +++ b/target/riscv/insn_trans/trans_xthead.c.inc @@ -296,7 +296,7 @@ NOP_PRIVCHECK(th_dcache_csw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_cisw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_isw, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_dcache_cpal1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) -NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) +NOP_PRIVCHECK(th_dcache_cval1, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MSU) NOP_PRIVCHECK(th_icache_iall, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) NOP_PRIVCHECK(th_icache_ialls, REQUIRE_XTHEADCMO, REQUIRE_PRIV_MS) diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 45b6cf1cfa04a87dd9526c8b017ecca78a956e4f..680a729cd89a88ef3c444c0f940376c5d0a3b43e 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -18,6 +18,7 @@ #include "qemu/osdep.h" #include +#include #include @@ -47,6 +48,9 @@ #include "sysemu/runstate.h" #include "hw/riscv/numa.h" +#define PR_RISCV_V_SET_CONTROL 69 +#define PR_RISCV_V_VSTATE_CTRL_ON 2 + void riscv_kvm_aplic_request(void *opaque, int irq, int level) { kvm_set_irq(kvm_state, irq, !!level); @@ -54,7 +58,7 @@ void riscv_kvm_aplic_request(void *opaque, int irq, int level) static bool cap_has_mp_state; -static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, +static uint64_t kvm_riscv_reg_id_ulong(CPURISCVState *env, uint64_t type, uint64_t idx) { uint64_t id = KVM_REG_RISCV | type | idx; @@ -72,18 +76,38 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, return id; } -#define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \ - KVM_REG_RISCV_CORE_REG(name)) +static uint64_t kvm_riscv_reg_id_u32(uint64_t type, uint64_t idx) +{ + return KVM_REG_RISCV | KVM_REG_SIZE_U32 | type | idx; +} + +static uint64_t kvm_riscv_reg_id_u64(uint64_t type, uint64_t idx) +{ + return KVM_REG_RISCV | KVM_REG_SIZE_U64 | type | idx; +} -#define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \ - KVM_REG_RISCV_CSR_REG(name)) +#define RISCV_CORE_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, \ + KVM_REG_RISCV_CORE_REG(name)) -#define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \ +#define RISCV_CSR_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CSR, \ + KVM_REG_RISCV_CSR_REG(name)) + +#define RISCV_CONFIG_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, \ + KVM_REG_RISCV_CONFIG_REG(name)) + +#define RISCV_TIMER_REG(name) kvm_riscv_reg_id_u64(KVM_REG_RISCV_TIMER, \ KVM_REG_RISCV_TIMER_REG(name)) -#define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx) +#define RISCV_FP_F_REG(idx) kvm_riscv_reg_id_u32(KVM_REG_RISCV_FP_F, idx) + +#define RISCV_FP_D_REG(idx) kvm_riscv_reg_id_u64(KVM_REG_RISCV_FP_D, idx) -#define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, 
KVM_REG_RISCV_FP_D, idx) +#define RISCV_VECTOR_CSR_REG(env, name) \ + kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_VECTOR, \ + KVM_REG_RISCV_VECTOR_CSR_REG(name)) #define KVM_RISCV_GET_CSR(cs, env, csr, reg) \ do { \ @@ -101,17 +125,17 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type, } \ } while (0) -#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \ +#define KVM_RISCV_GET_TIMER(cs, name, reg) \ do { \ - int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \ + int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(name), ®); \ if (ret) { \ abort(); \ } \ } while (0) -#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \ +#define KVM_RISCV_SET_TIMER(cs, name, reg) \ do { \ - int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \ + int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(name), ®); \ if (ret) { \ abort(); \ } \ @@ -138,6 +162,7 @@ static KVMCPUConfig kvm_misa_ext_cfgs[] = { KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H), KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I), KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M), + KVM_MISA_CFG(RVV, KVM_RISCV_ISA_EXT_V), }; static void kvm_cpu_get_misa_ext_cfg(Object *obj, Visitor *v, @@ -202,8 +227,8 @@ static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs) /* If we're here we're going to disable the MISA bit */ reg = 0; - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - misa_cfg->kvm_reg_id); + id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + misa_cfg->kvm_reg_id); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { /* @@ -364,8 +389,8 @@ static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs) continue; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + multi_ext_cfg->kvm_reg_id); reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg); ret = kvm_set_one_reg(cs, id, ®); if (ret != 0) { @@ -398,7 +423,7 @@ static void cpu_set_cfg_unavailable(Object *obj, Visitor *v, } if (value) { - error_setg(errp, "extension %s is not available with KVM", + error_setg(errp, "'%s' is not available with KVM", propname); } } @@ -479,6 +504,11 @@ static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj) riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_extensions); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_vendor_exts); riscv_cpu_add_kvm_unavail_prop_array(cpu_obj, riscv_cpu_experimental_exts); + + /* We don't have the needed KVM support for profiles */ + for (i = 0; riscv_profiles[i] != NULL; i++) { + riscv_cpu_add_kvm_unavail_prop(cpu_obj, riscv_profiles[i]->name); + } } static int kvm_riscv_get_regs_core(CPUState *cs) @@ -495,7 +525,7 @@ static int kvm_riscv_get_regs_core(CPUState *cs) env->pc = reg; for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i); + uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); ret = kvm_get_one_reg(cs, id, ®); if (ret) { return ret; @@ -520,7 +550,7 @@ static int kvm_riscv_put_regs_core(CPUState *cs) } for (i = 1; i < 32; i++) { - uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i); + uint64_t id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CORE, i); reg = env->gpr[i]; ret = kvm_set_one_reg(cs, id, ®); if (ret) { @@ -574,7 +604,7 @@ static int kvm_riscv_get_regs_fp(CPUState *cs) if (riscv_has_ext(env, RVD)) { uint64_t reg; for (i = 0; i < 32; i++) { - ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), ®); + ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(i), ®); if (ret) { return ret; } @@ -586,7 +616,7 @@ static int 
kvm_riscv_get_regs_fp(CPUState *cs) if (riscv_has_ext(env, RVF)) { uint32_t reg; for (i = 0; i < 32; i++) { - ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), ®); + ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(i), ®); if (ret) { return ret; } @@ -608,7 +638,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs) uint64_t reg; for (i = 0; i < 32; i++) { reg = env->fpr[i]; - ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), ®); + ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(i), ®); if (ret) { return ret; } @@ -620,7 +650,7 @@ static int kvm_riscv_put_regs_fp(CPUState *cs) uint32_t reg; for (i = 0; i < 32; i++) { reg = env->fpr[i]; - ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), ®); + ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(i), ®); if (ret) { return ret; } @@ -639,10 +669,10 @@ static void kvm_riscv_get_regs_timer(CPUState *cs) return; } - KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time); - KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare); - KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state); - KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency); + KVM_RISCV_GET_TIMER(cs, time, env->kvm_timer_time); + KVM_RISCV_GET_TIMER(cs, compare, env->kvm_timer_compare); + KVM_RISCV_GET_TIMER(cs, state, env->kvm_timer_state); + KVM_RISCV_GET_TIMER(cs, frequency, env->kvm_timer_frequency); env->kvm_timer_dirty = true; } @@ -656,8 +686,8 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) return; } - KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time); - KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare); + KVM_RISCV_SET_TIMER(cs, time, env->kvm_timer_time); + KVM_RISCV_SET_TIMER(cs, compare, env->kvm_timer_compare); /* * To set register of RISCV_TIMER_REG(state) will occur a error from KVM @@ -666,7 +696,7 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) * TODO If KVM changes, adapt here. */ if (env->kvm_timer_state) { - KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state); + KVM_RISCV_SET_TIMER(cs, state, env->kvm_timer_state); } /* @@ -675,7 +705,7 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) * during the migration. 
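 * The check below only compares the destination host's frequency with
 * the value saved in env->kvm_timer_frequency and reports a mismatch
 * via error_report(); it does not abort the migration.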
*/ if (migration_is_running(migrate_get_current()->state)) { - KVM_RISCV_GET_TIMER(cs, env, frequency, reg); + KVM_RISCV_GET_TIMER(cs, frequency, reg); if (reg != env->kvm_timer_frequency) { error_report("Dst Hosts timer frequency != Src Hosts"); } @@ -684,6 +714,65 @@ static void kvm_riscv_put_regs_timer(CPUState *cs) env->kvm_timer_dirty = false; } +static int kvm_riscv_get_regs_vector(CPUState *cs) +{ + CPURISCVState *env = &RISCV_CPU(cs)->env; + target_ulong reg; + int ret = 0; + + if (!riscv_has_ext(env, RVV)) { + return 0; + } + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + if (ret) { + return ret; + } + env->vstart = reg; + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + if (ret) { + return ret; + } + env->vl = reg; + + ret = kvm_get_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + if (ret) { + return ret; + } + env->vtype = reg; + + return 0; +} + +static int kvm_riscv_put_regs_vector(CPUState *cs) +{ + CPURISCVState *env = &RISCV_CPU(cs)->env; + target_ulong reg; + int ret = 0; + + if (!riscv_has_ext(env, RVV)) { + return 0; + } + + reg = env->vstart; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vstart), ®); + if (ret) { + return ret; + } + + reg = env->vl; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vl), ®); + if (ret) { + return ret; + } + + reg = env->vtype; + ret = kvm_set_one_reg(cs, RISCV_VECTOR_CSR_REG(env, vtype), ®); + + return ret; +} + typedef struct KVMScratchCPU { int kvmfd; int vmfd; @@ -746,24 +835,21 @@ static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mvendorid)); + reg.id = RISCV_CONFIG_REG(env, mvendorid); reg.addr = (uint64_t)&cpu->cfg.mvendorid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve mvendorid from host, error %d", ret); } - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(marchid)); + reg.id = RISCV_CONFIG_REG(env, marchid); reg.addr = (uint64_t)&cpu->cfg.marchid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { error_report("Unable to retrieve marchid from host, error %d", ret); } - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mimpid)); + reg.id = RISCV_CONFIG_REG(env, mimpid); reg.addr = (uint64_t)&cpu->cfg.mimpid; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -778,8 +864,7 @@ static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu, struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(isa)); + reg.id = RISCV_CONFIG_REG(env, isa); reg.addr = (uint64_t)&env->misa_ext_mask; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); @@ -800,8 +885,8 @@ static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu, struct kvm_one_reg reg; int ret; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - cbomz_cfg->kvm_reg_id); + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_CONFIG, + cbomz_cfg->kvm_reg_id); reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg); ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -822,8 +907,8 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i]; struct kvm_one_reg reg; - reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + reg.id = kvm_riscv_reg_id_ulong(env, KVM_REG_RISCV_ISA_EXT, + 
multi_ext_cfg->kvm_reg_id); reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { @@ -832,9 +917,8 @@ static void kvm_riscv_read_multiext_legacy(RISCVCPU *cpu, multi_ext_cfg->supported = false; val = false; } else { - error_report("Unable to read ISA_EXT KVM register %s, " - "error code: %s", multi_ext_cfg->name, - strerrorname_np(errno)); + error_report("Unable to read ISA_EXT KVM register %s: %s", + multi_ext_cfg->name, strerror(errno)); exit(EXIT_FAILURE); } } else { @@ -895,8 +979,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) * * Error out if we get any other errno. */ - error_report("Error when accessing get-reg-list, code: %s", - strerrorname_np(errno)); + error_report("Error when accessing get-reg-list: %s", + strerror(errno)); exit(EXIT_FAILURE); } @@ -905,8 +989,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) reglist->n = rl_struct.n; ret = ioctl(kvmcpu->cpufd, KVM_GET_REG_LIST, reglist); if (ret) { - error_report("Error when reading KVM_GET_REG_LIST, code %s ", - strerrorname_np(errno)); + error_report("Error when reading KVM_GET_REG_LIST: %s", + strerror(errno)); exit(EXIT_FAILURE); } @@ -915,8 +999,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) { multi_ext_cfg = &kvm_multi_ext_cfgs[i]; - reg_id = kvm_riscv_reg_id(&cpu->env, KVM_REG_RISCV_ISA_EXT, - multi_ext_cfg->kvm_reg_id); + reg_id = kvm_riscv_reg_id_ulong(&cpu->env, KVM_REG_RISCV_ISA_EXT, + multi_ext_cfg->kvm_reg_id); reg_search = bsearch(®_id, reglist->reg, reglist->n, sizeof(uint64_t), uint64_cmp); if (!reg_search) { @@ -927,9 +1011,8 @@ static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu) reg.addr = (uint64_t)&val; ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, ®); if (ret != 0) { - error_report("Unable to read ISA_EXT KVM register %s, " - "error code: %s", multi_ext_cfg->name, - strerrorname_np(errno)); + error_report("Unable to read ISA_EXT KVM register %s: %s", + multi_ext_cfg->name, strerror(errno)); exit(EXIT_FAILURE); } @@ -985,6 +1068,11 @@ int kvm_arch_get_registers(CPUState *cs) return ret; } + ret = kvm_riscv_get_regs_vector(cs); + if (ret) { + return ret; + } + return ret; } @@ -1025,6 +1113,11 @@ int kvm_arch_put_registers(CPUState *cs, int level) return ret; } + ret = kvm_riscv_put_regs_vector(cs); + if (ret) { + return ret; + } + if (KVM_PUT_RESET_STATE == level) { RISCVCPU *cpu = RISCV_CPU(cs); if (cs->cpu_index == 0) { @@ -1084,8 +1177,7 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) uint64_t id; int ret; - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mvendorid)); + id = RISCV_CONFIG_REG(env, mvendorid); /* * cfg.mvendorid is an uint32 but a target_ulong will * be written. 
Assign it to a target_ulong var to avoid @@ -1097,15 +1189,13 @@ static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs) return ret; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(marchid)); + id = RISCV_CONFIG_REG(env, marchid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid); if (ret != 0) { return ret; } - id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG, - KVM_REG_RISCV_CONFIG_REG(mimpid)); + id = RISCV_CONFIG_REG(env, mimpid); ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid); return ret; @@ -1378,21 +1468,24 @@ void kvm_riscv_aia_create(MachineState *machine, uint64_t group_shift, exit(1); } - socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, - KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, - &socket_bits, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set group_bits"); - exit(1); - } - ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, - KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT, - &group_shift, true, NULL); - if (ret < 0) { - error_report("KVM AIA: failed to set group_shift"); - exit(1); + if (socket_count > 1) { + socket_bits = find_last_bit(&socket_count, BITS_PER_LONG) + 1; + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, + KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS, + &socket_bits, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set group_bits"); + exit(1); + } + + ret = kvm_device_access(aia_fd, KVM_DEV_RISCV_AIA_GRP_CONFIG, + KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT, + &group_shift, true, NULL); + if (ret < 0) { + error_report("KVM AIA: failed to set group_shift"); + exit(1); + } } guest_bits = guest_num == 0 ? 0 : @@ -1481,11 +1574,36 @@ static void kvm_cpu_instance_init(CPUState *cs) } } +/* + * We'll get here via the following path: + * + * riscv_cpu_realize() + * -> cpu_exec_realizefn() + * -> kvm_cpu_realize() (via accel_cpu_common_realize()) + */ +static bool kvm_cpu_realize(CPUState *cs, Error **errp) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + int ret; + + if (riscv_has_ext(&cpu->env, RVV)) { + ret = prctl(PR_RISCV_V_SET_CONTROL, PR_RISCV_V_VSTATE_CTRL_ON); + if (ret) { + error_setg(errp, "Error in prctl PR_RISCV_V_SET_CONTROL, code: %s", + strerrorname_np(errno)); + return false; + } + } + + return true; +} + static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data) { AccelCPUClass *acc = ACCEL_CPU_CLASS(oc); acc->cpu_instance_init = kvm_cpu_instance_init; + acc->cpu_target_realize = kvm_cpu_realize; } static const TypeInfo kvm_cpu_accel_type_info = { diff --git a/target/riscv/machine.c b/target/riscv/machine.c index fdde243e0409cc833230aa147a6fed32e92e42e6..72fe2374dc2a80d15571a9a2fdb6aa8480062b99 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -49,7 +49,7 @@ static const VMStateDescription vmstate_pmp_entry = { .name = "cpu/pmp/entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(addr_reg, pmp_entry_t), VMSTATE_UINT8(cfg_reg, pmp_entry_t), VMSTATE_END_OF_LIST() @@ -62,7 +62,7 @@ static const VMStateDescription vmstate_pmp = { .minimum_version_id = 1, .needed = pmp_needed, .post_load = pmp_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_STRUCT_ARRAY(env.pmp_state.pmp, RISCVCPU, MAX_RISCV_PMPS, 0, vmstate_pmp_entry, pmp_entry_t), VMSTATE_END_OF_LIST() @@ -82,7 +82,7 @@ static const VMStateDescription vmstate_hyper = { .version_id = 3, .minimum_version_id = 3, .needed = 
hyper_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.hstatus, RISCVCPU), VMSTATE_UINTTL(env.hedeleg, RISCVCPU), VMSTATE_UINT64(env.hideleg, RISCVCPU), @@ -138,7 +138,7 @@ static const VMStateDescription vmstate_vector = { .version_id = 2, .minimum_version_id = 2, .needed = vector_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.vreg, RISCVCPU, 32 * RV_VLEN_MAX / 64), VMSTATE_UINTTL(env.vxrm, RISCVCPU), VMSTATE_UINTTL(env.vxsat, RISCVCPU), @@ -163,7 +163,7 @@ static const VMStateDescription vmstate_pointermasking = { .version_id = 1, .minimum_version_id = 1, .needed = pointermasking_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.mmte, RISCVCPU), VMSTATE_UINTTL(env.mpmmask, RISCVCPU), VMSTATE_UINTTL(env.mpmbase, RISCVCPU), @@ -189,7 +189,7 @@ static const VMStateDescription vmstate_rv128 = { .version_id = 1, .minimum_version_id = 1, .needed = rv128_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gprh, RISCVCPU, 32), VMSTATE_UINT64(env.mscratchh, RISCVCPU), VMSTATE_UINT64(env.sscratchh, RISCVCPU), @@ -218,7 +218,7 @@ static const VMStateDescription vmstate_kvmtimer = { .minimum_version_id = 1, .needed = kvmtimer_needed, .post_load = cpu_kvmtimer_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.kvm_timer_time, RISCVCPU), VMSTATE_UINT64(env.kvm_timer_compare, RISCVCPU), VMSTATE_UINT64(env.kvm_timer_state, RISCVCPU), @@ -252,7 +252,7 @@ static const VMStateDescription vmstate_debug = { .minimum_version_id = 2, .needed = debug_needed, .post_load = debug_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.trigger_cur, RISCVCPU), VMSTATE_UINTTL_ARRAY(env.tdata1, RISCVCPU, RV_MAX_TRIGGERS), VMSTATE_UINTTL_ARRAY(env.tdata2, RISCVCPU, RV_MAX_TRIGGERS), @@ -283,7 +283,7 @@ static const VMStateDescription vmstate_smstateen = { .version_id = 1, .minimum_version_id = 1, .needed = smstateen_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.mstateen, RISCVCPU, 4), VMSTATE_UINT64_ARRAY(env.hstateen, RISCVCPU, 4), VMSTATE_UINT64_ARRAY(env.sstateen, RISCVCPU, 4), @@ -304,7 +304,7 @@ static const VMStateDescription vmstate_envcfg = { .version_id = 1, .minimum_version_id = 1, .needed = envcfg_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.menvcfg, RISCVCPU), VMSTATE_UINTTL(env.senvcfg, RISCVCPU), VMSTATE_UINT64(env.henvcfg, RISCVCPU), @@ -324,7 +324,7 @@ static const VMStateDescription vmstate_pmu_ctr_state = { .version_id = 1, .minimum_version_id = 1, .needed = pmu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState), @@ -346,7 +346,7 @@ static const VMStateDescription vmstate_jvt = { .version_id = 1, .minimum_version_id = 1, .needed = jvt_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL(env.jvt, RISCVCPU), VMSTATE_END_OF_LIST() } @@ -357,7 +357,7 @@ const VMStateDescription vmstate_riscv_cpu = { .version_id = 9, .minimum_version_id = 9, .post_load = riscv_cpu_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 
32), VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64), @@ -411,7 +411,7 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription * []) { + .subsections = (const VMStateDescription * const []) { &vmstate_pmp, &vmstate_hyper, &vmstate_vector, diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 162e88a90a63227e4eb2a6c844ec0b067018ee5f..2a76b611a00369adf6ec65a1d2d58c00f3ab684a 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -126,7 +126,7 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val) /* If !mseccfg.MML then ignore writes with encoding RW=01 */ if ((val & PMP_WRITE) && !(val & PMP_READ) && !MSECCFG_MML_ISSET(env)) { - val &= ~(PMP_WRITE | PMP_READ); + return false; } env->pmp_state.pmp[pmp_index].cfg_reg = val; pmp_update_rule_addr(env, pmp_index); @@ -150,8 +150,7 @@ void pmp_unlock_entries(CPURISCVState *env) } } -static void pmp_decode_napot(target_ulong a, target_ulong *sa, - target_ulong *ea) +static void pmp_decode_napot(hwaddr a, hwaddr *sa, hwaddr *ea) { /* * aaaa...aaa0 8-byte NAPOT range @@ -173,8 +172,8 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg; target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg; target_ulong prev_addr = 0u; - target_ulong sa = 0u; - target_ulong ea = 0u; + hwaddr sa = 0u; + hwaddr ea = 0u; if (pmp_index >= 1u) { prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg; @@ -227,8 +226,7 @@ void pmp_update_rule_nums(CPURISCVState *env) } } -static int pmp_is_in_range(CPURISCVState *env, int pmp_index, - target_ulong addr) +static int pmp_is_in_range(CPURISCVState *env, int pmp_index, hwaddr addr) { int result = 0; @@ -305,14 +303,14 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, pmp_priv_t privs, * Return true if a pmp rule match or default match * Return false if no match */ -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode) { int i = 0; int pmp_size = 0; - target_ulong s = 0; - target_ulong e = 0; + hwaddr s = 0; + hwaddr e = 0; /* Short cut if no rules */ if (0 == pmp_get_num_rules(env)) { @@ -624,12 +622,12 @@ target_ulong mseccfg_csr_read(CPURISCVState *env) * To avoid this we return a size of 1 (which means no caching) if the PMP * region only covers partial of the TLB page. 
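 * For example, a 4 KiB target page that is only half covered by a PMP
 * region must not be cached with TARGET_PAGE_SIZE; returning 1 keeps
 * the translation from being cached, so every access on that page is
 * re-checked against the PMP rules.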
*/ -target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr) +target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr) { - target_ulong pmp_sa; - target_ulong pmp_ea; - target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); - target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; + hwaddr pmp_sa; + hwaddr pmp_ea; + hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1); + hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1; int i; /* diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index 9af8614cd4ff9bc7d92246c361fc4294ec3b69d1..f5c10ce85c95914db773ec64f962b4014e017799 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -53,8 +53,8 @@ typedef struct { } pmp_entry_t; typedef struct { - target_ulong sa; - target_ulong ea; + hwaddr sa; + hwaddr ea; } pmp_addr_t; typedef struct { @@ -73,11 +73,11 @@ target_ulong mseccfg_csr_read(CPURISCVState *env); void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); -bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, +bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr, target_ulong size, pmp_priv_t privs, pmp_priv_t *allowed_privs, target_ulong mode); -target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr); +target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr); void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index); void pmp_update_rule_nums(CPURISCVState *env); uint32_t pmp_get_num_rules(CPURISCVState *env); diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c index 2f2dbae7c85f4c25b4aa7b969d295dcfdc2f5718..c48b9cfa67974e8329275674fe6ba671d70668c0 100644 --- a/target/riscv/riscv-qmp-cmds.c +++ b/target/riscv/riscv-qmp-cmds.c @@ -26,6 +26,7 @@ #include "qapi/error.h" #include "qapi/qapi-commands-machine-target.h" +#include "qapi/qmp/qbool.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/qobject-input-visitor.h" @@ -44,8 +45,7 @@ static void riscv_cpu_add_definition(gpointer data, gpointer user_data) const char *typename = object_class_get_name(oc); ObjectClass *dyn_class; - info->name = g_strndup(typename, - strlen(typename) - strlen("-" TYPE_RISCV_CPU)); + info->name = cpu_model_from_type(typename); info->q_typename = g_strdup(typename); dyn_class = object_class_dynamic_cast(oc, TYPE_RISCV_DYNAMIC_CPU); @@ -99,6 +99,35 @@ static void riscv_obj_add_multiext_props(Object *obj, QDict *qdict_out, } } +static void riscv_obj_add_named_feats_qdict(Object *obj, QDict *qdict_out) +{ + const RISCVCPUMultiExtConfig *named_cfg; + RISCVCPU *cpu = RISCV_CPU(obj); + QObject *value; + bool flag_val; + + for (int i = 0; riscv_cpu_named_features[i].name != NULL; i++) { + named_cfg = &riscv_cpu_named_features[i]; + flag_val = isa_ext_is_enabled(cpu, named_cfg->offset); + value = QOBJECT(qbool_from_bool(flag_val)); + + qdict_put_obj(qdict_out, named_cfg->name, value); + } +} + +static void riscv_obj_add_profiles_qdict(Object *obj, QDict *qdict_out) +{ + RISCVCPUProfile *profile; + QObject *value; + + for (int i = 0; riscv_profiles[i] != NULL; i++) { + profile = riscv_profiles[i]; + value = QOBJECT(qbool_from_bool(profile->enabled)); + + qdict_put_obj(qdict_out, profile->name, value); + } +} + static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props, const QDict *qdict_in, Error **errp) @@ -129,11 +158,6 @@ static void riscv_cpuobj_validate_qdict_in(Object *obj, QObject *props, goto err; } - riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err); - if 
(local_err) { - goto err; - } - visit_end_struct(visitor, NULL); err: @@ -191,6 +215,13 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, } } + riscv_cpu_finalize_features(RISCV_CPU(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + object_unref(obj); + return NULL; + } + expansion_info = g_new0(CpuModelExpansionInfo, 1); expansion_info->model = g_malloc0(sizeof(*expansion_info->model)); expansion_info->model->name = g_strdup(model->name); @@ -200,6 +231,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_extensions); riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_experimental_exts); riscv_obj_add_multiext_props(obj, qdict_out, riscv_cpu_vendor_exts); + riscv_obj_add_named_feats_qdict(obj, qdict_out); + riscv_obj_add_profiles_qdict(obj, qdict_out); /* Add our CPU boolean options too */ riscv_obj_add_qdict_prop(obj, qdict_out, "mmu"); diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index 8a35683a345dadfcd9d67aef8866c497d18ed028..14133ff66568ca9cc509a417fe1ef406ba56aa11 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -34,6 +34,7 @@ /* Hash that stores user set extensions */ static GHashTable *multi_ext_user_opts; +static GHashTable *misa_ext_user_opts; static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) { @@ -41,6 +42,52 @@ static bool cpu_cfg_ext_is_user_set(uint32_t ext_offset) GUINT_TO_POINTER(ext_offset)); } +static bool cpu_misa_ext_is_user_set(uint32_t misa_bit) +{ + return g_hash_table_contains(misa_ext_user_opts, + GUINT_TO_POINTER(misa_bit)); +} + +static void cpu_cfg_ext_add_user_opt(uint32_t ext_offset, bool value) +{ + g_hash_table_insert(multi_ext_user_opts, GUINT_TO_POINTER(ext_offset), + (gpointer)value); +} + +static void cpu_misa_ext_add_user_opt(uint32_t bit, bool value) +{ + g_hash_table_insert(misa_ext_user_opts, GUINT_TO_POINTER(bit), + (gpointer)value); +} + +static void riscv_cpu_write_misa_bit(RISCVCPU *cpu, uint32_t bit, + bool enabled) +{ + CPURISCVState *env = &cpu->env; + + if (enabled) { + env->misa_ext |= bit; + env->misa_ext_mask |= bit; + } else { + env->misa_ext &= ~bit; + env->misa_ext_mask &= ~bit; + } +} + +static const char *cpu_priv_ver_to_str(int priv_ver) +{ + switch (priv_ver) { + case PRIV_VERSION_1_10_0: + return "v1.10.0"; + case PRIV_VERSION_1_11_0: + return "v1.11.0"; + case PRIV_VERSION_1_12_0: + return "v1.12.0"; + } + + g_assert_not_reached(); +} + static void riscv_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -114,6 +161,79 @@ static int cpu_cfg_ext_get_min_version(uint32_t ext_offset) g_assert_not_reached(); } +static const char *cpu_cfg_ext_get_name(uint32_t ext_offset) +{ + const RISCVCPUMultiExtConfig *feat; + const RISCVIsaExtData *edata; + + for (edata = isa_edata_arr; edata->name != NULL; edata++) { + if (edata->ext_enable_offset == ext_offset) { + return edata->name; + } + } + + for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { + if (feat->offset == ext_offset) { + return feat->name; + } + } + + g_assert_not_reached(); +} + +static bool cpu_cfg_offset_is_named_feat(uint32_t ext_offset) +{ + const RISCVCPUMultiExtConfig *feat; + + for (feat = riscv_cpu_named_features; feat->name != NULL; feat++) { + if (feat->offset == ext_offset) { + return true; + } + } + + return false; +} + +static void riscv_cpu_enable_named_feat(RISCVCPU *cpu, uint32_t feat_offset) +{ + switch (feat_offset) { + 
case CPU_CFG_OFFSET(zic64b): + cpu->cfg.cbom_blocksize = 64; + cpu->cfg.cbop_blocksize = 64; + cpu->cfg.cboz_blocksize = 64; + break; + case CPU_CFG_OFFSET(svade): + cpu->cfg.ext_svadu = false; + break; + default: + g_assert_not_reached(); + } +} + +static void cpu_bump_multi_ext_priv_ver(CPURISCVState *env, + uint32_t ext_offset) +{ + int ext_priv_ver; + + if (env->priv_ver == PRIV_VERSION_LATEST) { + return; + } + + if (cpu_cfg_offset_is_named_feat(ext_offset)) { + return; + } + + ext_priv_ver = cpu_cfg_ext_get_min_version(ext_offset); + + if (env->priv_ver < ext_priv_ver) { + /* + * Note: the 'priv_spec' command line option, if present, + * will take precedence over this priv_ver bump. + */ + env->priv_ver = ext_priv_ver; + } +} + static void cpu_cfg_ext_auto_update(RISCVCPU *cpu, uint32_t ext_offset, bool value) { @@ -273,6 +393,55 @@ static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) } } +static void riscv_cpu_update_named_features(RISCVCPU *cpu) +{ + cpu->cfg.zic64b = cpu->cfg.cbom_blocksize == 64 && + cpu->cfg.cbop_blocksize == 64 && + cpu->cfg.cboz_blocksize == 64; + + cpu->cfg.svade = !cpu->cfg.ext_svadu; +} + +static void riscv_cpu_validate_g(RISCVCPU *cpu) +{ + const char *warn_msg = "RVG mandates disabled extension %s"; + uint32_t g_misa_bits[] = {RVI, RVM, RVA, RVF, RVD}; + bool send_warn = cpu_misa_ext_is_user_set(RVG); + + for (int i = 0; i < ARRAY_SIZE(g_misa_bits); i++) { + uint32_t bit = g_misa_bits[i]; + + if (riscv_has_ext(&cpu->env, bit)) { + continue; + } + + if (!cpu_misa_ext_is_user_set(bit)) { + riscv_cpu_write_misa_bit(cpu, bit, true); + continue; + } + + if (send_warn) { + warn_report(warn_msg, riscv_get_misa_ext_name(bit)); + } + } + + if (!cpu->cfg.ext_zicsr) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr))) { + cpu->cfg.ext_zicsr = true; + } else if (send_warn) { + warn_report(warn_msg, "zicsr"); + } + } + + if (!cpu->cfg.ext_zifencei) { + if (!cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei))) { + cpu->cfg.ext_zifencei = true; + } else if (send_warn) { + warn_report(warn_msg, "zifencei"); + } + } +} + /* * Check consistency between chosen extensions while setting * cpu->cfg accordingly. 
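 * For RVG this now means calling riscv_cpu_validate_g(), which enables
 * the mandated IMAFD, Zicsr and Zifencei extensions when the user left
 * them unset, and downgrades the previous hard errors to warnings when
 * the user explicitly disabled one of them.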
@@ -282,31 +451,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) CPURISCVState *env = &cpu->env; Error *local_err = NULL; - /* Do some ISA extension error checking */ - if (riscv_has_ext(env, RVG) && - !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) && - riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) && - riscv_has_ext(env, RVD) && - cpu->cfg.ext_zicsr && cpu->cfg.ext_zifencei)) { - - if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zicsr)) && - !cpu->cfg.ext_zicsr) { - error_setg(errp, "RVG requires Zicsr but user set Zicsr to false"); - return; - } - - if (cpu_cfg_ext_is_user_set(CPU_CFG_OFFSET(ext_zifencei)) && - !cpu->cfg.ext_zifencei) { - error_setg(errp, "RVG requires Zifencei but user set " - "Zifencei to false"); - return; - } - - cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zicsr), true); - cpu_cfg_ext_auto_update(cpu, CPU_CFG_OFFSET(ext_zifencei), true); - - env->misa_ext |= RVI | RVM | RVA | RVF | RVD; - env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD; + if (riscv_has_ext(env, RVG)) { + riscv_cpu_validate_g(cpu); } if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) { @@ -343,6 +489,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } + if ((cpu->cfg.ext_zacas) && !riscv_has_ext(env, RVA)) { + error_setg(errp, "Zacas extension requires A extension"); + return; + } + if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) { error_setg(errp, "Zawrs extension requires A extension"); return; @@ -620,6 +771,106 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) riscv_cpu_disable_priv_spec_isa_exts(cpu); } +#ifndef CONFIG_USER_ONLY +static bool riscv_cpu_validate_profile_satp(RISCVCPU *cpu, + RISCVCPUProfile *profile, + bool send_warn) +{ + int satp_max = satp_mode_max_from_map(cpu->cfg.satp_mode.supported); + + if (profile->satp_mode > satp_max) { + if (send_warn) { + bool is_32bit = riscv_cpu_is_32bit(cpu); + const char *req_satp = satp_mode_str(profile->satp_mode, is_32bit); + const char *cur_satp = satp_mode_str(satp_max, is_32bit); + + warn_report("Profile %s requires satp mode %s, " + "but satp mode %s was set", profile->name, + req_satp, cur_satp); + } + + return false; + } + + return true; +} +#endif + +static void riscv_cpu_validate_profile(RISCVCPU *cpu, + RISCVCPUProfile *profile) +{ + CPURISCVState *env = &cpu->env; + const char *warn_msg = "Profile %s mandates disabled extension %s"; + bool send_warn = profile->user_set && profile->enabled; + bool parent_enabled, profile_impl = true; + int i; + +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + profile_impl = riscv_cpu_validate_profile_satp(cpu, profile, + send_warn); + } +#endif + + if (profile->priv_spec != RISCV_PROFILE_ATTR_UNUSED && + profile->priv_spec != env->priv_ver) { + profile_impl = false; + + if (send_warn) { + warn_report("Profile %s requires priv spec %s, " + "but priv ver %s was set", profile->name, + cpu_priv_ver_to_str(profile->priv_spec), + cpu_priv_ver_to_str(env->priv_ver)); + } + } + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (!riscv_has_ext(&cpu->env, bit)) { + profile_impl = false; + + if (send_warn) { + warn_report(warn_msg, profile->name, + riscv_get_misa_ext_name(bit)); + } + } + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + int ext_offset = profile->ext_offsets[i]; + + if (!isa_ext_is_enabled(cpu, ext_offset)) { + profile_impl = false; + + if 
(send_warn) { + warn_report(warn_msg, profile->name, + cpu_cfg_ext_get_name(ext_offset)); + } + } + } + + profile->enabled = profile_impl; + + if (profile->parent != NULL) { + parent_enabled = object_property_get_bool(OBJECT(cpu), + profile->parent->name, + NULL); + profile->enabled = profile->enabled && parent_enabled; + } +} + +static void riscv_cpu_validate_profiles(RISCVCPU *cpu) +{ + for (int i = 0; riscv_profiles[i] != NULL; i++) { + riscv_cpu_validate_profile(cpu, riscv_profiles[i]); + } +} + void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) { CPURISCVState *env = &cpu->env; @@ -637,6 +888,9 @@ void riscv_tcg_cpu_finalize_features(RISCVCPU *cpu, Error **errp) return; } + riscv_cpu_update_named_features(cpu); + riscv_cpu_validate_profiles(cpu); + if (cpu->cfg.ext_smepmp && !cpu->cfg.pmp) { /* * Enhanced PMP should only be available @@ -663,6 +917,11 @@ static bool riscv_cpu_is_generic(Object *cpu_obj) return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL; } +static bool riscv_cpu_is_vendor(Object *cpu_obj) +{ + return object_dynamic_cast(cpu_obj, TYPE_RISCV_VENDOR_CPU) != NULL; +} + /* * We'll get here via the following path: * @@ -731,13 +990,15 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, target_ulong misa_bit = misa_ext_cfg->misa_bit; RISCVCPU *cpu = RISCV_CPU(obj); CPURISCVState *env = &cpu->env; - bool generic_cpu = riscv_cpu_is_generic(obj); + bool vendor_cpu = riscv_cpu_is_vendor(obj); bool prev_val, value; if (!visit_type_bool(v, name, &value, errp)) { return; } + cpu_misa_ext_add_user_opt(misa_bit, value); + prev_val = env->misa_ext & misa_bit; if (value == prev_val) { @@ -745,19 +1006,23 @@ static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name, } if (value) { - if (!generic_cpu) { + if (vendor_cpu) { g_autofree char *cpuname = riscv_cpu_get_name(cpu); error_setg(errp, "'%s' CPU does not allow enabling extensions", cpuname); return; } - env->misa_ext |= misa_bit; - env->misa_ext_mask |= misa_bit; - } else { - env->misa_ext &= ~misa_bit; - env->misa_ext_mask &= ~misa_bit; + if (misa_bit == RVH && env->priv_ver < PRIV_VERSION_1_12_0) { + /* + * Note: the 'priv_spec' command line option, if present, + * will take precedence over this priv_ver bump. 
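+ *
+ * This mirrors cpu_bump_multi_ext_priv_ver() for multi-letter
+ * extensions: the H extension is only specified from priv spec
+ * v1.12.0 onwards, so enabling RVH bumps priv_ver to keep the
+ * configuration consistent.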
+ */ + env->priv_ver = PRIV_VERSION_1_12_0; + } } + + riscv_cpu_write_misa_bit(cpu, misa_bit, value); } static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name, @@ -821,7 +1086,116 @@ static void riscv_cpu_add_misa_properties(Object *cpu_obj) NULL, (void *)misa_cfg); object_property_set_description(cpu_obj, name, desc); if (use_def_vals) { - object_property_set_bool(cpu_obj, name, misa_cfg->enabled, NULL); + riscv_cpu_write_misa_bit(RISCV_CPU(cpu_obj), bit, + misa_cfg->enabled); + } + } +} + +static void cpu_set_profile(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPUProfile *profile = opaque; + RISCVCPU *cpu = RISCV_CPU(obj); + bool value; + int i, ext_offset; + + if (riscv_cpu_is_vendor(obj)) { + error_setg(errp, "Profile %s is not available for vendor CPUs", + profile->name); + return; + } + + if (cpu->env.misa_mxl != MXL_RV64) { + error_setg(errp, "Profile %s only available for 64 bit CPUs", + profile->name); + return; + } + + if (!visit_type_bool(v, name, &value, errp)) { + return; + } + + profile->user_set = true; + profile->enabled = value; + + if (profile->parent != NULL) { + object_property_set_bool(obj, profile->parent->name, + profile->enabled, NULL); + } + + if (profile->enabled) { + cpu->env.priv_ver = profile->priv_spec; + } + +#ifndef CONFIG_USER_ONLY + if (profile->satp_mode != RISCV_PROFILE_ATTR_UNUSED) { + const char *satp_prop = satp_mode_str(profile->satp_mode, + riscv_cpu_is_32bit(cpu)); + object_property_set_bool(obj, satp_prop, profile->enabled, NULL); + } +#endif + + for (i = 0; misa_bits[i] != 0; i++) { + uint32_t bit = misa_bits[i]; + + if (!(profile->misa_ext & bit)) { + continue; + } + + if (bit == RVI && !profile->enabled) { + /* + * Disabling profiles will not disable the base + * ISA RV64I. + */ + continue; + } + + cpu_misa_ext_add_user_opt(bit, profile->enabled); + riscv_cpu_write_misa_bit(cpu, bit, profile->enabled); + } + + for (i = 0; profile->ext_offsets[i] != RISCV_PROFILE_EXT_LIST_END; i++) { + ext_offset = profile->ext_offsets[i]; + + if (profile->enabled) { + if (cpu_cfg_offset_is_named_feat(ext_offset)) { + riscv_cpu_enable_named_feat(cpu, ext_offset); + } + + cpu_bump_multi_ext_priv_ver(&cpu->env, ext_offset); + } + + cpu_cfg_ext_add_user_opt(ext_offset, profile->enabled); + isa_ext_update_enabled(cpu, ext_offset, profile->enabled); + } +} + +static void cpu_get_profile(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + RISCVCPUProfile *profile = opaque; + bool value = profile->enabled; + + visit_type_bool(v, name, &value, errp); +} + +static void riscv_cpu_add_profiles(Object *cpu_obj) +{ + for (int i = 0; riscv_profiles[i] != NULL; i++) { + const RISCVCPUProfile *profile = riscv_profiles[i]; + + object_property_add(cpu_obj, profile->name, "bool", + cpu_get_profile, cpu_set_profile, + NULL, (void *)profile); + + /* + * CPUs might enable a profile right from the start. + * Enable its mandatory extensions right away in this + * case. 
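+ *
+ * Setting the property (rather than poking cpu->cfg directly) goes
+ * through cpu_set_profile(), so parent profiles, MISA bits and the
+ * per-extension user options are all updated via the same path.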
+ */ + if (profile->enabled) { + object_property_set_bool(cpu_obj, profile->name, true, NULL); } } } @@ -850,7 +1224,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, { const RISCVCPUMultiExtConfig *multi_ext_cfg = opaque; RISCVCPU *cpu = RISCV_CPU(obj); - bool generic_cpu = riscv_cpu_is_generic(obj); + bool vendor_cpu = riscv_cpu_is_vendor(obj); bool prev_val, value; if (!visit_type_bool(v, name, &value, errp)) { @@ -864,9 +1238,7 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, multi_ext_cfg->name, lower); } - g_hash_table_insert(multi_ext_user_opts, - GUINT_TO_POINTER(multi_ext_cfg->offset), - (gpointer)value); + cpu_cfg_ext_add_user_opt(multi_ext_cfg->offset, value); prev_val = isa_ext_is_enabled(cpu, multi_ext_cfg->offset); @@ -874,13 +1246,17 @@ static void cpu_set_multi_ext_cfg(Object *obj, Visitor *v, const char *name, return; } - if (value && !generic_cpu) { + if (value && vendor_cpu) { g_autofree char *cpuname = riscv_cpu_get_name(cpu); error_setg(errp, "'%s' CPU does not allow enabling extensions", cpuname); return; } + if (value) { + cpu_bump_multi_ext_priv_ver(&cpu->env, multi_ext_cfg->offset); + } + isa_ext_update_enabled(cpu, multi_ext_cfg->offset, value); } @@ -949,6 +1325,8 @@ static void riscv_cpu_add_user_properties(Object *obj) riscv_cpu_add_multiext_prop_array(obj, riscv_cpu_deprecated_exts); + riscv_cpu_add_profiles(obj); + for (Property *prop = riscv_cpu_options; prop && prop->name; prop++) { qdev_property_add_static(DEVICE(obj), prop); } @@ -999,6 +1377,7 @@ static void tcg_cpu_instance_init(CPUState *cs) RISCVCPU *cpu = RISCV_CPU(cs); Object *obj = OBJECT(cpu); + misa_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); multi_ext_user_opts = g_hash_table_new(NULL, g_direct_equal); riscv_cpu_add_user_properties(obj); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index f0be79bb160ba45f234468228ef1809d1701bb6e..071fbad7ef434faf40cc89505928486ec2851cb5 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1089,6 +1089,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvv.c.inc" #include "insn_trans/trans_rvb.c.inc" #include "insn_trans/trans_rvzicond.c.inc" +#include "insn_trans/trans_rvzacas.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" #include "insn_trans/trans_rvzicbo.c.inc" #include "insn_trans/trans_rvzfa.c.inc" diff --git a/target/rx/cpu.c b/target/rx/cpu.c index 9cc9d9d15ec4fbc5e437c70e43e4838076aee072..c5ffeffe323f61b967029f99b09d90e4cf0f8828 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -89,22 +89,6 @@ static void rx_cpu_reset_hold(Object *obj) set_flush_inputs_to_zero(1, &env->fp_status); } -static void rx_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - - qemu_printf(" %s\n", object_class_get_name(oc)); -} - -void rx_cpu_list(void) -{ - GSList *list; - list = object_class_get_list_sorted(TYPE_RX_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, rx_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *rx_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; diff --git a/target/rx/cpu.h b/target/rx/cpu.h index e931e77e854861e2e97ac1d24b90d27b79c83cec..65f9cd2d0ac87999f74b7d9cf3b5d984245e66e8 100644 --- a/target/rx/cpu.h +++ b/target/rx/cpu.h @@ -139,11 +139,8 @@ int rx_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int rx_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void 
rx_translate_init(void); -void rx_cpu_list(void); void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte); -#define cpu_list rx_cpu_list - #include "exec/cpu-all.h" #define CPU_INTERRUPT_SOFT CPU_INTERRUPT_TGT_INT_0 diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 33ab3551f47b035d9fc818d7346e6e68f96ef5de..888d6c1a1c2418e7c253252088934e8281f2ff8a 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -1923,7 +1923,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) S390CPU *cpu = S390_CPU(cs); int ret = 0; - qemu_mutex_lock_iothread(); + bql_lock(); kvm_cpu_synchronize_state(cs); @@ -1947,7 +1947,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); break; } - qemu_mutex_unlock_iothread(); + bql_unlock(); if (ret == 0) { ret = EXCP_INTERRUPT; diff --git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c index 6a69be7e5c533cea94d600e47ad08cbd3c8feee2..7ca7faec73e93e33bca269f7ec76d5a844447d31 100644 --- a/target/s390x/kvm/pv.c +++ b/target/s390x/kvm/pv.c @@ -29,7 +29,8 @@ static bool info_valid; static struct kvm_s390_pv_info_vm info_vm; static struct kvm_s390_pv_info_dump info_dump; -static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) +static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data, + int *pvrc) { struct kvm_pv_cmd pv_cmd = { .cmd = cmd, @@ -46,6 +47,9 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) "IOCTL rc: %d", cmd, cmdname, pv_cmd.rc, pv_cmd.rrc, rc); } + if (pvrc) { + *pvrc = pv_cmd.rc; + } return rc; } @@ -53,12 +57,13 @@ static int __s390_pv_cmd(uint32_t cmd, const char *cmdname, void *data) * This macro lets us pass the command as a string to the function so * we can print it on an error. 
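 * The s390_pv_cmd_pvrc() variant added below also hands back the
 * command's return code (pv_cmd.rc), which s390_pv_set_sec_parms()
 * uses to hint that the image may not be correctly encrypted for
 * this host when rc is 0x108.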
*/ -#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data) +#define s390_pv_cmd(cmd, data) __s390_pv_cmd(cmd, #cmd, data, NULL) +#define s390_pv_cmd_pvrc(cmd, data, pvrc) __s390_pv_cmd(cmd, #cmd, data, pvrc) #define s390_pv_cmd_exit(cmd, data) \ { \ int rc; \ \ - rc = __s390_pv_cmd(cmd, #cmd, data);\ + rc = __s390_pv_cmd(cmd, #cmd, data, NULL); \ if (rc) { \ exit(1); \ } \ @@ -142,14 +147,24 @@ bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) return true; } -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp) { + int ret, pvrc; struct kvm_s390_pv_sec_parm args = { .origin = origin, .length = length, }; - return s390_pv_cmd(KVM_PV_SET_SEC_PARMS, &args); + ret = s390_pv_cmd_pvrc(KVM_PV_SET_SEC_PARMS, &args, &pvrc); + if (ret) { + error_setg(errp, "Failed to set secure execution parameters"); + if (pvrc == 0x108) { + error_append_hint(errp, "Please check whether the image is " + "correctly encrypted for this host\n"); + } + } + + return ret; } /* diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h index 7b935e2246c3f04f0c800ae44e6f49498b2b6d7c..5877d28ff10a73a3ac59d33c9c0c308db6fb0d85 100644 --- a/target/s390x/kvm/pv.h +++ b/target/s390x/kvm/pv.h @@ -42,7 +42,7 @@ int s390_pv_query_info(void); int s390_pv_vm_enable(void); void s390_pv_vm_disable(void); bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms); -int s390_pv_set_sec_parms(uint64_t origin, uint64_t length); +int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, Error **errp); int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak); void s390_pv_prep_reset(void); int s390_pv_verify(void); @@ -62,7 +62,8 @@ static inline int s390_pv_query_info(void) { return 0; } static inline int s390_pv_vm_enable(void) { return 0; } static inline void s390_pv_vm_disable(void) {} static inline bool s390_pv_vm_try_disable_async(S390CcwMachineState *ms) { return false; } -static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length) { return 0; } +static inline int s390_pv_set_sec_parms(uint64_t origin, uint64_t length, + Error **errp) { return 0; } static inline int s390_pv_unpack(uint64_t addr, uint64_t size, uint64_t tweak) { return 0; } static inline void s390_pv_prep_reset(void) {} static inline int s390_pv_verify(void) { return 0; } diff --git a/target/s390x/machine.c b/target/s390x/machine.c index 37a076858c7566ae07684ad014f4a99956107538..a125ebcc2fadd9b58483948d7f41574da0642b3d 100644 --- a/target/s390x/machine.c +++ b/target/s390x/machine.c @@ -66,7 +66,7 @@ static const VMStateDescription vmstate_fpu = { .version_id = 1, .minimum_version_id = 1, .needed = fpu_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.vregs[0][0], S390CPU), VMSTATE_UINT64(env.vregs[1][0], S390CPU), VMSTATE_UINT64(env.vregs[2][0], S390CPU), @@ -98,7 +98,7 @@ static const VMStateDescription vmstate_vregs = { .version_id = 1, .minimum_version_id = 1, .needed = vregs_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { /* vregs[0][0] -> vregs[15][0] and fregs are overlays */ VMSTATE_UINT64(env.vregs[16][0], S390CPU), VMSTATE_UINT64(env.vregs[17][0], S390CPU), @@ -157,12 +157,12 @@ static bool riccb_needed(void *opaque) return s390_has_feat(S390_FEAT_RUNTIME_INSTRUMENTATION); } -const VMStateDescription vmstate_riccb = { +static const VMStateDescription vmstate_riccb = { .name = "cpu/riccb", .version_id = 1, .minimum_version_id = 1, .needed = riccb_needed, - .fields = 
(VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT8_ARRAY(env.riccb, S390CPU, 64), VMSTATE_END_OF_LIST() } @@ -174,12 +174,12 @@ static bool exval_needed(void *opaque) return cpu->env.ex_value != 0; } -const VMStateDescription vmstate_exval = { +static const VMStateDescription vmstate_exval = { .name = "cpu/exval", .version_id = 1, .minimum_version_id = 1, .needed = exval_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.ex_value, S390CPU), VMSTATE_END_OF_LIST() } @@ -190,12 +190,12 @@ static bool gscb_needed(void *opaque) return s390_has_feat(S390_FEAT_GUARDED_STORAGE); } -const VMStateDescription vmstate_gscb = { +static const VMStateDescription vmstate_gscb = { .name = "cpu/gscb", .version_id = 1, .minimum_version_id = 1, .needed = gscb_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.gscb, S390CPU, 4), VMSTATE_END_OF_LIST() } @@ -206,12 +206,12 @@ static bool bpbc_needed(void *opaque) return s390_has_feat(S390_FEAT_BPB); } -const VMStateDescription vmstate_bpbc = { +static const VMStateDescription vmstate_bpbc = { .name = "cpu/bpbc", .version_id = 1, .minimum_version_id = 1, .needed = bpbc_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(env.bpbc, S390CPU), VMSTATE_END_OF_LIST() } @@ -222,12 +222,12 @@ static bool etoken_needed(void *opaque) return s390_has_feat(S390_FEAT_ETOKEN); } -const VMStateDescription vmstate_etoken = { +static const VMStateDescription vmstate_etoken = { .name = "cpu/etoken", .version_id = 1, .minimum_version_id = 1, .needed = etoken_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.etoken, S390CPU), VMSTATE_UINT64(env.etoken_extension, S390CPU), VMSTATE_END_OF_LIST() @@ -239,12 +239,12 @@ static bool diag318_needed(void *opaque) return s390_has_feat(S390_FEAT_DIAG_318); } -const VMStateDescription vmstate_diag318 = { +static const VMStateDescription vmstate_diag318 = { .name = "cpu/diag318", .version_id = 1, .minimum_version_id = 1, .needed = diag318_needed, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(env.diag318_info, S390CPU), VMSTATE_END_OF_LIST() } @@ -256,7 +256,7 @@ const VMStateDescription vmstate_s390_cpu = { .pre_save = cpu_pre_save, .version_id = 4, .minimum_version_id = 3, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16), VMSTATE_UINT64(env.psw.mask, S390CPU), VMSTATE_UINT64(env.psw.addr, S390CPU), @@ -278,7 +278,7 @@ const VMStateDescription vmstate_s390_cpu = { irqstate_saved_size), VMSTATE_END_OF_LIST() }, - .subsections = (const VMStateDescription*[]) { + .subsections = (const VMStateDescription * const []) { &vmstate_fpu, &vmstate_vregs, &vmstate_riccb, diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index 6aa7907438fd7d22058e83d36632f91fe67f569d..89b5268fd49d4467cc9d936953eeee458cbd84a8 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -101,9 +101,9 @@ uint64_t HELPER(stck)(CPUS390XState *env) /* SCLP service call */ uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2) { - qemu_mutex_lock_iothread(); + bql_lock(); int r = sclp_service_call(env_archcpu(env), r1, r2); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (r < 0) { tcg_s390_program_interrupt(env, -r, GETPC()); } @@ -117,9 +117,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) 
switch (num) { case 0x500: /* KVM hypercall */ - qemu_mutex_lock_iothread(); + bql_lock(); r = s390_virtio_hypercall(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); break; case 0x44: /* yield */ @@ -127,9 +127,9 @@ void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num) break; case 0x308: /* ipl */ - qemu_mutex_lock_iothread(); + bql_lock(); handle_diag_308(env, r1, r3, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); r = 0; break; case 0x288: @@ -185,7 +185,7 @@ static void update_ckc_timer(CPUS390XState *env) /* stop the timer and remove pending CKC IRQs */ timer_del(env->tod_timer); - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR; /* the tod has to exceed the ckc, this can never happen if ckc is all 1's */ @@ -207,9 +207,9 @@ void HELPER(sckc)(CPUS390XState *env, uint64_t ckc) { env->ckc = ckc; - qemu_mutex_lock_iothread(); + bql_lock(); update_ckc_timer(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void tcg_s390_tod_updated(CPUState *cs, run_on_cpu_data opaque) @@ -229,9 +229,9 @@ uint32_t HELPER(sck)(CPUS390XState *env, uint64_t tod_low) .low = tod_low, }; - qemu_mutex_lock_iothread(); + bql_lock(); tdc->set(td, &tod, &error_abort); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -421,9 +421,9 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, int cc; /* TODO: needed to inject interrupts - push further down */ - qemu_mutex_lock_iothread(); + bql_lock(); cc = handle_sigp(env, order_code & SIGP_ORDER_MASK, r1, r3); - qemu_mutex_unlock_iothread(); + bql_unlock(); return cc; } @@ -433,92 +433,92 @@ uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1, void HELPER(xsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_xsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(csch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_csch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(hsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_hsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rchp)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rchp(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(rsch)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_rsch(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sal)(CPUS390XState *env, uint64_t r1) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_sal(cpu, r1, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); - 
qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) @@ -533,10 +533,10 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) tcg_s390_program_interrupt(env, PGM_SPECIFICATION, ra); } - qemu_mutex_lock_iothread(); + bql_lock(); io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]); if (!io) { - qemu_mutex_unlock_iothread(); + bql_unlock(); return 0; } @@ -554,7 +554,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) if (s390_cpu_virt_mem_write(cpu, addr, 0, &intc, sizeof(intc))) { /* writing failed, reinject and properly clean up */ s390_io_interrupt(io->id, io->nr, io->parm, io->word); - qemu_mutex_unlock_iothread(); + bql_unlock(); g_free(io); s390_cpu_virt_mem_handle_exc(cpu, ra); return 0; @@ -570,24 +570,24 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) } g_free(io); - qemu_mutex_unlock_iothread(); + bql_unlock(); return 1; } void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(chsc)(CPUS390XState *env, uint64_t inst) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); ioinst_handle_chsc(cpu, inst >> 16, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif @@ -726,27 +726,27 @@ void HELPER(clp)(CPUS390XState *env, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); clp_service_call(cpu, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcilg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistg_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -754,9 +754,9 @@ void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) @@ -764,9 +764,9 @@ void HELPER(sic)(CPUS390XState *env, uint64_t r1, uint64_t r3) S390CPU *cpu = env_archcpu(env); int r; - qemu_mutex_lock_iothread(); + bql_lock(); r = css_do_sic(cpu, (r3 >> 27) & 0x7, r1 & 0xffff); - qemu_mutex_unlock_iothread(); + bql_unlock(); /* css_do_sic() may actually return a PGM_xxx value to 
inject */ if (r) { tcg_s390_program_interrupt(env, -r, GETPC()); @@ -777,9 +777,9 @@ void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); rpcit_service_call(cpu, r1, r2, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, @@ -787,9 +787,9 @@ void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, @@ -797,8 +797,8 @@ void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, { S390CPU *cpu = env_archcpu(env); - qemu_mutex_lock_iothread(); + bql_lock(); mpcifc_service_call(cpu, r1, fiba, ar, GETPC()); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index 62ab2be8b127c906ca3f07db75db9bc6b7c7b3cd..8df00b7df9fabdafaf6a00f2cb1eb3658ac9b0d1 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -3221,6 +3221,7 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) { int b2 = get_field(s, b2); TCGv ar1 = tcg_temp_new_i64(); + int r1 = get_field(s, r1); o->out = o->in2; o->in2 = NULL; @@ -3244,7 +3245,7 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) break; } - tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[1])); + tcg_gen_st32_i64(ar1, tcg_env, offsetof(CPUS390XState, aregs[r1])); return DISAS_NEXT; } diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index a8ec98b1348181ea95f7f392ac71ecac299070c0..806a0ef875e22e443016b00e4cff9da040221b2a 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -122,23 +122,6 @@ static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) info->print_insn = print_insn_sh; } -static void superh_cpu_list_entry(gpointer data, gpointer user_data) -{ - const char *typename = object_class_get_name(OBJECT_CLASS(data)); - int len = strlen(typename) - strlen(SUPERH_CPU_TYPE_SUFFIX); - - qemu_printf("%.*s\n", len, typename); -} - -void sh4_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_SUPERH_CPU, false); - g_slist_foreach(list, superh_cpu_list_entry, NULL); - g_slist_free(list); -} - static ObjectClass *superh_cpu_class_by_name(const char *cpu_model) { ObjectClass *oc; diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index 031dc0b457be556c48ac5f042b8b1aa3036586ec..0e6fa65baec86c8cd36532733fd04739b708db29 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -238,7 +238,6 @@ G_NORETURN void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, uintptr_t retaddr); void sh4_translate_init(void); -void sh4_cpu_list(void); #if !defined(CONFIG_USER_ONLY) hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -272,8 +271,6 @@ void cpu_load_tlb(CPUSH4State * env); #define CPU_RESOLVING_TYPE TYPE_SUPERH_CPU -#define cpu_list sh4_cpu_list - /* MMU modes definitions */ #define MMU_USER_IDX 1 static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch) diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 6999a10a4016d572847fcbec79b7fb68594cb25e..12a11ecb2616f961d65095b30b27efd62feead5f 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -549,7 +549,7 @@ struct CPUArchState { sparc_def_t def; void *irq_manager; - void 
(*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno); + void (*qemu_irq_ack)(CPUSPARCState *env, int intno); /* Leon3 cache control */ uint32_t cache_control; diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 156361358220d35e0921da32731a10aa506705f6..058dd712b587297f4efd7d1f338e38eabe5c963f 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -70,7 +70,7 @@ void cpu_check_irqs(CPUSPARCState *env) CPUState *cs; /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); if (env->pil_in && (env->interrupt_index == 0 || (env->interrupt_index & ~15) == TT_EXTINT)) { @@ -160,7 +160,7 @@ void sparc_cpu_do_interrupt(CPUState *cs) #if !defined(CONFIG_USER_ONLY) /* IRQ acknowledgment */ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) { - env->qemu_irq_ack(env, env->irq_manager, intno); + env->qemu_irq_ack(env, intno); } #endif } diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c index 1b4155f5f37d97e9f258a33c1159d1eee2b0e269..27df9dba89bb79006a8b35f9a94be0b9863b7428 100644 --- a/target/sparc/int64_helper.c +++ b/target/sparc/int64_helper.c @@ -69,7 +69,7 @@ void cpu_check_irqs(CPUSPARCState *env) (env->softint & ~(SOFTINT_TIMER | SOFTINT_STIMER)); /* We should be holding the BQL before we mess with IRQs */ - g_assert(qemu_mutex_iothread_locked()); + g_assert(bql_locked()); /* TT_IVEC has a higher priority (16) than TT_EXTINT (31..17) */ if (env->ivec_status & 0x20) { @@ -267,9 +267,9 @@ static bool do_modify_softint(CPUSPARCState *env, uint32_t value) env->softint = value; #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif return true; diff --git a/target/sparc/machine.c b/target/sparc/machine.c index 44dfc07014f46703fc278b242bb626d7a55fe91a..2b5686c3307b7fb01b87d814ed73375d0ae62206 100644 --- a/target/sparc/machine.c +++ b/target/sparc/machine.c @@ -10,7 +10,7 @@ static const VMStateDescription vmstate_cpu_timer = { .name = "cpu_timer", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(frequency, CPUTimer), VMSTATE_UINT32(disabled, CPUTimer), VMSTATE_UINT64(disabled_mask, CPUTimer), @@ -29,7 +29,7 @@ static const VMStateDescription vmstate_trap_state = { .name = "trap_state", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tpc, trap_state), VMSTATE_UINT64(tnpc, trap_state), VMSTATE_UINT64(tstate, trap_state), @@ -42,7 +42,7 @@ static const VMStateDescription vmstate_tlb_entry = { .name = "tlb_entry", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(tag, SparcTLBEntry), VMSTATE_UINT64(tte, SparcTLBEntry), VMSTATE_END_OF_LIST() @@ -147,7 +147,7 @@ const VMStateDescription vmstate_sparc_cpu = { .version_id = SPARC_VMSTATE_VER, .minimum_version_id = SPARC_VMSTATE_VER, .pre_save = cpu_pre_save, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8), VMSTATE_UINT32(env.nwindows, SPARCCPU), VMSTATE_VARRAY_MULTIPLY(env.regbase, SPARCCPU, env.nwindows, 16, diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c index 16d1c70fe713ddb6eb56c753a4e55b2bd1df955c..b53fc9ce94074d41f3caefcd40e4e64a86b0d72e 100644 --- 
a/target/sparc/win_helper.c +++ b/target/sparc/win_helper.c @@ -179,9 +179,9 @@ void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr) cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC()); } else { /* cpu_put_psr may trigger interrupts, hence BQL */ - qemu_mutex_lock_iothread(); + bql_lock(); cpu_put_psr(env, new_psr); - qemu_mutex_unlock_iothread(); + bql_unlock(); } } @@ -407,9 +407,9 @@ void helper_wrpstate(CPUSPARCState *env, target_ulong new_state) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -422,9 +422,9 @@ void helper_wrpil(CPUSPARCState *env, target_ulong new_pil) env->psrpil = new_pil; if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -451,9 +451,9 @@ void helper_done(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } @@ -480,9 +480,9 @@ void helper_retry(CPUSPARCState *env) #if !defined(CONFIG_USER_ONLY) if (cpu_interrupts_enabled(env)) { - qemu_mutex_lock_iothread(); + bql_lock(); cpu_check_irqs(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } #endif } diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 034e01c189121c74e386e5ffc208b1f464f2fe6a..8acacdf0c0a29ee76f146b148c791dd6ed05c9f0 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -132,9 +132,7 @@ static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(TRICORE_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU)) { - return NULL; - } + return oc; } diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h index de3ab53a8378deee684777f74d2aef09eaa42642..2d4446cea58261fe5cd1219330f175b70a0a61af 100644 --- a/target/tricore/cpu.h +++ b/target/tricore/cpu.h @@ -246,10 +246,6 @@ void fpu_set_state(CPUTriCoreState *env); #define MMU_USER_IDX 2 -void tricore_cpu_list(void); - -#define cpu_list tricore_cpu_list - static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) { return 0; diff --git a/target/tricore/helper.c b/target/tricore/helper.c index 7e5da3cb23edfcd0f9c7d93f619d78e8d85a2bee..174f666e1e84a75b926e81b4a3f6827d1885375b 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -96,28 +96,6 @@ bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } } -static void tricore_cpu_list_entry(gpointer data, gpointer user_data) -{ - ObjectClass *oc = data; - const char *typename; - char *name; - - typename = object_class_get_name(oc); - name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU)); - qemu_printf(" %s\n", name); - g_free(name); -} - -void tricore_cpu_list(void) -{ - GSList *list; - - list = object_class_get_list_sorted(TYPE_TRICORE_CPU, false); - qemu_printf("Available CPUs:\n"); - g_slist_foreach(list, tricore_cpu_list_entry, NULL); - g_slist_free(list); -} - void fpu_set_state(CPUTriCoreState *env) { switch (extract32(env->PSW, 24, 2)) { diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index e20fe87bf255bef4d5963ae3ba82869e53103e61..93e782a6e05ed92d28424ba13d687ad327ee3392 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -141,9 +141,7 @@ static ObjectClass 
*xtensa_cpu_class_by_name(const char *cpu_model) typename = g_strdup_printf(XTENSA_CPU_TYPE_NAME("%s"), cpu_model); oc = object_class_by_name(typename); g_free(typename); - if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU)) { - return NULL; - } + return oc; } diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index dd817293065394059ed32f843f3d52b9cf24f76b..d9c49a35fa225d802a1f800de84f89d2e97374db 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -600,8 +600,6 @@ G_NORETURN void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr); -#define cpu_list xtensa_cpu_list - #define CPU_RESOLVING_TYPE TYPE_XTENSA_CPU #if TARGET_BIG_ENDIAN @@ -626,7 +624,6 @@ void check_interrupts(CPUXtensaState *s); void xtensa_irq_init(CPUXtensaState *env); qemu_irq *xtensa_get_extints(CPUXtensaState *env); qemu_irq xtensa_get_runstall(CPUXtensaState *env); -void xtensa_cpu_list(void); void xtensa_sync_window_from_phys(CPUXtensaState *env); void xtensa_sync_phys_from_window(CPUXtensaState *env); void xtensa_rotate_window(CPUXtensaState *env, uint32_t delta); diff --git a/target/xtensa/exc_helper.c b/target/xtensa/exc_helper.c index 91354884f7ec8456bac49bf5979fb501cfca48ed..168419a505f586d8d7be2ffd5a8314a762377a6c 100644 --- a/target/xtensa/exc_helper.c +++ b/target/xtensa/exc_helper.c @@ -105,9 +105,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | (intlevel << PS_INTLEVEL_SHIFT); - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); if (env->pending_irq_level) { cpu_loop_exit(cpu); @@ -120,9 +120,9 @@ void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) void HELPER(check_interrupts)(CPUXtensaState *env) { - qemu_mutex_lock_iothread(); + bql_lock(); check_interrupts(env); - qemu_mutex_unlock_iothread(); + bql_unlock(); } void HELPER(intset)(CPUXtensaState *env, uint32_t v) diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index dbeb97a953cc9385b072c288fca533f11b93c79e..f6632df64676252d266b10e2debf0e8ebb90a137 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -234,15 +234,6 @@ void xtensa_breakpoint_handler(CPUState *cs) } } -void xtensa_cpu_list(void) -{ - XtensaConfigList *core = xtensa_cores; - qemu_printf("Available CPUs:\n"); - for (; core; core = core->next) { - qemu_printf(" %s\n", core->config->name); - } -} - #ifndef CONFIG_USER_ONLY void xtensa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index a83f8aab30446df4f60cf584a37e615e790048c0..d268199fc1bfde7405688eecfa03e2ef3590ea65 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -244,6 +244,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) #define P_VEXL 0x80000 /* Set VEX.L = 1 */ #define P_EVEX 0x100000 /* Requires EVEX encoding */ +#define OPC_ARITH_EbIb (0x80) #define OPC_ARITH_EvIz (0x81) #define OPC_ARITH_EvIb (0x83) #define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */ @@ -1316,23 +1317,41 @@ static void tgen_arithi(TCGContext *s, int c, int r0, c &= 7; } - /* ??? While INC is 2 bytes shorter than ADDL $1, they also induce - partial flags update stalls on Pentium4 and are not recommended - by current Intel optimization manuals. 
*/ - if (!cf && (c == ARITH_ADD || c == ARITH_SUB) && (val == 1 || val == -1)) { - int is_inc = (c == ARITH_ADD) ^ (val < 0); - if (TCG_TARGET_REG_BITS == 64) { - /* The single-byte increment encodings are re-tasked as the - REX prefixes. Use the MODRM encoding. */ - tcg_out_modrm(s, OPC_GRP5 + rexw, - (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); - } else { - tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); + switch (c) { + case ARITH_ADD: + case ARITH_SUB: + if (!cf) { + /* + * ??? While INC is 2 bytes shorter than ADDL $1, they also induce + * partial flags update stalls on Pentium4 and are not recommended + * by current Intel optimization manuals. + */ + if (val == 1 || val == -1) { + int is_inc = (c == ARITH_ADD) ^ (val < 0); + if (TCG_TARGET_REG_BITS == 64) { + /* + * The single-byte increment encodings are re-tasked + * as the REX prefixes. Use the MODRM encoding. + */ + tcg_out_modrm(s, OPC_GRP5 + rexw, + (is_inc ? EXT5_INC_Ev : EXT5_DEC_Ev), r0); + } else { + tcg_out8(s, (is_inc ? OPC_INC_r32 : OPC_DEC_r32) + r0); + } + return; + } + if (val == 128) { + /* + * Facilitate using an 8-bit immediate. Carry is inverted + * by this transformation, so do it only if cf == 0. + */ + c ^= ARITH_ADD ^ ARITH_SUB; + val = -128; + } } - return; - } + break; - if (c == ARITH_AND) { + case ARITH_AND: if (TCG_TARGET_REG_BITS == 64) { if (val == 0xffffffffu) { tcg_out_ext32u(s, r0, r0); @@ -1351,6 +1370,17 @@ static void tgen_arithi(TCGContext *s, int c, int r0, tcg_out_ext16u(s, r0, r0); return; } + break; + + case ARITH_OR: + case ARITH_XOR: + if (val >= 0x80 && val <= 0xff + && (r0 < 4 || TCG_TARGET_REG_BITS == 64)) { + tcg_out_modrm(s, OPC_ARITH_EbIb + P_REXB_RM, c, r0); + tcg_out8(s, val); + return; + } + break; } if (val == (int8_t)val) { diff --git a/tcg/meson.build b/tcg/meson.build index 895a11d3fa255017061902ef6c74678ee34399b2..5afdec1e1ae171e235d1af29001b9ddadf2a21b8 100644 --- a/tcg/meson.build +++ b/tcg/meson.build @@ -22,7 +22,7 @@ if get_option('tcg_interpreter') tcg_ss.add(files('tci.c')) endif -tcg_ss = tcg_ss.apply(config_targetos, strict: false) +tcg_ss = tcg_ss.apply({}) libtcg_user = static_library('tcg_user', tcg_ss.sources() + genh, diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h index bbd7b2124742870438b7173d6600e5845858193e..cb47b29452e4219795c144a131eb325b5ed6bbcd 100644 --- a/tcg/ppc/tcg-target-con-set.h +++ b/tcg/ppc/tcg-target-con-set.h @@ -35,7 +35,7 @@ C_O1_I3(v, v, v, v) C_O1_I4(r, r, ri, rZ, rZ) C_O1_I4(r, r, r, ri, ri) C_O2_I1(r, r, r) -C_O2_I1(o, m, r) +C_N1O1_I1(o, m, r) C_O2_I2(r, r, r, r) C_O2_I4(r, r, rI, rZM, r, r) C_O2_I4(r, r, r, r, rI, rZM) diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 856c3b18f537ef3cf7aae46422ece909a6c56dad..54816967bca89826def45818c936cbafe1fddd48 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -2595,6 +2595,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi, tcg_debug_assert(!need_bswap); tcg_debug_assert(datalo & 1); tcg_debug_assert(datahi == datalo - 1); + tcg_debug_assert(!is_ld || datahi != index); insn = is_ld ? 
LQ : STQ; tcg_out32(s, insn | TAI(datahi, index, 0)); } else { @@ -4071,7 +4072,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_qemu_ld_a32_i128: case INDEX_op_qemu_ld_a64_i128: - return C_O2_I1(o, m, r); + return C_N1O1_I1(o, m, r); case INDEX_op_qemu_st_a32_i128: case INDEX_op_qemu_st_a64_i128: return C_O0_I3(o, m, r); diff --git a/tcg/tcg.c b/tcg/tcg.c index 896a36caeba6ce0c480106a77e118c6a05417216..e2c38f6d11c668d7387895c41693397b7ddd19c5 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -653,6 +653,7 @@ static void tcg_out_movext3(TCGContext *s, const TCGMovExtend *i1, #define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4), #define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2), +#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1), #define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1), #define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1), @@ -676,6 +677,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode); #undef C_O1_I3 #undef C_O1_I4 #undef C_N1_I2 +#undef C_N1O1_I1 #undef C_N2_I1 #undef C_O2_I1 #undef C_O2_I2 @@ -696,6 +698,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode); #define C_O1_I4(O1, I1, I2, I3, I4) { .args_ct_str = { #O1, #I1, #I2, #I3, #I4 } }, #define C_N1_I2(O1, I1, I2) { .args_ct_str = { "&" #O1, #I1, #I2 } }, +#define C_N1O1_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, #O2, #I1 } }, #define C_N2_I1(O1, O2, I1) { .args_ct_str = { "&" #O1, "&" #O2, #I1 } }, #define C_O2_I1(O1, O2, I1) { .args_ct_str = { #O1, #O2, #I1 } }, @@ -718,6 +721,7 @@ static const TCGTargetOpDef constraint_sets[] = { #undef C_O1_I3 #undef C_O1_I4 #undef C_N1_I2 +#undef C_N1O1_I1 #undef C_N2_I1 #undef C_O2_I1 #undef C_O2_I2 @@ -738,6 +742,7 @@ static const TCGTargetOpDef constraint_sets[] = { #define C_O1_I4(O1, I1, I2, I3, I4) C_PFX5(c_o1_i4_, O1, I1, I2, I3, I4) #define C_N1_I2(O1, I1, I2) C_PFX3(c_n1_i2_, O1, I1, I2) +#define C_N1O1_I1(O1, O2, I1) C_PFX3(c_n1o1_i1_, O1, O2, I1) #define C_N2_I1(O1, O2, I1) C_PFX3(c_n2_i1_, O1, O2, I1) #define C_O2_I1(O1, O2, I1) C_PFX3(c_o2_i1_, O1, O2, I1) @@ -2988,6 +2993,7 @@ static void process_op_defs(TCGContext *s) .pair = 2, .pair_index = o, .regs = def->args_ct[o].regs << 1, + .newreg = def->args_ct[o].newreg, }; def->args_ct[o].pair = 1; def->args_ct[o].pair_index = i; @@ -3004,6 +3010,7 @@ static void process_op_defs(TCGContext *s) .pair = 1, .pair_index = o, .regs = def->args_ct[o].regs >> 1, + .newreg = def->args_ct[o].newreg, }; def->args_ct[o].pair = 2; def->args_ct[o].pair_index = i; @@ -5036,17 +5043,21 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op) break; case 1: /* first of pair */ - tcg_debug_assert(!arg_ct->newreg); if (arg_ct->oalias) { reg = new_args[arg_ct->alias_index]; - break; + } else if (arg_ct->newreg) { + reg = tcg_reg_alloc_pair(s, arg_ct->regs, + i_allocated_regs | o_allocated_regs, + output_pref(op, k), + ts->indirect_base); + } else { + reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs, + output_pref(op, k), + ts->indirect_base); } - reg = tcg_reg_alloc_pair(s, arg_ct->regs, o_allocated_regs, - output_pref(op, k), ts->indirect_base); break; case 2: /* second of pair */ - tcg_debug_assert(!arg_ct->newreg); if (arg_ct->oalias) { reg = new_args[arg_ct->alias_index]; } else { diff --git a/tests/avocado/acpi-bits/bits-tests/smilatency.py2 b/tests/avocado/acpi-bits/bits-tests/smilatency.py2 new file mode 100644 index 0000000000000000000000000000000000000000..405af67e19076cf465305ed7200bd190246fc027 --- /dev/null +++ 
b/tests/avocado/acpi-bits/bits-tests/smilatency.py2 @@ -0,0 +1,107 @@ +# Copyright (c) 2015, Intel Corporation +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script runs only from the biosbits VM. + +"""SMI latency test.""" + +import bits +from collections import namedtuple +import testsuite +import time +import usb + +def register_tests(): + pass +# testsuite.add_test("SMI latency test", smi_latency); +# testsuite.add_test("SMI latency test with USB disabled via BIOS handoff", test_with_usb_disabled, runall=False); + +def smi_latency(): + MSR_SMI_COUNT = 0x34 + + print "Warning: touching the keyboard can affect the results of this test." + + tsc_per_sec = bits.tsc_per_sec() + tsc_per_usec = tsc_per_sec / (1000 * 1000) + bins = [long(tsc_per_usec * 10**i) for i in range(9)] + bin_descs = [ + "0 < t <= 1us", + "1us < t <= 10us", + "10us < t <= 100us", + "100us < t <= 1ms", + "1ms < t <= 10ms", + "10ms < t <= 100ms", + "100ms < t <= 1s ", + "1s < t <= 10s ", + "10s < t <= 100s ", + "100s < t ", + ] + + print "Starting test. Wait here, I will be back in 15 seconds." 
+ (max_latency, smi_count_delta, bins) = bits.smi_latency(long(15 * tsc_per_sec), bins) + BinType = namedtuple('BinType', ("max", "total", "count", "times")) + bins = [BinType(*b) for b in bins] + + testsuite.test("SMI latency < 150us to minimize risk of OS timeouts", max_latency / tsc_per_usec <= 150) + if not testsuite.show_detail(): + return + + for bin, desc in zip(bins, bin_descs): + if bin.count == 0: + continue + testsuite.print_detail("{}; average = {}; count = {}".format(desc, bits.format_tsc(bin.total/bin.count), bin.count)) + deltas = (bits.format_tsc(t2 - t1) for t1,t2 in zip(bin.times, bin.times[1:])) + testsuite.print_detail(" Times between first few observations: {}".format(" ".join("{:>6}".format(delta) for delta in deltas))) + + if smi_count_delta is not None: + testsuite.print_detail("{} SMI detected using MSR_SMI_COUNT (MSR {:#x})".format(smi_count_delta, MSR_SMI_COUNT)) + + testsuite.print_detail("Summary of impact: observed maximum latency = {}".format(bits.format_tsc(max_latency))) + +def test_with_usb_disabled(): + if usb.handoff_to_os(): + smi_latency() + +def average_io_smi(port, value, count): + def f(): + tsc_start = bits.rdtsc() + bits.outb(port, value) + return bits.rdtsc() - tsc_start + counts = [f() for i in range(count)] + return sum(counts)/len(counts) + +def time_io_smi(port=0xb2, value=0, count=1000): + count_for_estimate = 10 + start = time.time() + average_io_smi(port, value, count_for_estimate) + avg10 = time.time() - start + estimate = avg10 * count/count_for_estimate + if estimate > 1: + print "Running test, estimated time: {}s".format(int(estimate)) + average = average_io_smi(port, value, count) + print "Average of {} SMIs (via outb, port={:#x}, value={:#x}): {}".format(count, port, value, bits.format_tsc(average)) diff --git a/tests/avocado/kvm_xen_guest.py b/tests/avocado/kvm_xen_guest.py index 5391283113ee506e0b5df44f39743e8baa864910..f8cb458d5db9335ed31de3f19730557acbf3b470 100644 --- a/tests/avocado/kvm_xen_guest.py +++ b/tests/avocado/kvm_xen_guest.py @@ -59,7 +59,7 @@ def common_vm_setup(self): def run_and_check(self): self.vm.add_args('-kernel', self.kernel_path, '-append', self.kernel_params, - '-drive', f"file={self.rootfs},if=none,format=raw,id=drv0", + '-drive', f"file={self.rootfs},if=none,snapshot=on,format=raw,id=drv0", '-device', 'xen-disk,drive=drv0,vdev=xvda', '-device', 'virtio-net-pci,netdev=unet', '-netdev', 'user,id=unet,hostfwd=:127.0.0.1:0-:22') diff --git a/tests/avocado/machine_microblaze.py b/tests/avocado/machine_microblaze.py index 8d0efff30d28c70b8ee38720f9a8994f666e6134..807709cd11eb1d37a54e349503556e053720dfe8 100644 --- a/tests/avocado/machine_microblaze.py +++ b/tests/avocado/machine_microblaze.py @@ -5,6 +5,8 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. +import time +from avocado_qemu import exec_command, exec_command_and_wait_for_pattern from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive @@ -33,3 +35,27 @@ def test_microblaze_s3adsp1800(self): # The kernel sometimes gets stuck after the "This architecture ..." # message, that's why we don't test for a later string here. This # needs some investigation by a microblaze wizard one day... 
+ + def test_microblazeel_s3adsp1800(self): + """ + :avocado: tags=arch:microblazeel + :avocado: tags=machine:petalogix-s3adsp1800 + """ + + self.require_netdev('user') + tar_url = ('http://www.qemu-advent-calendar.org/2023/download/' + 'day13.tar.gz') + tar_hash = '6623d5fff5f84cfa8f34e286f32eff6a26546f44' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + '/day13/xmaton.bin') + self.vm.add_args('-nic', 'user,tftp=' + self.workdir + '/day13/') + self.vm.launch() + wait_for_console_pattern(self, 'QEMU Advent Calendar 2023') + time.sleep(0.1) + exec_command(self, 'root') + time.sleep(0.1) + exec_command_and_wait_for_pattern(self, + 'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png', + '821cd3cab8efd16ad6ee5acc3642a8ea') diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py index c37afa662c28a0f725c396517db8f0bd46cc42b0..10d99403a4c06a730a905d315f45c01caee99eb3 100644 --- a/tests/avocado/replay_kernel.py +++ b/tests/avocado/replay_kernel.py @@ -82,7 +82,23 @@ def run_rr(self, kernel_path, kernel_command_line, console_pattern, class ReplayKernelNormal(ReplayKernelBase): - # See https://gitlab.com/qemu-project/qemu/-/issues/2010 + def test_i386_pc(self): + """ + :avocado: tags=arch:i386 + :avocado: tags=machine:pc + """ + kernel_url = ('https://storage.tuxboot.com/20230331/i386/bzImage') + kernel_hash = 'a3e5b32a354729e65910f5a1ffcda7c14a6c12a55e8213fb86e277f1b76ed956' + kernel_path = self.fetch_asset(kernel_url, + asset_hash=kernel_hash, + algorithm = "sha256") + + kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'console=ttyS0' + console_pattern = 'VFS: Cannot open root device' + + self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + + # See https://gitlab.com/qemu-project/qemu/-/issues/2094 @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test sometimes gets stuck') def test_x86_64_pc(self): """ @@ -119,8 +135,6 @@ def test_mips_malta(self): self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) - # See https://gitlab.com/qemu-project/qemu/-/issues/2013 - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') def test_mips64el_malta(self): """ This test requires the ar tool to extract "data.tar.gz" from @@ -136,7 +150,6 @@ def test_mips64el_malta(self): :avocado: tags=arch:mips64el :avocado: tags=machine:malta - :avocado: tags=flaky """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' @@ -184,13 +197,10 @@ def test_arm_virt(self): self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=1) - @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') - def test_arm_cubieboard_initrd(self): """ :avocado: tags=arch:arm :avocado: tags=machine:cubieboard - :avocado: tags=flaky """ deb_url = ('https://apt.armbian.com/pool/main/l/' 'linux-5.10.16-sunxi/linux-image-current-sunxi_21.02.2_armhf.deb') @@ -338,7 +348,6 @@ def test_m68k_mcf5208evb(self): file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'sanity-clause.elf') - @skip("Test currently broken") # Console stuck as of 5.2-rc1 def test_microblaze_s3adsp1800(self): """ :avocado: tags=arch:microblaze @@ -373,7 +382,6 @@ def test_or1k_sim(self): file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) self.do_test_advcal_2018(file_path, 'vmlinux') - @skip("nios2 emulation is buggy under record/replay") def 
test_nios2_10m50(self): """ :avocado: tags=arch:nios2 diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py index 270ccc1eae81340ff9b539f1fc6f713eac263f88..f3a43dc98c2949b2e2d48066118726d0e40a0eed 100644 --- a/tests/avocado/replay_linux.py +++ b/tests/avocado/replay_linux.py @@ -48,12 +48,15 @@ def vm_add_disk(self, vm, path, id, device): bus_string = '' if self.bus: bus_string = ',bus=%s.%d' % (self.bus, id,) - vm.add_args('-drive', 'file=%s,snapshot,id=disk%s,if=none' % (path, id)) + vm.add_args('-drive', 'file=%s,snapshot=on,id=disk%s,if=none' % (path, id)) vm.add_args('-drive', 'driver=blkreplay,id=disk%s-rr,if=none,image=disk%s' % (id, id)) vm.add_args('-device', '%s,drive=disk%s-rr%s' % (device, id, bus_string)) + def vm_add_cdrom(self, vm, path, id, device): + vm.add_args('-drive', 'file=%s,id=disk%s,if=none,media=cdrom' % (path, id)) + def launch_and_wait(self, record, args, shift): self.require_netdev('user') vm = self.get_vm() @@ -65,7 +68,7 @@ def launch_and_wait(self, record, args, shift): if args: vm.add_args(*args) self.vm_add_disk(vm, self.boot_path, 0, self.hdd) - self.vm_add_disk(vm, self.cloudinit_path, 1, self.cd) + self.vm_add_cdrom(vm, self.cloudinit_path, 1, self.cd) logger = logging.getLogger('replay') if record: logger.info('recording the execution...') @@ -94,7 +97,7 @@ def launch_and_wait(self, record, args, shift): else: vm.event_wait('SHUTDOWN', self.timeout) vm.wait() - logger.info('successfully fihished the replay') + logger.info('successfully finished the replay') elapsed = time.time() - start_time logger.info('elapsed time %.2f sec' % elapsed) return elapsed diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 3c799dbd983779efd0e4d9b03fccc59fdeeaf1f7..7e76338a52d8a778f83861a6f9251f2a09aaf526 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -3,9 +3,9 @@ qht_bench = executable('qht-bench', sources: 'qht-bench.c', dependencies: [qemuutil]) -qtree_bench = executable('qtree-bench', - sources: 'qtree-bench.c', - dependencies: [qemuutil]) +executable('qtree-bench', + sources: 'qtree-bench.c', + dependencies: [qemuutil]) executable('atomic_add-bench', sources: files('atomic_add-bench.c'), diff --git a/tests/data/acpi/virt/SSDT.memhp b/tests/data/acpi/virt/SSDT.memhp index 31ff6ac46940a3e304a2d68efabe12bab50be0d7..fb3dcde5a10936667ad75a759b8bd444a7b19fc2 100644 Binary files a/tests/data/acpi/virt/SSDT.memhp and b/tests/data/acpi/virt/SSDT.memhp differ diff --git a/tests/fp/meson.build b/tests/fp/meson.build index cbc17392d678f2b1b8920be900154b0bbce24192..114b4b483ea772648b91b2745a616e17ef2ed363 100644 --- a/tests/fp/meson.build +++ b/tests/fp/meson.build @@ -1,9 +1,9 @@ -if 'CONFIG_TCG' not in config_all +if 'CONFIG_TCG' not in config_all_accel subdir_done() endif # There are namespace pollution issues on Windows, due to osdep.h # bringing in Windows headers that define a FLOAT128 type. 
-if targetos == 'windows' +if host_os == 'windows' subdir_done() endif @@ -124,7 +124,7 @@ test('fp-test-mulAdd', fptest, # no fptest_rounding_args args: fptest_args + ['f16_mulAdd', 'f32_mulAdd', 'f64_mulAdd', 'f128_mulAdd'], - suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 90) + suite: ['softfloat-slow', 'softfloat-ops-slow', 'slow'], timeout: 180) executable( 'fp-bench', diff --git a/tests/meson.build b/tests/meson.build index 9996a293fbb5e95b6e20cf113b92042ff6fa420f..0a6f96f8f8427e5049f4bd7fa095dd4ef86758d8 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -68,7 +68,7 @@ test_deps = { 'test-qht-par': qht_bench, } -if have_tools and have_vhost_user and targetos == 'linux' +if have_tools and have_vhost_user and host_os == 'linux' executable('vhost-user-bridge', sources: files('vhost-user-bridge.c'), dependencies: [qemuutil, vhost_user]) @@ -76,7 +76,7 @@ endif subdir('decode') -if 'CONFIG_TCG' in config_all +if 'CONFIG_TCG' in config_all_accel subdir('fp') endif diff --git a/tests/migration/i386/Makefile b/tests/migration/i386/Makefile index 5c0324134a7d98906cab64ab615b96856ac01e60..37a72ae353be5cbf1e41930c022462c0e164335f 100644 --- a/tests/migration/i386/Makefile +++ b/tests/migration/i386/Makefile @@ -4,9 +4,10 @@ .PHONY: all clean all: a-b-bootblock.h -a-b-bootblock.h: x86.bootsect +a-b-bootblock.h: x86.bootsect x86.o echo "$$__note" > header.tmp xxd -i $< | sed -e 's/.*int.*//' >> header.tmp + nm x86.o | awk '{print "#define SYM_"$$3" 0x"$$1}' >> header.tmp mv header.tmp $@ x86.bootsect: x86.boot @@ -16,7 +17,7 @@ x86.boot: x86.o $(CROSS_PREFIX)objcopy -O binary $< $@ x86.o: a-b-bootblock.S - $(CROSS_PREFIX)gcc -m32 -march=i486 -c $< -o $@ + $(CROSS_PREFIX)gcc -I.. -m32 -march=i486 -c $< -o $@ clean: @rm -rf *.boot *.o *.bootsect diff --git a/tests/migration/i386/a-b-bootblock.S b/tests/migration/i386/a-b-bootblock.S index 6bb9999d601ca733d6c5012522b712553479da12..6f39eb6051102c50c830dc7f510ee18423392e42 100644 --- a/tests/migration/i386/a-b-bootblock.S +++ b/tests/migration/i386/a-b-bootblock.S @@ -9,6 +9,23 @@ # # Author: dgilbert@redhat.com +#include "migration-test.h" + +#define ACPI_ENABLE 0xf1 +#define ACPI_PORT_SMI_CMD 0xb2 +#define ACPI_PM_BASE 0x600 +#define PM1A_CNT_OFFSET 4 + +#define ACPI_SCI_ENABLE 0x0001 +#define ACPI_SLEEP_TYPE 0x0400 +#define ACPI_SLEEP_ENABLE 0x2000 +#define SLEEP (ACPI_SCI_ENABLE + ACPI_SLEEP_TYPE + ACPI_SLEEP_ENABLE) + +#define LOW_ADDR X86_TEST_MEM_START +#define HIGH_ADDR X86_TEST_MEM_END + +/* Save the suspended status at an address that is not written in the loop. */ +#define suspended (X86_TEST_MEM_START + 4) .code16 .org 0x7c00 @@ -35,8 +52,8 @@ start: # at 0x7c00 ? mov %eax,%ds # Start from 1MB -.set TEST_MEM_START, (1024*1024) -.set TEST_MEM_END, (100*1024*1024) +.set TEST_MEM_START, X86_TEST_MEM_START +.set TEST_MEM_END, X86_TEST_MEM_END mov $65,%ax mov $0x3f8,%dx @@ -69,7 +86,30 @@ innerloop: mov $0x3f8,%dx outb %al,%dx - jmp mainloop + # should this test suspend? + mov (suspend_me),%eax + cmp $0,%eax + je mainloop + + # are we waking after suspend? do not suspend again. + mov $suspended,%eax + mov (%eax),%eax + cmp $1,%eax + je mainloop + + # enable acpi + mov $ACPI_ENABLE,%al + outb %al,$ACPI_PORT_SMI_CMD + + # suspend to ram + mov $suspended,%eax + movl $1,(%eax) + mov $SLEEP,%ax + mov $(ACPI_PM_BASE + PM1A_CNT_OFFSET),%dx + outw %ax,%dx + # not reached. The wakeup causes reset and restart at 0x7c00, and we + # do not save and restore registers as a real kernel would do. 
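Putting numbers on the sequence just added: the guest writes ACPI_ENABLE (0xf1) to the SMI command port 0xb2 to switch on ACPI, records that it has suspended once, and then writes SLEEP = ACPI_SCI_ENABLE + ACPI_SLEEP_TYPE + ACPI_SLEEP_ENABLE = 0x0001 + 0x0400 + 0x2000 = 0x2401 to the PM1a control register at I/O port ACPI_PM_BASE + PM1A_CNT_OFFSET = 0x604, which triggers suspend-to-RAM. A rough C rendering of those steps, purely as an illustration (it assumes Linux-style outb(value, port)/outw(value, port) helpers and is not part of the boot block):

/* Illustration of the suspend path above, not part of the patch. */
outb(ACPI_ENABLE, ACPI_PORT_SMI_CMD);          /* 0xf1 -> port 0xb2: enable ACPI    */
*(volatile uint32_t *)suspended = 1;           /* do not suspend again after wakeup */
outw(SLEEP, ACPI_PM_BASE + PM1A_CNT_OFFSET);   /* 0x2401 -> port 0x604: enter S3    */
/* not reached: wakeup resets the CPU and execution restarts at 0x7c00 */

The test launcher opts a guest into this path by poking a 1 into the suspend_me word defined a few lines below.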
+ # GDT magic from old (GPLv2) Grub startup.S .p2align 2 /* force 4-byte alignment */ @@ -95,6 +135,10 @@ gdtdesc: .word 0x27 /* limit */ .long gdt /* addr */ + /* test launcher can poke a 1 here to exercise suspend */ +suspend_me: + .int 0 + /* I'm a bootable disk */ .org 0x7dfe .byte 0x55 diff --git a/tests/migration/i386/a-b-bootblock.h b/tests/migration/i386/a-b-bootblock.h index 5b523917cef76eb9d8a643ab861c112eea0905db..c83f8711dbfbc8adb812a62a936cf3b0ab28d54a 100644 --- a/tests/migration/i386/a-b-bootblock.h +++ b/tests/migration/i386/a-b-bootblock.h @@ -4,7 +4,7 @@ * the header and the assembler differences in your patch submission. */ unsigned char x86_bootsect[] = { - 0xfa, 0x0f, 0x01, 0x16, 0x8c, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, + 0xfa, 0x0f, 0x01, 0x16, 0xb8, 0x7c, 0x66, 0xb8, 0x01, 0x00, 0x00, 0x00, 0x0f, 0x22, 0xc0, 0x66, 0xea, 0x20, 0x7c, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x92, 0x0c, 0x02, 0xe6, 0x92, 0xb8, 0x10, 0x00, 0x00, 0x00, 0x8e, 0xd8, 0x66, 0xb8, 0x41, @@ -13,13 +13,13 @@ unsigned char x86_bootsect[] = { 0x40, 0x06, 0x7c, 0xf1, 0xb8, 0x00, 0x00, 0x10, 0x00, 0xfe, 0x00, 0x05, 0x00, 0x10, 0x00, 0x00, 0x3d, 0x00, 0x00, 0x40, 0x06, 0x7c, 0xf2, 0xfe, 0xc3, 0x80, 0xe3, 0x3f, 0x75, 0xe6, 0x66, 0xb8, 0x42, 0x00, 0x66, 0xba, - 0xf8, 0x03, 0xee, 0xeb, 0xdb, 0x8d, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00, - 0xff, 0xff, 0x00, 0x00, 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0x74, 0x7c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xf8, 0x03, 0xee, 0xa1, 0xbe, 0x7c, 0x00, 0x00, 0x83, 0xf8, 0x00, 0x74, + 0xd3, 0xb8, 0x04, 0x00, 0x10, 0x00, 0x8b, 0x00, 0x83, 0xf8, 0x01, 0x74, + 0xc7, 0xb0, 0xf1, 0xe6, 0xb2, 0xb8, 0x04, 0x00, 0x10, 0x00, 0xc7, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x66, 0xb8, 0x01, 0x24, 0x66, 0xba, 0x04, 0x06, + 0x66, 0xef, 0x66, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x9a, 0xcf, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x00, 0x92, 0xcf, 0x00, 0x27, 0x00, 0xa0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -49,3 +49,13 @@ unsigned char x86_bootsect[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0xaa }; +#define SYM_do_zero 0x00007c3d +#define SYM_gdt 0x00007ca0 +#define SYM_gdtdesc 0x00007cb8 +#define SYM_innerloop 0x00007c51 +#define SYM_mainloop 0x00007c4c +#define SYM_pre_zero 0x00007c38 +#define SYM_start 0x00007c00 +#define SYM_suspend_me 0x00007cbe +#define SYM_TEST_MEM_END 0x06400000 +#define SYM_TEST_MEM_START 0x00100000 diff --git a/tests/plugin/meson.build b/tests/plugin/meson.build index 28a929dbcc053faf5a41f0ef033dfc4151f263a7..e18183aaeda034b69d598a9c0e0a8efbde1b378d 100644 --- a/tests/plugin/meson.build +++ b/tests/plugin/meson.build @@ -1,7 +1,7 @@ t = [] if get_option('plugins') foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall'] - if targetos == 'windows' + if host_os == 'windows' t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c', include_directories: '../../include/qemu', link_depends: [win32_qemu_plugin_api_lib], diff 
--git a/tests/qemu-iotests/202 b/tests/qemu-iotests/202 index b784dcd791a4aa1fe894e2b2489773f6e8d53e47..13304242e5c8b6b525a2c989f7b4d214db0e5264 100755 --- a/tests/qemu-iotests/202 +++ b/tests/qemu-iotests/202 @@ -21,7 +21,7 @@ # Check that QMP 'transaction' blockdev-snapshot-sync with multiple drives on a # single IOThread completes successfully. This particular command triggered a # hang due to recursive AioContext locking and BDRV_POLL_WHILE(). Protect -# against regressions. +# against regressions even though the AioContext lock no longer exists. import iotests diff --git a/tests/qemu-iotests/203 b/tests/qemu-iotests/203 index ab80fd0e44af819259f28fbfc68f78604ead08aa..1ba878522b0e627c7cdcdf0c3ceeceddd6b567fd 100755 --- a/tests/qemu-iotests/203 +++ b/tests/qemu-iotests/203 @@ -21,7 +21,8 @@ # Check that QMP 'migrate' with multiple drives on a single IOThread completes # successfully. This particular command triggered a hang in the source QEMU # process due to recursive AioContext locking in bdrv_invalidate_all() and -# BDRV_POLL_WHILE(). +# BDRV_POLL_WHILE(). Protect against regressions even though the AioContext +# lock no longer exists. import iotests diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build index 53847cb98fc73bf08c8eb1128cab6f7cbd5361e8..fad340ad59579d88705fe9b38f8efaec419539d8 100644 --- a/tests/qemu-iotests/meson.build +++ b/tests/qemu-iotests/meson.build @@ -1,4 +1,4 @@ -if not have_tools or targetos == 'windows' +if not have_tools or host_os == 'windows' subdir_done() endif diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots b/tests/qemu-iotests/tests/qcow2-internal-snapshots new file mode 100755 index 0000000000000000000000000000000000000000..36523aba06e307b95dfc52ca02c81f1cd73ad19f --- /dev/null +++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots @@ -0,0 +1,170 @@ +#!/usr/bin/env bash +# group: rw quick +# +# Test case for internal snapshots in qcow2 +# +# Copyright (C) 2023 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# + +# creator +owner=kwolf@redhat.com + +seq="$(basename $0)" +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ../common.rc +. 
../common.filter + +# This tests qcow2-specific low-level functionality +_supported_fmt qcow2 +_supported_proto generic +# Internal snapshots are (currently) impossible with refcount_bits=1, +# and generally impossible with external data files +_unsupported_imgopts 'compat=0.10' 'refcount_bits=1[^0-9]' data_file + +IMG_SIZE=64M + +_qemu() +{ + $QEMU -no-shutdown -nographic -monitor stdio -serial none \ + -blockdev file,filename="$TEST_IMG",node-name=disk0-file \ + -blockdev "$IMGFMT",file=disk0-file,node-name=disk0 \ + -object iothread,id=iothread0 \ + -device virtio-scsi,iothread=iothread0 \ + -device scsi-hd,drive=disk0,share-rw=on \ + "$@" 2>&1 |\ + _filter_qemu | _filter_hmp | _filter_qemu_io +} + +_make_test_img $IMG_SIZE + +echo +echo "=== Write some data, take a snapshot and overwrite part of it ===" +echo + +{ + echo 'qemu-io disk0 "write -P0x11 0 1M"' + # Give qemu some time to boot before saving the VM state + sleep 0.5 + echo "savevm snap0" + echo 'qemu-io disk0 "write -P0x22 0 512k"' + echo "quit" +} | _qemu + +echo +$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size +_check_test_img + +echo +echo "=== Verify that loading the snapshot reverts to the old content ===" +echo + +{ + # -loadvm reverted the write from the previous QEMU instance + echo 'qemu-io disk0 "read -P0x11 0 1M"' + + # Verify that it works without restarting QEMU, too + echo 'qemu-io disk0 "write -P0x33 512k 512k"' + echo "loadvm snap0" + echo 'qemu-io disk0 "read -P0x11 0 1M"' + + # Verify COW by writing a partial cluster + echo 'qemu-io disk0 "write -P0x33 63k 2k"' + echo 'qemu-io disk0 "read -P0x11 0 63k"' + echo 'qemu-io disk0 "read -P0x33 63k 2k"' + echo 'qemu-io disk0 "read -P0x11 65k 63k"' + + # Take a second snapshot + echo "savevm snap1" + + echo "quit" +} | _qemu -loadvm snap0 + +echo +$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size +_check_test_img + +echo +echo "=== qemu-img snapshot can revert to snapshots ===" +echo + +$QEMU_IMG snapshot -a snap0 "$TEST_IMG" +$QEMU_IO -c "read -P0x11 0 1M" "$TEST_IMG" | _filter_qemu_io +$QEMU_IMG snapshot -a snap1 "$TEST_IMG" +$QEMU_IO \ + -c "read -P0x11 0 63k" \ + -c "read -P0x33 63k 2k" \ + -c "read -P0x11 65k 63k" \ + "$TEST_IMG" | _filter_qemu_io + +echo +echo "=== Deleting snapshots ===" +echo +{ + # The active layer stays unaffected by deleting the snapshot + echo "delvm snap1" + echo 'qemu-io disk0 "read -P0x11 0 63k"' + echo 'qemu-io disk0 "read -P0x33 63k 2k"' + echo 'qemu-io disk0 "read -P0x11 65k 63k"' + + echo "quit" +} | _qemu + + +echo +$QEMU_IMG snapshot -l "$TEST_IMG" | _filter_date | _filter_vmstate_size +_check_test_img + +echo +echo "=== Error cases ===" +echo + +# snap1 should not exist any more +_qemu -loadvm snap1 + +echo +{ + echo "loadvm snap1" + echo "quit" +} | _qemu + +# Snapshot operations and inactive images are incompatible +echo +_qemu -loadvm snap0 -incoming defer +{ + echo "loadvm snap0" + echo "delvm snap0" + echo "savevm snap1" + echo "quit" +} | _qemu -incoming defer + +# -loadvm and -preconfig are incompatible +echo +_qemu -loadvm snap0 -preconfig + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/tests/qcow2-internal-snapshots.out b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out new file mode 100644 index 0000000000000000000000000000000000000000..438f535e6ac9a8b66648b67994a7b0751e3a8876 --- /dev/null +++ b/tests/qemu-iotests/tests/qcow2-internal-snapshots.out @@ -0,0 +1,107 @@ +QA output created by qcow2-internal-snapshots 
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 + +=== Write some data, take a snapshot and overwrite part of it === + +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) qemu-io disk0 "write -P0x11 0 1M" +wrote 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) savevm snap0 +(qemu) qemu-io disk0 "write -P0x22 0 512k" +wrote 524288/524288 bytes at offset 0 +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) quit + +Snapshot list: +ID TAG VM SIZE DATE VM CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +No errors were found on the image. + +=== Verify that loading the snapshot reverts to the old content === + +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) qemu-io disk0 "read -P0x11 0 1M" +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "write -P0x33 512k 512k" +wrote 524288/524288 bytes at offset 524288 +512 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) loadvm snap0 +(qemu) qemu-io disk0 "read -P0x11 0 1M" +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "write -P0x33 63k 2k" +wrote 2048/2048 bytes at offset 64512 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "read -P0x11 0 63k" +read 64512/64512 bytes at offset 0 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "read -P0x33 63k 2k" +read 2048/2048 bytes at offset 64512 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "read -P0x11 65k 63k" +read 64512/64512 bytes at offset 66560 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) savevm snap1 +(qemu) quit + +Snapshot list: +ID TAG VM SIZE DATE VM CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +2 snap1 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +No errors were found on the image. + +=== qemu-img snapshot can revert to snapshots === + +read 1048576/1048576 bytes at offset 0 +1 MiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 64512/64512 bytes at offset 0 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 2048/2048 bytes at offset 64512 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +read 64512/64512 bytes at offset 66560 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +=== Deleting snapshots === + +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) delvm snap1 +(qemu) qemu-io disk0 "read -P0x11 0 63k" +read 64512/64512 bytes at offset 0 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "read -P0x33 63k 2k" +read 2048/2048 bytes at offset 64512 +2 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) qemu-io disk0 "read -P0x11 65k 63k" +read 64512/64512 bytes at offset 66560 +63 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) +(qemu) quit + +Snapshot list: +ID TAG VM SIZE DATE VM CLOCK ICOUNT +1 snap0 SIZE yyyy-mm-dd hh:mm:ss 00:00:00.000 +No errors were found on the image. 
+ +=== Error cases === + +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) QEMU_PROG: Snapshot 'snap1' does not exist in one or more devices + +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) loadvm snap1 +Error: Snapshot 'snap1' does not exist in one or more devices +(qemu) quit + +QEMU_PROG: 'incoming' and 'loadvm' options are mutually exclusive +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) loadvm snap0 +Error: Device 'disk0' is writable but does not support snapshots +(qemu) delvm snap0 +Error: Device 'disk0' is writable but does not support snapshots +(qemu) savevm snap1 +Error: Device 'disk0' is writable but does not support snapshots +(qemu) quit + +QEMU_PROG: 'preconfig' and 'loadvm' options are mutually exclusive +*** done diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index fe6a9a8563ced0b7bd75b6d52c41f0ef9b813013..21811a1ab5c01b46102f357c138b87212271988c 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -1015,7 +1015,7 @@ static void test_acpi_q35_tcg(void) free_test_data(&data); } -static void test_acpi_q35_tcg_type4_count(void) +static void test_acpi_q35_kvm_type4_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1031,7 +1031,7 @@ static void test_acpi_q35_tcg_type4_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_core_count(void) +static void test_acpi_q35_kvm_core_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1048,7 +1048,7 @@ static void test_acpi_q35_tcg_core_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_core_count2(void) +static void test_acpi_q35_kvm_core_count2(void) { test_data data = { .machine = MACHINE_Q35, @@ -1065,7 +1065,7 @@ static void test_acpi_q35_tcg_core_count2(void) free_test_data(&data); } -static void test_acpi_q35_tcg_thread_count(void) +static void test_acpi_q35_kvm_thread_count(void) { test_data data = { .machine = MACHINE_Q35, @@ -1082,7 +1082,7 @@ static void test_acpi_q35_tcg_thread_count(void) free_test_data(&data); } -static void test_acpi_q35_tcg_thread_count2(void) +static void test_acpi_q35_kvm_thread_count2(void) { test_data data = { .machine = MACHINE_Q35, @@ -2262,15 +2262,15 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); qtest_add_func("acpi/q35/type4-count", - test_acpi_q35_tcg_type4_count); + test_acpi_q35_kvm_type4_count); qtest_add_func("acpi/q35/core-count", - test_acpi_q35_tcg_core_count); + test_acpi_q35_kvm_core_count); qtest_add_func("acpi/q35/core-count2", - test_acpi_q35_tcg_core_count2); + test_acpi_q35_kvm_core_count2); qtest_add_func("acpi/q35/thread-count", - test_acpi_q35_tcg_thread_count); + test_acpi_q35_kvm_thread_count); qtest_add_func("acpi/q35/thread-count2", - test_acpi_q35_tcg_thread_count2); + test_acpi_q35_kvm_thread_count2); } if (qtest_has_device("virtio-iommu-pci")) { qtest_add_func("acpi/q35/viot", test_acpi_q35_viot); diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 47dabf91d048752e4c69318c4eba415e122373d8..fd40136fa9c8f5106fd20b8c60c1244b26559045 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -1,14 +1,15 @@ slow_qtests = { - 'ahci-test' : 60, - 'bios-tables-test' : 120, - 'boot-serial-test' : 60, - 'migration-test' : 150, - 'npcm7xx_pwm-test': 150, - 'prom-env-test' : 60, - 'pxe-test' : 60, - 'qos-test' : 60, - 'qom-test' : 300, - 'test-hmp' : 120, + 'aspeed_smc-test': 360, + 
'bios-tables-test' : 540, + 'device-introspect-test' : 720, + 'migration-test' : 480, + 'npcm7xx_pwm-test': 300, + 'qom-test' : 900, + 'test-hmp' : 240, + 'pxe-test': 600, + 'prom-env-test': 360, + 'boot-serial-test': 180, + 'qos-test': 120, } qtests_generic = [ @@ -38,8 +39,8 @@ qtests_cxl = \ # for the availability of the default NICs in the tests qtests_filter = \ (get_option('default_devices') and slirp.found() ? ['test-netfilter'] : []) + \ - (get_option('default_devices') and targetos != 'windows' ? ['test-filter-mirror'] : []) + \ - (get_option('default_devices') and targetos != 'windows' ? ['test-filter-redirector'] : []) + (get_option('default_devices') and host_os != 'windows' ? ['test-filter-mirror'] : []) + \ + (get_option('default_devices') and host_os != 'windows' ? ['test-filter-redirector'] : []) qtests_i386 = \ (slirp.found() ? ['pxe-test'] : []) + \ @@ -48,7 +49,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_SGA') ? ['boot-serial-test'] : []) + \ (config_all_devices.has_key('CONFIG_ISA_IPMI_KCS') ? ['ipmi-kcs-test'] : []) + \ - (targetos == 'linux' and \ + (host_os == 'linux' and \ config_all_devices.has_key('CONFIG_ISA_IPMI_BT') and config_all_devices.has_key('CONFIG_IPMI_EXTERN') ? ['ipmi-bt-test'] : []) + \ (config_all_devices.has_key('CONFIG_WDT_IB700') ? ['wdt_ib700-test'] : []) + \ @@ -74,7 +75,7 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \ (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \ (config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \ - (targetos != 'windows' and \ + (host_os != 'windows' and \ config_all_devices.has_key('CONFIG_ACPI_ERST') ? ['erst-test'] : []) + \ (config_all_devices.has_key('CONFIG_PCIE_PORT') and \ config_all_devices.has_key('CONFIG_VIRTIO_NET') and \ @@ -155,8 +156,8 @@ qtests_ppc = \ qtests_filter + \ (config_all_devices.has_key('CONFIG_ISA_TESTDEV') ? ['endianness-test'] : []) + \ (config_all_devices.has_key('CONFIG_M48T59') ? ['m48t59-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \ + (config_all_accel.has_key('CONFIG_TCG') ? ['prom-env-test'] : []) + \ + (config_all_accel.has_key('CONFIG_TCG') ? ['boot-serial-test'] : []) + \ ['boot-order-test'] qtests_ppc64 = \ @@ -213,12 +214,12 @@ qtests_arm = \ # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional qtests_aarch64 = \ (cpu != 'arm' and unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') and config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? \ + (config_all_accel.has_key('CONFIG_TCG') and config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? \ ['tpm-tis-device-test', 'tpm-tis-device-swtpm-test'] : []) + \ (config_all_devices.has_key('CONFIG_XLNX_ZYNQMP_ARM') ? ['xlnx-can-test', 'fuzz-xlnx-dp-test'] : []) + \ (config_all_devices.has_key('CONFIG_XLNX_VERSAL') ? ['xlnx-canfd-test', 'xlnx-versal-trng-test'] : []) + \ (config_all_devices.has_key('CONFIG_RASPI') ? ['bcm2835-dma-test'] : []) + \ - (config_all.has_key('CONFIG_TCG') and \ + (config_all_accel.has_key('CONFIG_TCG') and \ config_all_devices.has_key('CONFIG_TPM_TIS_I2C') ? 
['tpm-tis-i2c-test'] : []) + \ ['arm-cpu-features', 'numa-test', @@ -277,7 +278,7 @@ if config_all_devices.has_key('CONFIG_VIRTIO_SERIAL') qos_test_ss.add(files('virtio-serial-test.c')) endif -if targetos != 'windows' +if host_os != 'windows' qos_test_ss.add(files('e1000e-test.c')) endif if have_virtfs @@ -310,7 +311,7 @@ qtests = { 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], 'migration-test': migration_files, 'pxe-test': files('boot-sector.c'), - 'qos-test': [chardev, io, qos_test_ss.apply(config_targetos, strict: false).sources()], + 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], 'tpm-crb-swtpm-test': [io, tpmemu_files], 'tpm-crb-test': [io, tpmemu_files], 'tpm-tis-swtpm-test': [io, tpmemu_files, 'tpm-tis-util.c'], @@ -383,8 +384,8 @@ foreach dir : target_dirs env: qtest_env, args: ['--tap', '-k'], protocol: 'tap', - timeout: slow_qtests.get(test, 30), - priority: slow_qtests.get(test, 30), + timeout: slow_qtests.get(test, 60), + priority: slow_qtests.get(test, 60), suite: ['qtest', 'qtest-' + target_base]) endforeach endforeach diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c index 24fb7b3525cf4ea023649dfc5de45f7c395b0876..e451dbdbed136571e0da2de7d194d4adb8ddfdb2 100644 --- a/tests/qtest/migration-helpers.c +++ b/tests/qtest/migration-helpers.c @@ -24,26 +24,19 @@ */ #define MIGRATION_STATUS_WAIT_TIMEOUT 120 -bool migrate_watch_for_stop(QTestState *who, const char *name, - QDict *event, void *opaque) +bool migrate_watch_for_events(QTestState *who, const char *name, + QDict *event, void *opaque) { - bool *seen = opaque; + QTestMigrationState *state = opaque; if (g_str_equal(name, "STOP")) { - *seen = true; + state->stop_seen = true; return true; - } - - return false; -} - -bool migrate_watch_for_resume(QTestState *who, const char *name, - QDict *event, void *opaque) -{ - bool *seen = opaque; - - if (g_str_equal(name, "RESUME")) { - *seen = true; + } else if (g_str_equal(name, "SUSPEND")) { + state->suspend_seen = true; + return true; + } else if (g_str_equal(name, "RESUME")) { + state->resume_seen = true; return true; } @@ -118,6 +111,12 @@ void migrate_incoming_qmp(QTestState *to, const char *uri, const char *fmt, ...) 
rsp = qtest_qmp(to, "{ 'execute': 'migrate-incoming', 'arguments': %p}", args); + + if (!qdict_haskey(rsp, "return")) { + g_autoptr(GString) s = qobject_to_json_pretty(QOBJECT(rsp), true); + g_test_message("%s", s->str); + } + g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); @@ -292,3 +291,35 @@ char *resolve_machine_version(const char *alias, const char *var1, return find_common_machine_version(machine_name, var1, var2); } + +typedef struct { + char *name; + void (*func)(void); +} MigrationTest; + +static void migration_test_destroy(gpointer data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_free(test->name); + g_free(test); +} + +static void migration_test_wrapper(const void *data) +{ + MigrationTest *test = (MigrationTest *)data; + + g_test_message("Running /%s%s", qtest_get_arch(), test->name); + test->func(); +} + +void migration_test_add(const char *path, void (*fn)(void)) +{ + MigrationTest *test = g_new0(MigrationTest, 1); + + test->func = fn; + test->name = g_strdup(path); + + qtest_add_data_func_full(path, test, migration_test_wrapper, + migration_test_destroy); +} diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h index e31dc85cc7524ba1830f30f066b273e51db3b44f..3bf7ded1b97c5e0f1f466c32af0022f60129e0c8 100644 --- a/tests/qtest/migration-helpers.h +++ b/tests/qtest/migration-helpers.h @@ -15,9 +15,14 @@ #include "libqtest.h" -bool migrate_watch_for_stop(QTestState *who, const char *name, - QDict *event, void *opaque); -bool migrate_watch_for_resume(QTestState *who, const char *name, +typedef struct QTestMigrationState { + bool stop_seen; + bool resume_seen; + bool suspend_seen; + bool suspend_me; +} QTestMigrationState; + +bool migrate_watch_for_events(QTestState *who, const char *name, QDict *event, void *opaque); G_GNUC_PRINTF(3, 4) @@ -47,4 +52,5 @@ char *find_common_machine_version(const char *mtype, const char *var1, const char *var2); char *resolve_machine_version(const char *alias, const char *var1, const char *var2); +void migration_test_add(const char *path, void (*fn)(void)); #endif /* MIGRATION_HELPERS_H */ diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index 0fbaa6a90fd6c1dfe2fc3c4e7f05f6b1b3fc10d8..d3066e119f4cf7de949f6f6d91a89d8b8565f0ac 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -43,8 +43,8 @@ unsigned start_address; unsigned end_address; static bool uffd_feature_thread_id; -static bool got_src_stop; -static bool got_dst_resume; +static QTestMigrationState src_state; +static QTestMigrationState dst_state; /* * An initial 3 MB offset is used as that corresponds @@ -133,7 +133,7 @@ static char *bootpath; #include "tests/migration/aarch64/a-b-kernel.h" #include "tests/migration/s390x/a-b-bios.h" -static void bootfile_create(char *dir) +static void bootfile_create(char *dir, bool suspend_me) { const char *arch = qtest_get_arch(); unsigned char *content; @@ -143,6 +143,7 @@ static void bootfile_create(char *dir) if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { /* the assembled x86 boot sector should be exactly one sector large */ g_assert(sizeof(x86_bootsect) == 512); + x86_bootsect[SYM_suspend_me - SYM_start] = suspend_me; content = x86_bootsect; len = sizeof(x86_bootsect); } else if (g_str_equal(arch, "s390x")) { @@ -177,7 +178,7 @@ static void bootfile_delete(void) /* * Wait for some output in the serial output file, * we get an 'A' followed by an endless string of 'B's - * but on the destination we won't have the A. 
+ * but on the destination we won't have the A (unless we enabled suspend/resume) */ static void wait_for_serial(const char *side) { @@ -230,6 +231,27 @@ static void wait_for_serial(const char *side) } while (true); } +static void wait_for_stop(QTestState *who, QTestMigrationState *state) +{ + if (!state->stop_seen) { + qtest_qmp_eventwait(who, "STOP"); + } +} + +static void wait_for_resume(QTestState *who, QTestMigrationState *state) +{ + if (!state->resume_seen) { + qtest_qmp_eventwait(who, "RESUME"); + } +} + +static void wait_for_suspend(QTestState *who, QTestMigrationState *state) +{ + if (state->suspend_me && !state->suspend_seen) { + qtest_qmp_eventwait(who, "SUSPEND"); + } +} + /* * It's tricky to use qemu's migration event capability with qtest, * events suddenly appearing confuse the qmp()/hmp() responses. @@ -277,21 +299,19 @@ static void read_blocktime(QTestState *who) qobject_unref(rsp_return); } +/* + * Wait for two changes in the migration pass count, but bail if we stop. + */ static void wait_for_migration_pass(QTestState *who) { - uint64_t initial_pass = get_migration_pass(who); - uint64_t pass; + uint64_t pass, prev_pass = 0, changes = 0; - /* Wait for the 1st sync */ - while (!got_src_stop && !initial_pass) { - usleep(1000); - initial_pass = get_migration_pass(who); - } - - do { + while (changes < 2 && !src_state.stop_seen && !src_state.suspend_seen) { usleep(1000); pass = get_migration_pass(who); - } while (pass == initial_pass && !got_src_stop); + changes += (pass != prev_pass); + prev_pass = pass; + } } static void check_guests_ram(QTestState *who) @@ -571,6 +591,12 @@ static void migrate_wait_for_dirty_mem(QTestState *from, usleep(1000 * 10); } while (qtest_readq(to, marker_address) != MAGIC_MARKER); + + /* If suspended, src only iterates once, and watch_byte may never change */ + if (src_state.suspend_me) { + return; + } + /* * Now ensure that already transferred bytes are * dirty again from the guest workload. Note the @@ -617,10 +643,7 @@ static void migrate_postcopy_start(QTestState *from, QTestState *to) { qtest_qmp_assert_success(from, "{ 'execute': 'migrate-start-postcopy' }"); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } - + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to, "RESUME"); } @@ -637,6 +660,8 @@ typedef struct { bool use_dirty_ring; const char *opts_source; const char *opts_target; + /* suspend the src before migrating to dest. 
*/ + bool suspend_me; } MigrateStart; /* @@ -756,8 +781,11 @@ static int test_migrate_start(QTestState **from, QTestState **to, } } - got_src_stop = false; - got_dst_resume = false; + dst_state = (QTestMigrationState) { }; + src_state = (QTestMigrationState) { }; + bootfile_create(tmpfs, args->suspend_me); + src_state.suspend_me = args->suspend_me; + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { memory_size = "150M"; @@ -848,8 +876,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, if (!args->only_target) { *from = qtest_init_with_env(QEMU_ENV_SRC, cmd_source); qtest_qmp_set_event_callback(*from, - migrate_watch_for_stop, - &got_src_stop); + migrate_watch_for_events, + &src_state); } cmd_target = g_strdup_printf("-accel kvm%s -accel tcg " @@ -869,8 +897,8 @@ static int test_migrate_start(QTestState **from, QTestState **to, ignore_stderr); *to = qtest_init_with_env(QEMU_ENV_DST, cmd_target); qtest_qmp_set_event_callback(*to, - migrate_watch_for_resume, - &got_dst_resume); + migrate_watch_for_events, + &dst_state); /* * Remove shmem file immediately to avoid memory leak in test failed case. @@ -1319,6 +1347,7 @@ static int migrate_postcopy_prepare(QTestState **from_ptr, /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); g_autofree char *uri = migrate_get_socket_address(to, "socket-address"); migrate_qmp(from, uri, "{}"); @@ -1336,6 +1365,11 @@ static void migrate_postcopy_complete(QTestState *from, QTestState *to, { wait_for_migration_complete(from); + if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); + } + /* Make sure we get at least one "B" on destination */ wait_for_serial("dest_serial"); @@ -1369,6 +1403,15 @@ static void test_postcopy(void) test_postcopy_common(&args); } +static void test_postcopy_suspend(void) +{ + MigrateCommon args = { + .start.suspend_me = true, + }; + + test_postcopy_common(&args); +} + static void test_postcopy_compress(void) { MigrateCommon args = { @@ -1703,6 +1746,7 @@ static void test_precopy_common(MigrateCommon *args) /* Wait for the first serial output from the source */ if (args->result == MIG_TEST_SUCCEED) { wait_for_serial("src_serial"); + wait_for_suspend(from, &src_state); } if (args->live) { @@ -1717,9 +1761,7 @@ static void test_precopy_common(MigrateCommon *args) */ if (args->result == MIG_TEST_SUCCEED) { qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); migrate_ensure_converge(from); } } @@ -1765,9 +1807,8 @@ static void test_precopy_common(MigrateCommon *args) */ wait_for_migration_complete(from); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); + } else { wait_for_migration_complete(from); /* @@ -1780,8 +1821,11 @@ static void test_precopy_common(MigrateCommon *args) qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); } - if (!got_dst_resume) { - qtest_qmp_eventwait(to, "RESUME"); + wait_for_resume(to, &dst_state); + + if (args->start.suspend_me) { + /* wakeup succeeds only if guest is suspended */ + qtest_qmp_assert_success(to, "{'execute': 'system_wakeup'}"); } wait_for_serial("dest_serial"); @@ -1821,9 +1865,7 @@ static void test_file_common(MigrateCommon *args, bool stop_src) if (stop_src) { qtest_qmp_assert_success(from, "{ 'execute' : 'stop'}"); - if (!got_src_stop) { - 
qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); } if (args->result == MIG_TEST_QMP_ERROR) { @@ -1844,10 +1886,7 @@ static void test_file_common(MigrateCommon *args, bool stop_src) if (stop_src) { qtest_qmp_assert_success(to, "{ 'execute' : 'cont'}"); } - - if (!got_dst_resume) { - qtest_qmp_eventwait(to, "RESUME"); - } + wait_for_resume(to, &dst_state); wait_for_serial("dest_serial"); @@ -1875,6 +1914,34 @@ static void test_precopy_unix_plain(void) test_precopy_common(&args); } +static void test_precopy_unix_suspend_live(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + /* + * despite being live, the test is fast because the src + * suspends immediately. + */ + .live = true, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} + +static void test_precopy_unix_suspend_notlive(void) +{ + g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); + MigrateCommon args = { + .listen_uri = uri, + .connect_uri = uri, + .start.suspend_me = true, + }; + + test_precopy_common(&args); +} static void test_precopy_unix_dirty_ring(void) { @@ -1966,9 +2033,7 @@ static void test_ignore_shared(void) migrate_wait_for_dirty_mem(from, to); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to, "RESUME"); @@ -2503,7 +2568,7 @@ static void test_migrate_auto_converge(void) break; } usleep(20); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } while (true); /* The first percentage of throttling should be at least init_pct */ g_assert_cmpint(percentage, >=, init_pct); @@ -2842,9 +2907,7 @@ static void test_multifd_tcp_cancel(void) migrate_ensure_converge(from); - if (!got_src_stop) { - qtest_qmp_eventwait(from, "STOP"); - } + wait_for_stop(from, &src_state); qtest_qmp_eventwait(to2, "RESUME"); wait_for_serial("dest_serial"); @@ -2985,7 +3048,9 @@ static int64_t get_limit_rate(QTestState *who) static QTestState *dirtylimit_start_vm(void) { QTestState *vm = NULL; - g_autofree gchar * + g_autofree gchar *cmd = NULL; + + bootfile_create(tmpfs, false); cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 " "-name dirtylimit-test,debug-threads=on " "-m 150M -smp 1 " @@ -3177,7 +3242,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } /* Now cancel migrate and wait for dirty limit throttle switch off */ @@ -3189,7 +3254,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } while (throttle_us_per_full != 0 && --max_try_count); /* Assert dirty limit is not in service */ @@ -3218,7 +3283,7 @@ static void test_migrate_dirty_limit(void) throttle_us_per_full = read_migrate_property_int(from, "dirty-limit-throttle-time-per-round"); usleep(100); - g_assert_false(got_src_stop); + g_assert_false(src_state.stop_seen); } /* @@ -3277,7 +3342,7 @@ static bool kvm_dirty_ring_supported(void) int main(int argc, char **argv) { bool has_kvm, has_tcg; - bool has_uffd; + bool has_uffd, is_x86; const char *arch; g_autoptr(GError) err = NULL; const char *qemu_src = getenv(QEMU_ENV_SRC); @@ -3307,6 +3372,7 @@ int main(int argc, char **argv) has_uffd = 
ufd_version_check(); arch = qtest_get_arch(); + is_x86 = !strcmp(arch, "i386") || !strcmp(arch, "x86_64"); /* * On ppc64, the test only works with kvm-hv, but not with kvm-pr and TCG @@ -3334,67 +3400,79 @@ int main(int argc, char **argv) g_get_tmp_dir(), err->message); } g_assert(tmpfs); - bootfile_create(tmpfs); module_call_init(MODULE_INIT_QOM); + if (is_x86) { + migration_test_add("/migration/precopy/unix/suspend/live", + test_precopy_unix_suspend_live); + migration_test_add("/migration/precopy/unix/suspend/notlive", + test_precopy_unix_suspend_notlive); + } + if (has_uffd) { - qtest_add_func("/migration/postcopy/plain", test_postcopy); - qtest_add_func("/migration/postcopy/recovery/plain", - test_postcopy_recovery); - qtest_add_func("/migration/postcopy/preempt/plain", test_postcopy_preempt); - qtest_add_func("/migration/postcopy/preempt/recovery/plain", - test_postcopy_preempt_recovery); + migration_test_add("/migration/postcopy/plain", test_postcopy); + migration_test_add("/migration/postcopy/recovery/plain", + test_postcopy_recovery); + migration_test_add("/migration/postcopy/preempt/plain", + test_postcopy_preempt); + migration_test_add("/migration/postcopy/preempt/recovery/plain", + test_postcopy_preempt_recovery); if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/postcopy/compress/plain", - test_postcopy_compress); - qtest_add_func("/migration/postcopy/recovery/compress/plain", - test_postcopy_recovery_compress); + migration_test_add("/migration/postcopy/compress/plain", + test_postcopy_compress); + migration_test_add("/migration/postcopy/recovery/compress/plain", + test_postcopy_recovery_compress); } #ifndef _WIN32 - qtest_add_func("/migration/postcopy/recovery/double-failures", - test_postcopy_recovery_double_fail); + migration_test_add("/migration/postcopy/recovery/double-failures", + test_postcopy_recovery_double_fail); #endif /* _WIN32 */ - + if (is_x86) { + migration_test_add("/migration/postcopy/suspend", + test_postcopy_suspend); + } } - qtest_add_func("/migration/bad_dest", test_baddest); + migration_test_add("/migration/bad_dest", test_baddest); #ifndef _WIN32 if (!g_str_equal(arch, "s390x")) { - qtest_add_func("/migration/analyze-script", test_analyze_script); + migration_test_add("/migration/analyze-script", test_analyze_script); } #endif - qtest_add_func("/migration/precopy/unix/plain", test_precopy_unix_plain); - qtest_add_func("/migration/precopy/unix/xbzrle", test_precopy_unix_xbzrle); + migration_test_add("/migration/precopy/unix/plain", + test_precopy_unix_plain); + migration_test_add("/migration/precopy/unix/xbzrle", + test_precopy_unix_xbzrle); /* * Compression fails from time to time. * Put test here but don't enable it until everything is fixed. 
*/ if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/precopy/unix/compress/wait", - test_precopy_unix_compress); - qtest_add_func("/migration/precopy/unix/compress/nowait", - test_precopy_unix_compress_nowait); + migration_test_add("/migration/precopy/unix/compress/wait", + test_precopy_unix_compress); + migration_test_add("/migration/precopy/unix/compress/nowait", + test_precopy_unix_compress_nowait); } - qtest_add_func("/migration/precopy/file", - test_precopy_file); - qtest_add_func("/migration/precopy/file/offset", - test_precopy_file_offset); - qtest_add_func("/migration/precopy/file/offset/bad", - test_precopy_file_offset_bad); + migration_test_add("/migration/precopy/file", + test_precopy_file); + migration_test_add("/migration/precopy/file/offset", + test_precopy_file_offset); + migration_test_add("/migration/precopy/file/offset/bad", + test_precopy_file_offset_bad); /* * Our CI system has problems with shared memory. * Don't run this test until we find a workaround. */ if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/mode/reboot", test_mode_reboot); + migration_test_add("/migration/mode/reboot", test_mode_reboot); } #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/precopy/unix/tls/psk", - test_precopy_unix_tls_psk); + migration_test_add("/migration/precopy/unix/tls/psk", + test_precopy_unix_tls_psk); if (has_uffd) { /* @@ -3402,110 +3480,108 @@ int main(int argc, char **argv) * channels are tested under precopy. Here what we want to test is the * general postcopy path that has TLS channel enabled. */ - qtest_add_func("/migration/postcopy/tls/psk", test_postcopy_tls_psk); - qtest_add_func("/migration/postcopy/recovery/tls/psk", - test_postcopy_recovery_tls_psk); - qtest_add_func("/migration/postcopy/preempt/tls/psk", - test_postcopy_preempt_tls_psk); - qtest_add_func("/migration/postcopy/preempt/recovery/tls/psk", - test_postcopy_preempt_all); + migration_test_add("/migration/postcopy/tls/psk", + test_postcopy_tls_psk); + migration_test_add("/migration/postcopy/recovery/tls/psk", + test_postcopy_recovery_tls_psk); + migration_test_add("/migration/postcopy/preempt/tls/psk", + test_postcopy_preempt_tls_psk); + migration_test_add("/migration/postcopy/preempt/recovery/tls/psk", + test_postcopy_preempt_all); } #ifdef CONFIG_TASN1 - qtest_add_func("/migration/precopy/unix/tls/x509/default-host", - test_precopy_unix_tls_x509_default_host); - qtest_add_func("/migration/precopy/unix/tls/x509/override-host", - test_precopy_unix_tls_x509_override_host); + migration_test_add("/migration/precopy/unix/tls/x509/default-host", + test_precopy_unix_tls_x509_default_host); + migration_test_add("/migration/precopy/unix/tls/x509/override-host", + test_precopy_unix_tls_x509_override_host); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ - qtest_add_func("/migration/precopy/tcp/plain", test_precopy_tcp_plain); + migration_test_add("/migration/precopy/tcp/plain", test_precopy_tcp_plain); - qtest_add_func("/migration/precopy/tcp/plain/switchover-ack", - test_precopy_tcp_switchover_ack); + migration_test_add("/migration/precopy/tcp/plain/switchover-ack", + test_precopy_tcp_switchover_ack); #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/precopy/tcp/tls/psk/match", - test_precopy_tcp_tls_psk_match); - qtest_add_func("/migration/precopy/tcp/tls/psk/mismatch", - test_precopy_tcp_tls_psk_mismatch); + migration_test_add("/migration/precopy/tcp/tls/psk/match", + test_precopy_tcp_tls_psk_match); + migration_test_add("/migration/precopy/tcp/tls/psk/mismatch", + 
test_precopy_tcp_tls_psk_mismatch); #ifdef CONFIG_TASN1 - qtest_add_func("/migration/precopy/tcp/tls/x509/default-host", - test_precopy_tcp_tls_x509_default_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/override-host", - test_precopy_tcp_tls_x509_override_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/mismatch-host", - test_precopy_tcp_tls_x509_mismatch_host); - qtest_add_func("/migration/precopy/tcp/tls/x509/friendly-client", - test_precopy_tcp_tls_x509_friendly_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/hostile-client", - test_precopy_tcp_tls_x509_hostile_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/allow-anon-client", - test_precopy_tcp_tls_x509_allow_anon_client); - qtest_add_func("/migration/precopy/tcp/tls/x509/reject-anon-client", - test_precopy_tcp_tls_x509_reject_anon_client); + migration_test_add("/migration/precopy/tcp/tls/x509/default-host", + test_precopy_tcp_tls_x509_default_host); + migration_test_add("/migration/precopy/tcp/tls/x509/override-host", + test_precopy_tcp_tls_x509_override_host); + migration_test_add("/migration/precopy/tcp/tls/x509/mismatch-host", + test_precopy_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/precopy/tcp/tls/x509/friendly-client", + test_precopy_tcp_tls_x509_friendly_client); + migration_test_add("/migration/precopy/tcp/tls/x509/hostile-client", + test_precopy_tcp_tls_x509_hostile_client); + migration_test_add("/migration/precopy/tcp/tls/x509/allow-anon-client", + test_precopy_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/precopy/tcp/tls/x509/reject-anon-client", + test_precopy_tcp_tls_x509_reject_anon_client); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ - /* qtest_add_func("/migration/ignore_shared", test_ignore_shared); */ + /* migration_test_add("/migration/ignore_shared", test_ignore_shared); */ #ifndef _WIN32 - qtest_add_func("/migration/fd_proto", test_migrate_fd_proto); + migration_test_add("/migration/fd_proto", test_migrate_fd_proto); #endif - qtest_add_func("/migration/validate_uuid", test_validate_uuid); - qtest_add_func("/migration/validate_uuid_error", test_validate_uuid_error); - qtest_add_func("/migration/validate_uuid_src_not_set", - test_validate_uuid_src_not_set); - qtest_add_func("/migration/validate_uuid_dst_not_set", - test_validate_uuid_dst_not_set); + migration_test_add("/migration/validate_uuid", test_validate_uuid); + migration_test_add("/migration/validate_uuid_error", + test_validate_uuid_error); + migration_test_add("/migration/validate_uuid_src_not_set", + test_validate_uuid_src_not_set); + migration_test_add("/migration/validate_uuid_dst_not_set", + test_validate_uuid_dst_not_set); /* * See explanation why this test is slow on function definition */ if (g_test_slow()) { - qtest_add_func("/migration/auto_converge", test_migrate_auto_converge); + migration_test_add("/migration/auto_converge", + test_migrate_auto_converge); if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { - qtest_add_func("/migration/dirty_limit", test_migrate_dirty_limit); + migration_test_add("/migration/dirty_limit", + test_migrate_dirty_limit); } } - qtest_add_func("/migration/multifd/tcp/plain/none", - test_multifd_tcp_none); - /* - * This test is flaky and sometimes fails in CI and otherwise: - * don't run unless user opts in via environment variable. 
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - qtest_add_func("/migration/multifd/tcp/plain/cancel", + migration_test_add("/migration/multifd/tcp/plain/none", + test_multifd_tcp_none); + migration_test_add("/migration/multifd/tcp/plain/cancel", test_multifd_tcp_cancel); - } - qtest_add_func("/migration/multifd/tcp/plain/zlib", - test_multifd_tcp_zlib); + migration_test_add("/migration/multifd/tcp/plain/zlib", + test_multifd_tcp_zlib); #ifdef CONFIG_ZSTD - qtest_add_func("/migration/multifd/tcp/plain/zstd", - test_multifd_tcp_zstd); + migration_test_add("/migration/multifd/tcp/plain/zstd", + test_multifd_tcp_zstd); #endif #ifdef CONFIG_GNUTLS - qtest_add_func("/migration/multifd/tcp/tls/psk/match", - test_multifd_tcp_tls_psk_match); - qtest_add_func("/migration/multifd/tcp/tls/psk/mismatch", - test_multifd_tcp_tls_psk_mismatch); + migration_test_add("/migration/multifd/tcp/tls/psk/match", + test_multifd_tcp_tls_psk_match); + migration_test_add("/migration/multifd/tcp/tls/psk/mismatch", + test_multifd_tcp_tls_psk_mismatch); #ifdef CONFIG_TASN1 - qtest_add_func("/migration/multifd/tcp/tls/x509/default-host", - test_multifd_tcp_tls_x509_default_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/override-host", - test_multifd_tcp_tls_x509_override_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/mismatch-host", - test_multifd_tcp_tls_x509_mismatch_host); - qtest_add_func("/migration/multifd/tcp/tls/x509/allow-anon-client", - test_multifd_tcp_tls_x509_allow_anon_client); - qtest_add_func("/migration/multifd/tcp/tls/x509/reject-anon-client", - test_multifd_tcp_tls_x509_reject_anon_client); + migration_test_add("/migration/multifd/tcp/tls/x509/default-host", + test_multifd_tcp_tls_x509_default_host); + migration_test_add("/migration/multifd/tcp/tls/x509/override-host", + test_multifd_tcp_tls_x509_override_host); + migration_test_add("/migration/multifd/tcp/tls/x509/mismatch-host", + test_multifd_tcp_tls_x509_mismatch_host); + migration_test_add("/migration/multifd/tcp/tls/x509/allow-anon-client", + test_multifd_tcp_tls_x509_allow_anon_client); + migration_test_add("/migration/multifd/tcp/tls/x509/reject-anon-client", + test_multifd_tcp_tls_x509_reject_anon_client); #endif /* CONFIG_TASN1 */ #endif /* CONFIG_GNUTLS */ if (g_str_equal(arch, "x86_64") && has_kvm && kvm_dirty_ring_supported()) { - qtest_add_func("/migration/dirty_ring", - test_precopy_unix_dirty_ring); - qtest_add_func("/migration/vcpu_dirty_limit", - test_vcpu_dirty_limit); + migration_test_add("/migration/dirty_ring", + test_precopy_unix_dirty_ring); + migration_test_add("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); } ret = g_test_run(); diff --git a/tests/qtest/netdev-socket.c b/tests/qtest/netdev-socket.c index bb99d08b5e78b73c9ca7e3b3b36788bfd8124577..fc7d11961eaa1659ffae58d5058ba123fc13cda9 100644 --- a/tests/qtest/netdev-socket.c +++ b/tests/qtest/netdev-socket.c @@ -16,33 +16,7 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/qapi-visit-sockets.h" -#define CONNECTION_TIMEOUT 120 - -static double connection_timeout(void) -{ - double load; - int ret = getloadavg(&load, 1); - - /* - * If we can't get load data, or load is low because we just started - * running, assume load of 1 (we are alone in this system). - */ - if (ret < 1 || load < 1.0) { - load = 1.0; - } - /* - * No one wants to wait more than 10 minutes for this test. Higher load? - * Too bad. 
- */ - if (load > 10.0) { - fprintf(stderr, "Warning: load %f higher than 10 - test might timeout\n", - load); - load = 10.0; - } - - /* if load is high increase timeout as we might not get a chance to run */ - return load * CONNECTION_TIMEOUT; -} +#define CONNECTION_TIMEOUT 60 #define EXPECT_STATE(q, e, t) \ do { \ @@ -57,7 +31,7 @@ do { \ if (g_str_equal(resp, e)) { \ break; \ } \ - } while (g_test_timer_elapsed() < connection_timeout()); \ + } while (g_test_timer_elapsed() < CONNECTION_TIMEOUT); \ g_assert_cmpstr(resp, ==, e); \ g_free(resp); \ } while (0) @@ -153,7 +127,7 @@ static void test_stream_inet_ipv4(void) "addr.ipv4=on,addr.ipv6=off," "addr.host=127.0.0.1,addr.port=%d", port); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=inet," @@ -226,7 +200,7 @@ static void test_stream_unix_reconnect(void) "-netdev stream,id=st0,server=true,addr.type=unix," "addr.path=%s", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=unix," @@ -276,7 +250,7 @@ static void test_stream_inet_ipv6(void) "addr.ipv4=off,addr.ipv6=on," "addr.host=::1,addr.port=%d", port); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,server=false,id=st0,addr.type=inet," @@ -308,7 +282,7 @@ static void test_stream_unix(void) "addr.type=unix,addr.path=%s,", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,id=st0,server=false," @@ -340,7 +314,7 @@ static void test_stream_unix_abstract(void) "addr.abstract=on", path); - EXPECT_STATE(qts0, "st0: index=0,type=stream,\r\n", 0); + EXPECT_STATE(qts0, "st0: index=0,type=stream,listening\r\n", 0); qts1 = qtest_initf("-nodefaults -M none " "-netdev stream,id=st0,server=false," @@ -552,7 +526,7 @@ int main(int argc, char **argv) #ifndef _WIN32 qtest_add_func("/netdev/dgram/unix", test_dgram_unix); #endif - qtest_add_func("/netdev/stream/unix", test_stream_unix); + qtest_add_func("/netdev/stream/unix/oneshot", test_stream_unix); qtest_add_func("/netdev/stream/unix/reconnect", test_stream_unix_reconnect); #ifdef CONFIG_LINUX diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c index ea4ca1d106e4dc55a1fab27bcd92cb247aa74095..b53a43c4171b192e39140675d272d674b3736f7c 100644 --- a/tests/qtest/npcm7xx_pwm-test.c +++ b/tests/qtest/npcm7xx_pwm-test.c @@ -606,6 +606,7 @@ static void test_toggle(gconstpointer test_data) uint32_t ppr, csr, pcr, cnr, cmr; int i, j, k, l; uint64_t expected_freq, expected_duty; + int cnr_step = g_test_quick() ? 
2 : 1; mft_init(qts, td); @@ -618,7 +619,7 @@ static void test_toggle(gconstpointer test_data) csr = csr_list[j]; pwm_write_csr(qts, td, csr); - for (k = 0; k < ARRAY_SIZE(cnr_list); ++k) { + for (k = 0; k < ARRAY_SIZE(cnr_list); k += cnr_step) { cnr = cnr_list[k]; pwm_write_cnr(qts, td, cnr); @@ -678,6 +679,7 @@ static void pwm_add_test(const char *name, const TestData* td, int main(int argc, char **argv) { TestData test_data_list[ARRAY_SIZE(pwm_module_list) * ARRAY_SIZE(pwm_list)]; + int pwm_module_list_cnt = 1, pwm_list_cnt = 1; char *v_env = getenv("V"); @@ -687,8 +689,13 @@ int main(int argc, char **argv) g_test_init(&argc, &argv, NULL); - for (int i = 0; i < ARRAY_SIZE(pwm_module_list); ++i) { - for (int j = 0; j < ARRAY_SIZE(pwm_list); ++j) { + if (!g_test_quick()) { + pwm_module_list_cnt = ARRAY_SIZE(pwm_module_list); + pwm_list_cnt = ARRAY_SIZE(pwm_list); + } + + for (int i = 0; i < pwm_module_list_cnt; ++i) { + for (int j = 0; j < pwm_list_cnt; ++j) { TestData *td = &test_data_list[i * ARRAY_SIZE(pwm_list) + j]; td->module = &pwm_module_list[i]; diff --git a/tests/qtest/virtio-ccw-test.c b/tests/qtest/virtio-ccw-test.c index f4f5858b842ba4f5ab59d260b4638972d5a018aa..7a5357c212497793ebc12c3dde21dcc76e545ec1 100644 --- a/tests/qtest/virtio-ccw-test.c +++ b/tests/qtest/virtio-ccw-test.c @@ -85,7 +85,7 @@ int main(int argc, char **argv) if (qtest_has_device("virtio-rng-ccw")) { qtest_add_func("/virtio/rng/nop", virtio_rng_nop); } - if (qtest_has_device("virtio-rng-ccw")) { + if (qtest_has_device("virtio-scsi-ccw")) { qtest_add_func("/virtio/scsi/nop", virtio_scsi_nop); qtest_add_func("/virtio/scsi/hotplug", virtio_scsi_hotplug); } diff --git a/tests/qtest/virtio-net-failover.c b/tests/qtest/virtio-net-failover.c index 0d40bc1f2dd62935d0244e426c420b07960608d1..73dfabc2728bc26927c4c8df35e62f85d7907795 100644 --- a/tests/qtest/virtio-net-failover.c +++ b/tests/qtest/virtio-net-failover.c @@ -486,7 +486,7 @@ static void test_hotplug_1_reverse(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -517,7 +517,7 @@ static void test_hotplug_2(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -566,7 +566,7 @@ static void test_hotplug_2_reverse(void) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'rombar': 0," "'romfile': ''," @@ -639,7 +639,7 @@ static void test_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -754,7 +754,7 @@ static void test_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -808,7 +808,7 @@ static void test_off_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -876,7 +876,7 @@ static void test_off_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -927,7 +927,7 
@@ static void test_guest_off_migrate_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1003,7 +1003,7 @@ static void test_guest_off_migrate_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1054,7 +1054,7 @@ static void test_migrate_guest_off_abort(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1154,7 +1154,7 @@ static void test_migrate_abort_wait_unplug(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1243,7 +1243,7 @@ static void test_migrate_abort_active(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1342,7 +1342,7 @@ static void test_migrate_off_abort(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'off'," + "'failover': false," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1434,7 +1434,7 @@ static void test_migrate_abort_timeout(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1530,7 +1530,7 @@ static void test_multi_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1561,7 +1561,7 @@ static void test_multi_out(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby1", "{'bus': 'root2'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs2'," "'mac': '"MAC_STANDBY1"'}"); @@ -1700,7 +1700,7 @@ static void test_multi_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby0", "{'bus': 'root0'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs0'," "'mac': '"MAC_STANDBY0"'}"); @@ -1724,7 +1724,7 @@ static void test_multi_in(gconstpointer opaque) qtest_qmp_device_add(qts, "virtio-net", "standby1", "{'bus': 'root2'," - "'failover': 'on'," + "'failover': true," "'netdev': 'hs2'," "'mac': '"MAC_STANDBY1"'}"); diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index 3dec7c6c42345844ddfa6921daed1b63c6babb9a..9906f9e116be314107aed4e5946321da7b434f58 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -13,7 +13,7 @@ config-cc.mak: Makefile I386_SRCS=$(notdir $(wildcard $(I386_SRC)/*.c)) ALL_X86_TESTS=$(I386_SRCS:.c=) -SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx +SKIP_I386_TESTS=test-i386-ssse3 test-avx test-3dnow test-mmx test-flags X86_64_TESTS:=$(filter test-i386-adcox test-i386-bmi2 $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) test-i386-sse-exceptions: CFLAGS += -msse4.1 -mfpmath=sse diff --git a/tests/tcg/i386/test-flags.c b/tests/tcg/i386/test-flags.c new file mode 100644 index 0000000000000000000000000000000000000000..c379e296275ef91980bedd657c08a3694be4bd1c --- /dev/null +++ b/tests/tcg/i386/test-flags.c @@ -0,0 +1,37 @@ +#define 
_GNU_SOURCE
+#include <sys/mman.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <assert.h>
+
+volatile unsigned long flags;
+volatile unsigned long flags_after;
+int *addr;
+
+void sigsegv(int sig, siginfo_t *info, ucontext_t *uc)
+{
+    flags = uc->uc_mcontext.gregs[REG_EFL];
+    mprotect(addr, 4096, PROT_READ|PROT_WRITE);
+}
+
+int main()
+{
+    struct sigaction sa = { .sa_handler = (void *)sigsegv, .sa_flags = SA_SIGINFO };
+    sigaction(SIGSEGV, &sa, NULL);
+
+    /* fault in the page then protect it */
+    addr = mmap (NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
+    *addr = 0x1234;
+    mprotect(addr, 4096, PROT_READ);
+
+    asm("# set flags to all ones \n"
+        "mov $-1, %%eax \n"
+        "movq addr, %%rdi \n"
+        "sahf \n"
+        "sub %%eax, (%%rdi) \n"
+        "pushf \n"
+        "pop flags_after(%%rip) \n" : : : "eax", "edi", "memory");
+
+    /* OF can have any value before the SUB instruction. */
+    assert((flags & 0xff) == 0xd7 && (flags_after & 0x8ff) == 0x17);
+}
diff --git a/tests/tcg/s390x/Makefile.target b/tests/tcg/s390x/Makefile.target
index 0e670f3f8b987f46c7651e022045d3f4a52c195d..30994dcf9c278c0a25decce70b00ee188b896b20 100644
--- a/tests/tcg/s390x/Makefile.target
+++ b/tests/tcg/s390x/Makefile.target
@@ -44,6 +44,7 @@ TESTS+=clgebr
 TESTS+=clc
 TESTS+=laalg
 TESTS+=add-logical-with-carry
+TESTS+=lae

 cdsg: CFLAGS+=-pthread
 cdsg: LDFLAGS+=-pthread
diff --git a/tests/tcg/s390x/lae.c b/tests/tcg/s390x/lae.c
new file mode 100644
index 0000000000000000000000000000000000000000..59712b5e3715ee965e22f849c5ad82432b342a0b
--- /dev/null
+++ b/tests/tcg/s390x/lae.c
@@ -0,0 +1,31 @@
+/*
+ * Test the LOAD ADDRESS EXTENDED instruction.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <stdlib.h>
+
+int main(void)
+{
+    unsigned long long ar = -1, b2 = 100000, r, x2 = 500;
+    /*
+     * Hardcode the register number, since clang does not allow using %rN in
+     * place of %aN.
+     */
+    register unsigned long long r2 __asm__("2");
+    int tmp;
+
+    asm("ear %[tmp],%%a2\n"
+        "lae %%r2,42(%[x2],%[b2])\n"
+        "ear %[ar],%%a2\n"
+        "sar %%a2,%[tmp]"
+        : [tmp] "=&r" (tmp), "=&r" (r2), [ar] "+r" (ar)
+        : [b2] "r" (b2), [x2] "r" (x2)
+        : "memory");
+    r = r2;
+    assert(ar == 0xffffffff00000000ULL);
+    assert(r == 100542);
+
+    return EXIT_SUCCESS;
+}
diff --git a/tests/tsan/suppressions.tsan b/tests/tsan/suppressions.tsan
index d9a002a2ef16018c305f6981087f77d92c87eaa2..b3ef59c27c04b5ac3a9084ceedac226455dba4a2 100644
--- a/tests/tsan/suppressions.tsan
+++ b/tests/tsan/suppressions.tsan
@@ -4,7 +4,6 @@

 # TSan reports a double lock on RECURSIVE mutexes.
 # Since the recursive lock is intentional, we choose to ignore it.
-mutex:aio_context_acquire
 mutex:pthread_mutex_lock

 # TSan reports a race between pthread_mutex_init() and
diff --git a/tests/unit/meson.build b/tests/unit/meson.build
index a05d471090401cab996c76e2d229699e99096e9b..0659532122308fdb14e1237efff50ae21d5167d6 100644
--- a/tests/unit/meson.build
+++ b/tests/unit/meson.build
@@ -100,7 +100,7 @@ if have_block
   }
   if gnutls.found() and \
      tasn1.found() and \
-     targetos != 'windows'
+     host_os != 'windows'
     tests += {
       'test-crypto-tlscredsx509': ['crypto-tls-x509-helpers.c', 'pkix_asn1_tab.c',
                                    tasn1, crypto, gnutls],
@@ -115,7 +115,7 @@ if have_block
   if xts == 'private'
     tests += {'test-crypto-xts': [crypto, io]}
   endif
-  if targetos != 'windows'
+  if host_os != 'windows'
     tests += {
       'test-image-locking': [testblock],
       'test-nested-aio-poll': [testblock],
@@ -150,7 +150,7 @@ if have_system
   # are not runnable under TSan due to a known issue.
# https://github.com/google/sanitizers/issues/1116 if not get_option('tsan') - if targetos != 'windows' + if host_os != 'windows' tests += { 'test-char': ['socket-helpers.c', qom, io, chardev] } @@ -162,7 +162,7 @@ if have_system endif endif -if have_ga and targetos == 'linux' +if have_ga and host_os == 'linux' tests += {'test-qga': ['../qtest/libqmp.c']} test_deps += {'test-qga': qga} endif @@ -172,6 +172,8 @@ test_env.set('G_TEST_SRCDIR', meson.current_source_dir()) test_env.set('G_TEST_BUILDDIR', meson.current_build_dir()) slow_tests = { + 'test-aio-multithread' : 120, + 'test-crypto-block' : 300, 'test-crypto-tlscredsx509': 45, 'test-crypto-tlssession': 45 } diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 337b6e4ea75053ea0f97b961374ef93cff06ac10..e77d86be875fdb0aff267590f529f8c4fd7ad004 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -100,76 +100,12 @@ static void event_ready_cb(EventNotifier *e) /* Tests using aio_*. */ -typedef struct { - QemuMutex start_lock; - EventNotifier notifier; - bool thread_acquired; -} AcquireTestData; - -static void *test_acquire_thread(void *opaque) -{ - AcquireTestData *data = opaque; - - /* Wait for other thread to let us start */ - qemu_mutex_lock(&data->start_lock); - qemu_mutex_unlock(&data->start_lock); - - /* event_notifier_set might be called either before or after - * the main thread's call to poll(). The test case's outcome - * should be the same in either case. - */ - event_notifier_set(&data->notifier); - aio_context_acquire(ctx); - aio_context_release(ctx); - - data->thread_acquired = true; /* success, we got here */ - - return NULL; -} - static void set_event_notifier(AioContext *nctx, EventNotifier *notifier, EventNotifierHandler *handler) { aio_set_event_notifier(nctx, notifier, handler, NULL, NULL); } -static void dummy_notifier_read(EventNotifier *n) -{ - event_notifier_test_and_clear(n); -} - -static void test_acquire(void) -{ - QemuThread thread; - AcquireTestData data; - - /* Dummy event notifier ensures aio_poll() will block */ - event_notifier_init(&data.notifier, false); - set_event_notifier(ctx, &data.notifier, dummy_notifier_read); - g_assert(!aio_poll(ctx, false)); /* consume aio_notify() */ - - qemu_mutex_init(&data.start_lock); - qemu_mutex_lock(&data.start_lock); - data.thread_acquired = false; - - qemu_thread_create(&thread, "test_acquire_thread", - test_acquire_thread, - &data, QEMU_THREAD_JOINABLE); - - /* Block in aio_poll(), let other thread kick us and acquire context */ - aio_context_acquire(ctx); - qemu_mutex_unlock(&data.start_lock); /* let the thread run */ - g_assert(aio_poll(ctx, true)); - g_assert(!data.thread_acquired); - aio_context_release(ctx); - - qemu_thread_join(&thread); - set_event_notifier(ctx, &data.notifier, NULL); - event_notifier_cleanup(&data.notifier); - - g_assert(data.thread_acquired); -} - static void test_bh_schedule(void) { BHTestData data = { .n = 0 }; @@ -879,7 +815,7 @@ static void test_worker_thread_co_enter(void) qemu_thread_get_self(&this_thread); co = qemu_coroutine_create(co_check_current_thread, &this_thread); - qemu_thread_create(&worker_thread, "test_acquire_thread", + qemu_thread_create(&worker_thread, "test_aio_co_enter", test_aio_co_enter, co, QEMU_THREAD_JOINABLE); @@ -899,7 +835,6 @@ int main(int argc, char **argv) while (g_main_context_iteration(NULL, false)); g_test_init(&argc, &argv, NULL); - g_test_add_func("/aio/acquire", test_acquire); g_test_add_func("/aio/bh/schedule", test_bh_schedule); g_test_add_func("/aio/bh/schedule10", 
test_bh_schedule10); g_test_add_func("/aio/bh/cancel", test_bh_cancel); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index 704d1a3f361525edf42c67f36d64ff59db9af231..17830a69c1a5909ec10fd9aa71839fa5d7c7fa7a 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -179,13 +179,7 @@ static void do_drain_end(enum drain_type drain_type, BlockDriverState *bs) static void do_drain_begin_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_begin(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } static BlockBackend * no_coroutine_fn test_setup(void) @@ -209,13 +203,7 @@ static BlockBackend * no_coroutine_fn test_setup(void) static void do_drain_end_unlocked(enum drain_type drain_type, BlockDriverState *bs) { - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(bdrv_get_aio_context(bs)); - } do_drain_end(drain_type, bs); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(bdrv_get_aio_context(bs)); - } } /* @@ -520,12 +508,8 @@ static void test_iothread_main_thread_bh(void *opaque) { struct test_iothread_data *data = opaque; - /* Test that the AioContext is not yet locked in a random BH that is - * executed during drain, otherwise this would deadlock. */ - aio_context_acquire(bdrv_get_aio_context(data->bs)); bdrv_flush(data->bs); bdrv_dec_in_flight(data->bs); /* incremented by test_iothread_common() */ - aio_context_release(bdrv_get_aio_context(data->bs)); } /* @@ -567,7 +551,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) blk_set_disable_request_queuing(blk, true); blk_set_aio_context(blk, ctx_a, &error_abort); - aio_context_acquire(ctx_a); s->bh_indirection_ctx = ctx_b; @@ -582,8 +565,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert(acb != NULL); g_assert_cmpint(aio_ret, ==, -EINPROGRESS); - aio_context_release(ctx_a); - data = (struct test_iothread_data) { .bs = bs, .drain_type = drain_type, @@ -592,10 +573,6 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) switch (drain_thread) { case 0: - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } - /* * Increment in_flight so that do_drain_begin() waits for * test_iothread_main_thread_bh(). 
This prevents the race between @@ -613,20 +590,10 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) do_drain_begin(drain_type, bs); g_assert_cmpint(bs->in_flight, ==, 0); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } qemu_event_wait(&done_event); - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_acquire(ctx_a); - } g_assert_cmpint(aio_ret, ==, 0); do_drain_end(drain_type, bs); - - if (drain_type != BDRV_DRAIN_ALL) { - aio_context_release(ctx_a); - } break; case 1: co = qemu_coroutine_create(test_iothread_drain_co_entry, &data); @@ -637,9 +604,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) g_assert_not_reached(); } - aio_context_acquire(ctx_a); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx_a); bdrv_unref(bs); blk_unref(blk); @@ -757,7 +722,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, BlockJob *job; TestBlockJob *tjob; IOThread *iothread = NULL; - AioContext *ctx; int ret; src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR, @@ -787,11 +751,11 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } if (use_iothread) { + AioContext *ctx; + iothread = iothread_new(); ctx = iothread_get_aio_context(iothread); blk_set_aio_context(blk_src, ctx, &error_abort); - } else { - ctx = qemu_get_aio_context(); } target = bdrv_new_open_driver(&bdrv_test, "target", BDRV_O_RDWR, @@ -800,16 +764,15 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, blk_insert_bs(blk_target, target, &error_abort); blk_set_allow_aio_context_change(blk_target, true); - aio_context_acquire(ctx); tjob = block_job_create("job0", &test_job_driver, NULL, src, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); tjob->bs = src; job = &tjob->common; - bdrv_graph_wrlock(target); + bdrv_graph_wrlock(); block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort); - bdrv_graph_wrunlock(target); + bdrv_graph_wrunlock(); switch (result) { case TEST_JOB_SUCCESS: @@ -821,7 +784,6 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, tjob->prepare_ret = -EIO; break; } - aio_context_release(ctx); job_start(&job->job); @@ -912,12 +874,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, } g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 
0 : -EIO)); - aio_context_acquire(ctx); if (use_iothread) { blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort); assert(blk_get_aio_context(blk_target) == qemu_get_aio_context()); } - aio_context_release(ctx); blk_unref(blk_src); blk_unref(blk_target); @@ -991,11 +951,11 @@ static void bdrv_test_top_close(BlockDriverState *bs) { BdrvChild *c, *next_c; - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) { bdrv_unref_child(bs, c); } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static int coroutine_fn GRAPH_RDLOCK @@ -1085,10 +1045,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child will be the one to pass to requests through to, and * it will stall until a drain occurs */ @@ -1096,21 +1056,21 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete, &error_abort); child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS; /* Takes our reference to child_bs */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_of_bds, BDRV_CHILD_DATA | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* This child is just there to be deleted * (for detach_instead_of_delete == true) */ null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(bs, null_bs, "null-child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk, bs, &error_abort); @@ -1193,14 +1153,14 @@ static void no_coroutine_fn detach_indirect_bh(void *opaque) bdrv_dec_in_flight(data->child_b->bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_unref_child(data->parent_b, data->child_b); bdrv_ref(data->c); data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); } static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret) @@ -1298,7 +1258,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) /* Set child relationships */ bdrv_ref(b); bdrv_ref(a); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_of_bds, BDRV_CHILD_DATA, &error_abort); child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_of_bds, @@ -1308,7 +1268,7 @@ static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb) bdrv_attach_child(parent_a, a, "PA-A", by_parent_cb ? 
&child_of_bds : &detach_by_driver_cb_class, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); g_assert_cmpint(parent_a->refcnt, ==, 1); g_assert_cmpint(parent_b->refcnt, ==, 1); @@ -1401,9 +1361,7 @@ static void test_append_to_drained(void) g_assert_cmpint(base_s->drain_count, ==, 1); g_assert_cmpint(base->in_flight, ==, 0); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(overlay, base, &error_abort); - aio_context_release(qemu_get_aio_context()); g_assert_cmpint(base->in_flight, ==, 0); g_assert_cmpint(overlay->in_flight, ==, 0); @@ -1438,16 +1396,11 @@ static void test_set_aio_context(void) bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_a, NULL, &error_abort); - - aio_context_acquire(ctx_a); bdrv_drained_end(bs); bdrv_drained_begin(bs); bdrv_try_change_aio_context(bs, ctx_b, NULL, &error_abort); - aio_context_release(ctx_a); - aio_context_acquire(ctx_b); bdrv_try_change_aio_context(bs, qemu_get_aio_context(), NULL, &error_abort); - aio_context_release(ctx_b); bdrv_drained_end(bs); bdrv_unref(bs); @@ -1727,7 +1680,7 @@ static void test_drop_intermediate_poll(void) * Establish the chain last, so the chain links are the first * elements in the BDS.parents lists */ - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); for (i = 0; i < 3; i++) { if (i) { /* Takes the reference to chain[i - 1] */ @@ -1735,7 +1688,7 @@ static void test_drop_intermediate_poll(void) &chain_child_class, BDRV_CHILD_COW, &error_abort); } } - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); job = block_job_create("job", &test_simple_job_driver, NULL, job_node, 0, BLK_PERM_ALL, 0, 0, NULL, NULL, &error_abort); @@ -1982,10 +1935,10 @@ static void do_test_replace_child_mid_drain(int old_drain_count, new_child_bs->total_sectors = 1; bdrv_ref(old_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(parent_bs, old_child_bs, "child", &child_of_bds, BDRV_CHILD_COW, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); parent_s->setup_completed = true; for (i = 0; i < old_drain_count; i++) { @@ -2016,9 +1969,9 @@ static void do_test_replace_child_mid_drain(int old_drain_count, g_assert(parent_bs->quiesce_counter == old_drain_count); bdrv_drained_begin(old_child_bs); bdrv_drained_begin(new_child_bs); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_replace_node(old_child_bs, new_child_bs, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(new_child_bs); bdrv_drained_end(old_child_bs); g_assert(parent_bs->quiesce_counter == new_drain_count); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index 074adcbb93744393e219c8333c1ed15e890fe366..cafc023db42c27fcd4c3066fe94cb44fca6e46fd 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -137,15 +137,13 @@ static void test_update_perm_tree(void) blk_insert_bs(root, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(filter, bs, "child", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); ret = bdrv_append(filter, bs, NULL); g_assert_cmpint(ret, <, 0); - aio_context_release(qemu_get_aio_context()); bdrv_unref(filter); blk_unref(root); @@ -206,14 +204,12 @@ static void test_should_update_child(void) bdrv_set_backing_hd(target, bs, &error_abort); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); g_assert(target->backing->bs == bs); bdrv_attach_child(filter, 
target, "target", &child_of_bds, BDRV_CHILD_DATA, &error_abort); - bdrv_graph_wrunlock(NULL); - aio_context_acquire(qemu_get_aio_context()); + bdrv_graph_wrunlock(); bdrv_append(filter, bs, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_graph_rdlock_main_loop(); g_assert(target->backing->bs == bs); @@ -248,7 +244,7 @@ static void test_parallel_exclusive_write(void) bdrv_ref(base); bdrv_ref(fl1); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, fl1, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); @@ -260,7 +256,7 @@ static void test_parallel_exclusive_write(void) &error_abort); bdrv_replace_node(fl1, fl2, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); bdrv_drained_end(fl2); bdrv_drained_end(fl1); @@ -367,7 +363,7 @@ static void test_parallel_perm_update(void) */ bdrv_ref(base); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, ws, "file", &child_of_bds, BDRV_CHILD_DATA, &error_abort); c_fl1 = bdrv_attach_child(ws, fl1, "first", &child_of_bds, @@ -380,7 +376,7 @@ static void test_parallel_perm_update(void) bdrv_attach_child(fl2, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); /* Select fl1 as first child to be active */ s->selected = c_fl1; @@ -434,15 +430,13 @@ static void test_append_greedy_filter(void) BlockDriverState *base = no_perm_node("base"); BlockDriverState *fl = exclusive_writer_node("fl1"); - bdrv_graph_wrlock(NULL); + bdrv_graph_wrlock(); bdrv_attach_child(top, base, "backing", &child_of_bds, BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, &error_abort); - bdrv_graph_wrunlock(NULL); + bdrv_graph_wrunlock(); - aio_context_acquire(qemu_get_aio_context()); bdrv_append(fl, base, &error_abort); - aio_context_release(qemu_get_aio_context()); bdrv_unref(fl); bdrv_unref(top); } diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 9b15d2768cc632483f7a457b02b013fd56e63cdc..3766d5de6be37b3e8fcebdbe3b6d81dc7d912ea0 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -483,7 +483,6 @@ static void test_sync_op(const void *opaque) bdrv_graph_rdunlock_main_loop(); blk_set_aio_context(blk, ctx, &error_abort); - aio_context_acquire(ctx); if (t->fn) { t->fn(c); } @@ -491,7 +490,6 @@ static void test_sync_op(const void *opaque) t->blkfn(blk); } blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -576,9 +574,7 @@ static void test_attach_blockjob(void) aio_poll(qemu_get_aio_context(), false); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); tjob->n = 0; while (tjob->n == 0) { @@ -595,9 +591,7 @@ static void test_attach_blockjob(void) WITH_JOB_LOCK_GUARD() { job_complete_sync_locked(&tjob->common.job, &error_abort); } - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); @@ -654,9 +648,7 @@ static void test_propagate_basic(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); @@ -732,9 +724,7 @@ static void 
test_propagate_diamond(void) /* Switch the AioContext back */ main_ctx = qemu_get_aio_context(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs_verify) == main_ctx); g_assert(bdrv_get_aio_context(bs_a) == main_ctx); @@ -764,13 +754,11 @@ static void test_propagate_mirror(void) &error_abort); /* Start a mirror job */ - aio_context_acquire(main_ctx); mirror_start("job0", src, target, NULL, JOB_DEFAULT, 0, 0, 0, MIRROR_SYNC_MODE_NONE, MIRROR_OPEN_BACKING_CHAIN, false, BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT, false, "filter_node", MIRROR_COPY_MODE_BACKGROUND, &error_abort); - aio_context_release(main_ctx); WITH_JOB_LOCK_GUARD() { job = job_get_locked("job0"); @@ -785,9 +773,7 @@ static void test_propagate_mirror(void) g_assert(job->aio_context == ctx); /* Change the AioContext of target */ - aio_context_acquire(ctx); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(bdrv_get_aio_context(src) == main_ctx); g_assert(bdrv_get_aio_context(target) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -805,10 +791,8 @@ static void test_propagate_mirror(void) g_assert(bdrv_get_aio_context(filter) == main_ctx); /* ...unless we explicitly allow it */ - aio_context_acquire(ctx); blk_set_allow_aio_context_change(blk, true); bdrv_try_change_aio_context(target, ctx, NULL, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(src) == ctx); @@ -817,10 +801,8 @@ static void test_propagate_mirror(void) job_cancel_sync_all(); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); bdrv_try_change_aio_context(target, main_ctx, NULL, &error_abort); - aio_context_release(ctx); blk_unref(blk); bdrv_unref(src); @@ -836,7 +818,6 @@ static void test_attach_second_node(void) BlockDriverState *bs, *filter; QDict *options; - aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); blk_insert_bs(blk, bs, &error_abort); @@ -846,15 +827,12 @@ static void test_attach_second_node(void) qdict_put_str(options, "file", "base"); filter = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); g_assert(bdrv_get_aio_context(filter) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, main_ctx, &error_abort); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == main_ctx); g_assert(bdrv_get_aio_context(bs) == main_ctx); g_assert(bdrv_get_aio_context(filter) == main_ctx); @@ -868,11 +846,9 @@ static void test_attach_preserve_blk_ctx(void) { IOThread *iothread = iothread_new(); AioContext *ctx = iothread_get_aio_context(iothread); - AioContext *main_ctx = qemu_get_aio_context(); BlockBackend *blk; BlockDriverState *bs; - aio_context_acquire(main_ctx); blk = blk_new(ctx, BLK_PERM_ALL, BLK_PERM_ALL); bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort); bs->total_sectors = 65536 / BDRV_SECTOR_SIZE; @@ -881,25 +857,18 @@ static void test_attach_preserve_blk_ctx(void) blk_insert_bs(blk, bs, &error_abort); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_release(main_ctx); /* Remove the node again */ - aio_context_acquire(ctx); 
blk_remove_bs(blk); - aio_context_release(ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == qemu_get_aio_context()); /* Re-attach the node */ - aio_context_acquire(main_ctx); blk_insert_bs(blk, bs, &error_abort); - aio_context_release(main_ctx); g_assert(blk_get_aio_context(blk) == ctx); g_assert(bdrv_get_aio_context(bs) == ctx); - aio_context_acquire(ctx); blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort); - aio_context_release(ctx); bdrv_unref(bs); blk_unref(blk); } diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index a130f6fefbae0811cbcd85f39a7ffc7349b49075..fe3e0d2d38c9aa730a19c283cd3451fdf6c51245 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -228,7 +228,6 @@ static void cancel_common(CancelJob *s) BlockJob *job = &s->common; BlockBackend *blk = s->blk; JobStatus sts = job->job.status; - AioContext *ctx = job->job.aio_context; job_cancel_sync(&job->job, true); WITH_JOB_LOCK_GUARD() { @@ -240,9 +239,7 @@ static void cancel_common(CancelJob *s) job_unref_locked(&job->job); } - aio_context_acquire(ctx); destroy_blk(blk); - aio_context_release(ctx); } @@ -391,132 +388,6 @@ static void test_cancel_concluded(void) cancel_common(s); } -/* (See test_yielding_driver for the job description) */ -typedef struct YieldingJob { - BlockJob common; - bool should_complete; -} YieldingJob; - -static void yielding_job_complete(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - s->should_complete = true; - job_enter(job); -} - -static int coroutine_fn yielding_job_run(Job *job, Error **errp) -{ - YieldingJob *s = container_of(job, YieldingJob, common.job); - - job_transition_to_ready(job); - - while (!s->should_complete) { - job_yield(job); - } - - return 0; -} - -/* - * This job transitions immediately to the READY state, and then - * yields until it is to complete. - */ -static const BlockJobDriver test_yielding_driver = { - .job_driver = { - .instance_size = sizeof(YieldingJob), - .free = block_job_free, - .user_resume = block_job_user_resume, - .run = yielding_job_run, - .complete = yielding_job_complete, - }, -}; - -/* - * Test that job_complete_locked() works even on jobs that are in a paused - * state (i.e., STANDBY). - * - * To do this, run YieldingJob in an IO thread, get it into the READY - * state, then have a drained section. Before ending the section, - * acquire the context so the job will not be entered and will thus - * remain on STANDBY. - * - * job_complete_locked() should still work without error. - * - * Note that on the QMP interface, it is impossible to lock an IO - * thread before a drained section ends. In practice, the - * bdrv_drain_all_end() and the aio_context_acquire() will be - * reversed. However, that makes for worse reproducibility here: - * Sometimes, the job would no longer be in STANDBY then but already - * be started. We cannot prevent that, because the IO thread runs - * concurrently. We can only prevent it by taking the lock before - * ending the drained section, so we do that. - * - * (You can reverse the order of operations and most of the time the - * test will pass, but sometimes the assert(status == STANDBY) will - * fail.) 
- */ -static void test_complete_in_standby(void) -{ - BlockBackend *blk; - IOThread *iothread; - AioContext *ctx; - Job *job; - BlockJob *bjob; - - /* Create a test drive, move it to an IO thread */ - blk = create_blk(NULL); - iothread = iothread_new(); - - ctx = iothread_get_aio_context(iothread); - blk_set_aio_context(blk, ctx, &error_abort); - - /* Create our test job */ - bjob = mk_job(blk, "job", &test_yielding_driver, true, - JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS); - job = &bjob->job; - assert_job_status_is(job, JOB_STATUS_CREATED); - - /* Wait for the job to become READY */ - job_start(job); - /* - * Here we are waiting for the status to change, so don't bother - * protecting the read every time. - */ - AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY); - - /* Begin the drained section, pausing the job */ - bdrv_drain_all_begin(); - assert_job_status_is(job, JOB_STATUS_STANDBY); - - /* Lock the IO thread to prevent the job from being run */ - aio_context_acquire(ctx); - /* This will schedule the job to resume it */ - bdrv_drain_all_end(); - aio_context_release(ctx); - - WITH_JOB_LOCK_GUARD() { - /* But the job cannot run, so it will remain on standby */ - assert(job->status == JOB_STATUS_STANDBY); - - /* Even though the job is on standby, this should work */ - job_complete_locked(job, &error_abort); - - /* The test is done now, clean up. */ - job_finish_sync_locked(job, NULL, &error_abort); - assert(job->status == JOB_STATUS_PENDING); - - job_finalize_locked(job, &error_abort); - assert(job->status == JOB_STATUS_CONCLUDED); - - job_dismiss_locked(&job, &error_abort); - } - - aio_context_acquire(ctx); - destroy_blk(blk); - aio_context_release(ctx); - iothread_join(iothread); -} - int main(int argc, char **argv) { qemu_init_main_loop(&error_abort); @@ -531,13 +402,5 @@ int main(int argc, char **argv) g_test_add_func("/blockjob/cancel/standby", test_cancel_standby); g_test_add_func("/blockjob/cancel/pending", test_cancel_pending); g_test_add_func("/blockjob/cancel/concluded", test_cancel_concluded); - - /* - * This test is flaky and sometimes fails in CI and otherwise: - * don't run unless user opts in via environment variable. 
- */ - if (getenv("QEMU_TEST_FLAKY_TESTS")) { - g_test_add_func("/blockjob/complete_in_standby", test_complete_in_standby); - } return g_test_run(); } diff --git a/tests/unit/test-io-task.c b/tests/unit/test-io-task.c index 953a50ae66e3557a9f66712fbbe7d804a85af9ca..115dba89702bc660c62956a9579b011fc5a5621a 100644 --- a/tests/unit/test-io-task.c +++ b/tests/unit/test-io-task.c @@ -25,7 +25,7 @@ #include "qapi/error.h" #include "qemu/module.h" -#define TYPE_DUMMY "qemu:dummy" +#define TYPE_DUMMY "qemu-dummy" typedef struct DummyObject DummyObject; typedef struct DummyObjectClass DummyObjectClass; diff --git a/tests/unit/test-qmp-event.c b/tests/unit/test-qmp-event.c index 3626d2372f1c866dbfdb139c6015e55ebe657892..08e95a382bd8c1ebce3d8e15472e112514ac9126 100644 --- a/tests/unit/test-qmp-event.c +++ b/tests/unit/test-qmp-event.c @@ -24,19 +24,15 @@ #include "test-qapi-events.h" #include "test-qapi-emit-events.h" -typedef struct TestEventData { - QDict *expect; - bool emitted; -} TestEventData; - -TestEventData *test_event_data; -static GMutex test_event_lock; +static QDict *expected_event; void test_qapi_event_emit(test_QAPIEvent event, QDict *d) { QDict *t; int64_t s, ms; + g_assert(expected_event); + /* Verify that we have timestamp, then remove it to compare other fields */ t = qdict_get_qdict(d, "timestamp"); g_assert(t); @@ -52,71 +48,38 @@ void test_qapi_event_emit(test_QAPIEvent event, QDict *d) qdict_del(d, "timestamp"); - g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(test_event_data->expect))); - test_event_data->emitted = true; -} - -static void event_prepare(TestEventData *data, - const void *unused) -{ - /* Global variable test_event_data was used to pass the expectation, so - test cases can't be executed at same time. */ - g_mutex_lock(&test_event_lock); - test_event_data = data; -} - -static void event_teardown(TestEventData *data, - const void *unused) -{ - test_event_data = NULL; - g_mutex_unlock(&test_event_lock); + g_assert(qobject_is_equal(QOBJECT(d), QOBJECT(expected_event))); + qobject_unref(expected_event); + expected_event = NULL; } -static void event_test_add(const char *testpath, - void (*test_func)(TestEventData *data, - const void *user_data)) +static void test_event_a(void) { - g_test_add(testpath, TestEventData, NULL, event_prepare, test_func, - event_teardown); -} - - -/* Test cases */ - -static void test_event_a(TestEventData *data, - const void *unused) -{ - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_A' }"); qapi_event_send_event_a(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_b(TestEventData *data, - const void *unused) +static void test_event_b(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'EVENT_B' }"); qapi_event_send_event_b(); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_c(TestEventData *data, - const void *unused) +static void test_event_c(void) { UserDefOne b = { .integer = 2, .string = (char *)"test1" }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_C', 'data': {" " 'a': 1, 'b': { 'integer': 2, 'string': 'test1' }, 'c': 'test2' } }"); qapi_event_send_event_c(true, 1, &b, "test2"); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } /* Complex type */ -static 
void test_event_d(TestEventData *data, - const void *unused) +static void test_event_d(void) { UserDefOne struct1 = { .integer = 2, .string = (char *)"test1", @@ -129,65 +92,56 @@ static void test_event_d(TestEventData *data, .enum2 = ENUM_ONE_VALUE2, }; - data->expect = qdict_from_jsonf_nofail( + expected_event = qdict_from_jsonf_nofail( "{ 'event': 'EVENT_D', 'data': {" " 'a': {" " 'struct1': { 'integer': 2, 'string': 'test1', 'enum1': 'value1' }," " 'string': 'test2', 'enum2': 'value2' }," " 'b': 'test3', 'enum3': 'value3' } }"); qapi_event_send_event_d(&a, "test3", NULL, true, ENUM_ONE_VALUE3); - g_assert(data->emitted); - qobject_unref(data->expect); + g_assert(!expected_event); } -static void test_event_deprecated(TestEventData *data, const void *unused) +static void test_event_deprecated(void) { - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES1' }"); memset(&compat_policy, 0, sizeof(compat_policy)); qapi_event_send_test_event_features1(); - g_assert(data->emitted); + g_assert(!expected_event); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->emitted = false; qapi_event_send_test_event_features1(); - g_assert(!data->emitted); - - qobject_unref(data->expect); } -static void test_event_deprecated_data(TestEventData *data, const void *unused) +static void test_event_deprecated_data(void) { memset(&compat_policy, 0, sizeof(compat_policy)); - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0'," " 'data': { 'foo': 42 } }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); + g_assert(!expected_event); - qobject_unref(data->expect); compat_policy.has_deprecated_output = true; compat_policy.deprecated_output = COMPAT_POLICY_OUTPUT_HIDE; - data->expect = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); + expected_event = qdict_from_jsonf_nofail("{ 'event': 'TEST_EVENT_FEATURES0' }"); qapi_event_send_test_event_features0(42); - g_assert(data->emitted); - - qobject_unref(data->expect); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - event_test_add("/event/event_a", test_event_a); - event_test_add("/event/event_b", test_event_b); - event_test_add("/event/event_c", test_event_c); - event_test_add("/event/event_d", test_event_d); - event_test_add("/event/deprecated", test_event_deprecated); - event_test_add("/event/deprecated_data", test_event_deprecated_data); + g_test_add_func("/event/event_a", test_event_a); + g_test_add_func("/event/event_b", test_event_b); + g_test_add_func("/event/event_c", test_event_c); + g_test_add_func("/event/event_d", test_event_d); + g_test_add_func("/event/deprecated", test_event_deprecated); + g_test_add_func("/event/deprecated_data", test_event_deprecated_data); g_test_run(); return 0; diff --git a/tests/unit/test-replication.c b/tests/unit/test-replication.c index afff908d77a1f4a80c7822cad836d020e25d374b..5d2003b8ced00f18259af262bccc3016685f519d 100644 --- a/tests/unit/test-replication.c +++ b/tests/unit/test-replication.c @@ -199,17 +199,13 @@ static BlockBackend *start_primary(void) static void teardown_primary(void) { BlockBackend *blk; - AioContext *ctx; /* remove P_ID */ blk = blk_by_name(P_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - 
aio_context_release(ctx); } static void test_primary_read(void) @@ -345,27 +341,20 @@ static void teardown_secondary(void) { /* only need to destroy two BBs */ BlockBackend *blk; - AioContext *ctx; /* remove S_LOCAL_DISK_ID */ blk = blk_by_name(S_LOCAL_DISK_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); /* remove S_ID */ blk = blk_by_name(S_ID); assert(blk); - ctx = blk_get_aio_context(blk); - aio_context_acquire(ctx); monitor_remove_blk(blk); blk_unref(blk); - aio_context_release(ctx); } static void test_secondary_read(void) diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 0b7d5ecd6838da416318210b39f83fbf9657277d..c4f9faa2733f64b7fb6d52135c0542fd6e46c2fa 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -197,7 +197,7 @@ static const VMStateDescription vmstate_simple_primitive = { .name = "simple/primitive", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b_1, TestSimple), VMSTATE_BOOL(b_2, TestSimple), VMSTATE_UINT8(u8_1, TestSimple), @@ -299,7 +299,7 @@ static const VMStateDescription vmstate_simple_arr = { .name = "simple/array", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT16_ARRAY(u16_1, TestSimpleArray, 3), VMSTATE_END_OF_LIST() } @@ -341,7 +341,7 @@ static const VMStateDescription vmstate_versioned = { .name = "test/versioned", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32_V(b, TestStruct, 2), /* Versioned field in the middle, so * we catch bugs more easily. 
@@ -412,7 +412,7 @@ static const VMStateDescription vmstate_skipping = { .name = "test/skip", .version_id = 2, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT32(b, TestStruct), VMSTATE_UINT32_TEST(c, TestStruct, test_skip), @@ -524,7 +524,7 @@ const VMStateDescription vmsd_tst = { .name = "test/tst", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(i, TestStructTriv), VMSTATE_END_OF_LIST() } @@ -542,7 +542,7 @@ const VMStateDescription vmsd_arps = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(ar, TestArrayOfPtrToStuct, AR_SIZE, 0, vmsd_tst, TestStructTriv), VMSTATE_END_OF_LIST() @@ -630,7 +630,7 @@ const VMStateDescription vmsd_arpp = { .name = "test/arps", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_ARRAY_OF_POINTER(ar, TestArrayOfPtrToInt, AR_SIZE, 0, vmstate_info_int32, int32_t*), VMSTATE_END_OF_LIST() @@ -685,7 +685,7 @@ static const VMStateDescription vmstate_q_element = { .name = "test/queue-element", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_BOOL(b, TestQtailqElement), VMSTATE_UINT8(u8, TestQtailqElement), VMSTATE_END_OF_LIST() @@ -696,7 +696,7 @@ static const VMStateDescription vmstate_q = { .name = "test/queue", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT16(i16, TestQtailq), VMSTATE_QTAILQ_V(q, TestQtailq, 1, vmstate_q_element, TestQtailqElement, next), @@ -821,7 +821,7 @@ typedef struct TestGTreeInterval { .name = "interval", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(low, TestGTreeInterval), \ VMSTATE_UINT64(high, TestGTreeInterval), \ VMSTATE_END_OF_LIST() \ @@ -839,7 +839,7 @@ typedef struct TestGTreeMapping { .name = "mapping", \ .version_id = 1, \ .minimum_version_id = 1, \ - .fields = (VMStateField[]) { \ + .fields = (const VMStateField[]) { \ VMSTATE_UINT64(phys_addr, TestGTreeMapping), \ VMSTATE_UINT32(flags, TestGTreeMapping), \ VMSTATE_END_OF_LIST() \ @@ -915,7 +915,7 @@ static const VMStateDescription vmstate_domain = { .version_id = 1, .minimum_version_id = 1, .pre_load = domain_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeDomain), VMSTATE_GTREE_V(mappings, TestGTreeDomain, 1, vmstate_interval_mapping, @@ -940,7 +940,7 @@ static const VMStateDescription vmstate_qlist_element = { .name = "test/queue list", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(id, TestQListElement), VMSTATE_END_OF_LIST() } @@ -951,7 +951,7 @@ static const VMStateDescription vmstate_iommu = { .version_id = 1, .minimum_version_id = 1, .pre_load = iommu_preload, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT32(id, TestGTreeIOMMU), VMSTATE_GTREE_DIRECT_KEY_V(domains, TestGTreeIOMMU, 1, &vmstate_domain, TestGTreeDomain), @@ -963,7 +963,7 @@ static const VMStateDescription vmstate_container = { .name = "test/container/qlist", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { 
VMSTATE_UINT32(id, TestQListContainer), VMSTATE_QLIST_V(list, TestQListContainer, 1, vmstate_qlist_element, TestQListElement, next), @@ -1414,7 +1414,7 @@ static int tmp_child_post_load(void *opaque, int version_id) static const VMStateDescription vmstate_tmp_back_to_parent = { .name = "test/tmp_child_parent", - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT64(f, TestStruct), VMSTATE_END_OF_LIST() } @@ -1424,7 +1424,7 @@ static const VMStateDescription vmstate_tmp_child = { .name = "test/tmp_child", .pre_save = tmp_child_pre_save, .post_load = tmp_child_post_load, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_INT64(diff, TmpTestStruct), VMSTATE_STRUCT_POINTER(parent, TmpTestStruct, vmstate_tmp_back_to_parent, TestStruct), @@ -1435,7 +1435,7 @@ static const VMStateDescription vmstate_tmp_child = { static const VMStateDescription vmstate_with_tmp = { .name = "test/with_tmp", .version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_UINT32(a, TestStruct), VMSTATE_UINT64(d, TestStruct), VMSTATE_WITH_TMP(TestStruct, TmpTestStruct, vmstate_tmp_child), diff --git a/trace/meson.build b/trace/meson.build index b0d31a67e6864505a9593dcecd601f58e986c5b7..c3412dc0ba5a29490441e150d2cfe58f71551314 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -64,7 +64,7 @@ trace_events_all = custom_target('trace-events-all', input: trace_events_files, command: [ 'cat', '@INPUT@' ], capture: true, - install: true, + install: get_option('trace_backends') != [ 'nop' ], install_dir: qemu_datadir) if 'ust' in get_option('trace_backends') diff --git a/ui/cocoa.m b/ui/cocoa.m index cd069da6965b7aec43b0f3e542b7bcdc041c426f..eb99064beeb4f95031355659b4ed4e491c89b7e4 100644 --- a/ui/cocoa.m +++ b/ui/cocoa.m @@ -113,33 +113,33 @@ static void cocoa_switch(DisplayChangeListener *dcl, static QemuClipboardInfo *cbinfo; static QemuEvent cbevent; -// Utility functions to run specified code block with iothread lock held +// Utility functions to run specified code block with the BQL held typedef void (^CodeBlock)(void); typedef bool (^BoolCodeBlock)(void); -static void with_iothread_lock(CodeBlock block) +static void with_bql(CodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } } -static bool bool_with_iothread_lock(BoolCodeBlock block) +static bool bool_with_bql(BoolCodeBlock block) { - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); bool val; if (!locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } val = block(); if (!locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } return val; } @@ -548,7 +548,7 @@ - (void) setContentDimensions - (void) updateUIInfoLocked { - /* Must be called with the iothread lock, i.e. via updateUIInfo */ + /* Must be called with the BQL, i.e. 
via updateUIInfo */ NSSize frameSize; QemuUIInfo info; @@ -605,7 +605,7 @@ - (void) updateUIInfo return; } - with_iothread_lock(^{ + with_bql(^{ [self updateUIInfoLocked]; }); } @@ -790,7 +790,7 @@ - (void) handleMonitorInput:(NSEvent *)event - (bool) handleEvent:(NSEvent *)event { - return bool_with_iothread_lock(^{ + return bool_with_bql(^{ return [self handleEventLocked:event]; }); } @@ -1182,7 +1182,7 @@ - (QEMUScreen) gscreen {return screen;} */ - (void) raiseAllKeys { - with_iothread_lock(^{ + with_bql(^{ qkbd_state_lift_all_keys(kbd); }); } @@ -1282,7 +1282,7 @@ - (void)applicationWillTerminate:(NSNotification *)aNotification { COCOA_DEBUG("QemuCocoaAppController: applicationWillTerminate\n"); - with_iothread_lock(^{ + with_bql(^{ shutdown_action = SHUTDOWN_ACTION_POWEROFF; qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_UI); }); @@ -1420,7 +1420,7 @@ - (void)displayConsole:(id)sender /* Pause the guest */ - (void)pauseQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_stop(NULL); }); [sender setEnabled: NO]; @@ -1431,7 +1431,7 @@ - (void)pauseQEMU:(id)sender /* Resume running the guest operating system */ - (void)resumeQEMU:(id) sender { - with_iothread_lock(^{ + with_bql(^{ qmp_cont(NULL); }); [sender setEnabled: NO]; @@ -1461,7 +1461,7 @@ - (void)removePause /* Restarts QEMU */ - (void)restartQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_reset(NULL); }); } @@ -1469,7 +1469,7 @@ - (void)restartQEMU:(id)sender /* Powers down QEMU */ - (void)powerDownQEMU:(id)sender { - with_iothread_lock(^{ + with_bql(^{ qmp_system_powerdown(NULL); }); } @@ -1488,7 +1488,7 @@ - (void)ejectDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_eject([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, false, false, &err); }); @@ -1523,7 +1523,7 @@ - (void)changeDeviceMedia:(id)sender } __block Error *err = NULL; - with_iothread_lock(^{ + with_bql(^{ qmp_blockdev_change_medium([drive cStringUsingEncoding: NSASCIIStringEncoding], NULL, @@ -1605,7 +1605,7 @@ - (void)adjustSpeed:(id)sender // get the throttle percentage throttle_pct = [sender tag]; - with_iothread_lock(^{ + with_bql(^{ cpu_throttle_set(throttle_pct); }); COCOA_DEBUG("cpu throttling at %d%c\n", cpu_throttle_get_percentage(), '%'); @@ -1819,7 +1819,7 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t return; } - with_iothread_lock(^{ + with_bql(^{ QemuClipboardInfo *info = qemu_clipboard_info_ref(cbinfo); qemu_event_reset(&cbevent); qemu_clipboard_request(info, QEMU_CLIPBOARD_TYPE_TEXT); @@ -1827,9 +1827,9 @@ - (void)pasteboard:(NSPasteboard *)sender provideDataForType:(NSPasteboardType)t while (info == cbinfo && info->types[QEMU_CLIPBOARD_TYPE_TEXT].available && info->types[QEMU_CLIPBOARD_TYPE_TEXT].data == NULL) { - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_event_wait(&cbevent); - qemu_mutex_lock_iothread(); + bql_lock(); } if (info == cbinfo) { @@ -1927,9 +1927,9 @@ static void cocoa_clipboard_request(QemuClipboardInfo *info, int status; COCOA_DEBUG("Second thread: calling qemu_default_main()\n"); - qemu_mutex_lock_iothread(); + bql_lock(); status = qemu_default_main(); - qemu_mutex_unlock_iothread(); + bql_unlock(); COCOA_DEBUG("Second thread: qemu_default_main() returned, exiting\n"); [cbowner release]; exit(status); @@ -1941,7 +1941,7 @@ static int cocoa_main(void) COCOA_DEBUG("Entered %s()\n", __func__); - qemu_mutex_unlock_iothread(); + bql_unlock(); qemu_thread_create(&thread, "qemu_main", call_qemu_main, NULL, 
QEMU_THREAD_DETACHED); @@ -2075,7 +2075,7 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts) * Create the menu entries which depend on QEMU state (for consoles * and removable devices). These make calls back into QEMU functions, * which is OK because at this point we know that the second thread - * holds the iothread lock and is synchronously waiting for us to + * holds the BQL and is synchronously waiting for us to * finish. */ add_console_menu_entries(); diff --git a/ui/dbus-display1.xml b/ui/dbus-display1.xml index f0e2fac21273cd9b04a898befd0e641e02509f44..ce35d64eea18ea2783b70216246d5c457477a18f 100644 --- a/ui/dbus-display1.xml +++ b/ui/dbus-display1.xml @@ -71,7 +71,7 @@ :dbus:iface:`org.qemu.Display1.Listener` interface. --> - + @@ -370,7 +370,7 @@ - + - + @@ -715,7 +715,7 @@ :dbus:iface:`org.qemu.Display1.AudioInListener` interface. --> - + @@ -976,7 +976,7 @@ The current handler, if any, will be replaced. --> - + diff --git a/ui/meson.build b/ui/meson.build index 0ccb3387ee6a0abad58e52210a86c0205a71b9c5..376e0d771ba9ac994efd7f9712d542b5250c7301 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -25,10 +25,9 @@ endif system_ss.add([spice_headers, files('spice-module.c')]) system_ss.add(when: spice_protocol, if_true: files('vdagent.c')) -system_ss.add(when: 'CONFIG_LINUX', if_true: files( - 'input-linux.c', - 'udmabuf.c', -)) +if host_os == 'linux' + system_ss.add(files('input-linux.c', 'udmabuf.c')) +endif system_ss.add(when: cocoa, if_true: files('cocoa.m')) vnc_ss = ss.source_set() @@ -76,7 +75,7 @@ endif if dbus_display dbus_ss = ss.source_set() env = environment() - env.set('TARGETOS', targetos) + env.set('HOST_OS', host_os) xml = custom_target('dbus-display preprocess', input: 'dbus-display1.xml', output: 'dbus-display1.xml', @@ -106,7 +105,9 @@ if dbus_display endif if gtk.found() - system_ss.add(when: 'CONFIG_WIN32', if_true: files('win32-kbd-hook.c')) + if host_os == 'windows' + system_ss.add(files('win32-kbd-hook.c')) + endif gtk_ss = ss.source_set() gtk_ss.add(gtk, vte, pixman, files('gtk.c')) @@ -120,7 +121,9 @@ if gtk.found() endif if sdl.found() - system_ss.add(when: 'CONFIG_WIN32', if_true: files('win32-kbd-hook.c')) + if host_os == 'windows' + system_ss.add(files('win32-kbd-hook.c')) + endif sdl_ss = ss.source_set() sdl_ss.add(sdl, sdl_image, pixman, glib, files( diff --git a/ui/spice-core.c b/ui/spice-core.c index db21db2c9428225defe66f8904e4fb7dd90bac33..37b277fd09fee5e801066f8f7f00954674f2612a 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -217,12 +217,12 @@ static void channel_event(int event, SpiceChannelEventInfo *info) * not do that. It isn't that easy to fix it in spice and even * when it is fixed we still should cover the already released * spice versions. So detect that we've been called from another - * thread and grab the iothread lock if so before calling qemu + * thread and grab the BQL if so before calling qemu * functions. 
*/ bool need_lock = !qemu_thread_is_self(&me); if (need_lock) { - qemu_mutex_lock_iothread(); + bql_lock(); } if (info->flags & SPICE_CHANNEL_EVENT_FLAG_ADDR_EXT) { @@ -260,7 +260,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) } if (need_lock) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } qapi_free_SpiceServerInfo(server); diff --git a/util/aio-posix.c b/util/aio-posix.c index 7f2c99729d4475640b7c6b21977123a8e6f97bae..266c9dd35fa04c91fc65345a45bd56e6fd9a78d7 100644 --- a/util/aio-posix.c +++ b/util/aio-posix.c @@ -777,8 +777,7 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, aio_notify(ctx); } -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp) +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch) { /* * No thread synchronization here, it doesn't matter if an incorrect value diff --git a/util/aio-win32.c b/util/aio-win32.c index 948ef47a4d3e01a940c7701ba2f51dcf6bfc83c4..d144f9391fb2df06eb29c5c9588671fdd13594bb 100644 --- a/util/aio-win32.c +++ b/util/aio-win32.c @@ -438,7 +438,6 @@ void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, } } -void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch, - Error **errp) +void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch) { } diff --git a/util/async.c b/util/async.c index 8f90ddc3047a9f0567da47ab6f89b6190e1d0f9d..36a8e76ab0d2de8dc636f1432b1f7785c91867a3 100644 --- a/util/async.c +++ b/util/async.c @@ -562,12 +562,10 @@ static void co_schedule_bh_cb(void *opaque) Coroutine *co = QSLIST_FIRST(&straight); QSLIST_REMOVE_HEAD(&straight, co_scheduled_next); trace_aio_co_schedule_bh_cb(ctx, co); - aio_context_acquire(ctx); /* Protected by write barrier in qemu_aio_coroutine_enter */ qatomic_set(&co->scheduled, NULL); qemu_aio_coroutine_enter(ctx, co); - aio_context_release(ctx); } } @@ -707,9 +705,7 @@ void aio_co_enter(AioContext *ctx, Coroutine *co) assert(self != co); QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next); } else { - aio_context_acquire(ctx); qemu_aio_coroutine_enter(ctx, co); - aio_context_release(ctx); } } @@ -723,16 +719,6 @@ void aio_context_unref(AioContext *ctx) g_source_unref(&ctx->source); } -void aio_context_acquire(AioContext *ctx) -{ - qemu_rec_mutex_lock(&ctx->lock); -} - -void aio_context_release(AioContext *ctx) -{ - qemu_rec_mutex_unlock(&ctx->lock); -} - QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext) AioContext *qemu_get_current_aio_context(void) @@ -741,7 +727,7 @@ AioContext *qemu_get_current_aio_context(void) if (ctx) { return ctx; } - if (qemu_mutex_iothread_locked()) { + if (bql_locked()) { /* Possibly in a vCPU thread. */ return qemu_get_aio_context(); } diff --git a/util/chardev_open.c b/util/chardev_open.c new file mode 100644 index 0000000000000000000000000000000000000000..f7764297882f59411c81b304a1ab5d16e903a389 --- /dev/null +++ b/util/chardev_open.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2019, Mellanox Technologies. All rights reserved. + * Copyright (C) 2023 Intel Corporation. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: Yi Liu + * + * Copied from + * https://github.com/linux-rdma/rdma-core/blob/master/util/open_cdev.c + * + */ + +#include "qemu/osdep.h" +#include "qemu/chardev_open.h" + +static int open_cdev_internal(const char *path, dev_t cdev) +{ + struct stat st; + int fd; + + fd = qemu_open_old(path, O_RDWR); + if (fd == -1) { + return -1; + } + if (fstat(fd, &st) || !S_ISCHR(st.st_mode) || + (cdev != 0 && st.st_rdev != cdev)) { + close(fd); + return -1; + } + return fd; +} + +static int open_cdev_robust(dev_t cdev) +{ + g_autofree char *devpath = NULL; + + /* + * This assumes that udev is being used and is creating the /dev/char/ + * symlinks. + */ + devpath = g_strdup_printf("/dev/char/%u:%u", major(cdev), minor(cdev)); + return open_cdev_internal(devpath, cdev); +} + +int open_cdev(const char *devpath, dev_t cdev) +{ + int fd; + + fd = open_cdev_internal(devpath, cdev); + if (fd == -1 && cdev != 0) { + return open_cdev_robust(cdev); + } + return fd; +} diff --git a/util/cpuinfo-ppc.c b/util/cpuinfo-ppc.c index 1ea3db0ac8209b30a7a746a521dcebbfabac8b49..b2d8893a06768cf1cfebf8950368cee618630086 100644 --- a/util/cpuinfo-ppc.c +++ b/util/cpuinfo-ppc.c @@ -6,10 +6,10 @@ #include "qemu/osdep.h" #include "host/cpuinfo.h" +#include #ifdef CONFIG_GETAUXVAL # include #else -# include # include "elf.h" #endif @@ -40,7 +40,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) info |= CPUINFO_V2_06; } - if (hwcap2 & PPC_FEATURE2_HAS_ISEL) { + if (hwcap2 & PPC_FEATURE2_ISEL) { info |= CPUINFO_ISEL; } if (hwcap & PPC_FEATURE_HAS_ALTIVEC) { @@ -53,7 +53,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) * always have both anyway, since VSX came with Power7 * and crypto came with Power8. 
*/ - if (hwcap2 & PPC_FEATURE2_HAS_VEC_CRYPTO) { + if (hwcap2 & PPC_FEATURE2_VEC_CRYPTO) { info |= CPUINFO_CRYPTO; } } diff --git a/util/fifo8.c b/util/fifo8.c index d4d1c135e03b87b76b8979ce6b4275f926599e8e..4e01b532d9dfccb58c13f9d0b4c3eac6c6cf43bf 100644 --- a/util/fifo8.c +++ b/util/fifo8.c @@ -66,19 +66,37 @@ uint8_t fifo8_pop(Fifo8 *fifo) return ret; } -const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *num) +static const uint8_t *fifo8_peekpop_buf(Fifo8 *fifo, uint32_t max, + uint32_t *numptr, bool do_pop) { uint8_t *ret; + uint32_t num; assert(max > 0 && max <= fifo->num); - *num = MIN(fifo->capacity - fifo->head, max); + num = MIN(fifo->capacity - fifo->head, max); ret = &fifo->data[fifo->head]; - fifo->head += *num; - fifo->head %= fifo->capacity; - fifo->num -= *num; + + if (do_pop) { + fifo->head += num; + fifo->head %= fifo->capacity; + fifo->num -= num; + } + if (numptr) { + *numptr = num; + } return ret; } +const uint8_t *fifo8_peek_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr) +{ + return fifo8_peekpop_buf(fifo, max, numptr, false); +} + +const uint8_t *fifo8_pop_buf(Fifo8 *fifo, uint32_t max, uint32_t *numptr) +{ + return fifo8_peekpop_buf(fifo, max, numptr, true); +} + void fifo8_reset(Fifo8 *fifo) { fifo->num = 0; @@ -109,7 +127,7 @@ const VMStateDescription vmstate_fifo8 = { .name = "Fifo8", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { VMSTATE_VBUFFER_UINT32(data, Fifo8, 1, NULL, capacity), VMSTATE_UINT32(head, Fifo8), VMSTATE_UINT32(num, Fifo8), diff --git a/util/main-loop.c b/util/main-loop.c index 797b640c4152d6ff22cb1b6a37a38d8a1c639bdf..a0386cfeb60c7325cdb5e5386f8156687ad4cfa1 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -192,10 +192,7 @@ static void main_loop_update_params(EventLoopBase *base, Error **errp) return; } - aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch, errp); - if (*errp) { - return; - } + aio_context_set_aio_params(qemu_aio_context, base->aio_max_batch); aio_context_set_thread_pool_params(qemu_aio_context, base->thread_pool_min, base->thread_pool_max, errp); @@ -302,13 +299,13 @@ static int os_host_main_loop_wait(int64_t timeout) glib_pollfds_fill(&timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout); replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); glib_pollfds_poll(); @@ -517,7 +514,7 @@ static int os_host_main_loop_wait(int64_t timeout) poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout); - qemu_mutex_unlock_iothread(); + bql_unlock(); replay_mutex_unlock(); @@ -525,7 +522,7 @@ static int os_host_main_loop_wait(int64_t timeout) replay_mutex_lock(); - qemu_mutex_lock_iothread(); + bql_lock(); if (g_poll_ret > 0) { for (i = 0; i < w->num; i++) { w->revents[i] = poll_fds[n_poll_fds + i].revents; diff --git a/util/meson.build b/util/meson.build index c2322ef6e71a1de643d44dd3ad4bad497bc975ec..af3bf5692d8450897425a3dbaec1c00d7f001086 100644 --- a/util/meson.build +++ b/util/meson.build @@ -3,28 +3,31 @@ util_ss.add(files('thread-context.c'), numa) if not config_host_data.get('CONFIG_ATOMIC64') util_ss.add(files('atomic64.c')) endif -util_ss.add(when: 'CONFIG_POSIX', if_true: files('aio-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('fdmon-poll.c')) -if config_host_data.get('CONFIG_EPOLL_CREATE1') - util_ss.add(files('fdmon-epoll.c')) +if host_os != 'windows' + util_ss.add(files('aio-posix.c')) + 
util_ss.add(files('fdmon-poll.c')) + if config_host_data.get('CONFIG_EPOLL_CREATE1') + util_ss.add(files('fdmon-epoll.c')) + endif + util_ss.add(files('compatfd.c')) + util_ss.add(files('event_notifier-posix.c')) + util_ss.add(files('mmap-alloc.c')) + freebsd_dep = [] + if host_os == 'freebsd' + freebsd_dep = util + endif + util_ss.add(files('oslib-posix.c'), freebsd_dep) + util_ss.add(files('qemu-thread-posix.c')) + util_ss.add(files('memfd.c')) + util_ss.add(files('drm.c')) +else + util_ss.add(files('aio-win32.c')) + util_ss.add(files('event_notifier-win32.c')) + util_ss.add(files('oslib-win32.c')) + util_ss.add(files('qemu-thread-win32.c')) + util_ss.add(winmm, pathcch) endif util_ss.add(when: linux_io_uring, if_true: files('fdmon-io_uring.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('compatfd.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('event_notifier-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('mmap-alloc.c')) -freebsd_dep = [] -if targetos == 'freebsd' - freebsd_dep = util -endif -util_ss.add(when: 'CONFIG_POSIX', if_true: [files('oslib-posix.c'), freebsd_dep]) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('qemu-thread-posix.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('memfd.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('aio-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('event_notifier-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('oslib-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: files('qemu-thread-win32.c')) -util_ss.add(when: 'CONFIG_WIN32', if_true: winmm) -util_ss.add(when: 'CONFIG_WIN32', if_true: pathcch) if glib_has_gslice util_ss.add(files('qtree.c')) endif @@ -56,7 +59,6 @@ util_ss.add(files('reserved-region.c')) util_ss.add(files('stats64.c')) util_ss.add(files('systemd.c')) util_ss.add(files('transactions.c')) -util_ss.add(when: 'CONFIG_POSIX', if_true: files('drm.c')) util_ss.add(files('guest-random.c')) util_ss.add(files('yank.c')) util_ss.add(files('int128.c')) @@ -71,7 +73,9 @@ endif if have_system util_ss.add(files('crc-ccitt.c')) util_ss.add(when: gio, if_true: files('dbus.c')) - util_ss.add(when: 'CONFIG_LINUX', if_true: files('userfaultfd.c')) + if host_os == 'linux' + util_ss.add(files('userfaultfd.c')) + endif endif if have_block or have_ga @@ -92,9 +96,6 @@ if have_block util_ss.add(files('iova-tree.c')) util_ss.add(files('iov.c', 'uri.c')) util_ss.add(files('nvdimm-utils.c')) - util_ss.add(when: 'CONFIG_LINUX', if_true: [ - files('vhost-user-server.c'), vhost_user - ]) util_ss.add(files('block-helpers.c')) util_ss.add(files('qemu-coroutine-sleep.c')) util_ss.add(files('qemu-co-shared-resource.c')) @@ -107,7 +108,11 @@ if have_block else util_ss.add(files('filemonitor-stub.c')) endif - util_ss.add(when: 'CONFIG_LINUX', if_true: files('vfio-helpers.c')) + if host_os == 'linux' + util_ss.add(files('vhost-user-server.c'), vhost_user) + util_ss.add(files('vfio-helpers.c')) + util_ss.add(files('chardev_open.c')) + endif endif if cpu == 'aarch64' diff --git a/util/oslib-posix.c b/util/oslib-posix.c index e86fd64e099877cec2818d948da659d261fde040..7c297003b9ff27dce09524aaf55ffc451db8469b 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -497,7 +497,7 @@ static bool madv_populate_write_possible(char *area, size_t pagesize) errno != EINVAL; } -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, ThreadContext *tc, Error **errp) { static gsize initialized; @@ -506,6 +506,7 
@@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, size_t numpages = DIV_ROUND_UP(sz, hpagesize); bool use_madv_populate_write; struct sigaction act; + bool rv = true; /* * Sense on every invocation, as MADV_POPULATE_WRITE cannot be used for @@ -534,7 +535,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, qemu_mutex_unlock(&sigbus_mutex); error_setg_errno(errp, errno, "qemu_prealloc_mem: failed to install signal handler"); - return; + return false; } } @@ -544,6 +545,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, if (ret) { error_setg_errno(errp, -ret, "qemu_prealloc_mem: preallocating memory failed"); + rv = false; } if (!use_madv_populate_write) { @@ -555,6 +557,7 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, } qemu_mutex_unlock(&sigbus_mutex); } + return rv; } char *qemu_get_pid_name(pid_t pid) diff --git a/util/oslib-win32.c b/util/oslib-win32.c index 55b0189dc30424d3223f11d0775f40b939420707..c4a5f05a49ad9c6fc3f5f39314ef20eb4a75b970 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -264,7 +264,7 @@ int getpagesize(void) return system_info.dwPageSize; } -void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, +bool qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, ThreadContext *tc, Error **errp) { int i; @@ -274,6 +274,8 @@ void qemu_prealloc_mem(int fd, char *area, size_t sz, int max_threads, for (i = 0; i < sz / pagesize; i++) { memset(area + pagesize * i, 0, 1); } + + return true; } char *qemu_get_pid_name(pid_t pid) diff --git a/util/qsp.c b/util/qsp.c index 2fe3764906c53c53ec08572fc165c6f441241e19..6b783e2e7f8e495f041383716ebb161f5d9af0ed 100644 --- a/util/qsp.c +++ b/util/qsp.c @@ -124,7 +124,7 @@ static const char * const qsp_typenames[] = { [QSP_CONDVAR] = "condvar", }; -QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl; +QemuMutexLockFunc bql_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl; QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl; @@ -439,7 +439,7 @@ void qsp_enable(void) { qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); - qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); + qatomic_set(&bql_mutex_lock_func, qsp_bql_mutex_lock); qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); qatomic_set(&qemu_cond_wait_func, qsp_cond_wait); @@ -450,7 +450,7 @@ void qsp_disable(void) { qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); - qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); + qatomic_set(&bql_mutex_lock_func, qemu_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); diff --git a/util/rcu.c b/util/rcu.c index e587bcc48314b81a5a79bc5c399e88a3cefec245..fa32c942e4bb98b4b69da6bc28213036a291002d 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -283,24 +283,24 @@ static void *call_rcu_thread(void *opaque) qatomic_sub(&rcu_call_count, n); synchronize_rcu(); - qemu_mutex_lock_iothread(); + bql_lock(); while (n > 0) { node = try_dequeue(); while (!node) { - qemu_mutex_unlock_iothread(); + 
bql_unlock(); qemu_event_reset(&rcu_call_ready_event); node = try_dequeue(); if (!node) { qemu_event_wait(&rcu_call_ready_event); node = try_dequeue(); } - qemu_mutex_lock_iothread(); + bql_lock(); } n--; node->func(node); } - qemu_mutex_unlock_iothread(); + bql_unlock(); } abort(); } @@ -337,13 +337,13 @@ static void drain_rcu_callback(struct rcu_head *node) void drain_call_rcu(void) { struct rcu_drain rcu_drain; - bool locked = qemu_mutex_iothread_locked(); + bool locked = bql_locked(); memset(&rcu_drain, 0, sizeof(struct rcu_drain)); qemu_event_init(&rcu_drain.drain_complete_event, false); if (locked) { - qemu_mutex_unlock_iothread(); + bql_unlock(); } @@ -365,7 +365,7 @@ void drain_call_rcu(void) qatomic_dec(&in_drain_call_rcu); if (locked) { - qemu_mutex_lock_iothread(); + bql_lock(); } } @@ -409,7 +409,7 @@ static void rcu_init_complete(void) qemu_event_init(&rcu_call_ready_event, false); - /* The caller is assumed to have iothread lock, so the call_rcu thread + /* The caller is assumed to have BQL, so the call_rcu thread * must have been quiescent even after forking, just recreate it. */ qemu_thread_create(&thread, "call_rcu", call_rcu_thread, diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index a9a48fffb8745d1fb4b74e79199d750e156ff61a..3bfb1ad3ec1052a5ea98e91621bdcd9139eb9017 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -360,10 +360,7 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc, qio_channel_set_follow_coroutine_ctx(server->ioc, true); - /* Attaching the AioContext starts the vu_client_trip coroutine */ - aio_context_acquire(server->ctx); vhost_user_server_attach_aio_context(server, server->ctx); - aio_context_release(server->ctx); } /* server->ctx acquired by caller */
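For context, the recurring pattern in the hunks above (cocoa.m's with_bql() and bool_with_bql(), spice-core.c's channel_event(), rcu.c's drain_call_rcu()) is the conditional Big QEMU Lock idiom introduced by the rename in this series: check bql_locked(), take the lock with bql_lock() only when the caller does not already hold it, and release it with bql_unlock() only if it was taken here. The following is a minimal sketch, not part of the patch; it assumes the bql_* helpers declared elsewhere in the tree, and do_work_under_bql is a hypothetical placeholder for whatever needs the BQL.

    /*
     * Sketch of the conditional-BQL idiom used in several hunks above.
     * Not from the patch; do_work_under_bql() is a hypothetical callback.
     */
    static void run_with_bql(void (*do_work_under_bql)(void))
    {
        bool was_locked = bql_locked();   /* caller may already hold the BQL */

        if (!was_locked) {
            bql_lock();                   /* take the BQL only if we did not hold it */
        }

        do_work_under_bql();              /* work that must run under the BQL */

        if (!was_locked) {
            bql_unlock();                 /* drop the BQL only if we took it here */
        }
    }

The point of remembering was_locked rather than unconditionally locking is that the helpers may be reached both from the main loop (BQL already held) and from foreign threads such as the Cocoa UI thread or a SPICE worker thread (BQL not held); the idiom keeps the lock state of the caller unchanged in both cases.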