From a616d35f7eafe1f92195644b66320823892d6803 Mon Sep 17 00:00:00 2001
From: wh02252983 <wh02252983@alibaba-inc.com>
Date: Wed, 28 May 2025 17:09:45 +0800
Subject: [PATCH] [Feature] Add patches to fix some bugs/CVEs

To #N/A
Add patches to fix some bugs/CVEs
Project: TC2024080204

Signed-off-by: wh02252983 <wh02252983@alibaba-inc.com>
---
 0285-add-sw64arch-support.patch | 25514 ++++++++++++++++
 ...dd-max-size-bounds-check-in-input-cb.patch | 61 +
 ...eck-for-invalid-param-shift-operands.patch | 56 +
 ...-support-for-the-virtcca-cvm-feature.patch | 1045 +
 ...incorrect-device-name-check-for-vhos.patch | 34 +
 ...mmand-blacklist-for-cvm-security-enh.patch | 118 +
 ...undefined-reference-to-virtcca-cvm-a.patch | 30 +
 ...i386-csv-guest-introduce-secret-head.patch | 220 +
 ...support-to-get-and-enable-extensions.patch | 105 +
 ...request-to-set-private-memory-of-csv.patch | 147 +
 ...support-load-kernel-hashes-for-csv3-.patch | 40 +
 ...support-inject-secret-for-csv3-guest.patch | 43 +
 ...more-features-enumerated-by-cpuid-7-.patch | 63 +
 ...6-fix-feature-dependency-for-waitpkg.patch | 39 +
 ...support-for-fred-in-cpuid-enumeratio.patch | 108 +
 ...rget-i386-mark-cr4-fred-not-reserved.patch | 67 +
 ...ap-add-support-for-vmx-fred-controls.patch | 66 +
 ...umerate-vmx-nested-exception-support.patch | 62 +
 ...get-set-migrate-support-for-fred-msr.patch | 188 +
 ...te-duplicated-macro-definition-cr4-f.patch | 39 +
 ...vmx-control-bits-for-nested-fred-sup.patch | 48 +
 ...e-the-highest-index-value-used-for-a.patch | 66 +
 ...-x86cpu-to-x86-cpu-get-supported-fea.patch | 108 +
 ...e-subleaf-constraint-on-cpuid-leaf-1.patch | 38 +
 ...t-construct-a-all-zero-entry-for-cpu.patch | 57 +
 ...ble-fdp-excptn-only-and-zero-fcs-fds.patch | 70 +
 ...truct-cpuid-2-as-stateful-iff-times-.patch | 41 +
 ...-invtsc-migratable-when-user-sets-ts.patch | 66 +
 ...et-i386-cpu-fix-notes-for-cpu-models.patch | 45 +
 ...gration-of-rtc-and-memb-instructions.patch | 553 +
 ...oi-ability-of-256-vcpu-interrupt-rou.patch | 208 +
 ...ch-fix-vcpu-reset-command-word-issue.patch | 56 +
 ...rch-fix-the-cpu-unplug-resource-leak.patch | 76 +
 ...t-adjust-the-loading-position-of-the.patch | 95 +
 ...-fixed-loongson-rtc-emulation-errors.patch | 137 +
 ...i386-introduce-sierraforest-v2-model.patch | 62 +
 ...get-i386-export-bhi-no-bit-to-guests.patch | 45 +
 ...6-add-new-cpu-model-clearwaterforest.patch | 271 +
 ...-docs-add-gnr-srf-and-cwf-cpu-models.patch | 120 +
 ...i386-add-sha512-sm3-sm4-feature-bits.patch | 41 +
 ...chitectural-cpuid-leaf-generation-to.patch | 560 +
 ...e-pam-initialization-above-smram-ini.patch | 62 +
 ...smm-ranges-property-for-q35-pci-host.patch | 158 +
 ...-pcat-compat-bit-only-when-pic-is-no.patch | 45 +
 ...st-support-add-kvm-init-and-kvm-rese.patch | 84 +
 ...h-to-use-confidential-guest-kvm-init.patch | 314 +
 ...o-use-confidential-guest-kvm-init-re.patch | 133 +
 ...h-to-use-confidential-guest-kvm-init.patch | 103 +
 ...inux-headers-add-setup-data-h-to-imp.patch | 77 +
 ...inux-headers-add-bits-h-to-file-impo.patch | 31 +
 ...nux-headers-update-to-linux-v6-8-rc6.patch | 1010 +
 ...x-headers-update-to-current-kvm-next.patch | 2494 ++
 0337-cpus-vm-was-suspended.patch | 63 +
 0338-cpus-stop-vm-in-suspended-runstate.patch | 243 +
 ...itial-cpu-reset-if-reset-is-not-actu.patch | 63 +
 ...t-migration-when-vm-has-poisoned-mem.patch | 113 +
 ...inux-header-sh-be-more-src-tree-frie.patch | 181 +
 ...inux-headers-sh-remove-temporary-dir.patch | 34 +
 ...inux-headers-sh-fix-the-path-of-setu.patch | 36 +
...d-interfaces-to-read-midr-on-aarch64.patch | 171 + 0345-cpu-add-phytium-v-cpu-support.patch | 184 + ...rt-vm-live-migration-between-phytium.patch | 154 + 0347-cpu-add-tengyun-s5000c-cpu-support.patch | 62 + 0348-sync-header-file-from-upstream.patch | 138 + qemu.spec | 88 +- 65 files changed, 36847 insertions(+), 2 deletions(-) create mode 100644 0285-add-sw64arch-support.patch create mode 100644 0286-virtio-snd-add-max-size-bounds-check-in-input-cb.patch create mode 100644 0287-virtio-snd-check-for-invalid-param-shift-operands.patch create mode 100644 0288-add-support-for-the-virtcca-cvm-feature.patch create mode 100644 0289-cvm-bug-fix-for-incorrect-device-name-check-for-vhos.patch create mode 100644 0290-cvm-implement-command-blacklist-for-cvm-security-enh.patch create mode 100644 0291-cvm-bug-fix-for-undefined-reference-to-virtcca-cvm-a.patch create mode 100644 0292-qapi-qom-target-i386-csv-guest-introduce-secret-head.patch create mode 100644 0293-target-i386-kvm-support-to-get-and-enable-extensions.patch create mode 100644 0294-target-i386-csv-request-to-set-private-memory-of-csv.patch create mode 100644 0295-target-i386-csv-support-load-kernel-hashes-for-csv3-.patch create mode 100644 0296-target-i386-csv-support-inject-secret-for-csv3-guest.patch create mode 100644 0297-target-i386-add-more-features-enumerated-by-cpuid-7-.patch create mode 100644 0298-target-i386-fix-feature-dependency-for-waitpkg.patch create mode 100644 0299-target-i386-add-support-for-fred-in-cpuid-enumeratio.patch create mode 100644 0300-target-i386-mark-cr4-fred-not-reserved.patch create mode 100644 0301-vmxcap-add-support-for-vmx-fred-controls.patch create mode 100644 0302-target-i386-enumerate-vmx-nested-exception-support.patch create mode 100644 0303-target-i386-add-get-set-migrate-support-for-fred-msr.patch create mode 100644 0304-target-i386-delete-duplicated-macro-definition-cr4-f.patch create mode 100644 0305-target-i386-add-vmx-control-bits-for-nested-fred-sup.patch create mode 100644 0306-target-i386-raise-the-highest-index-value-used-for-a.patch create mode 100644 0307-target-i386-pass-x86cpu-to-x86-cpu-get-supported-fea.patch create mode 100644 0308-i386-cpuid-remove-subleaf-constraint-on-cpuid-leaf-1.patch create mode 100644 0309-target-i386-don-t-construct-a-all-zero-entry-for-cpu.patch create mode 100644 0310-target-i386-enable-fdp-excptn-only-and-zero-fcs-fds.patch create mode 100644 0311-target-i386-construct-cpuid-2-as-stateful-iff-times-.patch create mode 100644 0312-target-i386-make-invtsc-migratable-when-user-sets-ts.patch create mode 100644 0313-target-i386-cpu-fix-notes-for-cpu-models.patch create mode 100644 0314-sw64-add-the-migration-of-rtc-and-memb-instructions.patch create mode 100644 0315-hw-intc-add-extioi-ability-of-256-vcpu-interrupt-rou.patch create mode 100644 0316-target-loongarch-fix-vcpu-reset-command-word-issue.patch create mode 100644 0317-target-loongarch-fix-the-cpu-unplug-resource-leak.patch create mode 100644 0318-hw-loongarch-boot-adjust-the-loading-position-of-the.patch create mode 100644 0319-hw-rtc-fixed-loongson-rtc-emulation-errors.patch create mode 100644 0320-target-i386-introduce-sierraforest-v2-model.patch create mode 100644 0321-target-i386-export-bhi-no-bit-to-guests.patch create mode 100644 0322-target-i386-add-new-cpu-model-clearwaterforest.patch create mode 100644 0323-docs-add-gnr-srf-and-cwf-cpu-models.patch create mode 100644 0324-target-i386-add-sha512-sm3-sm4-feature-bits.patch create mode 100644 
0325-i386-kvm-move-architectural-cpuid-leaf-generation-to.patch create mode 100644 0326-pci-host-q35-move-pam-initialization-above-smram-ini.patch create mode 100644 0327-q35-introduce-smm-ranges-property-for-q35-pci-host.patch create mode 100644 0328-hw-i386-acpi-set-pcat-compat-bit-only-when-pic-is-no.patch create mode 100644 0329-confidential-guest-support-add-kvm-init-and-kvm-rese.patch create mode 100644 0330-i386-sev-switch-to-use-confidential-guest-kvm-init.patch create mode 100644 0331-ppc-pef-switch-to-use-confidential-guest-kvm-init-re.patch create mode 100644 0332-s390-switch-to-use-confidential-guest-kvm-init.patch create mode 100644 0333-scripts-update-linux-headers-add-setup-data-h-to-imp.patch create mode 100644 0334-scripts-update-linux-headers-add-bits-h-to-file-impo.patch create mode 100644 0335-linux-headers-update-to-linux-v6-8-rc6.patch create mode 100644 0336-linux-headers-update-to-current-kvm-next.patch create mode 100644 0337-cpus-vm-was-suspended.patch create mode 100644 0338-cpus-stop-vm-in-suspended-runstate.patch create mode 100644 0339-runstate-skip-initial-cpu-reset-if-reset-is-not-actu.patch create mode 100644 0340-migration-prevent-migration-when-vm-has-poisoned-mem.patch create mode 100644 0341-scripts-update-linux-header-sh-be-more-src-tree-frie.patch create mode 100644 0342-scripts-update-linux-headers-sh-remove-temporary-dir.patch create mode 100644 0343-scripts-update-linux-headers-sh-fix-the-path-of-setu.patch create mode 100644 0344-util-add-interfaces-to-read-midr-on-aarch64.patch create mode 100644 0345-cpu-add-phytium-v-cpu-support.patch create mode 100644 0346-target-arm-support-vm-live-migration-between-phytium.patch create mode 100644 0347-cpu-add-tengyun-s5000c-cpu-support.patch create mode 100644 0348-sync-header-file-from-upstream.patch diff --git a/0285-add-sw64arch-support.patch b/0285-add-sw64arch-support.patch new file mode 100644 index 0000000..ccca835 --- /dev/null +++ b/0285-add-sw64arch-support.patch @@ -0,0 +1,25514 @@ +From 9d0b14e76ea6885e78d97c75d8df2718d0ad677b Mon Sep 17 00:00:00 2001 +From: lufeifei +Date: Thu, 24 Feb 2022 16:07:40 +0800 +Subject: [PATCH] Add sw64arch support + +Signed-off-by: lufeifei +--- + configs/devices/sw64-softmmu/default.mak | 19 + + configs/targets/sw64-linux-user.mak | 5 + + configs/targets/sw64-softmmu.mak | 10 + + configure | 18 +- + disas/meson.build | 1 + + disas/sw64.c | 1364 ++++++ + gdb-xml/sw64-core.xml | 43 + + hw/Kconfig | 1 + + hw/acpi/Kconfig | 8 + + hw/acpi/aml-build.c | 12 + + hw/acpi/gpio_sunway.c | 257 ++ + hw/acpi/meson.build | 2 + + hw/acpi/sw64_pm_device.c | 285 ++ + hw/meson.build | 1 + + hw/sw64/Kconfig | 32 + + hw/sw64/core3.c | 116 + + hw/sw64/core3_board.c | 419 ++ + hw/sw64/core4.c | 400 ++ + hw/sw64/core4_board.c | 868 ++++ + hw/sw64/gpio.h | 54 + + hw/sw64/meson.build | 17 + + hw/sw64/pm.h | 59 + + hw/sw64/sunway.c | 631 +++ + hw/sw64/sw64-acpi-build.c | 876 ++++ + hw/sw64/sw64_iommu.c | 570 +++ + hw/sw64/trace-events | 3 + + include/disas/dis-asm.h | 6 + + include/elf.h | 44 + + include/hw/acpi/aml-build.h | 5 + + include/hw/sw64/core.h | 138 + + include/hw/sw64/sunway.h | 51 + + include/hw/sw64/sw64_iommu.h | 103 + + include/sysemu/arch_init.h | 1 + + include/tcg/tcg-op.h | 55 + + linux-headers/asm-sw64/kvm.h | 90 + + linux-headers/asm-sw64/unistd.h | 380 ++ + linux-headers/asm-sw64/unistd.h.bak | 481 +++ + linux-user/elfload.c | 18 + + linux-user/host/sw64/host-signal.h | 46 + + linux-user/host/sw64/hostdep.h | 14 + + linux-user/meson.build | 1 + + 
linux-user/sw64/cpu_loop.c | 110 + + linux-user/sw64/meson.build | 5 + + linux-user/sw64/signal.c | 288 ++ + linux-user/sw64/sockbits.h | 1 + + linux-user/sw64/syscall.tbl | 488 +++ + linux-user/sw64/syscall_nr.h | 471 +++ + linux-user/sw64/syscallhdr.sh | 32 + + linux-user/sw64/target_cpu.h | 52 + + linux-user/sw64/target_elf.h | 14 + + linux-user/sw64/target_errno_defs.h | 204 + + linux-user/sw64/target_fcntl.h | 11 + + linux-user/sw64/target_mman.h | 12 + + linux-user/sw64/target_prctl.h | 1 + + linux-user/sw64/target_proc.h | 1 + + linux-user/sw64/target_resource.h | 1 + + linux-user/sw64/target_signal.h | 100 + + linux-user/sw64/target_structs.h | 47 + + linux-user/sw64/target_syscall.h | 125 + + linux-user/sw64/termbits.h | 266 ++ + linux-user/syscall_defs.h | 46 +- + meson.build | 13 +- + pc-bios/c3-uefi-bios-sw | Bin 0 -> 3145728 bytes + pc-bios/c4-uefi-bios-sw | Bin 0 -> 3145728 bytes + pc-bios/core3-hmcode | Bin 0 -> 229688 bytes + pc-bios/core3-reset | Bin 0 -> 229200 bytes + pc-bios/core4-hmcode | Bin 0 -> 254392 bytes + pc-bios/core4-reset | Bin 0 -> 5024 bytes + pc-bios/meson.build | 6 + + pc-bios/uefi-bios-sw-old | Bin 0 -> 3145728 bytes + qapi/machine-target.json | 4 +- + qapi/machine.json | 2 +- + qemu-options.hx | 6 +- + scripts/qemu-version.sh | 2 +- + system/qdev-monitor.c | 2 +- + target/Kconfig | 1 + + target/meson.build | 1 + + target/openrisc/cpu.h | 8 +- + target/sw64/Kconfig | 2 + + target/sw64/cpu-param.h | 16 + + target/sw64/cpu-qom.h | 29 + + target/sw64/cpu.c | 462 ++ + target/sw64/cpu.h | 449 ++ + target/sw64/exception.c | 79 + + target/sw64/float_helper.c | 798 ++++ + target/sw64/gdbstub.c | 55 + + target/sw64/helper.c | 520 +++ + target/sw64/helper.h | 123 + + target/sw64/int_helper.c | 118 + + target/sw64/kvm.c | 399 ++ + target/sw64/kvm_sw64.h | 56 + + target/sw64/machine.c | 17 + + target/sw64/meson.build | 20 + + target/sw64/profile.c | 2262 ++++++++++ + target/sw64/profile.h | 541 +++ + target/sw64/simd_helper.c | 986 +++++ + target/sw64/translate.c | 4878 ++++++++++++++++++++++ + target/sw64/translate.h | 60 + + tcg/sw64/tcg-target-con-set.h | 39 + + tcg/sw64/tcg-target-con-str.h | 28 + + tcg/sw64/tcg-target-reg-bits.h | 12 + + tcg/sw64/tcg-target.c.inc | 2560 ++++++++++++ + tcg/sw64/tcg-target.h | 137 + + tcg/sw64/tcg-target.opc.h | 15 + + 104 files changed, 24470 insertions(+), 15 deletions(-) + create mode 100644 configs/devices/sw64-softmmu/default.mak + create mode 100644 configs/targets/sw64-linux-user.mak + create mode 100644 configs/targets/sw64-softmmu.mak + create mode 100755 disas/sw64.c + create mode 100644 gdb-xml/sw64-core.xml + create mode 100644 hw/acpi/gpio_sunway.c + create mode 100644 hw/acpi/sw64_pm_device.c + create mode 100644 hw/sw64/Kconfig + create mode 100644 hw/sw64/core3.c + create mode 100644 hw/sw64/core3_board.c + create mode 100644 hw/sw64/core4.c + create mode 100644 hw/sw64/core4_board.c + create mode 100644 hw/sw64/gpio.h + create mode 100644 hw/sw64/meson.build + create mode 100644 hw/sw64/pm.h + create mode 100644 hw/sw64/sunway.c + create mode 100644 hw/sw64/sw64-acpi-build.c + create mode 100644 hw/sw64/sw64_iommu.c + create mode 100644 hw/sw64/trace-events + create mode 100644 include/hw/sw64/core.h + create mode 100644 include/hw/sw64/sunway.h + create mode 100644 include/hw/sw64/sw64_iommu.h + create mode 100644 linux-headers/asm-sw64/kvm.h + create mode 100644 linux-headers/asm-sw64/unistd.h + create mode 100644 linux-headers/asm-sw64/unistd.h.bak + create mode 100644 linux-user/host/sw64/host-signal.h + 
create mode 100755 linux-user/host/sw64/hostdep.h + create mode 100644 linux-user/sw64/cpu_loop.c + create mode 100644 linux-user/sw64/meson.build + create mode 100644 linux-user/sw64/signal.c + create mode 100644 linux-user/sw64/sockbits.h + create mode 100644 linux-user/sw64/syscall.tbl + create mode 100644 linux-user/sw64/syscall_nr.h + create mode 100644 linux-user/sw64/syscallhdr.sh + create mode 100644 linux-user/sw64/target_cpu.h + create mode 100644 linux-user/sw64/target_elf.h + create mode 100644 linux-user/sw64/target_errno_defs.h + create mode 100644 linux-user/sw64/target_fcntl.h + create mode 100644 linux-user/sw64/target_mman.h + create mode 100644 linux-user/sw64/target_prctl.h + create mode 100644 linux-user/sw64/target_proc.h + create mode 100644 linux-user/sw64/target_resource.h + create mode 100644 linux-user/sw64/target_signal.h + create mode 100644 linux-user/sw64/target_structs.h + create mode 100644 linux-user/sw64/target_syscall.h + create mode 100644 linux-user/sw64/termbits.h + create mode 100755 pc-bios/c3-uefi-bios-sw + create mode 100644 pc-bios/c4-uefi-bios-sw + create mode 100644 pc-bios/core3-hmcode + create mode 100755 pc-bios/core3-reset + create mode 100755 pc-bios/core4-hmcode + create mode 100755 pc-bios/core4-reset + create mode 100644 pc-bios/uefi-bios-sw-old + create mode 100644 target/sw64/Kconfig + create mode 100644 target/sw64/cpu-param.h + create mode 100644 target/sw64/cpu-qom.h + create mode 100644 target/sw64/cpu.c + create mode 100644 target/sw64/cpu.h + create mode 100644 target/sw64/exception.c + create mode 100644 target/sw64/float_helper.c + create mode 100644 target/sw64/gdbstub.c + create mode 100644 target/sw64/helper.c + create mode 100644 target/sw64/helper.h + create mode 100644 target/sw64/int_helper.c + create mode 100644 target/sw64/kvm.c + create mode 100644 target/sw64/kvm_sw64.h + create mode 100644 target/sw64/machine.c + create mode 100644 target/sw64/meson.build + create mode 100644 target/sw64/profile.c + create mode 100644 target/sw64/profile.h + create mode 100644 target/sw64/simd_helper.c + create mode 100644 target/sw64/translate.c + create mode 100644 target/sw64/translate.h + create mode 100755 tcg/sw64/tcg-target-con-set.h + create mode 100755 tcg/sw64/tcg-target-con-str.h + create mode 100644 tcg/sw64/tcg-target-reg-bits.h + create mode 100755 tcg/sw64/tcg-target.c.inc + create mode 100755 tcg/sw64/tcg-target.h + create mode 100755 tcg/sw64/tcg-target.opc.h + +diff --git a/configs/devices/sw64-softmmu/default.mak b/configs/devices/sw64-softmmu/default.mak +new file mode 100644 +index 0000000000..f76bbf6fdd +--- /dev/null ++++ b/configs/devices/sw64-softmmu/default.mak +@@ -0,0 +1,19 @@ ++# Default configuration for sw64-softmmu ++ ++# Uncomment the following lines to disable these optional devices: ++# ++#CONFIG_PCI_DEVICES=n ++#CONFIG_TEST_DEVICES=n ++ ++# Boards: ++# ++CONFIG_CORE3=y ++CONFIG_CORE4=y ++CONFIG_ACPI=y ++CONFIG_ACPI_MEMORY_HOTPLUG=y ++CONFIG_ACPI_CPU_HOTPLUG = y ++CONFIG_ACPI_SW64_PM=y ++CONFIG_ACPI_PCI=y ++CONFIG_MEM_DEVICE=y ++CONFIG_DIMM=y ++CONFIG_PCI_EXPRESS_GENERIC_BRIDGE=y +diff --git a/configs/targets/sw64-linux-user.mak b/configs/targets/sw64-linux-user.mak +new file mode 100644 +index 0000000000..ae00665692 +--- /dev/null ++++ b/configs/targets/sw64-linux-user.mak +@@ -0,0 +1,5 @@ ++TARGET_ARCH=sw64 ++TARGET_SYSTBL_ABI=common ++TARGET_SYSTBL=syscall.tbl ++TARGET_ALIGNED_ONLY=y ++TARGET_XML_FILES= gdb-xml/sw64-core.xml +diff --git a/configs/targets/sw64-softmmu.mak 
b/configs/targets/sw64-softmmu.mak +new file mode 100644 +index 0000000000..6735150257 +--- /dev/null ++++ b/configs/targets/sw64-softmmu.mak +@@ -0,0 +1,10 @@ ++# Default configuration for sw64-softmmu ++ ++# Boards: ++# ++TARGET_ARCH=sw64 ++TARGET_BASE_ARCH=sw64 ++TARGET_ABI_DIR=sw64 ++TARGET_SUPPORTS_MTTCG=y ++TARGET_XML_FILES= gdb-xml/sw64-core.xml ++TARGET_NEED_FDT=y +diff --git a/configure b/configure +index 0609aa95b4..10d8824974 100755 +--- a/configure ++++ b/configure +@@ -411,7 +411,6 @@ else + # Using uname is really broken, but it is just a fallback for architectures + # that are going to use TCI anyway + cpu=$(uname -m) +- echo "WARNING: unrecognized host CPU, proceeding with 'uname -m' output '$cpu'" + fi + + # Normalise host CPU name to the values used by Meson cross files and in source +@@ -475,7 +474,10 @@ case "$cpu" in + linux_arch=powerpc + CPU_CFLAGS="-m64 -mlittle-endian" + ;; +- ++ sw_64) ++ cpu=sw64 ++ linux_arch=sw64 ++ ;; + riscv32 | riscv64) + host_arch=riscv + linux_arch=riscv +@@ -764,6 +766,18 @@ for opt do + # Pass through -Dxxxx options to meson + -D*) meson_options="$meson_options $opt" + ;; ++ --disable-vt-iommu) vt_iommu="no" ++ ;; ++ --enable-vt-iommu) vt_iommu="yes" ++ ;; ++ --disable-vhost-net) vhost_net="no" ++ ;; ++ --enable-vhost-net) vhost_net="yes" ++ ;; ++ --disable-vt-memhp) vt_memhp="no" ++ ;; ++ --enable-vt-memhp) vt_memhp="yes" ++ ;; + esac + done + +diff --git a/disas/meson.build b/disas/meson.build +index 815523ab85..8586ac7bbd 100644 +--- a/disas/meson.build ++++ b/disas/meson.build +@@ -14,6 +14,7 @@ common_ss.add(when: 'CONFIG_RISCV_DIS', if_true: files( + common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c')) + common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c')) + common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c')) ++common_ss.add(when: 'CONFIG_SW64_DIS', if_true: files('sw64.c')) + common_ss.add(when: capstone, if_true: [files('capstone.c'), capstone]) + common_ss.add(files('disas.c')) + +diff --git a/disas/sw64.c b/disas/sw64.c +new file mode 100755 +index 0000000000..0453d47d39 +--- /dev/null ++++ b/disas/sw64.c +@@ -0,0 +1,1364 @@ ++/* ++ * sw64-dis.c -- Disassemble SW64 instructions ++ * Copyright (C) 1996-2015 Free Software Foundation, Inc. ++ * Contributed by Richard Henderson , ++ * patterned after the PPC opcode handling written by Ian Lance Taylor. ++ * ++ * This file is part of libopcodes. ++ * ++ * This library is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 3, or (at your option) ++ * any later version. ++ * ++ * It is distributed in the hope that it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++ * License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; see the file COPYING. If not, write to the Free ++ * Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA ++ * 02110-1301, USA. ++ */ ++ ++#include "qemu/osdep.h" ++#include "disas/dis-asm.h" ++ ++#undef MAX ++ ++struct sw64_opcode { ++ /* The opcode name. */ ++ const char *name; ++ ++ /* The opcode itself. Those bits which will be filled in with ++ operands are zeroes. */ ++ unsigned opcode; ++ ++ /* The opcode mask. This is used by the disassembler. 
This is a
++ mask containing ones indicating those bits which must match the
++ opcode field, and zeroes indicating those bits which need not
++ match (and are presumably filled in by operands). */
++ unsigned mask;
++
++ /* One bit flags for the opcode. These are primarily used to
++ indicate that specific processors and environments support the
++ instructions. The defined values are listed below. */
++ unsigned flags;
++
++ /* An array of operand codes. Each code is an index into the
++ operand table. They appear in the order in which the operands must
++ appear in assembly code, and are terminated by a zero. */
++ unsigned char operands[5];
++};
++
++/* The table itself is sorted by major opcode number, and is otherwise
++ in the order in which the disassembler should consider
++ instructions. */
++extern const struct sw64_opcode sw64_opcodes[];
++extern const unsigned sw64_num_opcodes;
++
++/* Values defined for the flags field of a struct sw64_opcode. */
++
++/* CPU Availability */
++#define SW_OPCODE_BASE 0x0001 /* Base architecture insns. */
++#define SW_OPCODE_CORE3 0x0002 /* Core3 private insns. */
++#define SW_OPCODE_CORE4 0x0004 /* Core4 private insns. */
++#define SW_LITOP(i) (((i) >> 26) & 0x3D)
++
++#define SW_OPCODE_NOHMCODE (~(SW_OPCODE_BASE|SW_OPCODE_CORE3|SW_OPCODE_CORE4))
++
++/* A macro to extract the major opcode from an instruction. */
++#define SW_OP(i) (((i) >> 26) & 0x3F)
++
++/* The total number of major opcodes. */
++#define SW_NOPS 0x40
++
++/* The operands table is an array of struct sw64_operand. */
++
++struct sw64_operand {
++ /* The number of bits in the operand. */
++ unsigned int bits : 5;
++
++ /* How far the operand is left shifted in the instruction. */
++ unsigned int shift : 5;
++
++ /* The default relocation type for this operand. */
++ signed int default_reloc : 16;
++
++ /* One bit syntax flags. */
++ unsigned int flags : 16;
++
++ /* Insertion function. This is used by the assembler. To insert an
++ operand value into an instruction, check this field.
++
++ If it is NULL, execute
++ i |= (op & ((1 << o->bits) - 1)) << o->shift;
++ (i is the instruction which we are filling in, o is a pointer to
++ this structure, and op is the opcode value; this assumes twos
++ complement arithmetic).
++
++ If this field is not NULL, then simply call it with the
++ instruction and the operand value. It will return the new value
++ of the instruction. If the ERRMSG argument is not NULL, then if
++ the operand value is illegal, *ERRMSG will be set to a warning
++ string (the operand will be inserted in any case). If the
++ operand value is legal, *ERRMSG will be unchanged (most operands
++ can accept any value). */
++ unsigned (*insert) (unsigned instruction, int op, const char **errmsg);
++
++ /* Extraction function. This is used by the disassembler. To
++ extract this operand type from an instruction, check this field.
++
++ If it is NULL, compute
++ op = ((i) >> o->shift) & ((1 << o->bits) - 1);
++ if ((o->flags & SW_OPERAND_SIGNED) != 0
++ && (op & (1 << (o->bits - 1))) != 0)
++ op -= 1 << o->bits;
++ (i is the instruction, o is a pointer to this structure, and op
++ is the result; this assumes twos complement arithmetic).
++
++ If this field is not NULL, then simply call it with the
++ instruction value. It will return the value of the operand. If
++ the INVALID argument is not NULL, *INVALID will be set to
++ non-zero if this operand type cannot actually be extracted from
++ this operand (i.e., the instruction does not match). If the
++ operand is valid, *INVALID will not be changed. */
++ int (*extract) (unsigned instruction, int *invalid);
++};
++
++/* Elements in the table are retrieved by indexing with values from
++ the operands field of the sw64_opcodes table. */
++
++extern const struct sw64_operand sw64_operands[];
++extern const unsigned sw64_num_operands;
++/* Values defined for the flags field of a struct sw64_operand. */
++
++/* Mask for selecting the type for typecheck purposes */
++#define SW_OPERAND_TYPECHECK_MASK \
++ (SW_OPERAND_PARENS | SW_OPERAND_COMMA | SW_OPERAND_IR | \
++ SW_OPERAND_FPR | SW_OPERAND_RELATIVE | SW_OPERAND_SIGNED | \
++ SW_OPERAND_UNSIGNED)
++
++/* This operand does not actually exist in the assembler input. This
++ is used to support extended mnemonics, for which two operand fields
++ are identical. The assembler should call the insert function with
++ any op value. The disassembler should call the extract function,
++ ignore the return value, and check the value placed in the invalid
++ argument. */
++#define SW_OPERAND_FAKE 01
++
++/* The operand should be wrapped in parentheses rather than separated
++ from the previous by a comma. This is used for the load and store
++ instructions which want their operands to look like "Ra,disp(Rb)". */
++#define SW_OPERAND_PARENS 02
++
++/* Used in combination with PARENS, this suppresses the suppression of
++ the comma. This is used for "jmp Ra,(Rb),hint". */
++#define SW_OPERAND_COMMA 04
++
++/* This operand names an integer register. */
++#define SW_OPERAND_IR 010
++
++/* This operand names a floating point register. */
++#define SW_OPERAND_FPR 020
++
++/* This operand is a relative branch displacement. The disassembler
++ prints these symbolically if possible. */
++#define SW_OPERAND_RELATIVE 040
++
++/* This operand takes signed values. */
++#define SW_OPERAND_SIGNED 0100
++
++/* This operand takes unsigned values. This exists primarily so that
++ a flags value of 0 can be treated as end-of-arguments. */
++#define SW_OPERAND_UNSIGNED 0200
++
++/* Suppress overflow detection on this field. This is used for hints. */
++#define SW_OPERAND_NOOVERFLOW 0400
++
++/* Mask for optional argument default value. */
++#define SW_OPERAND_OPTIONAL_MASK 07000
++
++/* This operand defaults to zero. This is used for jump hints. */
++#define SW_OPERAND_DEFAULT_ZERO 01000
++
++/* This operand should default to the first (real) operand and is used
++ in conjunction with SW_OPERAND_OPTIONAL. This allows
++ "and $0,3,$0" to be written as "and $0,3", etc. I don't like
++ it, but it's what DEC does. */
++#define SW_OPERAND_DEFAULT_FIRST 02000
++
++/* Similarly, this operand should default to the second (real) operand.
++ This allows "negl $0" instead of "negl $0,$0". */
++#define SW_OPERAND_DEFAULT_SECOND 04000
++
++/* Register common names */
++
++#define SW_REG_V0 0
++#define SW_REG_T0 1
++#define SW_REG_T1 2
++#define SW_REG_T2 3
++#define SW_REG_T3 4
++#define SW_REG_T4 5
++#define SW_REG_T5 6
++#define SW_REG_T6 7
++#define SW_REG_T7 8
++#define SW_REG_S0 9
++#define SW_REG_S1 10
++#define SW_REG_S2 11
++#define SW_REG_S3 12
++#define SW_REG_S4 13
++#define SW_REG_S5 14
++#define SW_REG_FP 15
++#define SW_REG_A0 16
++#define SW_REG_A1 17
++#define SW_REG_A2 18
++#define SW_REG_A3 19
++#define SW_REG_A4 20
++#define SW_REG_A5 21
++#define SW_REG_T8 22
++#define SW_REG_T9 23
++#define SW_REG_T10 24
++#define SW_REG_T11 25
++#define SW_REG_RA 26
++#define SW_REG_PV 27
++#define SW_REG_T12 27
++#define SW_REG_AT 28
++#define SW_REG_GP 29
++#define SW_REG_SP 30
++#define SW_REG_ZERO 31
++
++static unsigned insert_rba(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (((insn >> 21) & 0x1f) << 16);
++}
++
++static int extract_rba(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++/* The same for the RC field. */
++static unsigned insert_rca(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | ((insn >> 21) & 0x1f);
++}
++
++static unsigned insert_rdc(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | ((insn >> 5) & 0x1f);
++}
++
++static int extract_rdc(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 5) & 0x1f) != (insn & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++static int extract_rca(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL
++ && ((insn >> 21) & 0x1f) != (insn & 0x1f))
++ *invalid = 1;
++ return 0;
++}
++
++/* Fake arguments in which the registers must be set to ZERO. */
++static unsigned insert_za(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (31 << 21);
++}
++
++static int extract_za(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++static unsigned insert_zb(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | (31 << 16);
++}
++
++static int extract_zb(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && ((insn >> 16) & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++static unsigned insert_zc(unsigned insn, int value ATTRIBUTE_UNUSED,
++ const char **errmsg ATTRIBUTE_UNUSED)
++{
++ return insn | 31;
++}
++
++static int extract_zc(unsigned insn, int *invalid)
++{
++ if (invalid != (int *) NULL && (insn & 0x1f) != 31)
++ *invalid = 1;
++ return 0;
++}
++
++
++/* The displacement field of a Branch format insn. */
++
++static unsigned insert_bdisp(unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = "branch operand unaligned";
++ return insn | ((value / 4) & 0x1FFFFF);
++}
++
++static int extract_bdisp(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000);
++}
++
++static unsigned
++insert_bdisp26 (unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = "branch operand unaligned";
++ return insn | ((value / 4) & 0x3FFFFFF);
++}
++
++static int
++extract_bdisp26 (unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0x3FFFFFF) ^ 0x2000000) - 0x2000000);
++}
++
++/* The hint field of a JMP/JSR insn. */
++/* SW uses a 16-bit hint displacement. */
++static unsigned insert_jhint(unsigned insn, int value, const char **errmsg)
++{
++ if (errmsg != (const char **)NULL && (value & 3))
++ *errmsg = "jump hint unaligned";
++ return insn | ((value / 4) & 0xFFFF);
++}
++
++static int extract_jhint(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
++{
++ return 4 * (((insn & 0xFFFF) ^ 0x8000) - 0x8000);
++}
++
++/* The operands table. */
++
++const struct sw64_operand sw64_operands[] = {
++ /* The fields are bits, shift, default_reloc, flags, insert, extract */
++ /* The zero index is used to indicate end-of-list */
++#define UNUSED 0
++ { 0, 0, 0, 0, 0, 0 },
++
++ /* The plain integer register fields. */
++#define RA (UNUSED + 1)
++ { 5, 21, 0, SW_OPERAND_IR, 0, 0 },
++#define RB (RA + 1)
++ { 5, 16, 0, SW_OPERAND_IR, 0, 0 },
++#define RC (RB + 1)
++ { 5, 0, 0, SW_OPERAND_IR, 0, 0 },
++
++ /* The plain fp register fields. */
++#define FA (RC + 1)
++ { 5, 21, 0, SW_OPERAND_FPR, 0, 0 },
++#define FB (FA + 1)
++ { 5, 16, 0, SW_OPERAND_FPR, 0, 0 },
++#define FC (FB + 1)
++ { 5, 0, 0, SW_OPERAND_FPR, 0, 0 },
++
++ /* The integer registers when they are ZERO. */
++#define ZA (FC + 1)
++ { 5, 21, 0, SW_OPERAND_FAKE, insert_za, extract_za },
++#define ZB (ZA + 1)
++ { 5, 16, 0, SW_OPERAND_FAKE, insert_zb, extract_zb },
++#define ZC (ZB + 1)
++ { 5, 0, 0, SW_OPERAND_FAKE, insert_zc, extract_zc },
++
++ /* The RB field when it needs parentheses. */
++#define PRB (ZC + 1)
++ { 5, 16, 0, SW_OPERAND_IR | SW_OPERAND_PARENS, 0, 0 },
++
++ /* The RB field when it needs parentheses _and_ a preceding comma. */
++#define CPRB (PRB + 1)
++ { 5, 16, 0,
++ SW_OPERAND_IR | SW_OPERAND_PARENS | SW_OPERAND_COMMA, 0, 0 },
++
++ /* The RB field when it must be the same as the RA field. */
++#define RBA (CPRB + 1)
++ { 5, 16, 0, SW_OPERAND_FAKE, insert_rba, extract_rba },
++
++ /* The RC field when it must be the same as the RB field. */
++#define RCA (RBA + 1)
++ { 5, 0, 0, SW_OPERAND_FAKE, insert_rca, extract_rca },
++
++#define RDC (RCA + 1)
++ { 5, 0, 0, SW_OPERAND_FAKE, insert_rdc, extract_rdc },
++
++ /* The RC field when it can *default* to RA. */
++#define DRC1 (RDC + 1)
++ { 5, 0, 0,
++ SW_OPERAND_IR | SW_OPERAND_DEFAULT_FIRST, 0, 0 },
++
++ /* The RC field when it can *default* to RB. */
++#define DRC2 (DRC1 + 1)
++ { 5, 0, 0,
++ SW_OPERAND_IR | SW_OPERAND_DEFAULT_SECOND, 0, 0 },
++
++ /* The FC field when it can *default* to RA. */
++#define DFC1 (DRC2 + 1)
++ { 5, 0, 0,
++ SW_OPERAND_FPR | SW_OPERAND_DEFAULT_FIRST, 0, 0 },
++
++ /* The FC field when it can *default* to RB. */
++#define DFC2 (DFC1 + 1)
++ { 5, 0, 0,
++ SW_OPERAND_FPR | SW_OPERAND_DEFAULT_SECOND, 0, 0 },
++
++ /* The unsigned 8-bit literal of Operate format insns. */
++#define LIT (DFC2 + 1)
++ { 8, 13, -LIT, SW_OPERAND_UNSIGNED, 0, 0 },
++
++ /* The signed 16-bit displacement of Memory format insns. From here
++ we can't tell what relocation should be used, so don't use a default. */
++#define MDISP (LIT + 1)
++ { 16, 0, -MDISP, SW_OPERAND_SIGNED, 0, 0 },
++
++ /* The signed "23-bit" aligned displacement of Branch format insns. */
++#define BDISP (MDISP + 1)
++ { 21, 0, -BDISP,
++ SW_OPERAND_RELATIVE, insert_bdisp, extract_bdisp },
++
++ /* The 26-bit hmcode function for sys_call and sys_call/b. */
++#define HMFN (BDISP + 1)
++ { 25, 0, -HMFN, SW_OPERAND_UNSIGNED, 0, 0 },
++
++ /* SW jsr/ret instructions have no function bits. */
++ /* The optional signed "16-bit" aligned displacement of the JMP/JSR hint. */
++#define JMPHINT (HMFN + 1)
++ { 16, 0, -JMPHINT,
++ SW_OPERAND_RELATIVE | SW_OPERAND_DEFAULT_ZERO | SW_OPERAND_NOOVERFLOW,
++ insert_jhint, extract_jhint },
++
++ /* The optional hint to RET/JSR_COROUTINE. */
++#define RETHINT (JMPHINT + 1)
++ { 16, 0, -RETHINT,
++ SW_OPERAND_UNSIGNED | SW_OPERAND_DEFAULT_ZERO, 0, 0 },
++
++ /* The 12-bit displacement for the core3 hw_{ld,st} (pal1b/pal1f) insns. */
++#define HWDISP (RETHINT + 1)
++ { 12, 0, -HWDISP, SW_OPERAND_SIGNED, 0, 0 },
++
++ /* The 16-bit combined index/scoreboard mask for the core3
++ hw_m[ft]pr (pal19/pal1d) insns. */
++#define HWINDEX (HWDISP + 1)
++ { 16, 0, -HWINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
++
++ /* The third operand of ternary-operand integer insns. */
++#define R3 (HWINDEX + 1)
++ { 5, 5, 0, SW_OPERAND_IR, 0, 0 },
++ /* The plain fp register fields */
++#define F3 (R3 + 1)
++ { 5, 5, 0, SW_OPERAND_FPR, 0, 0 },
++ /* SW SIMD settle instruction lit */
++#define FMALIT (F3 + 1)
++ { 5, 5, -FMALIT, SW_OPERAND_UNSIGNED, 0, 0 },
++#define RPIINDEX (FMALIT + 1)
++ { 8, 0, -RPIINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
++#define ATMDISP (RPIINDEX + 1)
++ { 12, 0, -ATMDISP, SW_OPERAND_SIGNED, 0, 0 },
++#define DISP13 (ATMDISP + 1)
++ { 13, 13, -DISP13, SW_OPERAND_SIGNED, 0, 0 },
++#define BDISP26 (DISP13 + 1)
++ { 26, 0, 222,
++ SW_OPERAND_RELATIVE, insert_bdisp26, extract_bdisp26 },
++#define DPFTH (BDISP26 + 1)
++ { 5, 21, -DPFTH, SW_OPERAND_UNSIGNED, 0, 0 }
++};
++
++const unsigned sw64_num_operands = sizeof(sw64_operands) / sizeof(*sw64_operands);
++
++/* Macros used to form opcodes. */
++
++/* The main opcode. */
++#define OP(x) (((uint32_t)(x) & 0x3F) << 26)
++#define OP_MASK 0xFC000000
++
++/* Branch format instructions. */
++#define BRA_(oo) OP(oo)
++#define BRA_MASK OP_MASK
++#define BRA(oo) BRA_(oo), BRA_MASK
++
++#ifdef HUANGLM20171113
++/* Floating point format instructions. */
++#define FP_(oo,fff) (OP(oo) | (((fff) & 0x7FF) << 5))
++#define FP_MASK (OP_MASK | 0xFFE0)
++#define FP(oo,fff) FP_(oo,fff), FP_MASK
++
++#else
++/* Floating point format instructions. */
++#define FP_(oo,fff) (OP(oo) | (((fff) & 0xFF) << 5))
++#define FP_MASK (OP_MASK | 0x1FE0)
++#define FP(oo,fff) FP_(oo,fff), FP_MASK
++
++#define FMA_(oo,fff) (OP(oo) | (((fff) & 0x3F) << 10 ))
++#define FMA_MASK (OP_MASK | 0xFC00)
++#define FMA(oo,fff) FMA_(oo,fff), FMA_MASK
++#endif
++
++/* Memory format instructions. */
++#define MEM_(oo) OP(oo)
++#define MEM_MASK OP_MASK
++#define MEM(oo) MEM_(oo), MEM_MASK
++
++/* Memory/Func Code format instructions. */
++#define MFC_(oo,ffff) (OP(oo) | ((ffff) & 0xFFFF))
++#define MFC_MASK (OP_MASK | 0xFFFF)
++#define MFC(oo,ffff) MFC_(oo,ffff), MFC_MASK
++
++/* Memory/Branch format instructions. */
++#define MBR_(oo,h) (OP(oo) | (((h) & 3) << 14))
++#define MBR_MASK (OP_MASK | 0xC000)
++#define MBR(oo,h) MBR_(oo,h), MBR_MASK
++
++/* The SW Operate format instructions now differ from those of SW1. */
++#define OPR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5))
++#define OPRL_(oo,ff) (OPR_((oo), (ff)) )
++#define OPR_MASK (OP_MASK | 0x1FE0)
++#define OPR(oo,ff) OPR_(oo,ff), OPR_MASK
++#define OPRL(oo,ff) OPRL_(oo,ff), OPR_MASK
++
++/* SW ternary-operand Operate format instructions. */
++#define TOPR_(oo,ff) (OP(oo) | (((ff) & 0x07) << 10))
++#define TOPRL_(oo,ff) (TOPR_((oo), (ff)))
++#define TOPR_MASK (OP_MASK | 0x1C00)
++#define TOPR(oo,ff) TOPR_(oo,ff), TOPR_MASK
++#define TOPRL(oo,ff) TOPRL_(oo,ff), TOPR_MASK
++
++/* SW atomic instructions. */
++#define ATMEM_(oo,h) (OP(oo) | (((h) & 0xF) << 12))
++#define ATMEM_MASK (OP_MASK | 0xF000)
++#define ATMEM(oo,h) ATMEM_(oo,h), ATMEM_MASK
++
++/* SW privileged instructions. */
++#define PRIRET_(oo,h) (OP(oo) | (((h) & 0x1) << 20))
++#define PRIRET_MASK (OP_MASK | 0x100000)
++#define PRIRET(oo,h) PRIRET_(oo,h), PRIRET_MASK
++
++/* SW pri_rcsr, pri_wcsr. */
++#define CSR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8))
++#define CSR_MASK (OP_MASK | 0xFF00)
++#define CSR(oo,ff) CSR_(oo,ff), CSR_MASK
++
++#define PCD_(oo,ff) (OP(oo) | (ff << 25))
++#define PCD_MASK OP_MASK
++#define PCD(oo,ff) PCD_(oo,ff), PCD_MASK
++
++/* Hardware memory (hw_{ld,st}) instructions. */
++#define HWMEM_(oo,f) (OP(oo) | (((f) & 0xF) << 12))
++#define HWMEM_MASK (OP_MASK | 0xF000)
++#define HWMEM(oo,f) HWMEM_(oo,f), HWMEM_MASK
++
++#define LOGX_(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10))
++#define LOGX_MASK (0xF0000000)
++#define LOGX(oo,ff) LOGX_(oo,ff), LOGX_MASK
++
++/* Abbreviations for instruction subsets. */
++#define BASE SW_OPCODE_BASE
++#define CORE3 SW_OPCODE_CORE3
++#define CORE4 SW_OPCODE_CORE4
++
++/* Common combinations of arguments. */
++#define ARG_NONE { 0 }
++#define ARG_BRA { RA, BDISP }
++#define ARG_FBRA { FA, BDISP }
++#define ARG_FP { FA, FB, DFC1 }
++#define ARG_FPZ1 { ZA, FB, DFC1 }
++#define ARG_MEM { RA, MDISP, PRB }
++#define ARG_FMEM { FA, MDISP, PRB }
++#define ARG_OPR { RA, RB, DRC1 }
++#define ARG_OPRCAS { RA, RB, RC }
++#define ARG_OPRL { RA, LIT, DRC1 }
++#define ARG_OPRZ1 { ZA, RB, DRC1 }
++#define ARG_OPRLZ1 { ZA, LIT, RC }
++#define ARG_PCD { HMFN }
++#define ARG_HWMEM { RA, HWDISP, PRB }
++#define ARG_FPL { FA, LIT, DFC1 }
++#define ARG_FMA { FA, FB, F3, DFC1 }
++#define ARG_PREFETCH { ZA, MDISP, PRB }
++#define ARG_TOPR { RA, RB, R3, DRC1 }
++#define ARG_TOPRL { RA, LIT, R3, DRC1 }
++#define ARG_FMAL { FA, FB, FMALIT, DFC1 }
++#define ARG_ATMEM { RA, ATMDISP, PRB }
++#define ARG_VUAMEM { FA, ATMDISP, PRB }
++#define ARG_DISP13 { DISP13, RC }
++
++/* The opcode table.
++
++ The format of the opcode table is:
++
++ NAME OPCODE MASK { OPERANDS }
++
++ NAME is the name of the instruction.
++
++ OPCODE is the instruction opcode.
++
++ MASK is the opcode mask; this is used to tell the disassembler
++ which bits in the actual opcode must match OPCODE.
++
++ OPERANDS is the list of operands.
++
++ The preceding macros merge the text of the OPCODE and MASK fields.
++
++ The disassembler reads the table in order and prints the first
++ instruction which matches, so this table is sorted to put more
++ specific instructions before more general instructions.
++
++ Otherwise, it is sorted by major opcode and minor function code.
++ */ ++ ++const struct sw64_opcode sw64_opcodes[] = { ++ { "sys_call/b", PCD(0x00,0x00), BASE, ARG_PCD }, ++ { "sys_call", PCD(0x00,0x01), BASE, ARG_PCD }, ++ ++ { "call", MEM(0x01), BASE, { RA, CPRB, JMPHINT } }, ++ { "ret", MEM(0x02), BASE, { RA, CPRB, RETHINT } }, ++ { "jmp", MEM(0x03), BASE, { RA, CPRB, JMPHINT } }, ++ { "br", BRA(0x04), BASE, { ZA, BDISP } }, ++ { "br", BRA(0x04), BASE, ARG_BRA }, ++ { "bsr", BRA(0x05), BASE, { ZA, BDISP } }, ++ { "bsr", BRA(0x05), BASE, ARG_BRA }, ++ { "memb", MFC(0x06,0x0000), BASE, ARG_NONE }, ++ { "imemb", MFC(0x06,0x0001), BASE, ARG_NONE }, ++ { "wmemb", MFC(0x06,0x0002), CORE4, ARG_NONE }, ++ { "rtc", MFC(0x06,0x0020), BASE, { RA, ZB } }, ++ { "rtc", MFC(0x06,0x0020), BASE, { RA, RB } }, ++ { "rcid", MFC(0x06,0x0040), BASE, { RA, ZB} }, ++ { "halt", MFC(0x06,0x0080), BASE, { ZA, ZB } }, ++ { "rd_f", MFC(0x06,0x1000), CORE3, { RA, ZB } }, ++ { "wr_f", MFC(0x06,0x1020), CORE3, { RA, ZB } }, ++ { "rtid", MFC(0x06,0x1040), BASE, { RA } }, ++ { "pri_rcsr", CSR(0x06,0xFE), CORE3, { RA, RPIINDEX, ZB } }, ++ { "pri_wcsr", CSR(0x06,0xFF), CORE3, { RA, RPIINDEX, ZB } }, ++ { "csrrs", CSR(0x06,0xFC), CORE4, { RA, RPIINDEX, RB } }, ++ { "csrrc", CSR(0x06,0xFD), CORE4, { RA, RPIINDEX, RB } }, ++ { "csrr", CSR(0x06,0xFE), CORE4, { RA, RPIINDEX, ZB } }, ++ { "csrw", CSR(0x06,0xFF), CORE4, { RA, RPIINDEX, ZB } }, ++ { "pri_ret", PRIRET(0x07,0x0), BASE, { RA } }, ++ { "pri_ret/b", PRIRET(0x07,0x1), BASE, { RA } }, ++ { "lldw", ATMEM(0x08,0x0), BASE, ARG_ATMEM }, ++ { "lldl", ATMEM(0x08,0x1), BASE, ARG_ATMEM }, ++ { "ldw_inc", ATMEM(0x08,0x2), CORE3, ARG_ATMEM }, ++ { "ldl_inc", ATMEM(0x08,0x3), CORE3, ARG_ATMEM }, ++ { "ldw_dec", ATMEM(0x08,0x4), CORE3, ARG_ATMEM }, ++ { "ldl_dec", ATMEM(0x08,0x5), CORE3, ARG_ATMEM }, ++ { "ldw_set", ATMEM(0x08,0x6), CORE3, ARG_ATMEM }, ++ { "ldl_set", ATMEM(0x08,0x7), CORE3, ARG_ATMEM }, ++ { "lstw", ATMEM(0x08,0x8), BASE, ARG_ATMEM }, ++ { "lstl", ATMEM(0x08,0x9), BASE, ARG_ATMEM }, ++ { "ldw_nc", ATMEM(0x08,0xA), BASE, ARG_ATMEM }, ++ { "ldl_nc", ATMEM(0x08,0xB), BASE, ARG_ATMEM }, ++ { "ldd_nc", ATMEM(0x08,0xC), BASE, ARG_VUAMEM }, ++ { "stw_nc", ATMEM(0x08,0xD), BASE, ARG_ATMEM }, ++ { "stl_nc", ATMEM(0x08,0xE), BASE, ARG_ATMEM }, ++ { "std_nc", ATMEM(0x08,0xF), BASE, ARG_VUAMEM }, ++ { "fillcs", MEM(0x09), BASE, ARG_PREFETCH }, ++ { "ldwe", MEM(0x09), BASE, ARG_FMEM }, ++ { "e_fillcs", MEM(0x0A), BASE, ARG_PREFETCH }, ++ { "ldse", MEM(0x0A), BASE, ARG_FMEM }, ++ { "fillcs_e", MEM(0x0B), BASE, ARG_PREFETCH }, ++ { "ldde", MEM(0x0B), BASE, ARG_FMEM }, ++ { "vlds", MEM(0x0C), BASE, ARG_FMEM }, ++ { "vldd", MEM(0x0D), BASE, ARG_FMEM }, ++ { "vsts", MEM(0x0E), BASE, ARG_FMEM }, ++ { "vstd", MEM(0x0F), BASE, ARG_FMEM }, ++ { "addw", OPR(0x10,0x00), BASE, ARG_OPR }, ++ { "addw", OPRL(0x12,0x00), BASE, ARG_OPRL }, ++ { "subw", OPR(0x10,0x01), BASE, ARG_OPR }, ++ { "subw", OPRL(0x12,0x01), BASE, ARG_OPRL }, ++ { "s4addw", OPR(0x10,0x02), BASE, ARG_OPR }, ++ { "s4addw", OPRL(0x12,0x02), BASE, ARG_OPRL }, ++ { "s4subw", OPR(0x10,0x03), BASE, ARG_OPR }, ++ { "s4subw", OPRL(0x12,0x03), BASE, ARG_OPRL }, ++ { "s8addw", OPR(0x10,0x04), BASE, ARG_OPR }, ++ { "s8addw", OPRL(0x12,0x04), BASE, ARG_OPRL }, ++ { "s8subw", OPR(0x10,0x05), BASE, ARG_OPR }, ++ { "s8subw", OPRL(0x12,0x05), BASE, ARG_OPRL }, ++ { "addl", OPR(0x10,0x08), BASE, ARG_OPR }, ++ { "addl", OPRL(0x12,0x08), BASE, ARG_OPRL }, ++ { "subl", OPR(0x10,0x09), BASE, ARG_OPR }, ++ { "subl", OPRL(0x12,0x09), BASE, ARG_OPRL }, ++ { "s4addl", OPR(0x10,0x0A), BASE, ARG_OPR }, ++ { 
"s4addl", OPRL(0x12,0x0A), BASE, ARG_OPRL }, ++ { "s4subl", OPR(0x10,0x0B), BASE, ARG_OPR }, ++ { "s4subl", OPRL(0x12,0x0B), BASE, ARG_OPRL }, ++ { "s8addl", OPR(0x10,0x0C), BASE, ARG_OPR }, ++ { "s8addl", OPRL(0x12,0x0C), BASE, ARG_OPRL }, ++ { "s8subl", OPR(0x10,0x0D), BASE, ARG_OPR }, ++ { "s8subl", OPRL(0x12,0x0D), BASE, ARG_OPRL }, ++ { "mulw", OPR(0x10,0x10), BASE, ARG_OPR }, ++ { "mulw", OPRL(0x12,0x10), BASE, ARG_OPRL }, ++ { "divw", OPR(0x10,0x11), CORE4, ARG_OPR }, ++ { "udivw", OPR(0x10,0x12), CORE4, ARG_OPR }, ++ { "remw", OPR(0x10,0x13), CORE4, ARG_OPR }, ++ { "uremw", OPR(0x10,0x14), CORE4, ARG_OPR }, ++ { "mull", OPR(0x10,0x18), BASE, ARG_OPR }, ++ { "mull", OPRL(0x12,0x18), BASE, ARG_OPRL }, ++ { "umulh", OPR(0x10,0x19), BASE, ARG_OPR }, ++ { "umulh", OPRL(0x12,0x19), BASE, ARG_OPRL }, ++ { "divl", OPR(0x10,0x1A), CORE4, ARG_OPR }, ++ { "udivl", OPR(0x10,0x1B), CORE4, ARG_OPR }, ++ { "reml", OPR(0x10,0x1C), CORE4, ARG_OPR }, ++ { "ureml", OPR(0x10,0x1D), CORE4, ARG_OPR }, ++ { "addpi", OPR(0x10,0x1E), CORE4, ARG_DISP13 }, ++ { "addpis", OPR(0x10,0x1F), CORE4, ARG_DISP13 }, ++ { "cmpeq", OPR(0x10,0x28), BASE, ARG_OPR }, ++ { "cmpeq", OPRL(0x12,0x28), BASE, ARG_OPRL }, ++ { "cmplt", OPR(0x10,0x29), BASE, ARG_OPR }, ++ { "cmplt", OPRL(0x12,0x29), BASE, ARG_OPRL }, ++ { "cmple", OPR(0x10,0x2A), BASE, ARG_OPR }, ++ { "cmple", OPRL(0x12,0x2A), BASE, ARG_OPRL }, ++ { "cmpult", OPR(0x10,0x2B), BASE, ARG_OPR }, ++ { "cmpult", OPRL(0x12,0x2B), BASE, ARG_OPRL }, ++ { "cmpule", OPR(0x10,0x2C), BASE, ARG_OPR }, ++ { "cmpule", OPRL(0x12,0x2C), BASE, ARG_OPRL }, ++ { "sbt", OPR(0x10,0x2D), CORE4, ARG_OPR }, ++ { "sbt", OPRL(0x12,0x2D), CORE4, ARG_OPRL }, ++ { "cbt", OPR(0x10,0x2E), CORE4, ARG_OPR }, ++ { "cbt", OPRL(0x12,0x2E), CORE4, ARG_OPRL }, ++ { "and", OPR(0x10,0x38), BASE, ARG_OPR }, ++ { "and", OPRL(0x12,0x38),BASE, ARG_OPRL }, ++ { "bic", OPR(0x10,0x39), BASE, ARG_OPR }, ++ { "bic", OPRL(0x12,0x39),BASE, ARG_OPRL }, ++ { "bis", OPR(0x10,0x3A), BASE, ARG_OPR }, ++ { "bis", OPRL(0x12,0x3A),BASE, ARG_OPRL }, ++ { "ornot", OPR(0x10,0x3B), BASE, ARG_OPR }, ++ { "ornot", OPRL(0x12,0x3B),BASE, ARG_OPRL }, ++ { "xor", OPR(0x10,0x3C), BASE, ARG_OPR }, ++ { "xor", OPRL(0x12,0x3C),BASE, ARG_OPRL }, ++ { "eqv", OPR(0x10,0x3D), BASE, ARG_OPR }, ++ { "eqv", OPRL(0x12,0x3D),BASE, ARG_OPRL }, ++ { "inslb", OPR(0x10,0x40), BASE, ARG_OPR }, ++ { "inslb", OPRL(0x12,0x40),BASE, ARG_OPRL }, ++ { "inslh", OPR(0x10,0x41), BASE, ARG_OPR }, ++ { "inslh", OPRL(0x12,0x41),BASE, ARG_OPRL }, ++ { "inslw", OPR(0x10,0x42), BASE, ARG_OPR }, ++ { "inslw", OPRL(0x12,0x42),BASE, ARG_OPRL }, ++ { "insll", OPR(0x10,0x43), BASE, ARG_OPR }, ++ { "insll", OPRL(0x12,0x43),BASE, ARG_OPRL }, ++ { "inshb", OPR(0x10,0x44), BASE, ARG_OPR }, ++ { "inshb", OPRL(0x12,0x44),BASE, ARG_OPRL }, ++ { "inshh", OPR(0x10,0x45), BASE, ARG_OPR }, ++ { "inshh", OPRL(0x12,0x45),BASE, ARG_OPRL }, ++ { "inshw", OPR(0x10,0x46), BASE, ARG_OPR }, ++ { "inshw", OPRL(0x12,0x46),BASE, ARG_OPRL }, ++ { "inshl", OPR(0x10,0x47), BASE, ARG_OPR }, ++ { "inshl", OPRL(0x12,0x47),BASE, ARG_OPRL }, ++ { "slll", OPR(0x10,0x48), CORE4, ARG_OPR }, ++ { "slll", OPRL(0x12,0x48),CORE4, ARG_OPRL }, ++ { "srll", OPR(0x10,0x49), CORE4, ARG_OPR }, ++ { "srll", OPRL(0x12,0x49),CORE4, ARG_OPRL }, ++ { "sral", OPR(0x10,0x4A), CORE4, ARG_OPR }, ++ { "sral", OPRL(0x12,0x4A),CORE4, ARG_OPRL }, ++ { "roll", OPR(0x10,0x4B), CORE4, ARG_OPR }, ++ { "roll", OPRL(0x12,0x4B),CORE4, ARG_OPRL }, ++ { "sllw", OPR(0x10,0x4C), CORE4, ARG_OPR }, ++ { "sllw", OPRL(0x12,0x4C),CORE4, 
ARG_OPRL }, ++ { "srlw", OPR(0x10,0x4D), CORE4, ARG_OPR }, ++ { "srlw", OPRL(0x12,0x4D),CORE4, ARG_OPRL }, ++ { "sraw", OPR(0x10,0x4E), CORE4, ARG_OPR }, ++ { "sraw", OPRL(0x12,0x4E),CORE4, ARG_OPRL }, ++ { "rolw", OPR(0x10,0x4F), CORE4, ARG_OPR }, ++ { "rolw", OPRL(0x12,0x4F),CORE4, ARG_OPRL }, ++ { "sll", OPR(0x10,0x48), BASE, ARG_OPR }, ++ { "sll", OPRL(0x12,0x48),BASE, ARG_OPRL }, ++ { "srl", OPR(0x10,0x49), BASE, ARG_OPR }, ++ { "srl", OPRL(0x12,0x49),BASE, ARG_OPRL }, ++ { "sra", OPR(0x10,0x4A), BASE, ARG_OPR }, ++ { "sra", OPRL(0x12,0x4A),BASE, ARG_OPRL }, ++ { "extlb", OPR(0x10,0x50), BASE, ARG_OPR }, ++ { "extlb", OPRL(0x12,0x50),BASE, ARG_OPRL }, ++ { "extlh", OPR(0x10,0x51), BASE, ARG_OPR }, ++ { "extlh", OPRL(0x12,0x51),BASE, ARG_OPRL }, ++ { "extlw", OPR(0x10,0x52), BASE, ARG_OPR }, ++ { "extlw", OPRL(0x12,0x52),BASE, ARG_OPRL }, ++ { "extll", OPR(0x10,0x53), BASE, ARG_OPR }, ++ { "extll", OPRL(0x12,0x53),BASE, ARG_OPRL }, ++ { "exthb", OPR(0x10,0x54), BASE, ARG_OPR }, ++ { "exthb", OPRL(0x12,0x54),BASE, ARG_OPRL }, ++ { "exthh", OPR(0x10,0x55), BASE, ARG_OPR }, ++ { "exthh", OPRL(0x12,0x55),BASE, ARG_OPRL }, ++ { "exthw", OPR(0x10,0x56), BASE, ARG_OPR }, ++ { "exthw", OPRL(0x12,0x56),BASE, ARG_OPRL }, ++ { "exthl", OPR(0x10,0x57), BASE, ARG_OPR }, ++ { "exthl", OPRL(0x12,0x57),BASE, ARG_OPRL }, ++ { "ctpop", OPR(0x10,0x58), BASE, ARG_OPRZ1 }, ++ { "ctlz", OPR(0x10,0x59), BASE, ARG_OPRZ1 }, ++ { "cttz", OPR(0x10,0x5A), BASE, ARG_OPRZ1 }, ++ { "revbh", OPR(0x10,0x5B), CORE4, ARG_OPRZ1 }, ++ { "revbw", OPR(0x10,0x5C), CORE4, ARG_OPRZ1 }, ++ { "revbl", OPR(0x10,0x5D), CORE4, ARG_OPRZ1 }, ++ { "casw", OPR(0x10,0x5E), CORE4, ARG_OPRCAS }, ++ { "casl", OPR(0x10,0x5F), CORE4, ARG_OPRCAS }, ++ { "masklb", OPR(0x10,0x60), BASE, ARG_OPR }, ++ { "masklb", OPRL(0x12,0x60),BASE, ARG_OPRL }, ++ { "masklh", OPR(0x10,0x61), BASE, ARG_OPR }, ++ { "masklh", OPRL(0x12,0x61),BASE, ARG_OPRL }, ++ { "masklw", OPR(0x10,0x62), BASE, ARG_OPR }, ++ { "masklw", OPRL(0x12,0x62),BASE, ARG_OPRL }, ++ { "maskll", OPR(0x10,0x63), BASE, ARG_OPR }, ++ { "maskll", OPRL(0x12,0x63),BASE, ARG_OPRL }, ++ { "maskhb", OPR(0x10,0x64), BASE, ARG_OPR }, ++ { "maskhb", OPRL(0x12,0x64),BASE, ARG_OPRL }, ++ { "maskhh", OPR(0x10,0x65), BASE, ARG_OPR }, ++ { "maskhh", OPRL(0x12,0x65),BASE, ARG_OPRL }, ++ { "maskhw", OPR(0x10,0x66), BASE, ARG_OPR }, ++ { "maskhw", OPRL(0x12,0x66),BASE, ARG_OPRL }, ++ { "maskhl", OPR(0x10,0x67), BASE, ARG_OPR }, ++ { "maskhl", OPRL(0x12,0x67),BASE, ARG_OPRL }, ++ { "zap", OPR(0x10,0x68), BASE, ARG_OPR }, ++ { "zap", OPRL(0x12,0x68),BASE, ARG_OPRL }, ++ { "zapnot", OPR(0x10,0x69), BASE, ARG_OPR }, ++ { "zapnot", OPRL(0x12,0x69),BASE, ARG_OPRL }, ++ { "sextb", OPR(0x10,0x6A), BASE, ARG_OPRZ1}, ++ { "sextb", OPRL(0x12,0x6A),BASE, ARG_OPRLZ1 }, ++ { "sexth", OPR(0x10,0x6B), BASE, ARG_OPRZ1 }, ++ { "sexth", OPRL(0x12,0x6B),BASE, ARG_OPRLZ1 }, ++ { "cmpgeb", OPR(0x10,0x6C), BASE, ARG_OPR }, ++ { "cmpgeb", OPRL(0x12,0x6C),BASE, ARG_OPRL }, ++ { "fimovs", OPR(0x10,0x70), BASE, { FA, ZB, RC } }, ++ { "fimovd", OPR(0x10,0x78), BASE, { FA, ZB, RC } }, ++ { "seleq", TOPR(0x11,0x0), BASE, ARG_TOPR }, ++ { "seleq", TOPRL(0x13,0x0),BASE, ARG_TOPRL }, ++ { "selge", TOPR(0x11,0x1), BASE, ARG_TOPR }, ++ { "selge", TOPRL(0x13,0x1),BASE, ARG_TOPRL }, ++ { "selgt", TOPR(0x11,0x2), BASE, ARG_TOPR }, ++ { "selgt", TOPRL(0x13,0x2),BASE, ARG_TOPRL }, ++ { "selle", TOPR(0x11,0x3), BASE, ARG_TOPR }, ++ { "selle", TOPRL(0x13,0x3),BASE, ARG_TOPRL }, ++ { "sellt", TOPR(0x11,0x4), BASE, ARG_TOPR }, ++ { "sellt", 
TOPRL(0x13,0x4),BASE, ARG_TOPRL }, ++ { "selne", TOPR(0x11,0x5), BASE, ARG_TOPR }, ++ { "selne", TOPRL(0x13,0x5),BASE, ARG_TOPRL }, ++ { "sellbc", TOPR(0x11,0x6), BASE, ARG_TOPR }, ++ { "sellbc", TOPRL(0x13,0x6),BASE, ARG_TOPRL }, ++ { "sellbs", TOPR(0x11,0x7), BASE, ARG_TOPR }, ++ { "sellbs", TOPRL(0x13,0x7),BASE, ARG_TOPRL }, ++ { "vlog", LOGX(0x14,0x00), BASE, ARG_FMA }, ++ ++ { "fadds", FP(0x18,0x00), BASE, ARG_FP }, ++ { "faddd", FP(0x18,0x01), BASE, ARG_FP }, ++ { "fsubs", FP(0x18,0x02), BASE, ARG_FP }, ++ { "fsubd", FP(0x18,0x03), BASE, ARG_FP }, ++ { "fmuls", FP(0x18,0x04), BASE, ARG_FP }, ++ { "fmuld", FP(0x18,0x05), BASE, ARG_FP }, ++ { "fdivs", FP(0x18,0x06), BASE, ARG_FP }, ++ { "fdivd", FP(0x18,0x07), BASE, ARG_FP }, ++ { "fsqrts", FP(0x18,0x08), BASE, ARG_FPZ1 }, ++ { "fsqrtd", FP(0x18,0x09), BASE, ARG_FPZ1 }, ++ { "fcmpeq", FP(0x18,0x10), BASE, ARG_FP }, ++ { "fcmple", FP(0x18,0x11), BASE, ARG_FP }, ++ { "fcmplt", FP(0x18,0x12), BASE, ARG_FP }, ++ { "fcmpun", FP(0x18,0x13), BASE, ARG_FP }, ++ ++ { "fcvtsd", FP(0x18,0x20), BASE, ARG_FPZ1 }, ++ { "fcvtds", FP(0x18,0x21), BASE, ARG_FPZ1 }, ++ { "fcvtdl_g", FP(0x18,0x22), BASE, ARG_FPZ1 }, ++ { "fcvtdl_p", FP(0x18,0x23), BASE, ARG_FPZ1 }, ++ { "fcvtdl_z", FP(0x18,0x24), BASE, ARG_FPZ1 }, ++ { "fcvtdl_n", FP(0x18,0x25), BASE, ARG_FPZ1 }, ++ { "fcvtdl", FP(0x18,0x27), BASE, ARG_FPZ1 }, ++ { "fcvtwl", FP(0x18,0x28), BASE, ARG_FPZ1 }, ++ { "fcvtlw", FP(0x18,0x29), BASE, ARG_FPZ1 }, ++ { "fcvtls", FP(0x18,0x2d), BASE, ARG_FPZ1 }, ++ { "fcvtld", FP(0x18,0x2f), BASE, ARG_FPZ1 }, ++ { "fcpys", FP(0x18,0x30), BASE, ARG_FP }, ++ { "fcpyse", FP(0x18,0x31), BASE, ARG_FP }, ++ { "fcpysn", FP(0x18,0x32), BASE, ARG_FP }, ++ { "ifmovs", FP(0x18,0x40), BASE, { RA, ZB, FC } }, ++ { "ifmovd", FP(0x18,0x41), BASE, { RA, ZB, FC } }, ++ { "rfpcr", FP(0x18,0x50), BASE, { FA, RBA, RCA } }, ++ { "wfpcr", FP(0x18,0x51), BASE, { FA, RBA, RCA } }, ++ { "setfpec0", FP(0x18,0x54), BASE, ARG_NONE }, ++ { "setfpec1", FP(0x18,0x55), BASE, ARG_NONE }, ++ { "setfpec2", FP(0x18,0x56), BASE, ARG_NONE }, ++ { "setfpec3", FP(0x18,0x57), BASE, ARG_NONE }, ++ { "frecs", FP(0x18,0x58), CORE4, ARG_FPZ1 }, ++ { "frecd", FP(0x18,0x59), CORE4, ARG_FPZ1 }, ++ { "fris", FP(0x18,0x5A), CORE4, ARG_FPZ1 }, ++ { "fris_g", FP(0x18,0x5B), CORE4, ARG_FPZ1 }, ++ { "fris_p", FP(0x18,0x5C), CORE4, ARG_FPZ1 }, ++ { "fris_z", FP(0x18,0x5D), CORE4, ARG_FPZ1 }, ++ { "fris_n", FP(0x18,0x5F), CORE4, ARG_FPZ1 }, ++ { "frid", FP(0x18,0x60), CORE4, ARG_FPZ1 }, ++ { "frid_g", FP(0x18,0x61), CORE4, ARG_FPZ1 }, ++ { "frid_p", FP(0x18,0x62), CORE4, ARG_FPZ1 }, ++ { "frid_z", FP(0x18,0x63), CORE4, ARG_FPZ1 }, ++ { "frid_n", FP(0x18,0x64), CORE4, ARG_FPZ1 }, ++ { "fmas", FMA(0x19,0x00), BASE, ARG_FMA }, ++ { "fmad", FMA(0x19,0x01), BASE, ARG_FMA }, ++ { "fmss", FMA(0x19,0x02), BASE, ARG_FMA }, ++ { "fmsd", FMA(0x19,0x03), BASE, ARG_FMA }, ++ { "fnmas", FMA(0x19,0x04), BASE, ARG_FMA }, ++ { "fnmad", FMA(0x19,0x05), BASE, ARG_FMA }, ++ { "fnmss", FMA(0x19,0x06), BASE, ARG_FMA }, ++ { "fnmsd", FMA(0x19,0x07), BASE, ARG_FMA }, ++ { "fseleq", FMA(0x19,0x10), BASE, ARG_FMA }, ++ { "fselne", FMA(0x19,0x11), BASE, ARG_FMA }, ++ { "fsellt", FMA(0x19,0x12), BASE, ARG_FMA }, ++ { "fselle", FMA(0x19,0x13), BASE, ARG_FMA }, ++ { "fselgt", FMA(0x19,0x14), BASE, ARG_FMA }, ++ { "fselge", FMA(0x19,0x15), BASE, ARG_FMA }, ++ { "vaddw", FP(0x1A,0x00), BASE, ARG_FP }, ++ { "vaddw", FP(0x1A,0x20), BASE, ARG_FPL }, ++ { "vsubw", FP(0x1A,0x01), BASE, ARG_FP }, ++ { "vsubw", FP(0x1A,0x21), BASE, ARG_FPL }, ++ { "vcmpgew", 
FP(0x1A,0x02), BASE, ARG_FP }, ++ { "vcmpgew", FP(0x1A,0x22), BASE, ARG_FPL }, ++ { "vcmpeqw", FP(0x1A,0x03), BASE, ARG_FP }, ++ { "vcmpeqw", FP(0x1A,0x23), BASE, ARG_FPL }, ++ { "vcmplew", FP(0x1A,0x04), BASE, ARG_FP }, ++ { "vcmplew", FP(0x1A,0x24), BASE, ARG_FPL }, ++ { "vcmpltw", FP(0x1A,0x05), BASE, ARG_FP }, ++ { "vcmpltw", FP(0x1A,0x25), BASE, ARG_FPL }, ++ { "vcmpulew", FP(0x1A,0x06), BASE, ARG_FP }, ++ { "vcmpulew", FP(0x1A,0x26), BASE, ARG_FPL }, ++ { "vcmpultw", FP(0x1A,0x07), BASE, ARG_FP }, ++ { "vcmpultw", FP(0x1A,0x27), BASE, ARG_FPL }, ++ ++ { "vsllw", FP(0x1A,0x08), BASE, ARG_FP }, ++ { "vsllw", FP(0x1A,0x28), BASE, ARG_FPL }, ++ { "vsrlw", FP(0x1A,0x09), BASE, ARG_FP }, ++ { "vsrlw", FP(0x1A,0x29), BASE, ARG_FPL }, ++ { "vsraw", FP(0x1A,0x0A), BASE, ARG_FP }, ++ { "vsraw", FP(0x1A,0x2A), BASE, ARG_FPL }, ++ { "vrolw", FP(0x1A,0x0B), BASE, ARG_FP }, ++ { "vrolw", FP(0x1A,0x2B), BASE, ARG_FPL }, ++ { "sllow", FP(0x1A,0x0C), BASE, ARG_FP }, ++ { "sllow", FP(0x1A,0x2C), BASE, ARG_FPL }, ++ { "srlow", FP(0x1A,0x0D), BASE, ARG_FP }, ++ { "srlow", FP(0x1A,0x2D), BASE, ARG_FPL }, ++ { "vaddl", FP(0x1A,0x0E), BASE, ARG_FP }, ++ { "vaddl", FP(0x1A,0x2E), BASE, ARG_FPL }, ++ { "vsubl", FP(0x1A,0x0F), BASE, ARG_FP }, ++ { "vsubl", FP(0x1A,0x2F), BASE, ARG_FPL }, ++ { "vsllb", FP(0x1A,0x10), CORE4, ARG_FP }, ++ { "vsllb", FP(0x1A,0x30), CORE4, ARG_FPL }, ++ { "vsrlb", FP(0x1A,0x11), CORE4, ARG_FP }, ++ { "vsrlb", FP(0x1A,0x31), CORE4, ARG_FPL }, ++ { "vsrab", FP(0x1A,0x12), CORE4, ARG_FP }, ++ { "vsrab", FP(0x1A,0x32), CORE4, ARG_FPL }, ++ { "vrolb", FP(0x1A,0x13), CORE4, ARG_FP }, ++ { "vrolb", FP(0x1A,0x33), CORE4, ARG_FPL }, ++ { "vsllh", FP(0x1A,0x14), CORE4, ARG_FP }, ++ { "vsllh", FP(0x1A,0x34), CORE4, ARG_FPL }, ++ { "vsrlh", FP(0x1A,0x15), CORE4, ARG_FP }, ++ { "vsrlh", FP(0x1A,0x35), CORE4, ARG_FPL }, ++ { "vsrah", FP(0x1A,0x16), CORE4, ARG_FP }, ++ { "vsrah", FP(0x1A,0x36), CORE4, ARG_FPL }, ++ { "vrolh", FP(0x1A,0x17), CORE4, ARG_FP }, ++ { "vrolh", FP(0x1A,0x37), CORE4, ARG_FPL }, ++ { "ctpopow", FP(0x1A,0x18), BASE, { FA, ZB, DFC1 } }, ++ { "ctlzow", FP(0x1A,0x19), BASE, { FA, ZB, DFC1 } }, ++ { "vslll", FP(0x1A,0x1A), CORE4, ARG_FP }, ++ { "vslll", FP(0x1A,0x3A), CORE4, ARG_FPL }, ++ { "vsrll", FP(0x1A,0x1B), CORE4, ARG_FP }, ++ { "vsrll", FP(0x1A,0x3B), CORE4, ARG_FPL }, ++ { "vsral", FP(0x1A,0x1C), CORE4, ARG_FP }, ++ { "vsral", FP(0x1A,0x3C), CORE4, ARG_FPL }, ++ { "vroll", FP(0x1A,0x1D), CORE4, ARG_FP }, ++ { "vroll", FP(0x1A,0x3D), CORE4, ARG_FPL }, ++ { "vmaxb", FP(0x1A,0x1E), CORE4, ARG_FP }, ++ { "vminb", FP(0x1A,0x1F), CORE4, ARG_FP }, ++ { "vucaddw", FP(0x1A,0x40), BASE, ARG_FP }, ++ { "vucaddw", FP(0x1A,0x60), BASE, ARG_FPL }, ++ { "vucsubw", FP(0x1A,0x41), BASE, ARG_FP }, ++ { "vucsubw", FP(0x1A,0x61), BASE, ARG_FPL }, ++ { "vucaddh", FP(0x1A,0x42), BASE, ARG_FP }, ++ { "vucaddh", FP(0x1A,0x62), BASE, ARG_FPL }, ++ { "vucsubh", FP(0x1A,0x43), BASE, ARG_FP }, ++ { "vucsubh", FP(0x1A,0x63), BASE, ARG_FPL }, ++ { "vucaddb", FP(0x1A,0x44), BASE, ARG_FP }, ++ { "vucaddb", FP(0x1A,0x64), BASE, ARG_FPL }, ++ { "vucsubb", FP(0x1A,0x45), BASE, ARG_FP }, ++ { "vucsubb", FP(0x1A,0x65), BASE, ARG_FPL }, ++ { "sraow", FP(0x1A,0x46), CORE4, ARG_FP }, ++ { "sraow", FP(0x1A,0x66), CORE4, ARG_FPL }, ++ { "vsumw", FP(0x1A,0x47), CORE4, ARG_FPZ1 }, ++ { "vsuml", FP(0x1A,0x48), CORE4, ARG_FPZ1 }, ++ { "vcmpueqb", FP(0x1A,0x4B), CORE4, ARG_FP }, ++ { "vcmpueqb", FP(0x1A,0x6B), CORE4, ARG_FPL }, ++ { "vcmpugtb", FP(0x1A,0x4C), CORE4, ARG_FP }, ++ { "vcmpugtb", FP(0x1A,0x6C), 
CORE4, ARG_FPL }, ++ { "vmaxh", FP(0x1A,0x50), CORE4, ARG_FP }, ++ { "vminh", FP(0x1A,0x51), CORE4, ARG_FP }, ++ { "vmaxw", FP(0x1A,0x52), CORE4, ARG_FP }, ++ { "vminw", FP(0x1A,0x53), CORE4, ARG_FP }, ++ { "vmaxl", FP(0x1A,0x54), CORE4, ARG_FP }, ++ { "vminl", FP(0x1A,0x55), CORE4, ARG_FP }, ++ { "vumaxb", FP(0x1A,0x56), CORE4, ARG_FP }, ++ { "vuminb", FP(0x1A,0x57), CORE4, ARG_FP }, ++ { "vumaxh", FP(0x1A,0x58), CORE4, ARG_FP }, ++ { "vuminh", FP(0x1A,0x59), CORE4, ARG_FP }, ++ { "vumaxw", FP(0x1A,0x5A), CORE4, ARG_FP }, ++ { "vuminw", FP(0x1A,0x5B), CORE4, ARG_FP }, ++ { "vumaxl", FP(0x1A,0x5C), CORE4, ARG_FP }, ++ { "vuminl", FP(0x1A,0x5D), CORE4, ARG_FP }, ++ { "vsm3msw", FP(0x1A,0x67), CORE4, ARG_FP }, ++ { "vsm4key", FP(0x1A,0x68), CORE4, ARG_FPL }, ++ { "vsm4r", FP(0x1A,0x69), CORE4, ARG_FP }, ++ { "vbinvw", FP(0x1A,0x6A), CORE4, ARG_FPZ1 }, ++ { "vadds", FP(0x1A,0x80), BASE, ARG_FP }, ++ { "vaddd", FP(0x1A,0x81), BASE, ARG_FP }, ++ { "vsubs", FP(0x1A,0x82), BASE, ARG_FP }, ++ { "vsubd", FP(0x1A,0x83), BASE, ARG_FP }, ++ { "vmuls", FP(0x1A,0x84), BASE, ARG_FP }, ++ { "vmuld", FP(0x1A,0x85), BASE, ARG_FP }, ++ { "vdivs", FP(0x1A,0x86), BASE, ARG_FP }, ++ { "vdivd", FP(0x1A,0x87), BASE, ARG_FP }, ++ { "vsqrts", FP(0x1A,0x88), BASE, ARG_FPZ1 }, ++ { "vsqrtd", FP(0x1A,0x89), BASE, ARG_FPZ1 }, ++ { "vfcmpeq", FP(0x1A,0x8C), BASE, ARG_FP }, ++ { "vfcmple", FP(0x1A,0x8D), BASE, ARG_FP }, ++ { "vfcmplt", FP(0x1A,0x8E), BASE, ARG_FP }, ++ { "vfcmpun", FP(0x1A,0x8F), BASE, ARG_FP }, ++ { "vcpys", FP(0x1A,0x90), BASE, ARG_FP }, ++ { "vcpyse", FP(0x1A,0x91), BASE, ARG_FP }, ++ { "vcpysn", FP(0x1A,0x92), BASE, ARG_FP }, ++ { "vsums", FP(0x1A,0x93), CORE4, ARG_FPZ1 }, ++ { "vsumd", FP(0x1A,0x94), CORE4, ARG_FPZ1 }, ++ { "vfcvtsd", FP(0x1A,0x95), CORE4, ARG_FPZ1 }, ++ { "vfcvtds", FP(0x1A,0x96), CORE4, ARG_FPZ1 }, ++ { "vfcvtls", FP(0x1A,0x99), CORE4, ARG_FPZ1 }, ++ { "vfcvtld", FP(0x1A,0x9A), CORE4, ARG_FPZ1 }, ++ { "vfcvtdl", FP(0x1A,0x9B), CORE4, ARG_FPZ1 }, ++ { "vfcvtdl_g", FP(0x1A,0x9C), CORE4, ARG_FPZ1 }, ++ { "vfcvtdl_p", FP(0x1A,0x9D), CORE4, ARG_FPZ1 }, ++ { "vfcvtdl_z", FP(0x1A,0x9E), CORE4, ARG_FPZ1 }, ++ { "vfcvtdl_n", FP(0x1A,0x9F), CORE4, ARG_FPZ1 }, ++ { "vfris", FP(0x1A,0xA0), CORE4, ARG_FPZ1 }, ++ { "vfris_g", FP(0x1A,0xA1), CORE4, ARG_FPZ1 }, ++ { "vfris_p", FP(0x1A,0xA2), CORE4, ARG_FPZ1 }, ++ { "vfris_z", FP(0x1A,0xA3), CORE4, ARG_FPZ1 }, ++ { "vfris_n", FP(0x1A,0xA4), CORE4, ARG_FPZ1 }, ++ { "vfrid", FP(0x1A,0xA5), CORE4, ARG_FPZ1 }, ++ { "vfrid_g", FP(0x1A,0xA6), CORE4, ARG_FPZ1 }, ++ { "vfrid_p", FP(0x1A,0xA7), CORE4, ARG_FPZ1 }, ++ { "vfrid_z", FP(0x1A,0xA8), CORE4, ARG_FPZ1 }, ++ { "vfrid_n", FP(0x1A,0xA9), CORE4, ARG_FPZ1 }, ++ { "vfrecs", FP(0x1A,0xAA), CORE4, ARG_FPZ1 }, ++ { "vfrecd", FP(0x1A,0xAB), CORE4, ARG_FPZ1 }, ++ { "vmaxs", FP(0x1A,0xAC), CORE4, ARG_FP }, ++ { "vmins", FP(0x1A,0xAD), CORE4, ARG_FP }, ++ { "vmaxd", FP(0x1A,0xAE), CORE4, ARG_FP }, ++ { "vmind", FP(0x1A,0xAF), CORE4, ARG_FP }, ++ { "vmas", FMA(0x1B,0x00), BASE, ARG_FMA }, ++ { "vmad", FMA(0x1B,0x01), BASE, ARG_FMA }, ++ { "vmss", FMA(0x1B,0x02), BASE, ARG_FMA }, ++ { "vmsd", FMA(0x1B,0x03), BASE, ARG_FMA }, ++ { "vnmas", FMA(0x1B,0x04), BASE, ARG_FMA }, ++ { "vnmad", FMA(0x1B,0x05), BASE, ARG_FMA }, ++ { "vnmss", FMA(0x1B,0x06), BASE, ARG_FMA }, ++ { "vnmsd", FMA(0x1B,0x07), BASE, ARG_FMA }, ++ { "vfseleq", FMA(0x1B,0x10), BASE, ARG_FMA }, ++ { "vfsellt", FMA(0x1B,0x12), BASE, ARG_FMA }, ++ { "vfselle", FMA(0x1B,0x13), BASE, ARG_FMA }, ++ { "vseleqw", FMA(0x1B,0x18), BASE, ARG_FMA }, ++ { 
"vseleqw", FMA(0x1B,0x38), BASE, ARG_FMAL }, ++ { "vsellbcw", FMA(0x1B,0x19), BASE, ARG_FMA }, ++ { "vsellbcw", FMA(0x1B,0x39), BASE, ARG_FMAL }, ++ { "vselltw", FMA(0x1B,0x1A), BASE, ARG_FMA }, ++ { "vselltw", FMA(0x1B,0x3A), BASE, ARG_FMAL }, ++ { "vsellew", FMA(0x1B,0x1B), BASE, ARG_FMA }, ++ { "vsellew", FMA(0x1B,0x3B), BASE, ARG_FMAL }, ++ { "vinsw", FMA(0x1B,0x20), BASE, ARG_FMAL }, ++ { "vinsf", FMA(0x1B,0x21), BASE, ARG_FMAL }, ++ { "vextw", FMA(0x1B,0x22), BASE, { FA, FMALIT, DFC1 }}, ++ { "vextf", FMA(0x1B,0x23), BASE, { FA, FMALIT, DFC1 }}, ++ { "vcpyw", FMA(0x1B,0x24), BASE, { FA, DFC1 }}, ++ { "vcpyf", FMA(0x1B,0x25), BASE, { FA, DFC1 }}, ++ { "vconw", FMA(0x1B,0x26), BASE, ARG_FMA }, ++ { "vshfw", FMA(0x1B,0x27), BASE, ARG_FMA }, ++ { "vcons", FMA(0x1B,0x28), BASE, ARG_FMA }, ++ { "vcond", FMA(0x1B,0x29), BASE, ARG_FMA }, ++ { "vinsb", FMA(0x1B,0x2A), CORE4, ARG_FMAL }, ++ { "vinsh", FMA(0x1B,0x2B), CORE4, ARG_FMAL }, ++ { "vinsectlh", FMA(0x1B,0x2C), CORE4, ARG_FMA }, ++ { "vinsectlw", FMA(0x1B,0x2D), CORE4, ARG_FMA }, ++ { "vinsectll", FMA(0x1B,0x2E), CORE4, ARG_FMA }, ++ { "vinsectlb", FMA(0x1B,0x2F), CORE4, ARG_FMA }, ++ { "vshfq", FMA(0x1B,0x30), CORE4, ARG_FMAL }, ++ { "vshfqb", FMA(0x1B,0x31), CORE4, ARG_FMA }, ++ { "vcpyb", FMA(0x1B,0x32), CORE4, { FA, DFC1 }}, ++ { "vcpyh", FMA(0x1B,0x33), CORE4, { FA, DFC1 }}, ++ { "vsm3r", FMA(0x1B,0x34), CORE4, ARG_FMAL }, ++ { "vfcvtsh", FMA(0x1B,0x35), CORE4, ARG_FMAL }, ++ { "vfcvths", FMA(0x1B,0x36), CORE4, {FA, FMALIT, FC} }, ++ { "vldw_u", ATMEM(0x1C,0x0), BASE, ARG_VUAMEM }, ++ { "vstw_u", ATMEM(0x1C,0x1), BASE, ARG_VUAMEM }, ++ { "vlds_u", ATMEM(0x1C,0x2), BASE, ARG_VUAMEM }, ++ { "vsts_u", ATMEM(0x1C,0x3), BASE, ARG_VUAMEM }, ++ { "vldd_u", ATMEM(0x1C,0x4), BASE, ARG_VUAMEM }, ++ { "vstd_u", ATMEM(0x1C,0x5), BASE, ARG_VUAMEM }, ++ { "vstw_ul", ATMEM(0x1C,0x8), BASE, ARG_VUAMEM }, ++ { "vstw_uh", ATMEM(0x1C,0x9), BASE, ARG_VUAMEM }, ++ { "vsts_ul", ATMEM(0x1C,0xA), BASE, ARG_VUAMEM }, ++ { "vsts_uh", ATMEM(0x1C,0xB), BASE, ARG_VUAMEM }, ++ { "vstd_ul", ATMEM(0x1C,0xC), BASE, ARG_VUAMEM }, ++ { "vstd_uh", ATMEM(0x1C,0xD), BASE, ARG_VUAMEM }, ++ { "vldd_nc", ATMEM(0x1C,0xE), BASE, ARG_VUAMEM }, ++ { "vstd_nc", ATMEM(0x1C,0xF), BASE, ARG_VUAMEM }, ++ { "lbr", BRA(0x1D), CORE4, { BDISP26 }}, ++ { "flushd", MEM(0x20), BASE, ARG_PREFETCH }, ++ { "ldbu", MEM(0x20), BASE, ARG_MEM }, ++ { "evictdg", MEM(0x21), BASE, ARG_PREFETCH }, ++ { "ldhu", MEM(0x21), BASE, ARG_MEM }, ++ { "s_fillcs", MEM(0x22), BASE, ARG_PREFETCH }, ++ { "ldw", MEM(0x22), BASE, ARG_MEM }, ++ { "s_fillde", MEM(0x23), BASE, ARG_PREFETCH }, ++ { "ldl", MEM(0x23), BASE, ARG_MEM }, ++ { "evictdl", MEM(0x24), BASE, ARG_PREFETCH }, ++ { "ldl_u", MEM(0x24), BASE, ARG_MEM }, ++ { "pri_ldw/p", HWMEM(0x25,0x0), BASE, ARG_HWMEM }, ++ { "pri_ldw/v", HWMEM(0x25,0x8), BASE, ARG_HWMEM }, ++ { "pri_ldl/p", HWMEM(0x25,0x1), BASE, ARG_HWMEM }, ++ { "pri_ldl/v", HWMEM(0x25,0x9), BASE, ARG_HWMEM }, ++ { "fillde", MEM(0x26), BASE, ARG_PREFETCH }, ++ { "flds", MEM(0x26), BASE, ARG_FMEM }, ++ { "fillde_e", MEM(0x27), BASE, ARG_PREFETCH }, ++ { "fldd", MEM(0x27), BASE, ARG_FMEM }, ++ ++ { "stb", MEM(0x28), BASE, ARG_MEM }, ++ { "sth", MEM(0x29), BASE, ARG_MEM }, ++ { "stw", MEM(0x2A), BASE, ARG_MEM }, ++ { "stl", MEM(0x2B), BASE, ARG_MEM }, ++ { "stl_u", MEM(0x2C), BASE, ARG_MEM }, ++ { "pri_stw/p", HWMEM(0x2D,0x0), BASE, ARG_HWMEM }, ++ { "pri_stw/v", HWMEM(0x2D,0x8), BASE, ARG_HWMEM }, ++ { "pri_stl/p", HWMEM(0x2D,0x1), BASE, ARG_HWMEM }, ++ { "pri_stl/v", HWMEM(0x2D,0x9), BASE, 
ARG_HWMEM }, ++ { "fsts", MEM(0x2E), BASE, ARG_FMEM }, ++ { "fstd", MEM(0x2F), BASE, ARG_FMEM }, ++ { "beq", BRA(0x30), BASE, ARG_BRA }, ++ { "bne", BRA(0x31), BASE, ARG_BRA }, ++ { "blt", BRA(0x32), BASE, ARG_BRA }, ++ { "ble", BRA(0x33), BASE, ARG_BRA }, ++ { "bgt", BRA(0x34), BASE, ARG_BRA }, ++ { "bge", BRA(0x35), BASE, ARG_BRA }, ++ { "blbc", BRA(0x36), BASE, ARG_BRA }, ++ { "blbs", BRA(0x37), BASE, ARG_BRA }, ++ ++ { "fbeq", BRA(0x38), BASE, ARG_FBRA }, ++ { "fbne", BRA(0x39), BASE, ARG_FBRA }, ++ { "fblt", BRA(0x3A), BASE, ARG_FBRA }, ++ { "fble", BRA(0x3B), BASE, ARG_FBRA }, ++ { "fbgt", BRA(0x3C), BASE, ARG_FBRA }, ++ { "fbge", BRA(0x3D), BASE, ARG_FBRA }, ++ { "ldi", MEM(0x3E), BASE, ARG_MEM }, ++ { "ldih", MEM(0x3F), BASE, ARG_MEM }, ++}; ++ ++const unsigned sw64_num_opcodes = sizeof(sw64_opcodes) / sizeof(*sw64_opcodes); ++ ++/* OSF register names. */ ++ ++static const char * const osf_regnames[64] = { ++ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", ++ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", ++ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", ++ "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero", ++ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", ++ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", ++ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", ++ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31" ++}; ++ ++/* VMS register names. */ ++ ++static const char * const vms_regnames[64] = { ++ "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", ++ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", ++ "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23", ++ "R24", "AI", "RA", "PV", "AT", "FP", "SP", "RZ", ++ "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", ++ "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", ++ "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23", ++ "F24", "F25", "F26", "F27", "F28", "F29", "F30", "FZ" ++}; ++ ++int print_insn_sw64(bfd_vma memaddr, struct disassemble_info *info) ++{ ++ static const struct sw64_opcode *opcode_index[SW_NOPS + 1]; ++ const char * const * regnames; ++ const struct sw64_opcode *opcode, *opcode_end; ++ const unsigned char *opindex; ++ unsigned insn, op, isa_mask; ++ int need_comma; ++ ++ /* Initialize the majorop table the first time through */ ++ if (!opcode_index[0]) { ++ opcode = sw64_opcodes; ++ opcode_end = opcode + sw64_num_opcodes; ++ ++ for (op = 0; op < SW_NOPS; ++op) { ++ opcode_index[op] = opcode; ++ if ((SW_LITOP (opcode->opcode) != 0x10) && (SW_LITOP (opcode->opcode) != 0x11)) { ++ while (opcode < opcode_end && op == SW_OP (opcode->opcode)) ++ ++opcode; ++ } else { ++ while (opcode < opcode_end && op == SW_LITOP (opcode->opcode)) ++ ++opcode; ++ } ++ } ++ opcode_index[op] = opcode; ++ } ++ ++ if (info->flavour == bfd_target_evax_flavour) ++ regnames = vms_regnames; ++ else ++ regnames = osf_regnames; ++ isa_mask = SW_OPCODE_NOHMCODE; ++ switch (info->mach) { ++ case bfd_mach_sw64_core3: ++ isa_mask |= SW_OPCODE_BASE | SW_OPCODE_CORE3; ++ break; ++ case bfd_mach_sw64_core4: ++ isa_mask |= SW_OPCODE_BASE | SW_OPCODE_CORE4; ++ break; ++ } ++ ++ /* Read the insn into a host word */ ++ { ++ bfd_byte buffer[4]; ++ int status = (*info->read_memory_func) (memaddr, buffer, 4, info); ++ if (status != 0) { ++ (*info->memory_error_func) (status, memaddr, info); ++ return -1; ++ } ++ insn = bfd_getl32 (buffer); ++ } ++ ++ /* Get the major opcode of the instruction. 
*/ ++ if ((SW_LITOP (insn) == 0x10) || (SW_LITOP (insn) == 0x11)) ++ op = SW_LITOP (insn); ++ else if ((SW_OP(insn) & 0x3C) == 0x14 ) ++ op = 0x14; ++ else ++ op = SW_OP (insn); ++ ++ /* Find the first match in the opcode table. */ ++ opcode_end = opcode_index[op + 1]; ++ for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode) { ++ if ((insn ^ opcode->opcode) & opcode->mask) ++ continue; ++ ++ if (!(opcode->flags & isa_mask)) ++ continue; ++ ++ /* Make two passes over the operands. First see if any of them ++ have extraction functions, and, if they do, make sure the ++ instruction is valid. */ ++ { ++ int invalid = 0; ++ for (opindex = opcode->operands; *opindex != 0; opindex++) { ++ const struct sw64_operand *operand = sw64_operands + *opindex; ++ if (operand->extract) ++ (*operand->extract) (insn, &invalid); ++ } ++ if (invalid) ++ continue; ++ } ++ ++ /* The instruction is valid. */ ++ goto found; ++ } ++ ++ /* No instruction found */ ++ (*info->fprintf_func) (info->stream, ".long %#08x", insn); ++ ++ return 4; ++ ++found: ++ if (!strncmp("sys_call",opcode->name,8)) { ++ if (insn & (0x1 << 25)) ++ (*info->fprintf_func) (info->stream, "%s", "sys_call"); ++ else ++ (*info->fprintf_func) (info->stream, "%s", "sys_call/b"); ++ } else ++ (*info->fprintf_func) (info->stream, "%s", opcode->name); ++ ++ /* get zz[7:6] and zz[5:0] to form truth for vlog */ ++ if (!strcmp(opcode->name, "vlog")) ++ { ++ unsigned int truth; ++ char tr[4]; ++ truth = (SW_OP(insn) & 3) << 6; ++ truth = truth | ((insn & 0xFC00) >> 10); ++ sprintf(tr,"%x",truth); ++ (*info->fprintf_func) (info->stream, "%s", tr); ++ } ++ if (opcode->operands[0] != 0) ++ (*info->fprintf_func) (info->stream, "\t"); ++ ++ /* Now extract and print the operands. */ ++ need_comma = 0; ++ for (opindex = opcode->operands; *opindex != 0; opindex++) { ++ const struct sw64_operand *operand = sw64_operands + *opindex; ++ int value; ++ ++ /* Operands that are marked FAKE are simply ignored. We ++ already made sure that the extract function considered ++ the instruction to be valid. */ ++ if ((operand->flags & SW_OPERAND_FAKE) != 0) ++ continue; ++ ++ /* Extract the value from the instruction. */ ++ if (operand->extract) ++ value = (*operand->extract) (insn, (int *) NULL); ++ else { ++ value = (insn >> operand->shift) & ((1 << operand->bits) - 1); ++ if (operand->flags & SW_OPERAND_SIGNED) { ++ int signbit = 1 << (operand->bits - 1); ++ value = (value ^ signbit) - signbit; ++ } ++ } ++ ++ if (need_comma && ++ ((operand->flags & (SW_OPERAND_PARENS | SW_OPERAND_COMMA)) ++ != SW_OPERAND_PARENS)) { ++ (*info->fprintf_func) (info->stream, ","); ++ } ++ if (operand->flags & SW_OPERAND_PARENS) ++ (*info->fprintf_func) (info->stream, "("); ++ ++ /* Print the operand as directed by the flags. 
*/ ++ if (operand->flags & SW_OPERAND_IR) ++ (*info->fprintf_func) (info->stream, "%s", regnames[value]); ++ else if (operand->flags & SW_OPERAND_FPR) ++ (*info->fprintf_func) (info->stream, "%s", regnames[value + 32]); ++ else if (operand->flags & SW_OPERAND_RELATIVE) ++ (*info->print_address_func) (memaddr + 4 + value, info); ++ else if (operand->flags & SW_OPERAND_SIGNED) ++ (*info->fprintf_func) (info->stream, "%d", value); ++ else ++ (*info->fprintf_func) (info->stream, "%#x", value); ++ ++ if (operand->flags & SW_OPERAND_PARENS) ++ (*info->fprintf_func) (info->stream, ")"); ++ need_comma = 1; ++ } ++ ++ return 4; ++} +diff --git a/gdb-xml/sw64-core.xml b/gdb-xml/sw64-core.xml +new file mode 100644 +index 0000000000..9498715e37 +--- /dev/null ++++ b/gdb-xml/sw64-core.xml +@@ -0,0 +1,43 @@ ++<!-- 43 lines of gdb feature/reg markup describing the SW64 core registers; the XML elements were lost in extraction --> +diff --git a/hw/Kconfig b/hw/Kconfig +index 9ca7b38c31..b5b9da4b80 100644 +--- a/hw/Kconfig ++++ b/hw/Kconfig +@@ -67,6 +67,7 @@ source sparc/Kconfig + source sparc64/Kconfig + source tricore/Kconfig + source xtensa/Kconfig ++source sw64/Kconfig + + # Symbols used by multiple targets + config TEST_DEVICES +diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig +index e07d3204eb..d528f5ef93 100644 +--- a/hw/acpi/Kconfig ++++ b/hw/acpi/Kconfig +@@ -78,3 +78,11 @@ config ACPI_ERST + config ACPI_CXL + bool + depends on ACPI ++ ++config ACPI_SW64_PM ++ bool ++ depends on ACPI ++ ++config GPIO_SUNWAY ++ bool ++ depends on ACPI +diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c +index af66bde0f5..0d4dea266a 100644 +--- a/hw/acpi/aml-build.c ++++ b/hw/acpi/aml-build.c +@@ -1490,6 +1490,18 @@ Aml *aml_dword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, + isa_ranges); + } + ++Aml *aml_qword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, ++ AmlDecode dec, AmlISARanges isa_ranges, ++ uint64_t addr_gran, uint64_t addr_min, ++ uint64_t addr_max, uint64_t addr_trans, ++ uint64_t len) ++ ++{ ++ return aml_qword_as_desc(AML_IO_RANGE, min_fixed, max_fixed, dec, ++ addr_gran, addr_min, addr_max, addr_trans, len, ++ isa_ranges); ++} ++ + /* + * ACPI 1.0b: 6.4.3.5.4 ASL Macros for DWORD Address Space Descriptor + * +diff --git a/hw/acpi/gpio_sunway.c b/hw/acpi/gpio_sunway.c +new file mode 100644 +index 0000000000..cdc956c7b9 +--- /dev/null ++++ b/hw/acpi/gpio_sunway.c +@@ -0,0 +1,257 @@ ++/* ++ * Copyright (c) 2024 Wxiat Corporation ++ * Written by Wuyacheng ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++ * version 2 or later, as published by the Free Software Foundation.
++ */ ++ ++// SPDX-License-Identifier: GPL-2.0+ ++ ++#include "qemu/osdep.h" ++#include "qemu/log.h" ++#include "exec/address-spaces.h" ++#include "migration/vmstate.h" ++#include "hw/sw64/gpio.h" ++#include "hw/irq.h" ++ ++static void sw64_gpio_update_int(SW64GPIOState *s) ++{ ++ qemu_set_irq(s->irq[0], 1); ++} ++ ++static void sw64_gpio_set(void *opaque, int line, int level) ++{ ++ SW64GPIOState *s = SW64_GPIO(opaque); ++ ++ sw64_gpio_update_int(s); ++} ++ ++static void sw64_gpio_write(void *opaque, hwaddr offset, uint64_t reg_value, ++ unsigned size) ++{ ++ SW64GPIOState *s = SW64_GPIO(opaque); ++ reg_value = (uint32_t)reg_value; ++ ++ switch (offset) { ++ case GPIO_SWPORTA_DR: ++ s->padr = reg_value; ++ break; ++ case GPIO_SWPORTA_DDR: ++ s->paddr = reg_value; ++ break; ++ case GPIO_INTEN: ++ s->inter = reg_value; ++ break; ++ case GPIO_INTMASK: ++ s->intmr = reg_value; ++ break; ++ case GPIO_INTTYPE_LEVEL: ++ s->intlr = reg_value; ++ break; ++ case GPIO_INTTYPE_POLA: ++ s->intpr = reg_value; ++ break; ++ case GPIO_INTTYPE_STATUS: ++ s->intsr = reg_value; ++ break; ++ case GPIO_RAW_INTTYPE_STATUS: ++ s->rintsr = reg_value; ++ break; ++ case GPIO_DEB_ENABLE: ++ s->deber = reg_value; ++ break; ++ case GPIO_CLEAN_INT: ++ s->clintr = reg_value; ++ break; ++ case GPIO_EXT_PORTA: ++ s->expar = reg_value; ++ break; ++ case GPIO_SYNC_LEVEL: ++ s->synlr = reg_value; ++ break; ++ case GPIO_ID_CODE: ++ s->idcr = reg_value; ++ break; ++ case GPIO_VERSION: ++ s->versionr = reg_value; ++ break; ++ case GPIO_CONF_R1: ++ s->conf1r = reg_value; ++ break; ++ case GPIO_CONF_R2: ++ s->conf2r = reg_value; ++ break; ++ default: ++ qemu_log_mask(LOG_GUEST_ERROR, "sw64-gpio: bad register at offset 0x%" HWADDR_PRIx "\n", offset); ++ } ++ ++ return; ++} ++ ++static uint64_t sw64_gpio_read(void *opaque, hwaddr offset, unsigned size) ++{ ++ SW64GPIOState *s = SW64_GPIO(opaque); ++ uint32_t reg_value = 0; ++ ++ switch (offset) { ++ case GPIO_SWPORTA_DR: ++ reg_value = s->padr; ++ break; ++ case GPIO_SWPORTA_DDR: ++ reg_value = s->paddr; ++ break; ++ case GPIO_INTEN: ++ reg_value = s->inter; ++ break; ++ case GPIO_INTMASK: ++ reg_value = s->intmr; ++ break; ++ case GPIO_INTTYPE_LEVEL: ++ reg_value = s->intlr; ++ break; ++ case GPIO_INTTYPE_POLA: ++ reg_value = s->intpr; ++ break; ++ case GPIO_INTTYPE_STATUS: ++ reg_value = s->intsr; ++ break; ++ case GPIO_RAW_INTTYPE_STATUS: ++ reg_value = s->rintsr; ++ break; ++ case GPIO_DEB_ENABLE: ++ reg_value = s->deber; ++ break; ++ case GPIO_CLEAN_INT: ++ reg_value = s->clintr; ++ break; ++ case GPIO_EXT_PORTA: ++ reg_value = s->expar; ++ break; ++ case GPIO_SYNC_LEVEL: ++ reg_value = s->synlr; ++ break; ++ case GPIO_ID_CODE: ++ reg_value = s->idcr; ++ break; ++ case GPIO_VERSION: ++ reg_value = s->versionr; ++ break; ++ case GPIO_CONF_R1: ++ reg_value = s->conf1r; ++ break; ++ case GPIO_CONF_R2: ++ reg_value = s->conf2r; ++ break; ++ default: ++ qemu_log_mask(LOG_GUEST_ERROR, "sw64-gpio: bad register at offset 0x%" HWADDR_PRIx "\n", offset); ++ return (uint64_t)0; ++ } ++ ++ return (uint64_t)reg_value; ++} ++ ++static const VMStateDescription vmstate_sw64_gpio = { ++ .name = TYPE_SW64_GPIO, ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .fields = (VMStateField[]) { ++ VMSTATE_UINT32(padr, SW64GPIOState), ++ VMSTATE_UINT32(paddr, SW64GPIOState), ++ VMSTATE_UINT32(inter, SW64GPIOState), ++ VMSTATE_UINT32(intmr, SW64GPIOState), ++ VMSTATE_UINT32(intlr, SW64GPIOState), ++ VMSTATE_UINT32(intpr, SW64GPIOState), ++ VMSTATE_UINT32(intsr, SW64GPIOState), ++ VMSTATE_UINT32(rintsr, SW64GPIOState), ++ VMSTATE_UINT32(deber, SW64GPIOState), ++ VMSTATE_UINT32(clintr, SW64GPIOState), ++
VMSTATE_UINT32(expar, SW64GPIOState), ++ VMSTATE_UINT32(synlr, SW64GPIOState), ++ VMSTATE_UINT32(idcr, SW64GPIOState), ++ VMSTATE_UINT32(versionr, SW64GPIOState), ++ VMSTATE_UINT32(conf1r, SW64GPIOState), ++ VMSTATE_UINT32(conf2r, SW64GPIOState), ++ VMSTATE_END_OF_LIST() ++ } ++}; ++ ++static void sw64_gpio_reset(DeviceState *dev) ++{ ++ SW64GPIOState *s = SW64_GPIO(dev); ++ ++ s->padr = 0; ++ s->paddr = 0; ++ s->inter = 0; ++ s->intmr = 0; ++ s->intlr = 0; ++ s->intpr = 0; ++ s->intsr = 0; ++ s->rintsr = 0; ++ s->deber = 0; ++ s->clintr = 0; ++ s->expar = 0; ++ s->synlr = 0; ++ s->idcr = 0; ++ s->versionr = 0; ++ s->conf1r = 0; ++ s->conf2r = 0; ++ ++ sw64_gpio_update_int(s); ++} ++ ++static const MemoryRegionOps sw64_gpio_ops = { ++ .read = sw64_gpio_read, ++ .write = sw64_gpio_write, ++ .valid.min_access_size = 4, ++ .valid.max_access_size = 4, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++}; ++ ++ ++static void sw64_gpio_realize(DeviceState *dev, Error **errp) ++{ ++ SW64GPIOState *s = SW64_GPIO(dev); ++ MemoryRegion *sw64_gpio = g_new(MemoryRegion, 1); ++ memory_region_init_io(sw64_gpio, OBJECT(s), &sw64_gpio_ops, s, ++ TYPE_SW64_GPIO, SW64_GPIO_MEM_SIZE); ++ ++ qdev_init_gpio_in(DEVICE(s), sw64_gpio_set, SW64_GPIO_PIN_COUNT); ++ qdev_init_gpio_out(DEVICE(s), s->output, SW64_GPIO_PIN_COUNT); ++ s->irq[0] = qemu_allocate_irq(sw64_gpio_set_irq, s, 15); ++ memory_region_add_subregion(get_system_memory(), 0x804930000000ULL, ++ sw64_gpio); ++} ++ ++static void sw64_gpio_initfn(Object *obj) ++{ ++ DeviceState *dev = DEVICE(obj); ++ Error *errp = NULL; ++ sw64_gpio_realize(dev, &errp); ++} ++ ++ ++static void sw64_gpio_class_init(ObjectClass *klass, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(klass); ++ ++ dc->realize = sw64_gpio_realize; ++ dc->reset = sw64_gpio_reset; ++ dc->vmsd = &vmstate_sw64_gpio; ++ dc->desc = "SW64 GPIO controller"; ++} ++ ++static const TypeInfo sw64_gpio_info = { ++ .name = TYPE_SW64_GPIO, ++ .parent = TYPE_SYS_BUS_DEVICE, ++ .instance_size = sizeof(SW64GPIOState), ++ .instance_init = sw64_gpio_initfn, ++ .class_init = sw64_gpio_class_init, ++}; ++ ++static void sw64_gpio_register_types(void) ++{ ++ type_register_static(&sw64_gpio_info); ++} ++ ++type_init(sw64_gpio_register_types) +diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build +index fc1b952379..015d7afc5d 100644 +--- a/hw/acpi/meson.build ++++ b/hw/acpi/meson.build +@@ -11,6 +11,8 @@ acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-s + acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c')) + acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_false: files('acpi-mem-hotplug-stub.c')) + acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_true: files('nvdimm.c')) ++acpi_ss.add(when: 'CONFIG_ACPI_SW64_PM', if_true: files('sw64_pm_device.c')) ++acpi_ss.add(when: 'CONFIG_GPIO_SUNWAY', if_true: files('gpio_sunway.c')) + acpi_ss.add(when: 'CONFIG_ACPI_NVDIMM', if_false: files('acpi-nvdimm-stub.c')) + acpi_ss.add(when: 'CONFIG_ACPI_PCI', if_true: files('pci.c')) + acpi_ss.add(when: 'CONFIG_ACPI_CXL', if_true: files('cxl.c'), if_false: files('cxl-stub.c')) +diff --git a/hw/acpi/sw64_pm_device.c b/hw/acpi/sw64_pm_device.c +new file mode 100644 +index 0000000000..9174e8f0ba +--- /dev/null ++++ b/hw/acpi/sw64_pm_device.c +@@ -0,0 +1,285 @@ ++/* ++ * Copyright (c) 2022 Wxiat Corporation ++ * Written by Lufeifei, Min fanlei ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms and conditions of the GNU General Public License, ++
* version 2 or later, as published by the Free Software Foundation. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "hw/acpi/acpi.h" ++#include "hw/irq.h" ++#include "hw/mem/pc-dimm.h" ++#include "hw/qdev-properties.h" ++#include "migration/vmstate.h" ++#include "qemu/error-report.h" ++#include "hw/sw64/pm.h" ++#include "exec/address-spaces.h" ++#include "hw/boards.h" ++#include "hw/acpi/cpu.h" ++ ++static void sw64_pm_device_plug_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ SW64PMState *s = SW64_PM(hotplug_dev); ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { ++ PCDIMMDevice *dimm = PC_DIMM(dev); ++ s->addr = dimm->addr; ++ s->length = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP, NULL); ++ s->status = SUNWAY_MEMHOTPLUG_ADD; ++ s->slot = dimm->slot; ++ s->node = dimm->node; ++ ++ acpi_memory_plug_cb(hotplug_dev, &s->acpi_memory_hotplug, dev, errp); ++ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++ s->status = SUNWAY_CPUHOTPLUG_ADD; ++ s->cpuid = cpu_hot_id; ++ acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp); ++ } else { ++ error_setg(errp, "virt: device plug request for unsupported device" ++ " type: %s", object_get_typename(OBJECT(dev))); ++ } ++} ++ ++static void sw64_pm_unplug_request_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ SW64PMState *s = SW64_PM(hotplug_dev); ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { ++ PCDIMMDevice *dimm = PC_DIMM(dev); ++ s->addr = dimm->addr; ++ s->slot = dimm->slot; ++ s->node = dimm->node; ++ s->length = object_property_get_uint(OBJECT(dimm), PC_DIMM_SIZE_PROP, NULL); ++ s->status = SUNWAY_MEMHOTPLUG_REMOVE; ++ ++ acpi_memory_unplug_request_cb(hotplug_dev, &s->acpi_memory_hotplug, dev, errp); ++ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++ s->status = SUNWAY_CPUHOTPLUG_REMOVE; ++ s->cpuid = cpu_hot_id; ++ acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp); ++ } else { ++ error_setg(errp, "acpi: device unplug request for unsupported device" ++ " type: %s", object_get_typename(OBJECT(dev))); ++ } ++} ++ ++static void sw64_pm_unplug_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ SW64PMState *s = SW64_PM(hotplug_dev); ++ ++ if ((object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM))) { ++ acpi_memory_unplug_cb(&s->acpi_memory_hotplug, dev, errp); ++ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++ acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp); ++ } else { ++ error_setg(errp, "acpi: device unplug for unsupported device" ++ " type: %s", object_get_typename(OBJECT(dev))); ++ } ++} ++ ++static void sw64_pm_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) ++{ ++ SW64PMState *s = SW64_PM(adev); ++ ++ if (!(ev & ACPI_MEMORY_HOTPLUG_STATUS) && !(ev & ACPI_CPU_HOTPLUG_STATUS)) { ++ /* Unknown event. Return without generating interrupt. */ ++ warn_report("GED: Unsupported event %d. No irq injected", ev); ++ return; ++ } ++ ++ /* Trigger the event by sending an interrupt to the guest.
*/ ++ qemu_irq_pulse(s->irq); ++} ++ ++static uint64_t pm_read(void *opaque, hwaddr addr, unsigned size) ++{ ++ SW64PMState *s = (SW64PMState *)opaque; ++ uint64_t ret = 0; ++ ++ switch (addr) { ++ case OFFSET_START_ADDR: ++ ret = s->addr; ++ break; ++ case OFFSET_LENGTH: ++ ret = s->length; ++ break; ++ case OFFSET_STATUS: ++ ret = s->status; ++ break; ++ case OFFSET_SLOT: ++ ret = s->slot; ++ break; ++ case OFFSET_CPUID: ++ ret = s->cpuid; ++ break; ++ case OFFSET_NODE: ++ ret = s->node; ++ break; ++ default: ++ break; ++ } ++ ++ return ret; ++} ++ ++static void pm_write(void *opaque, hwaddr addr, uint64_t val, ++ unsigned size) ++{ ++ SW64PMState *s = (SW64PMState *)opaque; ++ MemStatus *mdev; ++ AcpiCpuStatus *cdev = NULL; ++ DeviceState *dev = NULL; ++ HotplugHandler *hotplug_ctrl = NULL; ++ Error *local_err = NULL; ++ ++ switch (addr) { ++ case OFFSET_SLOT: ++ /* Reject out-of-range slot selectors written by the guest. */ ++ if (val >= s->acpi_memory_hotplug.dev_count) { ++ break; ++ } ++ s->acpi_memory_hotplug.selector = val; ++ mdev = &s->acpi_memory_hotplug.devs[s->acpi_memory_hotplug.selector]; ++ if (!mdev->dimm) { ++ break; ++ } ++ dev = DEVICE(mdev->dimm); ++ hotplug_ctrl = qdev_get_hotplug_handler(dev); ++ /* call pc-dimm unplug cb */ ++ hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); ++ object_unparent(OBJECT(dev)); ++ break; ++ case OFFSET_CPUID: ++ s->cpuhp_state.selector = val; ++ int i; ++ for (i = 0; i < s->cpuhp_state.dev_count; i++) { ++ if (s->cpuhp_state.selector == s->cpuhp_state.devs[i].arch_id) { ++ cdev = &s->cpuhp_state.devs[i]; ++ break; ++ } ++ } ++ /* Ignore selectors that match no possible CPU. */ ++ if (!cdev) { ++ break; ++ } ++ dev = DEVICE(cdev->cpu); ++ hotplug_ctrl = qdev_get_hotplug_handler(dev); ++ hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); ++ object_unparent(OBJECT(dev)); ++ break; ++ default: ++ break; ++ } ++} ++ ++const MemoryRegionOps sw64_pm_hotplug_ops = { ++ .read = pm_read, ++ .write = pm_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++ .impl = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++}; ++ ++ ++void sw64_cpu_hotplug_hw_init(CPUHotplugState *state, hwaddr base_addr) ++{ ++ MachineState *machine = MACHINE(qdev_get_machine()); ++ MachineClass *mc = MACHINE_GET_CLASS(machine); ++ const CPUArchIdList *id_list; ++ int i; ++ ++ assert(mc->possible_cpu_arch_ids); ++ id_list = mc->possible_cpu_arch_ids(machine); ++ state->dev_count = id_list->len; ++ state->devs = g_new0(typeof(*state->devs), state->dev_count); ++ for (i = 0; i < id_list->len; i++) { ++ state->devs[i].cpu = CPU(id_list->cpus[i].cpu); ++ state->devs[i].arch_id = id_list->cpus[i].arch_id; ++ } ++} ++ ++void sw64_acpi_switch_to_modern_cphp(CPUHotplugState *cpuhp_state, ++ uint16_t io_port) ++{ ++ sw64_cpu_hotplug_hw_init(cpuhp_state, io_port); ++} ++ ++static void sw64_set_cpu_hotplug_legacy(Object *obj, bool value) ++{ ++ DeviceState *dev = DEVICE(obj); ++ SW64PMState *s = SW64_PM(dev); ++ assert(!value); ++ if (s->cpu_hotplug_legacy && value == false) { ++ sw64_acpi_switch_to_modern_cphp(&s->cpuhp_state, 0); ++ } ++ s->cpu_hotplug_legacy = value; ++} ++ ++static void sw64_pm_initfn(Object *obj) ++{ ++ DeviceState *dev = DEVICE(obj); ++ SW64PMState *s = SW64_PM(dev); ++ MemoryRegion *pm_hotplug = g_new(MemoryRegion, 1); ++ ++ s->irq = qemu_allocate_irq(sw64_pm_set_irq, s, 13); ++ ++ memory_region_init_io(pm_hotplug, OBJECT(s), &sw64_pm_hotplug_ops, s, ++ "sw64_pm_hotplug", 4 * 1024 * 1024); ++ memory_region_add_subregion(get_system_memory(), 0x803600000000ULL, ++ pm_hotplug); ++ ++ if (s->acpi_memory_hotplug.is_enabled) { ++ MachineState *machine = MACHINE(qdev_get_machine()); ++ MemHotplugState *state = &s->acpi_memory_hotplug; ++ ++
state->dev_count = machine->ram_slots; ++ if (state->dev_count) { ++ state->devs = g_malloc0(sizeof(*state->devs) * state->dev_count); ++ } ++ } ++ ++ s->cpu_hotplug_legacy = true; ++ sw64_set_cpu_hotplug_legacy(obj, false); ++} ++ ++static Property sw64_pm_properties[] = { ++ DEFINE_PROP_BOOL("memory-hotplug-support", SW64PMState, ++ acpi_memory_hotplug.is_enabled, true), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ ++static void sw64_pm_class_init(ObjectClass *class, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(class); ++ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(class); ++ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(class); ++ ++ dc->desc = "SW64 PM"; ++ device_class_set_props(dc, sw64_pm_properties); ++ ++ hc->plug = sw64_pm_device_plug_cb; ++ hc->unplug_request = sw64_pm_unplug_request_cb; ++ hc->unplug = sw64_pm_unplug_cb; ++ ++ adevc->send_event = sw64_pm_send_event; ++} ++ ++static const TypeInfo sw64_pm_info = { ++ .name = TYPE_SW64_PM, ++ .parent = TYPE_SYS_BUS_DEVICE, ++ .instance_size = sizeof(SW64PMState), ++ .instance_init = sw64_pm_initfn, ++ .class_init = sw64_pm_class_init, ++ .interfaces = (InterfaceInfo[]) { ++ { TYPE_HOTPLUG_HANDLER }, ++ { TYPE_ACPI_DEVICE_IF }, ++ { } ++ } ++}; ++ ++static void sw64_pm_register_types(void) ++{ ++ type_register_static(&sw64_pm_info); ++} ++ ++type_init(sw64_pm_register_types) +diff --git a/hw/meson.build b/hw/meson.build +index f01fac4617..9ccb7b77ac 100644 +--- a/hw/meson.build ++++ b/hw/meson.build +@@ -65,5 +65,6 @@ subdir('s390x') + subdir('sh4') + subdir('sparc') + subdir('sparc64') ++subdir('sw64') + subdir('tricore') + subdir('xtensa') +diff --git a/hw/sw64/Kconfig b/hw/sw64/Kconfig +new file mode 100644 +index 0000000000..d5209cf6a9 +--- /dev/null ++++ b/hw/sw64/Kconfig +@@ -0,0 +1,32 @@ ++config CORE3 ++ bool ++ imply PCI_DEVICES ++ imply TEST_DEVICES ++ imply E1000_PCI ++ select PCI_EXPRESS ++ select SUN4V_RTC ++ select SMBIOS ++ select VIRTIO_MMIO ++ select SERIAL ++ select VIRTIO_VGA ++ select ISA_BUS ++ select PCKBD ++ select MSI_NONBROKEN ++ ++config CORE4 ++ bool ++ imply PCI_DEVICES ++ imply TEST_DEVICES ++ imply E1000_PCI ++ select PCI_EXPRESS ++ select SUN4V_RTC ++ select SMBIOS ++ select VIRTIO_MMIO ++ select SERIAL ++ select VIRTIO_VGA ++ select ISA_BUS ++ select PCKBD ++ select MSI_NONBROKEN ++ select ACPI_NVDIMM ++ select GPIO_SUNWAY ++ select ACPI_HW_REDUCED +diff --git a/hw/sw64/core3.c b/hw/sw64/core3.c +new file mode 100644 +index 0000000000..1c586f8608 +--- /dev/null ++++ b/hw/sw64/core3.c +@@ -0,0 +1,116 @@ ++/* ++ * QEMU CORE3 hardware system emulator. ++ * ++ * Copyright (c) 2018 Li Hainan ++ * ++ * This work is licensed under the GNU GPL license version 2 or later. 
++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu/datadir.h" ++#include "cpu.h" ++#include "hw/hw.h" ++#include "elf.h" ++#include "hw/loader.h" ++#include "hw/boards.h" ++#include "qemu/error-report.h" ++#include "sysemu/sysemu.h" ++#include "sysemu/kvm.h" ++#include "sysemu/reset.h" ++#include "hw/char/serial.h" ++#include "qemu/cutils.h" ++#include "ui/console.h" ++#include "hw/sw64/core.h" ++#include "hw/sw64/sunway.h" ++#include "sysemu/numa.h" ++ ++#define MAX_CPUS_CORE3 64 ++#define C3_UEFI_BIOS_NAME "c3-uefi-bios-sw" ++ ++static void core3_init(MachineState *machine) ++{ ++ ram_addr_t ram_size = machine->ram_size; ++ const char *kernel_filename = machine->kernel_filename; ++ const char *kernel_cmdline = machine->kernel_cmdline; ++ const char *initrd_filename = machine->initrd_filename; ++ const char *hmcode_name = kvm_enabled() ? "core3-reset":"core3-hmcode"; ++ const char *bios_name = C3_UEFI_BIOS_NAME; ++ BOOT_PARAMS *sunway_boot_params = g_new0(BOOT_PARAMS, 1); ++ char *hmcode_filename; ++ uint64_t hmcode_entry, kernel_entry; ++ ++ if (kvm_enabled()) ++ sw64_set_clocksource(); ++ ++ core3_board_init(machine); ++ ++ sw64_set_ram_size(ram_size); ++ sw64_clear_smp_rcb(); ++ ++ hmcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, hmcode_name); ++ if (hmcode_filename == NULL) { ++ error_report("no '%s' provided", hmcode_name); ++ exit(1); ++ } ++ sw64_load_hmcode(hmcode_filename, &hmcode_entry); ++ ++ if (!kvm_enabled()) { ++ CPUState *cpu; ++ SW64CPU *sw64_cpu; ++ CPU_FOREACH(cpu) { ++ sw64_cpu = SW64_CPU(cpu); ++ sw64_cpu->env.pc = hmcode_entry; ++ sw64_cpu->env.hm_entry = hmcode_entry; ++ sw64_cpu->env.csr[CID] = sw64_cpu->core_id; ++ qemu_register_reset(sw64_cpu_reset, sw64_cpu); ++ } ++ } ++ g_free(hmcode_filename); ++ ++ if (!kernel_filename) ++ sw64_find_and_load_bios(bios_name); ++ else { ++ sw64_clear_uefi_bios(); ++ sw64_load_kernel(kernel_filename, &kernel_entry, kernel_cmdline); ++ ++ if (initrd_filename) { ++ sw64_load_initrd(initrd_filename, sunway_boot_params); ++ } ++ } ++ ++ if (sw64_load_dtb(machine, sunway_boot_params) < 0) { ++ exit(1); ++ } ++ ++ rom_add_blob_fixed("sunway_boot_params", (sunway_boot_params), 0x48, 0x90A100); ++} ++ ++static void core3_machine_class_init(ObjectClass *oc, void *data) ++{ ++ MachineClass *mc = MACHINE_CLASS(oc); ++ mc->desc = "CORE3 BOARD"; ++ mc->init = core3_init; ++ mc->block_default_type = IF_VIRTIO; ++ mc->max_cpus = MAX_CPUS_CORE3; ++ mc->no_cdrom = 1; ++ mc->pci_allow_0_address = true; ++ mc->reset = sw64_board_reset; ++ mc->possible_cpu_arch_ids = sw64_possible_cpu_arch_ids; ++ mc->cpu_index_to_instance_props = sw64_cpu_index_to_props; ++ mc->default_cpu_type = SW64_CPU_TYPE_NAME("core3"); ++ mc->default_ram_id = "ram"; ++ mc->get_default_cpu_node_id = sw64_get_default_cpu_node_id; ++} ++ ++static const TypeInfo core3_machine_info = { ++ .name = TYPE_CORE3_MACHINE, ++ .parent = TYPE_MACHINE, ++ .class_init = core3_machine_class_init, ++ .instance_size = sizeof(CORE3MachineState), ++}; ++ ++static void core3_machine_init(void) ++{ ++ type_register_static(&core3_machine_info); ++} ++type_init(core3_machine_init); +diff --git a/hw/sw64/core3_board.c b/hw/sw64/core3_board.c +new file mode 100644 +index 0000000000..92cd5cf4ef +--- /dev/null ++++ b/hw/sw64/core3_board.c +@@ -0,0 +1,419 @@ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "cpu.h" ++#include "hw/sw64/core.h" ++#include "hw/hw.h" ++#include "hw/boards.h" ++#include "sysemu/sysemu.h" ++#include "exec/address-spaces.h" ++#include "hw/pci/pci_host.h" 
++#include "hw/pci/pci.h" ++#include "hw/char/serial.h" ++#include "hw/irq.h" ++#include "net/net.h" ++#include "hw/usb.h" ++#include "sysemu/numa.h" ++#include "sysemu/kvm.h" ++#include "sysemu/cpus.h" ++#include "hw/pci/msi.h" ++#include "hw/sw64/sw64_iommu.h" ++#include "hw/sw64/sunway.h" ++#include "hw/loader.h" ++#include "hw/nvram/fw_cfg.h" ++#include "hw/firmware/smbios.h" ++#include "sysemu/device_tree.h" ++#include "qemu/datadir.h" ++ ++#define CORE3_MAX_CPUS_MASK 0x3ff ++#define CORE3_CORES_SHIFT 10 ++#define CORE3_CORES_MASK 0x3ff ++#define CORE3_THREADS_SHIFT 20 ++#define CORE3_THREADS_MASK 0xfff ++ ++static const MemMapEntry memmap[] = { ++ [VIRT_PCIE_MMIO] = { 0xe0000000, 0x20000000 }, ++ [VIRT_MSI] = { 0x8000fee00000, 0x100000 }, ++ [VIRT_INTPU] = { 0x802a00000000, 0x100000 }, ++ [VIRT_MCU] = { 0x803000000000, 0x1000000 }, ++ [VIRT_RTC] = { 0x804910000000, 0x8 }, ++ [VIRT_FW_CFG] = { 0x804920000000, 0x18 }, ++ [VIRT_PCIE_IO_BASE] = { 0x880000000000, 0x890000000000 }, ++ [VIRT_PCIE_PIO] = { 0x880100000000, 0x100000000 }, ++ [VIRT_UART] = { 0x8801000003f8, 0x10 }, ++ [VIRT_PCIE_CFG] = { 0x880600000000, 0x100000000 }, ++ [VIRT_HIGH_PCIE_MMIO] = { 0x888000000000, 0x8000000000 }, ++}; ++ ++static const int irqmap[] = { ++ [VIRT_UART] = 12, ++ [VIRT_SUNWAY_GED] = 13, ++}; ++ ++static void core3_virt_build_smbios(CORE3MachineState *core3ms) ++{ ++ FWCfgState *fw_cfg = core3ms->fw_cfg; ++ ++ if (!fw_cfg) ++ return; ++ ++ sw64_virt_build_smbios(fw_cfg); ++} ++ ++static uint64_t mcu_read(void *opaque, hwaddr addr, unsigned size) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ unsigned int smp_cpus = ms->smp.cpus; ++ unsigned int smp_threads = ms->smp.threads; ++ unsigned int smp_cores = ms->smp.cores; ++ unsigned int max_cpus = ms->smp.max_cpus; ++ uint64_t ret = 0; ++ switch (addr) { ++ case 0x0080: ++ /* SMP_INFO */ ++ { ++ ret = (smp_threads & CORE3_THREADS_MASK) << CORE3_THREADS_SHIFT; ++ ret += (smp_cores & CORE3_CORES_MASK) << CORE3_CORES_SHIFT; ++ ret += max_cpus & CORE3_MAX_CPUS_MASK; ++ } ++ break; ++ case 0x0680: ++ /* INIT_CTL */ ++ ret = 0x3ae0000ddd9; ++ break; ++ case 0x0780: ++ /* CORE_ONLINE */ ++ ret = convert_bit(smp_cpus); ++ break; ++ case 0x3780: ++ /* MC_ONLINE */ ++ ret = convert_bit(smp_cpus); ++ break; ++ default: ++ fprintf(stderr, "Unsupported MCU addr: 0x%04lx\n", addr); ++ return -1; ++ } ++ return ret; ++} ++ ++static void mcu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) ++{ ++#ifdef CONFIG_DUMP_PRINTK ++ uint64_t print_addr; ++ uint32_t len; ++ int i; ++ ++ if (kvm_enabled()) ++ return; ++ ++ if (addr == 0x40000) { ++ print_addr = val & 0x7fffffff; ++ len = (uint32_t)(val >> 32); ++ uint8_t *buf; ++ buf = malloc(len + 10); ++ memset(buf, 0, len + 10); ++ cpu_physical_memory_rw(print_addr, buf, len, 0); ++ for (i = 0; i < len; i++) ++ printf("%c", buf[i]); ++ ++ free(buf); ++ return; ++ } ++#endif ++} ++ ++static const MemoryRegionOps mcu_ops = { ++ .read = mcu_read, ++ .write = mcu_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++}; ++ ++static uint64_t intpu_read(void *opaque, hwaddr addr, unsigned size) ++{ ++ uint64_t ret = 0; ++ ++ if (kvm_enabled()) ++ return ret; ++ ++ switch (addr) { ++ case 0x180: ++ /* LONGTIME */ ++ ret = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 32; ++ break; ++ } ++ return ret; ++} ++ ++static void intpu_write(void *opaque, hwaddr addr, uint64_t val, ++ unsigned 
size) ++{ ++ SW64CPU *cpu_current = SW64_CPU(current_cpu); ++ ++ if (kvm_enabled()) ++ return; ++ ++ switch (addr) { ++ case 0x00: ++ cpu_interrupt(qemu_get_cpu(val & 0x3f), CPU_INTERRUPT_II0); ++ cpu_current->env.csr[II_REQ] &= ~(1 << 20); ++ break; ++ default: ++ fprintf(stderr, "Unsupported IPU addr: 0x%04lx\n", addr); ++ break; ++ } ++} ++ ++static const MemoryRegionOps intpu_ops = { ++ .read = intpu_read, ++ .write = intpu_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++}; ++ ++static void create_fdt_misc_platform(CORE3MachineState *c3ms) ++{ ++ char *nodename; ++ MachineState *ms = MACHINE(c3ms); ++ ++ nodename = g_strdup_printf("/soc/misc_platform@0"); ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ qemu_fdt_setprop_string(ms->fdt, nodename, ++ "compatible", "sunway,misc-platform"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", 0); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,spbu_base", ++ 2, c3ms->memmap[VIRT_MCU].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,intpu_base", ++ 2, c3ms->memmap[VIRT_INTPU].base); ++ g_free(nodename); ++} ++ ++static void core3_create_fdt(CORE3MachineState *c3ms) ++{ ++ uint32_t intc_phandle; ++ MachineState *ms = MACHINE(c3ms); ++ ++ if (ms->dtb) { ++ char *filename; ++ ++ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ms->dtb); ++ if (!filename) { ++ fprintf(stderr, "Couldn't open dtb file %s\n", ms->dtb); ++ exit(1); ++ } ++ ++ ms->fdt = load_device_tree(ms->dtb, &c3ms->fdt_size); ++ if (!ms->fdt) { ++ error_report("load_device_tree() failed"); ++ exit(1); ++ } ++ } else { ++ ms->fdt = create_device_tree(&c3ms->fdt_size); ++ if (!ms->fdt) { ++ error_report("create_device_tree() failed"); ++ exit(1); ++ } ++ ++ qemu_fdt_setprop_string(ms->fdt, "/", "compatible", "sunway,chip3"); ++ qemu_fdt_setprop_string(ms->fdt, "/", "model", "chip3"); ++ qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/chosen"); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/soc"); ++ qemu_fdt_setprop_string(ms->fdt, "/soc", "compatible", "simple-bus"); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc", "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc", "#size-cells", 0x2); ++ qemu_fdt_setprop(ms->fdt, "/soc", "ranges", NULL, 0); ++ ++ intc_phandle = qemu_fdt_alloc_phandle(ms->fdt); ++ qemu_fdt_add_subnode(ms->fdt, "/soc/interrupt-controller"); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/interrupt-controller", ++ "compatible", "sw64,pintc_vt"); ++ qemu_fdt_setprop(ms->fdt, "/soc/interrupt-controller", ++ "interrupt-controller", NULL, 0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,node", 0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,irq-num", 16); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,ver", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "#interrupt-cells", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "phandle", intc_phandle); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/soc/serial0@8801"); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "#size-cells", 0x2); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/serial0@8801", ++ "compatible", "ns16550a"); ++ 
qemu_fdt_setprop_sized_cells(ms->fdt, "/soc/serial0@8801", "reg", ++ 2, c3ms->memmap[VIRT_UART].base, ++ 2, c3ms->memmap[VIRT_UART].size); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "interrupt-parent", intc_phandle); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "interrupts", c3ms->irqmap[VIRT_UART]); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", "reg-shift", 0x0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "reg-io-width", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "clock-frequency", 24000000); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/serial0@8801", ++ "status", "okay"); ++ } ++ create_fdt_misc_platform(c3ms); ++} ++ ++static void core3_cpus_init(MachineState *ms) ++{ ++ int i; ++ const CPUArchIdList *possible_cpus; ++ MachineClass *mc = MACHINE_GET_CLASS(ms); ++ ++ possible_cpus = mc->possible_cpu_arch_ids(ms); ++ for (i = 0; i < ms->smp.cpus; i++) { ++ sw64_new_cpu("core3-sw64-cpu", possible_cpus->cpus[i].arch_id, &error_fatal); ++ } ++} ++ ++void core3_board_init(MachineState *ms) ++{ ++ CORE3MachineState *core3ms = CORE3_MACHINE(ms); ++ DeviceState *dev = qdev_new(TYPE_CORE3_BOARD); ++ BoardState *bs = CORE3_BOARD(dev); ++ PCIHostState *phb = PCI_HOST_BRIDGE(dev); ++ PCIBus *b; ++ ++ core3ms->memmap = memmap; ++ core3ms->irqmap = irqmap; ++ ++ /* Create device tree */ ++ core3_create_fdt(core3ms); ++ ++ core3_cpus_init(ms); ++ ++ if (kvm_enabled()) { ++ if (kvm_has_gsi_routing()) ++ msi_nonbroken = true; ++ } ++ else ++ sw64_create_alarm_timer(ms, bs); ++ ++ memory_region_add_subregion(get_system_memory(), 0, ms->ram); ++ ++ memory_region_init_io(&bs->io_mcu, NULL, &mcu_ops, bs, "io_mcu", ++ memmap[VIRT_MCU].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_MCU].base, ++ &bs->io_mcu); ++ ++ memory_region_init_io(&bs->io_intpu, NULL, &intpu_ops, bs, "io_intpu", ++ memmap[VIRT_INTPU].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_INTPU].base, ++ &bs->io_intpu); ++ ++ memory_region_init_io(&bs->msi_ep, NULL, &msi_ops, bs, "msi_ep", ++ memmap[VIRT_MSI].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_MSI].base, ++ &bs->msi_ep); ++ ++ memory_region_init(&bs->mem_ep, OBJECT(bs), "pci0-mem", ++ memmap[VIRT_PCIE_IO_BASE].size); ++ memory_region_add_subregion(get_system_memory(), ++ memmap[VIRT_PCIE_IO_BASE].base, &bs->mem_ep); ++ ++ memory_region_init_alias(&bs->mem_ep64, NULL, "mem_ep64", &bs->mem_ep, ++ memmap[VIRT_HIGH_PCIE_MMIO].base, ++ memmap[VIRT_HIGH_PCIE_MMIO].size); ++ memory_region_add_subregion(get_system_memory(), ++ memmap[VIRT_HIGH_PCIE_MMIO].base, &bs->mem_ep64); ++ ++ memory_region_init_io(&bs->io_ep, OBJECT(bs), &sw64_pci_ignore_ops, NULL, ++ "pci0-io-ep", memmap[VIRT_PCIE_PIO].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_PCIE_PIO].base, ++ &bs->io_ep); ++ ++ b = pci_register_root_bus(dev, "pcie.0", sw64_board_set_irq, ++ sw64_board_map_irq, bs, ++ &bs->mem_ep, &bs->io_ep, 0, 537, TYPE_PCIE_BUS); ++ phb->bus = b; ++ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); ++ pci_bus_set_route_irq_fn(b, sw64_route_intx_pin_to_irq); ++ memory_region_init_io(&bs->conf_piu0, OBJECT(bs), &sw64_pci_config_ops, b, ++ "pci0-ep-conf-io", memmap[VIRT_PCIE_CFG].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_PCIE_CFG].base, ++ &bs->conf_piu0); ++ sw64_init_rtc_base_info(); ++ memory_region_init_io(&bs->io_rtc, OBJECT(bs), &rtc_ops, b, ++ "sw64-rtc", memmap[VIRT_RTC].size); ++ 
memory_region_add_subregion(get_system_memory(), memmap[VIRT_RTC].base, ++ &bs->io_rtc); ++ object_property_add_tm(OBJECT(core3ms), "rtc-time", rtc_get_time); ++#ifdef CONFIG_SW64_VT_IOMMU ++ sw64_vt_iommu_init(b); ++#endif ++ ++ sw64_create_pcie(bs, b, phb); ++ ++ core3ms->fw_cfg = sw64_create_fw_cfg(memmap[VIRT_FW_CFG].base, ++ memmap[VIRT_FW_CFG].size); ++ rom_set_fw(core3ms->fw_cfg); ++ ++ core3_virt_build_smbios(core3ms); ++} ++ ++static Property core3_main_pci_host_props[] = { ++ DEFINE_PROP_UINT32("ofw-addr", BoardState, ofw_addr, 0), ++ DEFINE_PROP_END_OF_LIST() ++}; ++ ++static char *core3_main_ofw_unit_address(const SysBusDevice *dev) ++{ ++ BoardState *s = CORE3_BOARD(dev); ++ return g_strdup_printf("%x", s->ofw_addr); ++} ++ ++static void core3_board_pcihost_class_init(ObjectClass *obj, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(obj); ++ SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(obj); ++ ++ dc->props_ = core3_main_pci_host_props; ++ dc->fw_name = "pci"; ++ sbc->explicit_ofw_unit_address = core3_main_ofw_unit_address; ++} ++ ++static const TypeInfo swboard_pcihost_info = { ++ .name = TYPE_CORE3_BOARD, ++ .parent = TYPE_PCI_HOST_BRIDGE, ++ .instance_size = sizeof(BoardState), ++ .class_init = core3_board_pcihost_class_init, ++}; ++ ++static void swboard_register_types(void) ++{ ++ type_register_static(&swboard_pcihost_info); ++} ++ ++type_init(swboard_register_types) +diff --git a/hw/sw64/core4.c b/hw/sw64/core4.c +new file mode 100644 +index 0000000000..ee3a5851a7 +--- /dev/null ++++ b/hw/sw64/core4.c +@@ -0,0 +1,400 @@ ++/* ++ * QEMU CORE4 hardware system emulator. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu/datadir.h" ++#include "cpu.h" ++#include "hw/hw.h" ++#include "elf.h" ++#include "hw/loader.h" ++#include "hw/boards.h" ++#include "qemu/error-report.h" ++#include "sysemu/sysemu.h" ++#include "sysemu/kvm.h" ++#include "sysemu/reset.h" ++#include "hw/char/serial.h" ++#include "qemu/cutils.h" ++#include "ui/console.h" ++#include "hw/sw64/core.h" ++#include "hw/sw64/sunway.h" ++#include "sysemu/numa.h" ++#include "hw/mem/pc-dimm.h" ++#include "qapi/error.h" ++#include "sysemu/device_tree.h" ++#include "hw/core/cpu.h" ++#include "hw/qdev-core.h" ++#include "qapi/qapi-visit-common.h" ++ ++#define C4_UEFI_BIOS_NAME "c4-uefi-bios-sw" ++ ++static unsigned long cpu_masks[4]; ++static int hot_cpu_num; ++int cpu_hot_id; ++ ++static void core4_init(MachineState *machine) ++{ ++ MachineClass *mc = MACHINE_GET_CLASS(machine); ++ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(machine); ++ assert(possible_cpus->len); ++ ++ ram_addr_t ram_size = machine->ram_size; ++ const char *kernel_filename = machine->kernel_filename; ++ const char *kernel_cmdline = machine->kernel_cmdline; ++ const char *initrd_filename = machine->initrd_filename; ++ const char *hmcode_name = kvm_enabled() ? 
"core4-reset":"core4-hmcode"; ++ const char *bios_name = C4_UEFI_BIOS_NAME; ++ BOOT_PARAMS *sunway_boot_params = g_new0(BOOT_PARAMS, 1); ++ char *hmcode_filename; ++ uint64_t hmcode_entry, kernel_entry; ++ ++ core4_board_init(machine); ++ ++ sw64_set_ram_size(ram_size); ++ sw64_clear_smp_rcb(); ++ sw64_clear_kernel_print(); ++ ++ hmcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, hmcode_name); ++ if (hmcode_filename == NULL) { ++ error_report("no '%s' provided", hmcode_name); ++ exit(1); ++ } ++ sw64_load_hmcode(hmcode_filename, &hmcode_entry); ++ ++ g_free(hmcode_filename); ++ ++ if (!kernel_filename) ++ sw64_find_and_load_bios(bios_name); ++ else { ++ sw64_clear_uefi_bios(); ++ sw64_load_kernel(kernel_filename, &kernel_entry, kernel_cmdline); ++ ++ if (initrd_filename) { ++ unsigned long initrd_end_va; ++ ++ sw64_load_initrd(initrd_filename, sunway_boot_params); ++ initrd_end_va = sunway_boot_params->initrd_start + ++ (sunway_boot_params->initrd_size & ++ (~0xfff0000000000000UL)); ++ qemu_fdt_setprop_cell(machine->fdt, "/chosen", "linux,initrd-start", ++ sunway_boot_params->initrd_start); ++ qemu_fdt_setprop_cell(machine->fdt, "/chosen", "linux,initrd-end", ++ initrd_end_va); ++ } ++ } ++ ++ if (sw64_load_dtb(machine, sunway_boot_params) < 0) { ++ exit(1); ++ } ++ ++ /* Retained for forward compatibility */ ++ rom_add_blob_fixed("sunway_boot_params", (sunway_boot_params), 0x48, 0x90A100); ++ ++ if (!kvm_enabled()) { ++ CPUState *cpu; ++ SW64CPU *sw64_cpu; ++ CPU_FOREACH(cpu) { ++ sw64_cpu = SW64_CPU(cpu); ++ sw64_cpu->env.pc = hmcode_entry; ++ sw64_cpu->env.hm_entry = hmcode_entry; ++ sw64_cpu->env.csr[CID] = sw64_cpu->core_id; ++ qemu_register_reset(sw64_cpu_reset, sw64_cpu); ++ if (sw64_cpu->core_id == 0) { ++ sw64_cpu->env.ir[16] = 0xA2024; ++ sw64_cpu->env.ir[17] = dtb_start_c4; ++ } ++ } ++ } ++} ++ ++static void set_on_cpumask(int cpu_num) ++{ ++ set_bit(cpu_num, cpu_masks); ++} ++ ++static void set_off_cpumask(int cpu_num) ++{ ++ clear_bit(cpu_num, cpu_masks); ++} ++ ++int get_state_cpumask(int cpu_num) ++{ ++ return test_bit(cpu_num, cpu_masks); ++} ++ ++static HotplugHandler *sw64_get_hotplug_handler(MachineState *machine, ++ DeviceState *dev) ++{ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) ++ return HOTPLUG_HANDLER(machine); ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++ return HOTPLUG_HANDLER(machine); ++ } ++ ++ return NULL; ++} ++ ++static void core4_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ MachineState *ms = MACHINE(hotplug_dev); ++ Error *local_err = NULL; ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) ++ pc_dimm_pre_plug(PC_DIMM(dev), ms, NULL, &local_err); ++ ++ return; ++} ++ ++static CPUArchId *sw64_find_cpu_slot(MachineState *ms, uint32_t id) ++{ ++ if (id >= ms->possible_cpus->len) { ++ return NULL; ++ } ++ if (hot_cpu_num < ms->smp.cpus) { ++ ++hot_cpu_num; ++ } ++ set_on_cpumask(id); ++ return &ms->possible_cpus->cpus[id]; ++} ++ ++static void sw64_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev, ++ Error **errp) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ uint16_t smp_cpus = ms->smp.cpus; ++ CPUArchId *found_cpu; ++ HotplugHandlerClass *hhc; ++ Error *local_err = NULL; ++ CPUState *cs = NULL; ++ SW64CPU *cpu = NULL; ++ CORE4MachineState *pcms = CORE4_MACHINE(hotplug_dev); ++ ++ if (pcms->acpi_dev) { ++ if (get_state_cpumask(cpu_hot_id)) { ++ error_setg(&local_err, "error: Unable to add already online cpu!"); ++ return; ++ } ++ ++ hhc = 
HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); ++ hhc->plug(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); ++ if (local_err) { ++ goto out; ++ } ++ } ++ ++ cs = CPU(dev); ++ cpu = SW64_CPU(dev); ++ if (hot_cpu_num < smp_cpus) { ++ cs->cpu_index = hot_cpu_num; ++ cpu->core_id = hot_cpu_num; ++ found_cpu = sw64_find_cpu_slot(MACHINE(pcms), hot_cpu_num); ++ } else { ++ hot_cpu_num = smp_cpus; ++ cs->cpu_index = cpu_hot_id; ++ cpu->core_id = cpu_hot_id; ++ found_cpu = sw64_find_cpu_slot(MACHINE(pcms), cpu_hot_id); ++ } ++ if (!found_cpu) { ++ error_setg(&local_err, "No slot found for newly hot added CPU"); ++ goto out; ++ } ++ found_cpu->cpu = OBJECT(dev); ++out: ++ error_propagate(errp, local_err); ++} ++ ++static void sw64_qdev_unrealize(DeviceState *dev) ++{ ++ Error *err = NULL; ++ object_property_set_bool(OBJECT(dev), "realized", false, &err); ++} ++ ++ ++static void sw64_cpu_unplug_request(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ Error *local_err = NULL; ++ SW64CPU *cpu = NULL; ++ CPUState *cs = NULL; ++ HotplugHandlerClass *hhc; ++ CORE4MachineState *pcms = CORE4_MACHINE(hotplug_dev); ++ ++ if (!pcms->acpi_dev) { ++ error_setg(&local_err, "CPU hot unplug not supported without ACPI"); ++ goto out; ++ } ++ ++ cpu = SW64_CPU(dev); ++ cpu_hot_id = cpu->core_id; ++ cs = CPU(dev); ++ cs->cpu_index = cpu_hot_id; ++ ++ if (!cpu_hot_id) { ++ error_setg(&local_err, "Boot CPU is unpluggable"); ++ goto out; ++ } ++ ++ /* The boot CPU case was rejected above, so only the online mask matters here. */ ++ if (!get_state_cpumask(cpu_hot_id)) { ++ error_setg(&local_err, "Unable to unplug an already offline CPU"); ++ goto out; ++ } ++ ++ hhc = HOTPLUG_HANDLER_GET_CLASS(pcms->acpi_dev); ++ hhc->unplug_request(HOTPLUG_HANDLER(pcms->acpi_dev), dev, &local_err); ++ ++out: ++ error_propagate(errp, local_err); ++} ++ ++static void core4_machine_device_plug_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ MachineState *ms = MACHINE(hotplug_dev); ++ CORE4MachineState *core4ms = CORE4_MACHINE(hotplug_dev); ++ Error *local_err = NULL; ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { ++ pc_dimm_plug(PC_DIMM(dev), ms); ++ hotplug_handler_plug(HOTPLUG_HANDLER(core4ms->acpi_dev), ++ dev, &local_err); ++ } ++ if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) ++ sw64_cpu_plug(hotplug_dev, dev, &local_err); ++ ++ error_propagate(errp, local_err); ++} ++ ++static void core4_machine_device_unplug_request_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ CORE4MachineState *core4ms = CORE4_MACHINE(hotplug_dev); ++ Error *local_err = NULL; ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { ++ hotplug_handler_unplug_request(HOTPLUG_HANDLER(core4ms->acpi_dev), ++ dev, &local_err); ++ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++ sw64_cpu_unplug_request(hotplug_dev, dev, &local_err); ++ } else { ++ error_setg(&local_err, "device unplug request for unsupported device" ++ " type: %s", object_get_typename(OBJECT(dev))); ++ } ++ ++ error_propagate(errp, local_err); ++} ++ ++static void core4_machine_device_unplug_cb(HotplugHandler *hotplug_dev, ++ DeviceState *dev, Error **errp) ++{ ++ MachineState *ms = MACHINE(hotplug_dev); ++ CORE4MachineState *core4ms = CORE4_MACHINE(hotplug_dev); ++ Error *local_err = NULL; ++ CPUArchId *found_cpu; ++ ++ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { ++ hotplug_handler_unplug(HOTPLUG_HANDLER(core4ms->acpi_dev), ++ dev, &local_err); ++ if (local_err) { ++ goto out; ++ } ++ pc_dimm_unplug(PC_DIMM(dev), MACHINE(ms)); ++ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { ++
hotplug_handler_unplug(HOTPLUG_HANDLER(core4ms->acpi_dev), ++ dev, &local_err); ++ if (local_err) { ++ goto out; ++ } ++ found_cpu = sw64_find_cpu_slot(MACHINE(core4ms), cpu_hot_id); ++ found_cpu->cpu = NULL; ++ sw64_qdev_unrealize(dev); ++ set_off_cpumask(cpu_hot_id); ++ } else { ++ error_setg(&local_err, "device unplug for unsupported device" ++ " type: %s", object_get_typename(OBJECT(dev))); ++ } ++ ++out: ++ error_propagate(errp, local_err); ++} ++ ++bool sw64_is_acpi_enabled(CORE4MachineState *c4ms) ++{ ++ return c4ms->acpi != ON_OFF_AUTO_OFF; ++} ++ ++static void core4_get_acpi(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ CORE4MachineState *c4ms = CORE4_MACHINE(obj); ++ OnOffAuto acpi = c4ms->acpi; ++ ++ visit_type_OnOffAuto(v, name, &acpi, errp); ++} ++ ++static void core4_set_acpi(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ CORE4MachineState *c4ms = CORE4_MACHINE(obj); ++ ++ visit_type_OnOffAuto(v, name, &c4ms->acpi, errp); ++} ++ ++static void core4_machine_class_init(ObjectClass *oc, void *data) ++{ ++ MachineClass *mc = MACHINE_CLASS(oc); ++ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); ++ ++ mc->desc = "CORE4 BOARD"; ++ mc->init = core4_init; ++ mc->block_default_type = IF_VIRTIO; ++ mc->max_cpus = MAX_CPUS_CORE4; ++ mc->no_cdrom = 1; ++ mc->pci_allow_0_address = true; ++ mc->reset = sw64_board_reset; ++ mc->possible_cpu_arch_ids = sw64_possible_cpu_arch_ids; ++ mc->default_cpu_type = SW64_CPU_TYPE_NAME("core4"); ++ mc->default_ram_id = "ram"; ++ mc->cpu_index_to_instance_props = sw64_cpu_index_to_props; ++ mc->get_default_cpu_node_id = sw64_get_default_cpu_node_id; ++ mc->get_hotplug_handler = sw64_get_hotplug_handler; ++ mc->has_hotpluggable_cpus = true; ++ hc->pre_plug = core4_machine_device_pre_plug_cb; ++ hc->plug = core4_machine_device_plug_cb; ++ hc->unplug_request = core4_machine_device_unplug_request_cb; ++ hc->unplug = core4_machine_device_unplug_cb; ++ mc->auto_enable_numa = true; ++ ++ object_class_property_add(oc, "acpi", "OnOffAuto", ++ core4_get_acpi, core4_set_acpi, ++ NULL, NULL); ++ object_class_property_set_description(oc, "acpi", ++ "Enable ACPI"); ++} ++ ++static void core4_machine_initfn(Object *obj) ++{ ++ CORE4MachineState *c4ms = CORE4_MACHINE(obj); ++ ++ c4ms->oem_id = g_strndup(SW_ACPI_BUILD_APPNAME6, 6); ++ c4ms->oem_table_id = g_strndup(SW_ACPI_BUILD_APPNAME8, 8); ++ c4ms->acpi = ON_OFF_AUTO_AUTO; ++} ++ ++static const TypeInfo core4_machine_info = { ++ .name = TYPE_CORE4_MACHINE, ++ .parent = TYPE_MACHINE, ++ .instance_size = sizeof(CORE4MachineState), ++ .class_size = sizeof(CORE4MachineClass), ++ .class_init = core4_machine_class_init, ++ .instance_init = core4_machine_initfn, ++ .interfaces = (InterfaceInfo[]) { ++ { TYPE_HOTPLUG_HANDLER }, ++ { } ++ }, ++}; ++ ++static void core4_machine_init(void) ++{ ++ type_register_static(&core4_machine_info); ++} ++ ++type_init(core4_machine_init) +diff --git a/hw/sw64/core4_board.c b/hw/sw64/core4_board.c +new file mode 100644 +index 0000000000..ea3171d2ba +--- /dev/null ++++ b/hw/sw64/core4_board.c +@@ -0,0 +1,868 @@ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "cpu.h" ++#include "hw/sw64/core.h" ++#include "hw/sw64/sunway.h" ++#include "hw/hw.h" ++#include "hw/boards.h" ++#include "sysemu/sysemu.h" ++#include "exec/address-spaces.h" ++#include "hw/pci/pci_host.h" ++#include "hw/pci/pci.h" ++#include "hw/char/serial.h" ++#include "hw/irq.h" ++#include "net/net.h" ++#include "hw/usb.h" ++#include "sysemu/numa.h" ++#include
"sysemu/kvm.h" ++#include "hw/pci/msi.h" ++#include "sysemu/device_tree.h" ++#include "qemu/datadir.h" ++#include "hw/sw64/gpio.h" ++ ++#define CORE4_MAX_CPUS_MASK 0x3ff ++#define CORE4_CORES_SHIFT 10 ++#define CORE4_CORES_MASK 0x3ff ++#define CORE4_THREADS_SHIFT 20 ++#define CORE4_THREADS_MASK 0xfff ++ ++#define DOMAIN_ID_SHIFT 12 ++#define CORE_ID_SHIFT 0 ++ ++static unsigned long coreonlines[4]; ++ ++static const MemMapEntry memmap[] = { ++ [VIRT_BOOT_FLAG] = { 0x820000, 0x20 }, ++ [VIRT_PCIE_MMIO] = { 0xe0000000, 0x20000000 }, ++ [VIRT_MSI] = { 0x8000fee00000, 0x100000 }, ++ [VIRT_SPBU] = { 0x803000000000, 0x1000000 }, ++ [VIRT_SUNWAY_GED] = { 0x803600000000, 0x20 }, ++ [VIRT_INTPU] = { 0x803a00000000, 0x100000 }, ++ [VIRT_RTC] = { 0x804910000000, 0x8 }, ++ [VIRT_FW_CFG] = { 0x804920000000, 0x18 }, ++ [VIRT_GPIO] = { 0x804930000000, 0x0000008000 }, ++ [VIRT_PCIE_IO_BASE] = { 0x880000000000, 0x890000000000 }, ++ [VIRT_PCIE_PIO] = { 0x880100000000, 0x100000000 }, ++ [VIRT_UART] = { 0x8801000003f8, 0x10 }, ++ [VIRT_PCIE_CFG] = { 0x880600000000, 0x100000000 }, ++ [VIRT_HIGH_PCIE_MMIO] = { 0x888000000000, 0x8000000000 }, ++}; ++ ++static const int irqmap[] = { ++ [VIRT_UART] = 12, ++ [VIRT_SUNWAY_GED] = 13, ++ [VIRT_GPIO] = 15, ++}; ++ ++static void core4_virt_build_smbios(CORE4MachineState *core4ms) ++{ ++ FWCfgState *fw_cfg = core4ms->fw_cfg; ++ ++ if (!fw_cfg) ++ return; ++ ++ sw64_virt_build_smbios(fw_cfg); ++} ++ ++static uint64_t spbu_read(void *opaque, hwaddr addr, unsigned size) ++{ ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ unsigned int smp_cpus = ms->smp.cpus; ++ unsigned int smp_threads = ms->smp.threads; ++ unsigned int smp_cores = ms->smp.cores; ++ unsigned int max_cpus = ms->smp.max_cpus; ++ uint64_t ret = 0; ++ switch (addr) { ++ case 0x0080: ++ /* SMP_INFO */ ++ { ++ ret = (smp_threads & CORE4_THREADS_MASK) << CORE4_THREADS_SHIFT; ++ ret += (smp_cores & CORE4_CORES_MASK) << CORE4_CORES_SHIFT; ++ ret += max_cpus & CORE4_MAX_CPUS_MASK; ++ } ++ break; ++ case 0x3a00: ++ /* CLU_LV2_SEL_H */ ++ ret = 1; ++ break; ++ case 0x0680: ++ /* INIT_CTL */ ++ ret = 0x3ae0007802c; ++ break; ++ case 0x0780: ++ /* CORE_ONLINE */ ++ ret = convert_bit(max_cpus); ++ break; ++ case 0x3780: ++ /* MC_ONLINE */ ++ ret = convert_bit(smp_cpus); ++ break; ++ default: ++ fprintf(stderr, "Unsupported MCU addr: 0x%04lx\n", addr); ++ return -1; ++ } ++ return ret; ++} ++ ++static void spbu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) ++{ ++#ifdef CONFIG_DUMP_PRINTK ++ uint64_t print_addr; ++ uint32_t len; ++ int i; ++ ++ if (kvm_enabled()) ++ return; ++ ++ if (addr == 0x40000) { ++ print_addr = val & 0x7fffffff; ++ len = (uint32_t)(val >> 32); ++ uint8_t *buf; ++ buf = malloc(len + 10); ++ memset(buf, 0, len + 10); ++ cpu_physical_memory_rw(print_addr, buf, len, 0); ++ for (i = 0; i < len; i++) ++ printf("%c", buf[i]); ++ ++ free(buf); ++ return; ++ } ++#endif ++} ++ ++static const MemoryRegionOps spbu_ops = { ++ .read = spbu_read, ++ .write = spbu_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++}; ++ ++static uint64_t intpu_read(void *opaque, hwaddr addr, unsigned size) ++{ ++ uint64_t ret = 0; ++ return ret; ++} ++ ++static void intpu_write(void *opaque, hwaddr addr, uint64_t val, ++ unsigned size) ++{ ++ SW64CPU *cpu_current = SW64_CPU(current_cpu); ++ ++ if (kvm_enabled()) ++ return; ++ ++ switch (addr) { ++ case 0x00: ++ if 
(((cpu_current->env.csr[II_REQ] >> 16) & 7) == 6) ++ cpu_interrupt(qemu_get_cpu(val & 0x3f), CPU_INTERRUPT_IINM); ++ else ++ cpu_interrupt(qemu_get_cpu(val & 0x3f), CPU_INTERRUPT_II0); ++ cpu_current->env.csr[II_REQ] &= ~(1 << 20); ++ break; ++ default: ++ fprintf(stderr, "Unsupported IPU addr: 0x%04lx\n", addr); ++ break; ++ } ++} ++ ++static const MemoryRegionOps intpu_ops = { ++ .read = intpu_read, ++ .write = intpu_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 8, ++ .max_access_size = 8, ++ }, ++}; ++ ++static void create_fdt_clk(CORE4MachineState *c4ms) ++{ ++ FILE *fp; ++ char buff[64]; ++ char *nodename; ++ unsigned long mclk_hz; ++ MachineState *ms = MACHINE(c4ms); ++ ++ fp = fopen("/sys/kernel/debug/sw64/mclk_hz", "rb"); ++ if (fp == NULL) { ++ fprintf(stderr, "%s: Failed to open file mclk_hz\n", __func__); ++ return; ++ } ++ ++ if (fgets(buff, 64, fp) == NULL) { ++ fprintf(stderr, "%s: Error in reading mclk_hz\n", __func__); ++ fclose(fp); ++ fp = NULL; ++ return; ++ } ++ ++ mclk_hz = atoi(buff); ++ fclose(fp); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/soc/clocks"); ++ ++ nodename = g_strdup_printf("/soc/clocks/mclk"); ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "sunway,mclk"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", mclk_hz); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "#clock-cells", 0x0); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "clock-output-names", "mclk"); ++ g_free(nodename); ++ ++ nodename = g_strdup_printf("/soc/clocks/extclk"); ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "sunway,extclk"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 0x0); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "#clock-cells", 0x0); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "clock-output-names", "extclk"); ++ g_free(nodename); ++} ++ ++static void core4_set_coreonline(int cpuid) ++{ ++ set_bit(cpuid, coreonlines); ++} ++ ++static int core4_get_stat_coreonline(int cpuid) ++{ ++ int ret = test_bit(cpuid, coreonlines); ++ ++ clear_bit(cpuid, coreonlines); ++ return ret; ++} ++ ++static void core4_numa_set_coreonlines(MachineState *ms, int node, ++ unsigned int *logical_core_id, int *coreid_idx) ++{ ++ unsigned int max_cpus = ms->smp.max_cpus; ++ int i, cpu_node_id, shift = 0; ++ ++ for (i = 0; i < max_cpus; i++) { ++ cpu_node_id = ms->possible_cpus->cpus[i].props.node_id; ++ ++ if (cpu_node_id == node) { ++ core4_set_coreonline(shift); ++ logical_core_id[*coreid_idx] = i; ++ shift++; ++ (*coreid_idx)++; ++ } ++ } ++} ++ ++static void core4_get_cpu_to_rcid(MachineState *ms, ++ unsigned long *__cpu_to_rcid) ++{ ++ int nb_numa_nodes = ms->numa_state->num_nodes; ++ unsigned long rcid[MAX_CPUS_CORE4]; ++ unsigned int logical_core_id[MAX_CPUS_CORE4]; ++ int i, j, coreid_idx = 0, cpuid = 0, idx = 0; ++ ++ for (i = 0; i < nb_numa_nodes; i++) { ++ core4_numa_set_coreonlines(ms, i, logical_core_id, &coreid_idx); ++ ++ for (j = 0; j < MAX_CPUS_CORE4; j++) { ++ if (core4_get_stat_coreonline(j)) { ++ rcid[idx] = (i << DOMAIN_ID_SHIFT) | (j << CORE_ID_SHIFT); ++ idx++; ++ } ++ } ++ } ++ ++ for (i = 0; i < ms->smp.max_cpus; i++) { ++ cpuid = logical_core_id[i]; ++ __cpu_to_rcid[cpuid] = rcid[i]; ++ } ++ ++ cpuid = ms->smp.max_cpus; ++ while (cpuid < MAX_CPUS_CORE4) { ++ __cpu_to_rcid[cpuid] = -1; ++ cpuid++; ++ } ++} ++ ++static int 
core4_fdt_add_memory_node(void *fdt, hwaddr mem_base, ++ hwaddr mem_len, int numa_node_id) ++{ ++ char *nodename; ++ int ret; ++ ++ nodename = g_strdup_printf("/memory@%" PRIx64, mem_base); ++ qemu_fdt_add_subnode(fdt, nodename); ++ qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); ++ ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", 2, mem_base, ++ 2, mem_len); ++ if (ret < 0) { ++ goto out; ++ } ++ ++ /* only set the NUMA ID if it is specified */ ++ if (numa_node_id >= 0) { ++ ret = qemu_fdt_setprop_cell(fdt, nodename, ++ "numa-node-id", numa_node_id); ++ } ++out: ++ g_free(nodename); ++ return ret; ++} ++ ++static void core4_add_memory_node(MachineState *ms) ++{ ++ hwaddr mem_len=0x0; ++ hwaddr mem_start=0x0; ++ int nb_numa_nodes = ms->numa_state->num_nodes; ++ int i, rc; ++ ++ if (ms->numa_state != NULL && nb_numa_nodes > 0) { ++ for (i = 0; i < nb_numa_nodes; i++) { ++ mem_len = ms->numa_state->nodes[i].node_mem; ++ if (!mem_len) { ++ continue; ++ } ++ ++ if (!i) { ++ mem_start = 0x910000; ++ mem_len -= mem_start; ++ } ++ ++ rc = core4_fdt_add_memory_node(ms->fdt, mem_start, mem_len, i); ++ if (rc < 0) { ++ fprintf(stderr, "couldn't add /memory@%"PRIx64" node\n", ++ mem_start); ++ } ++ mem_start += mem_len; ++ } ++ } ++} ++ ++static void core4_add_cpu_map(CORE4MachineState *c4ms) ++{ ++ MachineState *ms = MACHINE(c4ms); ++ int cpu; ++ ++ qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); ++ ++ for (cpu = ms->smp.max_cpus - 1; cpu >= 0; cpu--) { ++ char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu); ++ char *map_path; ++ ++ if (ms->smp.threads > 1) { ++ map_path = g_strdup_printf( ++ "/cpus/cpu-map/cluster%d/core%d/thread%d", ++ cpu / (ms->smp.cores * ms->smp.threads), ++ (cpu / ms->smp.threads) % ms->smp.cores, ++ cpu % ms->smp.threads); ++ } else { ++ map_path = g_strdup_printf( ++ "/cpus/cpu-map/cluster%d/core%d", ++ cpu / ms->smp.cores, ++ cpu % ms->smp.cores); ++ } ++ qemu_fdt_add_path(ms->fdt, map_path); ++ qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); ++ ++ g_free(map_path); ++ g_free(cpu_path); ++ } ++} ++ ++static void core4_add_cpu_node(CORE4MachineState *c4ms) ++{ ++ MachineState *ms = MACHINE(c4ms); ++ unsigned long __cpu_to_rcid[MAX_CPUS_CORE4]; ++ char *nodename; ++ int cpu; ++ ++ qemu_fdt_add_subnode(ms->fdt, "/cpus"); ++ qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", 1); ++ qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); ++ ++ core4_get_cpu_to_rcid(ms, __cpu_to_rcid); ++ for (cpu = ms->smp.max_cpus - 1; cpu >= 0; cpu--) { ++ nodename = g_strdup_printf("/cpus/cpu@%d", cpu); ++ ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ if (ms->possible_cpus->cpus[cpu].props.has_node_id) { ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", ++ ms->possible_cpus->cpus[cpu].props.node_id); ++ } ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, ++ "sunway,boot_flag_address", 1, 0x0, 1, ++ c4ms->memmap[VIRT_BOOT_FLAG].base); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "reg", __cpu_to_rcid[cpu]); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu"); ++ qemu_fdt_setprop_string(ms->fdt, nodename, ++ "compatible", "sunway,junzhang"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", ++ qemu_fdt_alloc_phandle(ms->fdt)); ++ ++ if (cpu < ms->smp.cpus) { ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "online-capable", 0); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "status", "okay"); ++ } else { ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "online-capable", 1); ++ qemu_fdt_setprop_string(ms->fdt, nodename, 
"status", "disable"); ++ } ++ ++ g_free(nodename); ++ } ++ ++ core4_add_cpu_map(c4ms); ++} ++ ++static void core4_add_distance_map_node(MachineState *ms) ++{ ++ int size; ++ uint32_t *matrix; ++ int idx, i, j; ++ int nb_numa_nodes = ms->numa_state->num_nodes; ++ ++ if (nb_numa_nodes > 0 && ms->numa_state->have_numa_distance) { ++ size = nb_numa_nodes * nb_numa_nodes * 3 * sizeof(uint32_t); ++ matrix = g_malloc0(size); ++ ++ for (i = 0; i < nb_numa_nodes; i++) { ++ for (j = 0; j < nb_numa_nodes; j++) { ++ idx = (i * nb_numa_nodes + j) * 3; ++ matrix[idx + 0] = cpu_to_be32(i); ++ } ++ } ++ ++ qemu_fdt_add_subnode(ms->fdt, "/distance-map"); ++ qemu_fdt_setprop_string(ms->fdt, "/distance-map", "compatible", ++ "numa-distance-map-v1"); ++ qemu_fdt_setprop(ms->fdt, "/distance-map", "distance-matrix", ++ matrix, size); ++ g_free(matrix); ++ } ++} ++ ++static void core4_create_numa_fdt(CORE4MachineState *c4ms) ++{ ++ MachineState *ms = MACHINE(c4ms); ++ ++ /* Add memory node information */ ++ core4_add_memory_node(ms); ++ /* Add cpus node information */ ++ core4_add_cpu_node(c4ms); ++ /* Add distance-map node information */ ++ core4_add_distance_map_node(ms); ++} ++ ++static void create_fdt_misc_platform(CORE4MachineState *c4ms) ++{ ++ char *nodename; ++ MachineState *ms = MACHINE(c4ms); ++ ++ nodename = g_strdup_printf("/soc/misc_platform@0"); ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ qemu_fdt_setprop_string(ms->fdt, nodename, ++ "compatible", "sunway,misc-platform"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", 0); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,spbu_base", ++ 2, c4ms->memmap[VIRT_SPBU].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,intpu_base", ++ 2, c4ms->memmap[VIRT_INTPU].base); ++ g_free(nodename); ++} ++ ++static void create_fdt_pcie_controller(CORE4MachineState *c4ms) ++{ ++ MachineState *ms = MACHINE(c4ms); ++ char *nodename; ++ ++ nodename = g_strdup_printf("/soc/pcie@8800"); ++ qemu_fdt_add_subnode(ms->fdt, nodename); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "sunway,pcie"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x3); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "pci"); ++ qemu_fdt_setprop_cell(ms->fdt, nodename, "numa-node-id", 0); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", ++ 2, 0x880600000000, 2, 0x100000000); ++ qemu_fdt_setprop_string(ms->fdt, nodename, "reg-names", "ecam"); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,rc-config-base", ++ 1, 0x8805, 1, 0x0); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,ep-config-base", ++ 2, c4ms->memmap[VIRT_PCIE_CFG].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,ep-mem-32-base", ++ 2, c4ms->memmap[VIRT_PCIE_IO_BASE].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,ep-mem-64-base", ++ 2, c4ms->memmap[VIRT_HIGH_PCIE_MMIO].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,ep-io-base", ++ 2, c4ms->memmap[VIRT_PCIE_PIO].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,piu-ior0-base", ++ 2, 0x880200000000); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,piu-ior1-base", ++ 2, 0x880300000000); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,rc-index", 2, 0x0); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "sunway,pcie-io-base", ++ 2, c4ms->memmap[VIRT_PCIE_IO_BASE].base); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, 
"bus-range", 2, 0xff); ++ qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "ranges", ++ 1, FDT_PCI_RANGE_IOPORT, 2, 0, ++ 2, c4ms->memmap[VIRT_PCIE_PIO].base, ++ 2, c4ms->memmap[VIRT_PCIE_PIO].size, ++ ++ 1, FDT_PCI_RANGE_MMIO, 2, ++ c4ms->memmap[VIRT_PCIE_MMIO].base, ++ 2, 0x8800e0000000, 2, 0x20000000, ++ ++ 1, 0x43000000, 2, ++ c4ms->memmap[VIRT_HIGH_PCIE_MMIO].base, ++ 2, c4ms->memmap[VIRT_HIGH_PCIE_MMIO].base, ++ 2, c4ms->memmap[VIRT_HIGH_PCIE_MMIO].size); ++ ++ qemu_fdt_setprop_string(ms->fdt, nodename, "status", "okay"); ++} ++ ++static void core4_create_fdt(CORE4MachineState *c4ms) ++{ ++ uint32_t intc_phandle; ++ MachineState *ms = MACHINE(c4ms); ++ ++ if (ms->dtb) { ++ char *filename; ++ ++ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, ms->dtb); ++ if (!filename) { ++ fprintf(stderr, "Couldn't open dtb file %s\n", ms->dtb); ++ exit(1); ++ } ++ ++ ms->fdt = load_device_tree(ms->dtb, &c4ms->fdt_size); ++ if (!ms->fdt) { ++ error_report("load_device_tree() failed"); ++ exit(1); ++ } ++ goto update_bootargs; ++ } else { ++ ms->fdt = create_device_tree(&c4ms->fdt_size); ++ if (!ms->fdt) { ++ error_report("create_device_tree() failed"); ++ exit(1); ++ } ++ ++ qemu_fdt_setprop_string(ms->fdt, "/", "compatible", "sunway,junzhang"); ++ qemu_fdt_setprop_string(ms->fdt, "/", "model", "junzhang"); ++ qemu_fdt_setprop_cell(ms->fdt, "/", "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/", "#size-cells", 0x2); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/chosen"); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/soc"); ++ qemu_fdt_setprop_string(ms->fdt, "/soc", "compatible", "simple-bus"); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc", "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc", "#size-cells", 0x2); ++ qemu_fdt_setprop(ms->fdt, "/soc", "ranges", NULL, 0); ++ ++ intc_phandle = qemu_fdt_alloc_phandle(ms->fdt); ++ qemu_fdt_add_subnode(ms->fdt, "/soc/interrupt-controller"); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/interrupt-controller", ++ "compatible", "sw64,pintc_vt"); ++ qemu_fdt_setprop(ms->fdt, "/soc/interrupt-controller", ++ "interrupt-controller", NULL, 0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,node", 0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,irq-num", 16); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "sw64,ver", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "#interrupt-cells", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/interrupt-controller", ++ "phandle", intc_phandle); ++ ++ qemu_fdt_add_subnode(ms->fdt, "/soc/serial0@8801"); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "#size-cells", 0x2); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/serial0@8801", ++ "compatible", "ns16550a"); ++ qemu_fdt_setprop_sized_cells(ms->fdt, "/soc/serial0@8801", "reg", ++ 2, c4ms->memmap[VIRT_UART].base, ++ 2, c4ms->memmap[VIRT_UART].size); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "interrupt-parent", intc_phandle); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "interrupts", c4ms->irqmap[VIRT_UART]); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", "reg-shift", 0x0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "reg-io-width", 0x1); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/serial0@8801", ++ "clock-frequency", 20000000); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/serial0@8801", ++ "status", "okay"); ++ ++ qemu_fdt_add_subnode(ms->fdt, 
"/soc/misc0@8036"); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", ++ "#address-cells", 0x2); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", "#size-cells", 0x2); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/misc0@8036", ++ "compatible", "sw6,sunway-ged"); ++ qemu_fdt_setprop_sized_cells(ms->fdt, "/soc/misc0@8036", "reg", ++ 2, c4ms->memmap[VIRT_SUNWAY_GED].base, ++ 2, c4ms->memmap[VIRT_SUNWAY_GED].size); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", ++ "interrupt-parent", intc_phandle); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", ++ "interrupts", c4ms->irqmap[VIRT_SUNWAY_GED]); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", "reg-shift", 0x0); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", "reg-io-width", 0x8); ++ qemu_fdt_setprop_cell(ms->fdt, "/soc/misc0@8036", ++ "clock-frequency", 20000000); ++ qemu_fdt_setprop_string(ms->fdt, "/soc/misc0@8036", "status", "okay"); ++ ++ create_fdt_clk(c4ms); ++ core4_create_numa_fdt(c4ms); ++ create_fdt_misc_platform(c4ms); ++ create_fdt_pcie_controller(c4ms); ++ ++ } ++ ++update_bootargs: ++ if (ms->kernel_filename) { ++ qemu_fdt_setprop_string(ms->fdt, "/chosen", "bootargs", ms->kernel_cmdline); ++ } else { ++ qemu_fdt_setprop_string(ms->fdt, "/chosen", "bootargs", "pcie_ports=native"); ++ } ++} ++ ++static void core4_cpus_init(MachineState *ms) ++{ ++ int i; ++ const CPUArchIdList *possible_cpus; ++ ++ MachineClass *mc = MACHINE_GET_CLASS(ms); ++ possible_cpus = mc->possible_cpu_arch_ids(ms); ++ ++ for (i = 0; i < ms->smp.cpus; i++) { ++ sw64_new_cpu("core4-sw64-cpu", possible_cpus->cpus[i].arch_id, ++ &error_fatal); ++ } ++} ++ ++void sw64_pm_set_irq(void *opaque, int irq, int level) ++{ ++ if (kvm_enabled()) { ++ if (level == 0) ++ return; ++ kvm_set_irq(kvm_state, irq, level); ++ return; ++ } ++} ++ ++void sw64_gpio_set_irq(void *opaque, int irq, int level) ++{ ++ if (kvm_enabled()) { ++ if (level == 0) { ++ return; ++ } ++ kvm_set_irq(kvm_state, irq, level); ++ return; ++ } ++} ++ ++static inline DeviceState *create_sw64_pm(CORE4MachineState *c4ms) ++{ ++ DeviceState *dev; ++ ++ dev = qdev_try_new(TYPE_SW64_PM); ++ ++ if (!dev) { ++ printf("failed to create sw64_pm,Unknown device TYPE_SW64_PM"); ++ } ++ return dev; ++} ++ ++static inline DeviceState *create_sw64_gpio(CORE4MachineState *c4ms) ++{ ++ DeviceState *dev; ++ ++ dev = qdev_try_new(TYPE_SW64_GPIO); ++ ++ if (!dev) { ++ printf("failed to create sw64_gpio,Unknown device TYPE_SW64_GPIO\n"); ++ } ++ return dev; ++} ++ ++static void sw64_create_device_memory(MachineState *machine, BoardState *bs) ++{ ++ ram_addr_t ram_size = machine->ram_size; ++ ram_addr_t device_mem_size; ++ ++ /* always allocate the device memory information */ ++ machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); ++ ++ /* initialize device memory address space */ ++ if (machine->ram_size < machine->maxram_size) { ++ device_mem_size = machine->maxram_size - machine->ram_size; ++ ++ if (machine->ram_slots > ACPI_MAX_RAM_SLOTS) { ++ printf("unsupported amount of memory slots: %"PRIu64, ++ machine->ram_slots); ++ exit(EXIT_FAILURE); ++ } ++ ++ if (QEMU_ALIGN_UP(machine->maxram_size, ++ TARGET_PAGE_SIZE) != machine->maxram_size) { ++ printf("maximum memory size must by aligned to multiple of " ++ "%d bytes", TARGET_PAGE_SIZE); ++ exit(EXIT_FAILURE); ++ } ++ ++ machine->device_memory->base = ram_size; ++ ++ memory_region_init(&machine->device_memory->mr, OBJECT(bs), ++ "device-memory", device_mem_size); ++ memory_region_add_subregion(get_system_memory(), 
machine->device_memory->base, ++ &machine->device_memory->mr); ++ } ++} ++ ++void core4_board_init(MachineState *ms) ++{ ++ CORE4MachineState *core4ms = CORE4_MACHINE(ms); ++ DeviceState *dev = qdev_new(TYPE_CORE4_BOARD); ++ BoardState *bs = CORE4_BOARD(dev); ++ PCIHostState *phb = PCI_HOST_BRIDGE(dev); ++ PCIBus *b; ++ ++ core4ms->memmap = memmap; ++ core4ms->irqmap = irqmap; ++ ++ /* Create device tree */ ++ core4_create_fdt(core4ms); ++ ++ core4ms->acpi_dev = create_sw64_pm(core4ms); ++ ++ core4_cpus_init(ms); ++ ++ if (kvm_enabled()) { ++ if (kvm_has_gsi_routing()) ++ msi_nonbroken = true; ++ } ++ else ++ sw64_create_alarm_timer(ms, bs); ++ ++ sw64_create_device_memory(ms, bs); ++ ++ memory_region_add_subregion(get_system_memory(), 0, ms->ram); ++ ++ memory_region_init_io(&bs->io_spbu, NULL, &spbu_ops, bs, "io_spbu", ++ memmap[VIRT_SPBU].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_SPBU].base, ++ &bs->io_spbu); ++ ++ memory_region_init_io(&bs->io_intpu, NULL, &intpu_ops, bs, "io_intpu", ++ memmap[VIRT_INTPU].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_INTPU].base, ++ &bs->io_intpu); ++ ++ memory_region_init_io(&bs->msi_ep, NULL, &msi_ops, bs, "msi_ep", ++ memmap[VIRT_MSI].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_MSI].base, ++ &bs->msi_ep); ++ ++ memory_region_init(&bs->mem_ep, OBJECT(bs), "pci0-mem", ++ memmap[VIRT_PCIE_IO_BASE].size); ++ ++ memory_region_add_subregion(get_system_memory(), ++ memmap[VIRT_PCIE_IO_BASE].base, &bs->mem_ep); ++ ++ memory_region_init_alias(&bs->mem_ep64, NULL, "mem_ep64", &bs->mem_ep, ++ memmap[VIRT_HIGH_PCIE_MMIO].base, ++ memmap[VIRT_HIGH_PCIE_MMIO].size); ++ memory_region_add_subregion(get_system_memory(), ++ memmap[VIRT_HIGH_PCIE_MMIO].base, &bs->mem_ep64); ++ ++ memory_region_init_io(&bs->io_ep, OBJECT(bs), &sw64_pci_ignore_ops, NULL, ++ "pci0-io-ep", memmap[VIRT_PCIE_PIO].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_PCIE_PIO].base, ++ &bs->io_ep); ++ ++ b = pci_register_root_bus(dev, "pcie.0", sw64_board_set_irq, ++ sw64_board_map_irq, bs, ++ &bs->mem_ep, &bs->io_ep, 0, 537, TYPE_PCI_BUS); ++ phb->bus = b; ++ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); ++ pci_bus_set_route_irq_fn(b, sw64_route_intx_pin_to_irq); ++ memory_region_init_io(&bs->conf_piu0, OBJECT(bs), &sw64_pci_config_ops, b, ++ "pci0-ep-conf-io", memmap[VIRT_PCIE_CFG].size); ++ memory_region_add_subregion(get_system_memory(), ++ memmap[VIRT_PCIE_CFG].base, &bs->conf_piu0); ++ ++ sw64_init_rtc_base_info(); ++ memory_region_init_io(&bs->io_rtc, OBJECT(bs), &rtc_ops, b, ++ "sw64-rtc", memmap[VIRT_RTC].size); ++ memory_region_add_subregion(get_system_memory(), memmap[VIRT_RTC].base, ++ &bs->io_rtc); ++ object_property_add_tm(OBJECT(core4ms), "rtc-time", rtc_get_time); ++ ++ sw64_create_pcie(bs, b, phb); ++ ++ core4ms->fw_cfg = sw64_create_fw_cfg(memmap[VIRT_FW_CFG].base, ++ memmap[VIRT_FW_CFG].size); ++ rom_set_fw(core4ms->fw_cfg); ++ ++ core4ms->gpio_dev = create_sw64_gpio(core4ms); ++ ++ core4ms->bus = phb->bus; ++ ++ if (sw64_is_acpi_enabled(core4ms)) { ++ sw64_acpi_setup((SW64MachineState *)core4ms); ++ } ++ ++ core4_virt_build_smbios(core4ms); ++} ++ ++static Property core4_main_pci_host_props[] = { ++ DEFINE_PROP_UINT32("ofw-addr", BoardState, ofw_addr, 0), ++ DEFINE_PROP_END_OF_LIST() ++}; ++ ++static char *core4_main_ofw_unit_address(const SysBusDevice *dev) ++{ ++ BoardState *s = CORE4_BOARD(dev); ++ return g_strdup_printf("%x", s->ofw_addr); ++} ++ ++static 
void core4_board_pcihost_class_init(ObjectClass *obj, void *data) ++{ ++ DeviceClass *dc = DEVICE_CLASS(obj); ++ SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(obj); ++ ++ dc->props_ = core4_main_pci_host_props; ++ dc->fw_name = "pci"; ++ sbc->explicit_ofw_unit_address = core4_main_ofw_unit_address; ++} ++ ++static const TypeInfo swboard_pcihost_info = { ++ .name = TYPE_CORE4_BOARD, ++ .parent = TYPE_PCI_HOST_BRIDGE, ++ .instance_size = sizeof(BoardState), ++ .class_init = core4_board_pcihost_class_init, ++}; ++ ++static void swboard_register_types(void) ++{ ++ type_register_static(&swboard_pcihost_info); ++} ++ ++type_init(swboard_register_types) +diff --git a/hw/sw64/gpio.h b/hw/sw64/gpio.h +new file mode 100644 +index 0000000000..b523ab879e +--- /dev/null ++++ b/hw/sw64/gpio.h +@@ -0,0 +1,54 @@ ++#ifndef HW_SW64_GPIO_H ++#define HW_SW64_GPIO_H ++ ++#include "hw/sysbus.h" ++ ++#define TYPE_SW64_GPIO "SW64_GPIO" ++#define SW64_GPIO(obj) OBJECT_CHECK(SW64GPIOState, (obj), TYPE_SW64_GPIO) ++ ++#define GPIO_SWPORTA_DR (0x00UL) ++#define GPIO_SWPORTA_DDR (0X200UL) ++#define GPIO_INTEN (0X1800UL) ++#define GPIO_INTMASK (0X1a00UL) ++#define GPIO_INTTYPE_LEVEL (0x1c00UL) ++#define GPIO_INTTYPE_POLA (0x1e00UL) ++#define GPIO_INTTYPE_STATUS (0x2000UL) ++#define GPIO_RAW_INTTYPE_STATUS (0x2200UL) ++#define GPIO_DEB_ENABLE (0x2400UL) ++#define GPIO_CLEAN_INT (0x2600UL) ++#define GPIO_EXT_PORTA (0x2800UL) ++#define GPIO_SYNC_LEVEL (0x3000UL) ++#define GPIO_ID_CODE (0x3200UL) ++#define GPIO_VERSION (0x3600UL) ++#define GPIO_CONF_R1 (0x3a00UL) ++#define GPIO_CONF_R2 (0x3800UL) ++ ++#define SW64_GPIO_MEM_SIZE 0x8000 ++#define SW64_GPIO_PIN_COUNT 1 ++ ++typedef struct SW64GPIOState { ++ SysBusDevice parent_obj; ++ ++ uint32_t padr; ++ uint32_t paddr; ++ uint32_t inter; ++ uint32_t intmr; ++ uint32_t intlr; ++ uint32_t intpr; ++ uint32_t intsr; ++ uint32_t rintsr; ++ uint32_t deber; ++ uint32_t clintr; ++ uint32_t expar; ++ uint32_t synlr; ++ uint32_t idcr; ++ uint32_t versionr; ++ uint32_t conf1r; ++ uint32_t conf2r; ++ ++ qemu_irq irq[SW64_GPIO_PIN_COUNT]; ++ qemu_irq output[SW64_GPIO_PIN_COUNT]; ++} SW64GPIOState; ++ ++void sw64_gpio_set_irq(void *opaque, int irq, int level); ++#endif +diff --git a/hw/sw64/meson.build b/hw/sw64/meson.build +new file mode 100644 +index 0000000000..456eb0290b +--- /dev/null ++++ b/hw/sw64/meson.build +@@ -0,0 +1,17 @@ ++sw64_ss = ss.source_set() ++ ++sw64_ss.add(files('sunway.c'), fdt) ++sw64_ss.add(when: 'CONFIG_SW64_VT_IOMMU', if_true: files('sw64_iommu.c')) ++sw64_ss.add(when: 'CONFIG_ACPI', if_true: files('sw64-acpi-build.c')) ++ ++sw64_ss.add(when: 'CONFIG_CORE3', if_true: files( ++ 'core3.c', ++ 'core3_board.c', ++)) ++ ++sw64_ss.add(when: 'CONFIG_CORE4', if_true: files( ++ 'core4.c', ++ 'core4_board.c', ++)) ++ ++hw_arch += {'sw64': sw64_ss} +diff --git a/hw/sw64/pm.h b/hw/sw64/pm.h +new file mode 100644 +index 0000000000..242fef0946 +--- /dev/null ++++ b/hw/sw64/pm.h +@@ -0,0 +1,59 @@ ++#ifndef HW_SW64_PM_H ++#define HW_SW64_PM_H ++ ++#include "hw/sysbus.h" ++#include "hw/acpi/memory_hotplug.h" ++ ++#include "hw/acpi/cpu_hotplug.h" ++#include "hw/acpi/cpu.h" ++#include "hw/irq.h" ++#include "hw/acpi/acpi_dev_interface.h" ++#include "hw/core/cpu.h" ++#include "qemu/option.h" ++#include "qemu/option_int.h" ++#include "qemu/config-file.h" ++#include "qapi/qmp/qdict.h" ++ ++#define SUNWAY_CPUHOTPLUG_ADD 0x4 ++#define SUNWAY_CPUHOTPLUG_REMOVE 0x8 ++#define OFFSET_CPUID 0x20 ++ ++extern int cpu_hot_id; ++int get_state_cpumask(int cpu_num); ++void 
sw64_cpu_hotplug_hw_init(CPUHotplugState *state, hwaddr base_addr);
++void sw64_acpi_switch_to_modern_cphp(CPUHotplugState *cpuhp_state, uint16_t io_port);
++
++#define OFFSET_START_ADDR 0x0
++#define OFFSET_LENGTH 0x8
++#define OFFSET_STATUS 0x10
++#define OFFSET_SLOT 0x18
++
++#define OFFSET_NODE 0x28
++
++#define SUNWAY_MEMHOTPLUG_ADD 0x1
++#define SUNWAY_MEMHOTPLUG_REMOVE 0x2
++
++typedef struct SW64PMState {
++    /*< private >*/
++    SysBusDevice parent_obj;
++    /*< public >*/
++
++    qemu_irq irq;
++    MemHotplugState acpi_memory_hotplug;
++    unsigned long addr;
++    unsigned long length;
++    unsigned long status;
++    unsigned long slot;
++    unsigned long cpuid;
++    unsigned long node;
++    bool cpu_hotplug_legacy;
++    AcpiCpuHotplug gpe_cpu;
++    CPUHotplugState cpuhp_state;
++} SW64PMState;
++
++#define TYPE_SW64_PM "SW64_PM"
++
++DECLARE_INSTANCE_CHECKER(SW64PMState, SW64_PM, TYPE_SW64_PM)
++
++void sw64_pm_set_irq(void *opaque, int irq, int level);
++#endif
+diff --git a/hw/sw64/sunway.c b/hw/sw64/sunway.c
+new file mode 100644
+index 0000000000..f0c43b8c2f
+--- /dev/null
++++ b/hw/sw64/sunway.c
+@@ -0,0 +1,631 @@
++/*
++ * QEMU SUNWAY system helper.
++ *
++ * Copyright (c) 2023 Lu Feifei
++ *
++ * This work is licensed under the GNU GPL license version 2 or later.
++ */
++
++#include "qemu/osdep.h"
++#include "qemu/datadir.h"
++#include "qapi/error.h"
++#include "cpu.h"
++#include "hw/hw.h"
++#include "hw/irq.h"
++#include "elf.h"
++#include "hw/loader.h"
++#include "hw/boards.h"
++#include "qemu/error-report.h"
++#include "sysemu/sysemu.h"
++#include "sysemu/kvm.h"
++#include "sysemu/reset.h"
++#include "sysemu/rtc.h"
++#include "hw/char/serial.h"
++#include "hw/pci/msi.h"
++#include "hw/firmware/smbios.h"
++#include "hw/nvram/fw_cfg.h"
++#include "qemu/cutils.h"
++#include "ui/console.h"
++#include "hw/sw64/core.h"
++#include "hw/sw64/sunway.h"
++#include "sysemu/numa.h"
++#include "net/net.h"
++#include "sysemu/device_tree.h"
++#include 
++
++#define SW_PIN_TO_IRQ 16
++#define SW_FDT_BASE 0x2d00000ULL
++#define SW_BIOS_BASE 0x2f00000ULL
++#define SW_INITRD_BASE 0x3000000UL
++
++static uint64_t base_rtc;
++static uint64_t last_update;
++unsigned long dtb_start_c4;
++
++void sw64_init_rtc_base_info(void)
++{
++    struct tm tm;
++    qemu_get_timedate(&tm, 0);
++    base_rtc = mktimegm(&tm);
++    last_update = get_clock_realtime() / NANOSECONDS_PER_SECOND;
++}
++
++static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size)
++{
++    uint64_t val;
++    uint64_t guest_clock = get_clock_realtime() / NANOSECONDS_PER_SECOND;
++
++    val = base_rtc + guest_clock - last_update;
++
++    return val;
++}
++
++static void rtc_write(void *opaque, hwaddr addr, uint64_t val,
++                      unsigned size)
++{
++}
++
++const MemoryRegionOps rtc_ops = {
++    .read = rtc_read,
++    .write = rtc_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .valid = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++    .impl = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++};
++
++static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size)
++{
++    return 1;
++}
++
++static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size)
++{
++}
++
++const MemoryRegionOps sw64_pci_ignore_ops = {
++    .read = ignore_read,
++    .write = ignore_write,
++    .endianness = DEVICE_LITTLE_ENDIAN,
++    .valid = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++    .impl = {
++        .min_access_size = 1,
++        .max_access_size = 8,
++    },
++};
++
++static uint64_t config_read(void *opaque, hwaddr addr, unsigned size)
++{
++    PCIBus 
*b = opaque; ++ uint32_t trans_addr = 0; ++ ++ trans_addr |= ((addr >> 16) & 0xffff) << 8; ++ trans_addr |= (addr & 0xff); ++ ++ return pci_data_read(b, trans_addr, size); ++} ++ ++static void config_write(void *opaque, hwaddr addr, uint64_t val, ++ unsigned size) ++{ ++ PCIBus *b = opaque; ++ uint32_t trans_addr = 0; ++ ++ trans_addr |= ((addr >> 16) & 0xffff) << 8; ++ trans_addr |= (addr & 0xff); ++ ++ pci_data_write(b, trans_addr, val, size); ++} ++ ++const MemoryRegionOps sw64_pci_config_ops = { ++ .read = config_read, ++ .write = config_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++}; ++ ++static MemTxResult msi_read(void *opaque, hwaddr addr, ++ uint64_t *data, unsigned size, ++ MemTxAttrs attrs) ++{ ++ return MEMTX_OK; ++} ++ ++MemTxResult msi_write(void *opaque, hwaddr addr, ++ uint64_t value, unsigned size, ++ MemTxAttrs attrs) ++{ ++ int ret = 0; ++ MSIMessage msg = {}; ++ ++ if (!kvm_enabled()) ++ return MEMTX_DECODE_ERROR; ++ ++ msg.address = (uint64_t) addr + 0x8000fee00000; ++ msg.data = (uint32_t) value; ++ ++ ret = kvm_irqchip_send_msi(kvm_state, msg); ++ if (ret < 0) { ++ fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n", ++ strerror(-ret)); ++ } ++ ++ return MEMTX_OK; ++} ++ ++const MemoryRegionOps msi_ops = { ++ .read_with_attrs = msi_read, ++ .write_with_attrs = msi_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = ++ { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++ .impl = ++ { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++}; ++ ++uint64_t cpu_sw64_virt_to_phys(void *opaque, uint64_t addr) ++{ ++ return addr &= ~0xffffffff80000000 ; ++} ++ ++CpuInstanceProperties ++sw64_cpu_index_to_props(MachineState *ms, unsigned cpu_index) ++{ ++ MachineClass *mc = MACHINE_GET_CLASS(ms); ++ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); ++ ++ assert(cpu_index < possible_cpus->len); ++ return possible_cpus->cpus[cpu_index].props; ++} ++ ++int64_t sw64_get_default_cpu_node_id(const MachineState *ms, int idx) ++{ ++ int nb_numa_nodes = ms->numa_state->num_nodes; ++ return idx % nb_numa_nodes; ++} ++ ++const CPUArchIdList *sw64_possible_cpu_arch_ids(MachineState *ms) ++{ ++ int i; ++ unsigned int max_cpus = ms->smp.max_cpus; ++ ++ if (ms->possible_cpus) { ++ /* ++ * make sure that max_cpus hasn't changed since the first use, i.e. 
++     * -smp hasn't been parsed after it
++     */
++        assert(ms->possible_cpus->len == max_cpus);
++        return ms->possible_cpus;
++    }
++
++    ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
++                                  sizeof(CPUArchId) * max_cpus);
++    ms->possible_cpus->len = max_cpus;
++    for (i = 0; i < ms->possible_cpus->len; i++) {
++        ms->possible_cpus->cpus[i].type = ms->cpu_type;
++        ms->possible_cpus->cpus[i].vcpus_count = 1;
++        ms->possible_cpus->cpus[i].arch_id = i;
++
++        ms->possible_cpus->cpus[i].props.has_thread_id = true;
++        ms->possible_cpus->cpus[i].props.thread_id = i % ms->smp.threads;
++
++        ms->possible_cpus->cpus[i].props.has_core_id = true;
++        ms->possible_cpus->cpus[i].props.core_id =
++            i / ms->smp.threads % ms->smp.cores;
++
++        ms->possible_cpus->cpus[i].props.has_socket_id = true;
++        ms->possible_cpus->cpus[i].props.socket_id =
++            i / (ms->smp.cores * ms->smp.threads);
++    }
++
++    return ms->possible_cpus;
++}
++
++void sw64_cpu_reset(void *opaque)
++{
++    SW64CPU *cpu = opaque;
++
++    if (!kvm_enabled())
++        cpu_reset(CPU(cpu));
++
++    return;
++}
++
++void sw64_set_clocksource(void)
++{
++    FILE *fp;
++    unsigned long mclk;
++    char buff[64];
++
++    fp = fopen("/sys/kernel/debug/sw64/mclk", "rb");
++    if (fp == NULL) {
++        printf("Failed to open file mclk.\n");
++        return;
++    }
++
++    if (fgets(buff, 64, fp) == NULL) {
++        printf("Error in reading mclk.\n");
++        fclose(fp);
++        fp = NULL;
++        return;
++    }
++
++    mclk = atoi(buff);
++    fclose(fp);
++    rom_add_blob_fixed("mclk", (char *)&mclk, 0x8, 0x908001);
++}
++
++void sw64_board_reset(MachineState *state, ShutdownCause reason)
++{
++    qemu_devices_reset(reason);
++}
++
++void sw64_set_ram_size(ram_addr_t ram_size)
++{
++    ram_addr_t buf;
++
++    if (kvm_enabled())
++        buf = ram_size;
++    else
++        buf = ram_size | (1UL << 63);
++
++    rom_add_blob_fixed("ram_size", (char *)&buf, 0x8, 0x2040);
++
++    return;
++}
++
++void sw64_clear_uefi_bios(void)
++{
++    unsigned long uefi_bios[1] = {0};
++
++    /*
++     * Clear the first 8 bytes of the UEFI BIOS to ensure the
++     * correctness of the reset process.
++     */
++    rom_add_blob_fixed("uefi_bios", uefi_bios, 0x8, SW_BIOS_BASE);
++
++    return;
++}
++
++void sw64_clear_smp_rcb(void)
++{
++    unsigned long smp_rcb[4] = {0};
++
++    /* Clear the smp_rcb fields to ensure the correctness of the reset process. */
++    rom_add_blob_fixed("smp_rcb", smp_rcb, 0x20, 0x820000);
++
++    return;
++}
++
++void sw64_clear_kernel_print(void)
++{
++    unsigned long kernel_print[2048] = {0};
++
++    /*
++     * Clearing the memory that holds the kernel print buffer when the
++     * VM reboots makes debugging easier.
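++     * The buffer is registered as a ROM blob, so it is zeroed again
++     * on every system reset.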
++     */
++    rom_add_blob_fixed("kernel_print", kernel_print, 0x4000, 0x700000);
++
++    return;
++}
++
++void sw64_load_hmcode(const char *hmcode_filename, uint64_t *hmcode_entry)
++{
++    long size;
++
++    size = load_elf(hmcode_filename, NULL, cpu_sw64_virt_to_phys, NULL,
++                    hmcode_entry, NULL, NULL, NULL, 0, EM_SW64, 0, 0);
++    if (size < 0) {
++        error_report("could not load hmcode: '%s'", hmcode_filename);
++        exit(1);
++    }
++
++    return;
++}
++
++void sw64_find_and_load_bios(const char *bios_name)
++{
++    char *uefi_filename;
++    long size;
++
++    uefi_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
++    if (uefi_filename == NULL) {
++        error_report("no virtual bios provided");
++        exit(1);
++    }
++
++    size = load_image_targphys(uefi_filename, SW_BIOS_BASE, -1);
++    if (size < 0) {
++        error_report("could not load virtual bios: '%s'", uefi_filename);
++        exit(1);
++    }
++
++    g_free(uefi_filename);
++    return;
++}
++
++void sw64_load_kernel(const char *kernel_filename, uint64_t *kernel_entry,
++                      const char *kernel_cmdline)
++{
++    long size;
++    uint64_t param_offset;
++
++    size = load_elf(kernel_filename, NULL, cpu_sw64_virt_to_phys, NULL,
++                    kernel_entry, NULL, NULL, NULL, 0, EM_SW64, 0, 0);
++    if (size < 0) {
++        error_report("could not load kernel '%s'", kernel_filename);
++        exit(1);
++    }
++
++    if (kernel_cmdline) {
++        param_offset = 0x90B000UL;
++        pstrcpy_targphys("cmdline", param_offset, 0x400, kernel_cmdline);
++    }
++
++    return;
++}
++
++void sw64_load_initrd(const char *initrd_filename,
++                      BOOT_PARAMS *sunway_boot_params)
++{
++    long initrd_size;
++
++    initrd_size = get_image_size(initrd_filename);
++    if (initrd_size < 0) {
++        error_report("could not load initial ram disk '%s'",
++                     initrd_filename);
++        exit(1);
++    }
++    /* Load the initrd image at the fixed SW_INITRD_BASE. */
++    load_image_targphys(initrd_filename, SW_INITRD_BASE, initrd_size);
++    sunway_boot_params->initrd_start = SW_INITRD_BASE | 0xfff0000000000000UL;
++    sunway_boot_params->initrd_size = initrd_size;
++
++    return;
++}
++
++int sw64_load_dtb(MachineState *ms, BOOT_PARAMS *sunway_boot_params)
++{
++    int ret, fdt_size;
++    hwaddr fdt_base;
++
++    if (ms->kernel_filename) {
++        /* For direct kernel boot, place the DTB after the initrd to avoid
++         * overlaying the loaded kernel.
++         */
++        sunway_boot_params->dtb_start = (SW_INITRD_BASE | 0xfff0000000000000UL)
++                                        + sunway_boot_params->initrd_size;
++    } else {
++        /* For BIOS boot, place the DTB at SW_FDT_BASE temporarily; the
++         * bootloader will move it to a more proper place later.
++         */
++        sunway_boot_params->dtb_start = SW_FDT_BASE | 0xfff0000000000000UL;
++    }
++
++    if (!ms->fdt) {
++        fprintf(stderr, "Board was unable to create a dtb blob\n");
++        return -1;
++    }
++
++    ret = fdt_pack(ms->fdt);
++    /* Should only fail if we've built a corrupted tree */
++    g_assert(ret == 0);
++    fdt_size = fdt_totalsize(ms->fdt);
++    qemu_fdt_dumpdtb(ms->fdt, fdt_size);
++
++    /* Put the DTB into the memory map as a ROM image: this will ensure
++     * the DTB is copied again upon reset, even if addr points into RAM.
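++     * The 0xfff0000000000000UL prefix marks a kernel virtual address;
++     * masking it off below yields the physical load address.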
++     */
++    fdt_base = sunway_boot_params->dtb_start & (~0xfff0000000000000UL);
++
++    dtb_start_c4 = sunway_boot_params->dtb_start;
++
++    rom_add_blob_fixed("dtb", ms->fdt, fdt_size, fdt_base);
++
++    return 0;
++}
++
++void sw64_board_alarm_timer(void *opaque)
++{
++    TimerState *ts = (TimerState *)((uintptr_t)opaque);
++
++    if (!kvm_enabled()) {
++        int cpu = ts->order;
++        cpu_interrupt(qemu_get_cpu(cpu), CPU_INTERRUPT_TIMER);
++    }
++
++    return;
++}
++
++void sw64_create_alarm_timer(MachineState *ms, BoardState *bs)
++{
++    TimerState *ts;
++    SW64CPU *cpu;
++    int i;
++
++    for (i = 0; i < ms->smp.cpus; ++i) {
++        cpu = SW64_CPU(qemu_get_cpu(i));
++        ts = g_new(TimerState, 1);
++        ts->opaque = (void *) ((uintptr_t)bs);
++        ts->order = i;
++        cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
++                                        &sw64_board_alarm_timer, ts);
++    }
++}
++
++PCIINTxRoute sw64_route_intx_pin_to_irq(void *opaque, int pin)
++{
++    PCIINTxRoute route;
++
++    route.mode = PCI_INTX_ENABLED;
++    route.irq = SW_PIN_TO_IRQ;
++    return route;
++}
++
++uint64_t convert_bit(int n)
++{
++    uint64_t ret;
++
++    if (n == 64)
++        ret = 0xffffffffffffffffUL;
++    else
++        ret = (1UL << n) - 1;
++
++    return ret;
++}
++
++FWCfgState *sw64_create_fw_cfg(hwaddr addr, hwaddr size)
++{
++    MachineState *ms = MACHINE(qdev_get_machine());
++    uint16_t smp_cpus = ms->smp.cpus;
++    FWCfgState *fw_cfg;
++
++    fw_cfg = fw_cfg_init_mem_wide(addr + 8, addr, 8,
++                                  addr + 16, &address_space_memory);
++    fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, smp_cpus);
++
++    if (!ms->dtb) {
++        char *nodename;
++
++        nodename = g_strdup_printf("/soc/fw_cfg@8049");
++        qemu_fdt_add_subnode(ms->fdt, nodename);
++        qemu_fdt_setprop_string(ms->fdt, nodename,
++                                "compatible", "qemu,fw-cfg-mmio");
++        qemu_fdt_setprop(ms->fdt, nodename, "dma-coherent", NULL, 0);
++        qemu_fdt_setprop_sized_cells(ms->fdt, nodename,
++                                     "reg", 2, addr, 2, size);
++        g_free(nodename);
++    }
++
++    return fw_cfg;
++}
++
++void sw64_virt_build_smbios(FWCfgState *fw_cfg)
++{
++    uint8_t *smbios_tables, *smbios_anchor;
++    size_t smbios_tables_len, smbios_anchor_len;
++    const char *product = "QEMU Virtual Machine";
++
++    if (kvm_enabled())
++        product = "KVM Virtual Machine";
++
++    smbios_set_defaults("QEMU", product,
++                        "sw64", false,
++                        true, SMBIOS_ENTRY_POINT_TYPE_64);
++
++    smbios_get_tables(MACHINE(qdev_get_machine()), NULL, 0,
++                      &smbios_tables, &smbios_tables_len,
++                      &smbios_anchor, &smbios_anchor_len,
++                      &error_fatal);
++
++    if (smbios_anchor) {
++        fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-tables",
++                        smbios_tables, smbios_tables_len);
++        fw_cfg_add_file(fw_cfg, "etc/smbios/smbios-anchor",
++                        smbios_anchor, smbios_anchor_len);
++    }
++
++    return;
++}
++
++void sw64_board_set_irq(void *opaque, int irq, int level)
++{
++    if (level == 0)
++        return;
++
++    if (kvm_enabled()) {
++        kvm_set_irq(kvm_state, irq, level);
++        return;
++    }
++
++    cpu_interrupt(qemu_get_cpu(0), CPU_INTERRUPT_PCIE);
++}
++
++int sw64_board_map_irq(PCIDevice *d, int irq_num)
++{
++    /* The return value is the interrupt type passed to the kernel, so
++     * it must match the type handled by do_entInt() in the kernel.
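++     * All INTx pins are routed to the same type (SW_PIN_TO_IRQ == 16);
++     * see sw64_route_intx_pin_to_irq() above.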
++ */ ++ return 16; ++} ++ ++void serial_set_irq(void *opaque, int irq, int level) ++{ ++ if (level == 0) ++ return; ++ ++ if (kvm_enabled()) { ++ kvm_set_irq(kvm_state, irq, level); ++ return; ++ } ++ ++ cpu_interrupt(qemu_get_cpu(0), CPU_INTERRUPT_HARD); ++} ++ ++void sw64_new_cpu(const char *name, int64_t arch_id, Error **errp) ++{ ++ Object *cpu = NULL; ++ Error *local_err = NULL; ++ ++ cpu = object_new(name); ++ object_property_set_uint(cpu, "core-id", arch_id, &local_err); ++ object_property_set_bool(cpu, "realized", true, &local_err); ++ ++ object_unref(cpu); ++ error_propagate(errp, local_err); ++} ++ ++void sw64_create_pcie(BoardState *bs, PCIBus *b, PCIHostState *phb) ++{ ++ int i; ++ ++ for (i = 0; i < nb_nics; i++) { ++ pci_nic_init_nofail(&nd_table[i], b, "e1000", NULL); ++ } ++ ++ pci_vga_init(b); ++ ++ bs->serial_irq = qemu_allocate_irq(serial_set_irq, bs, 12); ++ if (serial_hd(0)) { ++ serial_mm_init(get_system_memory(), 0x3F8 + 0x880100000000ULL, 0, ++ bs->serial_irq, (1843200 >> 4), serial_hd(0), ++ DEVICE_LITTLE_ENDIAN); ++ } ++} ++ ++void rtc_get_time(Object *obj, struct tm *current_tm, Error **errp) ++{ ++ time_t guest_sec; ++ int64_t guest_nsec; ++ ++ guest_nsec = get_clock_realtime(); ++ guest_sec = guest_nsec / NANOSECONDS_PER_SECOND; ++ gmtime_r(&guest_sec, current_tm); ++} +diff --git a/hw/sw64/sw64-acpi-build.c b/hw/sw64/sw64-acpi-build.c +new file mode 100644 +index 0000000000..0a06c969dd +--- /dev/null ++++ b/hw/sw64/sw64-acpi-build.c +@@ -0,0 +1,876 @@ ++/* Support for generating ACPI tables and passing them to Guests ++ * ++ * SW64 ACPI generation ++ * ++ * Copyright (C) 2023 Wang Yuanheng ++ * ++ * This work is licensed under the GNU GPL license version 2 or later. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "qemu/bitmap.h" ++#include "hw/core/cpu.h" ++#include "target/sw64/cpu.h" ++#include "hw/acpi/acpi-defs.h" ++#include "hw/acpi/acpi.h" ++#include "hw/nvram/fw_cfg.h" ++#include "hw/acpi/bios-linker-loader.h" ++#include "hw/acpi/aml-build.h" ++#include "hw/acpi/utils.h" ++#include "hw/acpi/pci.h" ++#include "hw/acpi/memory_hotplug.h" ++#include "hw/acpi/generic_event_device.h" ++#include "hw/acpi/tpm.h" ++#include "hw/pci/pcie_host.h" ++#include "hw/acpi/aml-build.h" ++#include "hw/pci/pci.h" ++#include "hw/pci/pci_bus.h" ++#include "hw/pci-host/gpex.h" ++#include "hw/sw64/core.h" ++#include "hw/platform-bus.h" ++#include "sysemu/numa.h" ++#include "sysemu/reset.h" ++#include "sysemu/tpm.h" ++#include "kvm_sw64.h" ++#include "migration/vmstate.h" ++#include "hw/acpi/ghes.h" ++#include "hw/sw64/gpio.h" ++#include "hw/irq.h" ++#include "sysemu/runstate.h" ++ ++#define SW64_PCIE_IRQMAP 16 ++#define SW64_MCU_GSI_BASE 64 ++#define ACPI_BUILD_TABLE_SIZE 0x20000 ++ ++static void acpi_dsdt_add_uart(Aml *scope, const MemMapEntry *uart_memmap, ++ uint32_t uart_irq) ++{ ++ Aml *method, *dev, *crs; ++ Aml *pkg; ++ Aml *pkg0, *pkg1, *pkg2, *pkg3; ++ ++ dev = aml_device("COM0"); ++ aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); ++ aml_append(dev, aml_name_decl("_UID", aml_int(0))); ++ ++ crs = aml_resource_template(); ++ aml_append(crs, ++ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, ++ AML_NON_CACHEABLE, AML_READ_WRITE, ++ 0, uart_memmap->base, ++ uart_memmap->base + uart_memmap->size - 1, ++ 0, uart_memmap->size)); ++ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, ++ AML_SHARED, &uart_irq, 1)); ++ aml_append(dev, aml_name_decl("_CRS", crs)); ++ ++ pkg0 = aml_package(0x2); ++ 
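++    /*
++     * Expose the serial clock and register layout as _DSD device
++     * properties under the ACPI "Device Properties" UUID
++     * (DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301).
++     */
++ 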
aml_append(pkg0, aml_string("clock-frequency"));
++    aml_append(pkg0, aml_int(20000000));
++
++    pkg1 = aml_package(0x2);
++    aml_append(pkg1, aml_string("reg-io-width"));
++    aml_append(pkg1, aml_int(0x1));
++
++    pkg2 = aml_package(0x2);
++    aml_append(pkg2, aml_string("reg-shift"));
++    aml_append(pkg2, aml_int(0x0));
++
++    pkg3 = aml_package(0x3);
++    aml_append(pkg3, pkg0);
++    aml_append(pkg3, pkg1);
++    aml_append(pkg3, pkg2);
++
++    pkg = aml_package(0x2);
++    aml_append(pkg, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
++    aml_append(pkg, pkg3);
++
++    aml_append(dev, aml_name_decl("_DSD", pkg)); /* Device-Specific Data */
++
++    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
++    aml_append(method, aml_return(aml_int(0xF)));
++    aml_append(dev, method);
++
++    /* Append the device to the scope only after all of its members have
++     * been added: aml_append() serializes the child immediately, so
++     * anything added to "dev" afterwards would be lost.
++     */
++    aml_append(scope, dev);
++}
++
++static void acpi_dsdt_add_sunway_ged(Aml *scope, const MemMapEntry *ged_memmap,
++                                     uint32_t ged_irq)
++{
++    Aml *method, *dev, *crs;
++    Aml *pkg, *pkg0;
++
++    dev = aml_device("SMHP");
++    aml_append(dev, aml_name_decl("_HID", aml_string("SUNW1000")));
++    aml_append(dev, aml_name_decl("_UID", aml_int(0)));
++
++    crs = aml_resource_template();
++    aml_append(crs,
++               aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
++                                AML_NON_CACHEABLE, AML_READ_WRITE,
++                                0, ged_memmap->base,
++                                ged_memmap->base + ged_memmap->size - 1,
++                                0, ged_memmap->size));
++    aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
++                                  AML_SHARED, &ged_irq, 1));
++    aml_append(dev, aml_name_decl("_CRS", crs));
++
++    pkg0 = aml_package(0x2);
++    aml_append(pkg0, aml_string("clock-frequency"));
++    aml_append(pkg0, aml_int(20000000));
++
++    pkg = aml_package(0x2);
++    aml_append(pkg, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
++    aml_append(pkg, pkg0);
++
++    aml_append(dev, aml_name_decl("_DSD", pkg));
++
++    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
++    aml_append(method, aml_return(aml_int(0xF)));
++    aml_append(dev, method);
++
++    /* As above: serialize the device into the scope last. */
++    aml_append(scope, dev);
++}
++
++static void acpi_dsdt_add_fw_cfg(Aml *scope, const MemMapEntry *fw_cfg_memmap)
++{
++    Aml *dev = aml_device("FWCF");
++    aml_append(dev, aml_name_decl("_HID", aml_string("QEMU0002")));
++    /* device present, functioning, decoding, not shown in UI */
++    aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
++    aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
++
++    Aml *crs = aml_resource_template();
++    aml_append(crs,
++               aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
++                                AML_NON_CACHEABLE, AML_READ_WRITE,
++                                0, fw_cfg_memmap->base,
++                                fw_cfg_memmap->base + fw_cfg_memmap->size - 1,
++                                0, fw_cfg_memmap->size));
++    aml_append(dev, aml_name_decl("_CRS", crs));
++    aml_append(scope, dev);
++}
++
++static void acpi_dsdt_add_pci_osc(Aml *dev)
++{
++    Aml *method, *UUID, *ifctx, *ifctx1, *elsectx;
++
++    /* Declare an _OSC (OS Control Handoff) method */
++    aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
++    aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
++    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
++    aml_append(method,
++               aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));
++
++    /* PCI Firmware Specification 3.0
++     * 4.5.1. 
_OSC Interface for PCI Host Bridge Devices ++ * The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is ++ * identified by the Universal Unique IDentifier (UUID) ++ * 33DB4D5B-1FF7-401C-9657-7441C03DD766 ++ */ ++ UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"); ++ ifctx = aml_if(aml_equal(aml_arg(0), UUID)); ++ aml_append(ifctx, ++ aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); ++ aml_append(ifctx, ++ aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); ++ aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP"))); ++ aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL"))); ++ /* ++ * Allow OS control for all 5 features: ++ * PCIeHotplug SHPCHotplug PME AER PCIeCapability. ++ */ ++ aml_append(ifctx, aml_and(aml_name("CTRL"), aml_int(0x1F), ++ aml_name("CTRL"))); ++ ++ ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1)))); ++ aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x08), ++ aml_name("CDW1"))); ++ aml_append(ifctx, ifctx1); ++ ++ ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL")))); ++ aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x10), ++ aml_name("CDW1"))); ++ aml_append(ifctx, ifctx1); ++ ++ aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3"))); ++ aml_append(ifctx, aml_return(aml_arg(3))); ++ aml_append(method, ifctx); ++ ++ elsectx = aml_else(); ++ aml_append(elsectx, aml_or(aml_name("CDW1"), aml_int(4), ++ aml_name("CDW1"))); ++ aml_append(elsectx, aml_return(aml_arg(3))); ++ aml_append(method, elsectx); ++ aml_append(dev, method); ++} ++ ++static void acpi_dsdt_add_pci(Aml *scope, uint32_t irq, SW64MachineState *vms) ++{ ++ CrsRangeEntry *entry; ++ Aml *dev, *rbuf, *method, *crs; ++ CrsRangeSet crs_range_set; ++ int i; ++ Aml *pkg0, *pkg1, *pkg; ++ ++ struct GPEXConfig cfg = { ++ .mmio32 = vms->memmap[VIRT_PCIE_MMIO], ++ .mmio64 = vms->memmap[VIRT_HIGH_PCIE_MMIO], ++ .pio = vms->memmap[VIRT_PCIE_PIO], ++ .ecam = vms->memmap[VIRT_PCIE_CFG], ++ .irq = irq, ++ .bus = vms->bus, ++ }; ++ ++ /* PCI0 */ ++ dev = aml_device("PCI0"); ++ aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); ++ aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); ++ aml_append(dev, aml_name_decl("_SEG", aml_int(0))); ++ aml_append(dev, aml_name_decl("_BBN", aml_int(0))); ++ method = aml_method("_PXM", 0, AML_SERIALIZED); ++ aml_append(method, aml_return(aml_int(0x0))); ++ aml_append(dev, method); ++ ++ method = aml_method("_STA", 0, AML_NOTSERIALIZED); ++ aml_append(method, aml_return(aml_int(0xF))); ++ aml_append(dev, method); ++ ++ pkg1 = aml_package(0x9); ++ ++ pkg0 = aml_package(0x2); ++ aml_append(pkg0, aml_string("sunway,rc-config-base")); ++ aml_append(pkg0, aml_int(0x0)); ++ aml_append(pkg1, pkg0); ++ ++ pkg0 = aml_package(0x2); ++ aml_append(pkg0, aml_string("sunway,ep-config-base")); ++ aml_append(pkg0, aml_int(vms->memmap[VIRT_PCIE_CFG].base)); ++ aml_append(pkg1, pkg0); ++ ++ pkg0 = aml_package(0x2); ++ aml_append(pkg0, aml_string("sunway,ep-mem-32-base")); ++ aml_append(pkg0, aml_int(vms->memmap[VIRT_PCIE_IO_BASE].base ++ | vms->memmap[VIRT_PCIE_MMIO].base)); ++ aml_append(pkg1, pkg0); ++ ++ pkg0 = aml_package(0x2); ++ aml_append(pkg0, aml_string("sunway,ep-mem-64-base")); ++ aml_append(pkg0, aml_int(vms->memmap[VIRT_HIGH_PCIE_MMIO].base)); ++ aml_append(pkg1, pkg0); ++ ++ pkg0 = aml_package(0x2); ++ aml_append(pkg0, aml_string("sunway,ep-io-base")); ++ aml_append(pkg0, aml_int(vms->memmap[VIRT_PCIE_PIO].base)); ++ aml_append(pkg1, pkg0); ++ ++ pkg0 = aml_package(0x2); ++ 
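++    /*
++     * The remaining sunway,* entries use the same property names as the
++     * device-tree node built in create_fdt_pcie_controller().
++     */
++ 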
aml_append(pkg0, aml_string("sunway,piu-ior0-base"));
++    aml_append(pkg0, aml_int(0x0));
++    aml_append(pkg1, pkg0);
++
++    pkg0 = aml_package(0x2);
++    aml_append(pkg0, aml_string("sunway,piu-ior1-base"));
++    aml_append(pkg0, aml_int(0x0));
++    aml_append(pkg1, pkg0);
++
++    pkg0 = aml_package(0x2);
++    aml_append(pkg0, aml_string("sunway,rc-index"));
++    aml_append(pkg0, aml_int(0));
++    aml_append(pkg1, pkg0);
++
++    pkg0 = aml_package(0x2);
++    aml_append(pkg0, aml_string("sunway,pcie-io-base"));
++    aml_append(pkg0, aml_int(vms->memmap[VIRT_PCIE_IO_BASE].base));
++    aml_append(pkg1, pkg0);
++
++    pkg = aml_package(0x2);
++    aml_append(pkg, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
++    aml_append(pkg, pkg1);
++    aml_append(dev, aml_name_decl("_DSD", pkg));
++
++    rbuf = aml_resource_template();
++    aml_append(dev, aml_name_decl("CRS0", rbuf));
++    aml_append(rbuf,
++               aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
++                                   0x0000, 0x0000, 0xFF, 0x0000, 0x100));
++    crs_range_set_init(&crs_range_set);
++    if (cfg.mmio32.size) {
++        crs_replace_with_free_ranges(crs_range_set.mem_ranges,
++                                     cfg.mmio32.base,
++                                     cfg.mmio32.base + cfg.mmio32.size - 1);
++        for (i = 0; i < crs_range_set.mem_ranges->len; i++) {
++            entry = g_ptr_array_index(crs_range_set.mem_ranges, i);
++            aml_append(rbuf,
++                aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
++                                 AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
++                                 entry->base, entry->limit,
++                                 0x0000, entry->limit - entry->base + 1));
++        }
++    }
++
++    if (cfg.pio.size) {
++        crs_replace_with_free_ranges(crs_range_set.io_ranges,
++                                     cfg.pio.base,
++                                     cfg.pio.base + cfg.pio.size - 1);
++        for (i = 0; i < crs_range_set.io_ranges->len; i++) {
++            entry = g_ptr_array_index(crs_range_set.io_ranges, i);
++            aml_append(rbuf,
++                aml_qword_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
++                             AML_ENTIRE_RANGE, 0x0000, entry->base,
++                             entry->limit, 0x0000,
++                             entry->limit - entry->base + 1));
++        }
++    }
++    if (cfg.mmio64.size) {
++        crs_replace_with_free_ranges(crs_range_set.mem_64bit_ranges,
++                                     cfg.mmio64.base,
++                                     cfg.mmio64.base + cfg.mmio64.size - 1);
++        for (i = 0; i < crs_range_set.mem_64bit_ranges->len; i++) {
++            entry = g_ptr_array_index(crs_range_set.mem_64bit_ranges, i);
++            aml_append(rbuf,
++                aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
++                                 AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
++                                 entry->base,
++                                 entry->limit, 0x0000,
++                                 entry->limit - entry->base + 1));
++        }
++    }
++
++    method = aml_method("_CRS", 0, AML_SERIALIZED);
++    aml_append(method, aml_return(rbuf));
++    aml_append(dev, method);
++    acpi_dsdt_add_pci_osc(dev);
++
++    /* RES0 */
++    Aml *dev_res0 = aml_device("%s", "RES0");
++    aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));
++    crs = aml_resource_template();
++    aml_append(crs,
++               aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
++                                AML_NON_CACHEABLE, AML_READ_WRITE, 0x0000,
++                                cfg.ecam.base,
++                                cfg.ecam.base + cfg.ecam.size - 1,
++                                0x0000,
++                                cfg.ecam.size));
++    /* Attach the resources and _STA to RES0 before it is serialized. */
++    aml_append(dev_res0, aml_name_decl("_CRS", crs));
++
++    method = aml_method("_STA", 0, AML_NOTSERIALIZED);
++    aml_append(method, aml_return(aml_int(0xF)));
++    aml_append(dev_res0, method);
++
++    aml_append(dev, dev_res0);
++    aml_append(scope, dev);
++    crs_range_set_free(&crs_range_set);
++}
++
++static void
++build_srat(GArray *table_data, BIOSLinker *linker, SW64MachineState *vms)
++{
++    int i;
++    uint64_t mem_base;
++    MachineClass *mc = MACHINE_GET_CLASS(vms);
++    MachineState *ms = MACHINE(vms);
++    const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms);
++    AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id,
++ 
.oem_table_id = vms->oem_table_id }; ++ ++ acpi_table_begin(&table, table_data); ++ build_append_int_noprefix(table_data, 1, 4); /* Reserved */ ++ build_append_int_noprefix(table_data, 0, 8); /* Reserved */ ++ ++ for (i = 0; i < cpu_list->len; ++i) { ++ uint32_t node_id = cpu_list->cpus[i].props.node_id; ++ ++ /* CPU Affinity Structure */ ++ build_append_int_noprefix(table_data, 2, 1); /* Type */ ++ build_append_int_noprefix(table_data, 24, 1); /* Length */ ++ build_append_int_noprefix(table_data, 0, 2); /* Reserved */ ++ /* Proximity Domain */ ++ build_append_int_noprefix(table_data, node_id, 4); ++ build_append_int_noprefix(table_data, i, 4); /* APIC ID */ ++ /* Flags */ ++ build_append_int_noprefix(table_data, 1 /* Enabled */, 4); ++ build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ ++ build_append_int_noprefix(table_data, 0, 4); /* Reserved */ ++ } ++ ++ /* Memory Affinity Structure */ ++ mem_base = 0; ++ for (i = 0; i < ms->numa_state->num_nodes; ++i) { ++ if (ms->numa_state->nodes[i].node_mem > 0) { ++ build_srat_memory(table_data, mem_base, ++ ms->numa_state->nodes[i].node_mem, i, ++ MEM_AFFINITY_ENABLED); ++ mem_base += ms->numa_state->nodes[i].node_mem; ++ } ++ } ++ ++ if (ms->device_memory) { ++ build_srat_memory(table_data, ms->device_memory->base, ++ memory_region_size(&ms->device_memory->mr), ++ ms->numa_state->num_nodes - 1, ++ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); ++ } ++ ++ acpi_table_end(linker, &table); ++} ++ ++static void build_append_mcu_intc(GArray *table_data) ++{ ++ build_append_int_noprefix(table_data, 0x0, 1); /* Type: MCU INTC */ ++ build_append_int_noprefix(table_data, 1, 1); /* Status */ ++ build_append_int_noprefix(table_data, 0, 2); /* Reserved */ ++ build_append_int_noprefix(table_data, 0xf0000001, 4);/* Hardware ID */ ++ build_append_int_noprefix(table_data, 0, 8); /* Address Base */ ++ build_append_int_noprefix(table_data, 0, 4); /* Size */ ++ build_append_int_noprefix(table_data, SW64_MCU_GSI_BASE, 4); /* GSI Base */ ++ build_append_int_noprefix(table_data, 16, 4); /* GSI Count */ ++ build_append_int_noprefix(table_data, 0, 4); /* Cascade Vector */ ++} ++ ++static void ++build_madt(GArray *table_data, BIOSLinker *linker, SW64MachineState *vms) ++{ ++ int i; ++ const MemMapEntry *memmap = vms->memmap; ++ MachineState *ms = MACHINE(vms); ++ unsigned int num_cpus = ms->smp.max_cpus; ++ uint32_t flags, enabled, virt, online_capable; ++ AcpiTable table = { .sig = "APIC", .rev = 4, .oem_id = vms->oem_id, ++ .oem_table_id = vms->oem_table_id }; ++ ++ acpi_table_begin(&table, table_data); ++ ++ /* Local Interrupt Controller Address */ ++ build_append_int_noprefix(table_data, 0, 4); ++ /* Flags */ ++ build_append_int_noprefix(table_data, 0, 4); ++ ++ /* CINTC Structure */ ++ for (i = 0; i < num_cpus; i++) { ++ SW64CPU *sw64cpu = SW64_CPU(qemu_get_cpu(i)); ++ enabled = sw64cpu ? 
1 : 0; ++ online_capable = !enabled << 1; ++ virt = 0x1 << 2; ++ flags = virt | online_capable | enabled; ++ ++ build_append_int_noprefix(table_data, 0x80, 1); /* Type */ ++ build_append_int_noprefix(table_data, 28, 1); /* Length */ ++ build_append_int_noprefix(table_data, 0x2, 1); /* Version */ ++ build_append_int_noprefix(table_data, 0, 1); /* Reserved */ ++ build_append_int_noprefix(table_data, flags, 4);/* Flags */ ++ build_append_int_noprefix(table_data, 0, 4); /* Reserved */ ++ build_append_int_noprefix(table_data, i, 4); /* Hardware ID */ ++ build_append_int_noprefix(table_data, 0, 4); /* ACPI Processor UID */ ++ ++ /* Boot Flag Address */ ++ if (i) { ++ build_append_int_noprefix(table_data, 0, 8); ++ } else { ++ build_append_int_noprefix(table_data, ++ memmap[VIRT_BOOT_FLAG].base, 8); ++ } ++ } ++ ++ /* PINTC Structure */ ++ build_append_int_noprefix(table_data, 0x81, 1); /* Type */ ++ build_append_int_noprefix(table_data, 60, 1); /* Length */ ++ build_append_int_noprefix(table_data, 0x2, 1); /* Version */ ++ build_append_int_noprefix(table_data, 0, 1); /* Reserved */ ++ /* Flags */ ++ build_append_int_noprefix(table_data, 0x3 /* Enabled && Virtual */, 4); ++ build_append_int_noprefix(table_data, 0, 4); /* Node */ ++ build_append_int_noprefix(table_data, 0, 8); /* Address Base */ ++ build_append_int_noprefix(table_data, 0, 4); /* Size*/ ++ /* Number of sub Interrupt Controller, only support MCU INTC now */ ++ build_append_int_noprefix(table_data, 1, 4); /* N */ ++ ++ /* Sub PINTC Structure[0]: MCU INTC */ ++ build_append_mcu_intc(table_data); ++ ++ /* MSIC Structure */ ++ build_append_int_noprefix(table_data, 0x82, 1); /* Type */ ++ build_append_int_noprefix(table_data, 56, 1); /* Length */ ++ build_append_int_noprefix(table_data, 0x2, 1); /* Version */ ++ build_append_int_noprefix(table_data, 0, 1); /* Reserved */ ++ build_append_int_noprefix(table_data, 0xf2000001, 4); /* Hardware ID */ ++ /* Flags */ ++ build_append_int_noprefix(table_data, 0x3 /* Enabled && Virtual */, 4); ++ build_append_int_noprefix(table_data, 0, 8); /* Address Base */ ++ build_append_int_noprefix(table_data, 0, 4); /* Size*/ ++ build_append_int_noprefix(table_data, 0, 4); /* Cascade Vector */ ++ build_append_int_noprefix(table_data, 0, 4); /* Node */ ++ build_append_int_noprefix(table_data, 0, 4); /* RC */ ++ build_append_int_noprefix(table_data, 256, 4); /* Number of Interrupt */ ++ build_append_int_noprefix(table_data, ++ memmap[VIRT_MSI].base, 8); /* Message Address */ ++ build_append_int_noprefix(table_data, 0, 8); /* Reserved[2] */ ++ ++ acpi_table_end(linker, &table); ++} ++ ++static void acpi_dsdt_add_gpio(Aml *scope, const MemMapEntry *gpio_memmap, ++ uint32_t gpio_irq) ++{ ++ Aml *method, *dev, *crs, *dev1; ++ Aml *pkg, *pkg0, *pkg1, *pkg2, *pkg3; ++ ++ dev = aml_device("GPI0"); ++ aml_append(dev, aml_name_decl("_HID", aml_string("SUNW0002"))); ++ aml_append(dev, aml_name_decl("_UID", aml_int(0))); ++ ++ crs = aml_resource_template(); ++ aml_append(crs, ++ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, ++ AML_NON_CACHEABLE, AML_READ_WRITE, ++ 0, gpio_memmap->base, ++ gpio_memmap->base + gpio_memmap->size - 1, ++ 0, gpio_memmap->size)); ++ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, ++ AML_SHARED, &gpio_irq, 1)); ++ aml_append(dev, aml_name_decl("_CRS", crs)); ++ ++ method = aml_method("_STA", 0, AML_NOTSERIALIZED); ++ aml_append(method, aml_return(aml_int(0xF))); ++ aml_append(dev, method); ++ ++ ++ Aml *aei = aml_resource_template(); ++ const uint32_t 
pin_list[1] = {0};
++ aml_append(aei, aml_gpio_int(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
++ AML_EXCLUSIVE, AML_PULL_DOWN, 0, pin_list, 1,
++ "GPI0", NULL, 0));
++ aml_append(dev, aml_name_decl("_AEI", aei));
++
++ method = aml_method("_L00", 0, AML_NOTSERIALIZED);
++ aml_append(method, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE),
++ aml_int(0x80)));
++ aml_append(dev, method);
++
++ dev1 = aml_device("GP00");
++ aml_append(dev1, aml_name_decl("_UID", aml_int(0)));
++
++ pkg0 = aml_package(0x2);
++ aml_append(pkg0, aml_string("compatible"));
++ aml_append(pkg0, aml_string("snps,dw-apb-gpio-port"));
++
++ pkg1 = aml_package(0x2);
++ aml_append(pkg1, aml_string("reg"));
++ aml_append(pkg1, aml_int(0x0));
++
++ pkg2 = aml_package(0x2);
++ aml_append(pkg2, aml_string("snps,nr-gpios"));
++ aml_append(pkg2, aml_int(0x8));
++
++ pkg3 = aml_package(0x3);
++ aml_append(pkg3, pkg0);
++ aml_append(pkg3, pkg1);
++ aml_append(pkg3, pkg2);
++
++ pkg = aml_package(0x2);
++ aml_append(pkg, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
++ aml_append(pkg, pkg3);
++
++ aml_append(dev1, aml_name_decl("_DSD", pkg));
++
++ aml_append(dev, dev1);
++ aml_append(scope, dev);
++}
++
++static void acpi_dsdt_add_misc_platform(Aml *scope, SW64MachineState *vms)
++{
++ Aml *method, *dev;
++ Aml *pkg, *pkg0, *pkg1;
++
++ dev = aml_device("MIS0");
++ aml_append(dev, aml_name_decl("_HID", aml_string("SUNW0200")));
++ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
++
++ pkg1 = aml_package(0x2);
++
++ pkg0 = aml_package(0x2);
++ aml_append(pkg0, aml_string("sunway,spbu_base"));
++ aml_append(pkg0, aml_int(vms->memmap[VIRT_SPBU].base));
++ aml_append(pkg1, pkg0);
++
++ pkg0 = aml_package(0x2);
++ aml_append(pkg0, aml_string("sunway,intpu_base"));
++ aml_append(pkg0, aml_int(vms->memmap[VIRT_INTPU].base));
++ aml_append(pkg1, pkg0);
++
++ pkg = aml_package(0x2);
++ aml_append(pkg, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
++ aml_append(pkg, pkg1);
++ aml_append(dev, aml_name_decl("_DSD", pkg));
++
++ method = aml_method("_STA", 0, AML_NOTSERIALIZED);
++ aml_append(method, aml_return(aml_int(0xF)));
++ aml_append(dev, method);
++
++ method = aml_method("_PXM", 0, AML_SERIALIZED);
++ aml_append(method, aml_return(aml_int(0x0)));
++ aml_append(dev, method);
++
++ aml_append(scope, dev);
++}
++
++/* DSDT */
++static void
++build_dsdt(GArray *table_data, BIOSLinker *linker, SW64MachineState *vms)
++{
++ Aml *scope, *dsdt;
++ const MemMapEntry *memmap = vms->memmap;
++ const int *irqmap = vms->irqmap;
++ AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id,
++ .oem_table_id = vms->oem_table_id };
++
++ acpi_table_begin(&table, table_data);
++ dsdt = init_aml_allocator();
++
++ /*
++ * When booting the VM with UEFI, UEFI takes ownership of the RTC
++ * hardware. While UEFI can use libfdt to disable the RTC device node in
++ * the DTB that it passes to the OS, it cannot modify AML. Therefore, we
++ * won't generate the RTC ACPI device at all when using UEFI.
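++ * The guest is then expected to obtain wall-clock time from the UEFI
++ * runtime services rather than from an ACPI-described RTC device.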
++ */ ++ scope = aml_scope("\\_SB"); ++ ++ acpi_dsdt_add_uart(scope, &memmap[VIRT_UART], ++ (irqmap[VIRT_UART] + SW64_MCU_GSI_BASE)); ++ ++ acpi_dsdt_add_sunway_ged(scope, &memmap[VIRT_SUNWAY_GED], ++ (irqmap[VIRT_SUNWAY_GED] + SW64_MCU_GSI_BASE)); ++ ++ acpi_dsdt_add_fw_cfg(scope, &memmap[VIRT_FW_CFG]); ++ acpi_dsdt_add_pci(scope, SW64_PCIE_IRQMAP, vms); ++ ++ acpi_dsdt_add_gpio(scope, &memmap[VIRT_GPIO], ++ (irqmap[VIRT_GPIO] + SW64_MCU_GSI_BASE)); ++ ++ acpi_dsdt_add_power_button(scope); ++ ++ acpi_dsdt_add_misc_platform(scope, vms); ++ ++ aml_append(dsdt, scope); ++ ++ /* copy AML table into ACPI tables blob */ ++ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); ++ ++ acpi_table_end(linker, &table); ++ free_aml_allocator(); ++} ++ ++typedef ++struct AcpiBuildState { ++ /* Copy of table in RAM (for patching). */ ++ MemoryRegion *table_mr; ++ MemoryRegion *rsdp_mr; ++ MemoryRegion *linker_mr; ++ /* Is table patched? */ ++ bool patched; ++} AcpiBuildState; ++ ++static void acpi_align_size(GArray *blob, unsigned align) ++{ ++ /* ++ * Align size to multiple of given size. This reduces the chance ++ * we need to change size in the future (breaking cross version migration). ++ */ ++ g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); ++} ++ ++static ++void sw64_acpi_build(SW64MachineState *vms, AcpiBuildTables *tables) ++{ ++ GArray *table_offsets; ++ unsigned dsdt, xsdt; ++ GArray *tables_blob = tables->table_data; ++ MachineState *ms = MACHINE(vms); ++ ++ table_offsets = g_array_new(false, true /* clear */, ++ sizeof(uint32_t)); ++ ++ bios_linker_loader_alloc(tables->linker, ++ ACPI_BUILD_TABLE_FILE, tables_blob, ++ 64, false /* high memory */); ++ ++ /* DSDT is pointed to by FADT */ ++ dsdt = tables_blob->len; ++ build_dsdt(tables_blob, tables->linker, vms); ++ ++ /* FADT MADT MCFG is pointed to by XSDT*/ ++ acpi_add_table(table_offsets, tables_blob); ++ { ++ AcpiFadtData fadt = { ++ .rev = 5, ++ .minor_ver = 1, ++ .flags = 1 << ACPI_FADT_F_HW_REDUCED_ACPI, ++ .xdsdt_tbl_offset = &dsdt, ++ }; ++ build_fadt(tables_blob, tables->linker, &fadt, vms->oem_id, ++ vms->oem_table_id); ++ } ++ ++ acpi_add_table(table_offsets, tables_blob); ++ build_madt(tables_blob, tables->linker, vms); ++ ++ acpi_add_table(table_offsets, tables_blob); ++ build_pptt(tables_blob, tables->linker, ms, ++ vms->oem_id, vms->oem_table_id); ++ ++ acpi_add_table(table_offsets, tables_blob); ++ { ++ AcpiMcfgInfo mcfg = { ++ .base = vms->memmap[VIRT_PCIE_CFG].base, ++ .size = vms->memmap[VIRT_PCIE_CFG].size, ++ }; ++ build_mcfg(tables_blob, tables->linker, &mcfg, vms->oem_id, ++ vms->oem_table_id); ++ } ++ ++ /* NUMA support */ ++ if (ms->numa_state->num_nodes > 0) { ++ acpi_add_table(table_offsets, tables_blob); ++ build_srat(tables_blob, tables->linker, vms); ++ ++ if (ms->numa_state->have_numa_distance) { ++ acpi_add_table(table_offsets, tables_blob); ++ build_slit(tables_blob, tables->linker, ms, vms->oem_id, ++ vms->oem_table_id); ++ } ++ } ++ ++ /* XSDT is pointed to by RSDP */ ++ xsdt = tables_blob->len; ++ build_xsdt(tables_blob, tables->linker, table_offsets, vms->oem_id, ++ vms->oem_table_id); ++ ++ /* RSDP is in FSEG memory, so allocate it separately */ ++ { ++ AcpiRsdpData rsdp_data = { ++ .revision = 2, ++ .oem_id = vms->oem_id, ++ .xsdt_tbl_offset = &xsdt, ++ .rsdt_tbl_offset = NULL, ++ }; ++ build_rsdp(tables->rsdp, tables->linker, &rsdp_data); ++ } ++ ++ /* ++ * The align size is 128, warn if 64k is not enough therefore ++ * the align size could be resized. 
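++ * Warning at half of ACPI_BUILD_TABLE_SIZE leaves headroom: if the
++ * padded blob ever changes size between QEMU versions, cross-version
++ * migration of the ACPI ROM blobs breaks, as the warning below notes.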
++ */
++ if (tables_blob->len > ACPI_BUILD_TABLE_SIZE / 2) {
++ warn_report("ACPI table size %u exceeds %d bytes,"
++ " migration may not work",
++ tables_blob->len, ACPI_BUILD_TABLE_SIZE / 2);
++ error_printf("Try removing CPUs, NUMA nodes, memory slots"
++ " or PCI bridges.");
++ }
++ acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
++
++ /* Cleanup memory that's no longer used. */
++ g_array_free(table_offsets, true);
++}
++
++static void acpi_ram_update(MemoryRegion *mr, GArray *data)
++{
++ uint32_t size = acpi_data_len(data);
++
++ /* Make sure RAM size is correct - in case it got changed
++ * e.g. by migration */
++ memory_region_ram_resize(mr, size, &error_abort);
++
++ memcpy(memory_region_get_ram_ptr(mr), data->data, size);
++ memory_region_set_dirty(mr, 0, size);
++}
++
++static void sw64_acpi_build_update(void *build_opaque)
++{
++ AcpiBuildState *build_state = build_opaque;
++ AcpiBuildTables tables;
++
++ /* No state to update or already patched? Nothing to do. */
++ if (!build_state || build_state->patched) {
++ return;
++ }
++ build_state->patched = true;
++
++ acpi_build_tables_init(&tables);
++
++ sw64_acpi_build((SW64MachineState *)MACHINE(qdev_get_machine()), &tables);
++
++ acpi_ram_update(build_state->table_mr, tables.table_data);
++ acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
++ acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
++
++ acpi_build_tables_cleanup(&tables, true);
++}
++
++static void sw64_acpi_build_reset(void *build_opaque)
++{
++ AcpiBuildState *build_state = build_opaque;
++ build_state->patched = false;
++}
++
++static const VMStateDescription vmstate_sw64_acpi_build = {
++ .name = "sw64_acpi_build",
++ .version_id = 1,
++ .minimum_version_id = 1,
++ .fields = (VMStateField[]) {
++ VMSTATE_BOOL(patched, AcpiBuildState),
++ VMSTATE_END_OF_LIST()
++ },
++};
++
++static void sw64_powerdown_req(Notifier *n, void *opaque)
++{
++ SW64MachineState *s = container_of(n, SW64MachineState, powerdown_notifier);
++
++ if (s->gpio_dev) {
++ SW64GPIOState *gpio = SW64_GPIO(s->gpio_dev);
++ gpio->intsr = 0x1;
++ qemu_irq_pulse(gpio->irq[0]);
++ } else {
++ error_report("no gpio device found");
++ }
++}
++
++void sw64_acpi_setup(SW64MachineState *vms)
++{
++ AcpiBuildTables tables;
++ AcpiBuildState *build_state;
++
++ if (!vms->fw_cfg) {
++ return;
++ }
++
++ build_state = g_malloc0(sizeof *build_state);
++
++ acpi_build_tables_init(&tables);
++
++ sw64_acpi_build(vms, &tables);
++ /* Now expose it all to Guest */
++ build_state->table_mr = acpi_add_rom_blob(sw64_acpi_build_update,
++ build_state, tables.table_data,
++ ACPI_BUILD_TABLE_FILE);
++ assert(build_state->table_mr != NULL);
++
++ build_state->linker_mr = acpi_add_rom_blob(sw64_acpi_build_update,
++ build_state,
++ tables.linker->cmd_blob,
++ ACPI_BUILD_LOADER_FILE);
++ build_state->rsdp_mr = acpi_add_rom_blob(sw64_acpi_build_update,
++ build_state, tables.rsdp,
++ ACPI_BUILD_RSDP_FILE);
++
++ qemu_register_reset(sw64_acpi_build_reset, build_state);
++ vms->powerdown_notifier.notify = sw64_powerdown_req;
++ qemu_register_powerdown_notifier(&vms->powerdown_notifier);
++ sw64_acpi_build_reset(build_state);
++ vmstate_register(NULL, 0, &vmstate_sw64_acpi_build, build_state);
++
++ /* Cleanup tables but don't free the memory: we track it
++ * in build_state.
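++ * The ROM blobs registered above are tracked through build_state, and
++ * sw64_acpi_build_update() regenerates the tables and copies them back
++ * in place when they need patching after reset or migration.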
++ */ ++ acpi_build_tables_cleanup(&tables, false); ++} +diff --git a/hw/sw64/sw64_iommu.c b/hw/sw64/sw64_iommu.c +new file mode 100644 +index 0000000000..40f0ae1d30 +--- /dev/null ++++ b/hw/sw64/sw64_iommu.c +@@ -0,0 +1,570 @@ ++/* ++ * QEMU sw64 IOMMU emulation ++ * ++ * Copyright (c) 2021 Lu Feifei ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy ++ * of this software and associated documentation files (the "Software"), to deal ++ * in the Software without restriction, including without limitation the rights ++ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell ++ * copies of the Software, and to permit persons to whom the Software is ++ * furnished to do so, subject to the following conditions: ++ * ++ * The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR ++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, ++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL ++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER ++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN ++ * THE SOFTWARE. ++ */ ++ ++#include "qemu/osdep.h" ++#include "hw/sysbus.h" ++#include "exec/address-spaces.h" ++#include "qemu/log.h" ++#include "qapi/error.h" ++#include "hw/sw64/sw64_iommu.h" ++#include "hw/sw64/sunway.h" ++#include "sysemu/kvm.h" ++ ++#define IOMMU_PAGE_SHIFT 13 ++#define IOMMU_PAGE_SIZE_8K (1ULL << IOMMU_PAGE_SHIFT) ++#define IOMMU_PAGE_MASK_8K (~(IOMMU_PAGE_SIZE_8K - 1)) ++#define IOMMU_IOVA_SHIFT 16 ++#define SW64IOMMU_PTIOTLB_MAX_SIZE 256 ++ ++static MemTxResult swvt_msi_read(void *opaque, hwaddr addr, ++ uint64_t *data, unsigned size, MemTxAttrs attrs) ++{ ++ return MEMTX_OK; ++} ++ ++static MemTxResult swvt_msi_write(void *opaque, hwaddr addr, ++ uint64_t value, unsigned size, ++ MemTxAttrs attrs) ++{ ++ MemTxResult ret; ++ ++ ret = msi_write(opaque, addr, value, size, attrs); ++ ++ return ret; ++} ++ ++static const MemoryRegionOps swvt_msi_ops = { ++ .read_with_attrs = swvt_msi_read, ++ .write_with_attrs = swvt_msi_write, ++ .endianness = DEVICE_LITTLE_ENDIAN, ++ .valid = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++ .impl = { ++ .min_access_size = 1, ++ .max_access_size = 8, ++ }, ++}; ++ ++SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn) ++{ ++ uintptr_t key = (uintptr_t)bus; ++ SWVTBus *swvt_bus = g_hash_table_lookup(s->swvtbus_as_by_busptr, &key); ++ SWVTAddressSpace *swvt_dev_as; ++ char name[128]; ++ ++ if (!swvt_bus) { ++ uintptr_t *new_key = g_malloc(sizeof(*new_key)); ++ *new_key = (uintptr_t)bus; ++ /* No corresponding free() */ ++ swvt_bus = g_malloc0(sizeof(SWVTBus) + sizeof(SWVTAddressSpace *) * \ ++ PCI_DEVFN_MAX); ++ swvt_bus->bus = bus; ++ g_hash_table_insert(s->swvtbus_as_by_busptr, new_key, swvt_bus); ++ } ++ swvt_dev_as = swvt_bus->dev_as[devfn]; ++ if (!swvt_dev_as) { ++ snprintf(name, sizeof(name), "sw64_iommu_devfn_%d", devfn); ++ swvt_bus->dev_as[devfn] = swvt_dev_as = g_malloc0(sizeof(SWVTAddressSpace)); ++ ++ swvt_dev_as->bus = bus; ++ swvt_dev_as->devfn = (uint8_t)devfn; ++ swvt_dev_as->iommu_state = s; ++ ++ memory_region_init_iommu(&swvt_dev_as->iommu, sizeof(swvt_dev_as->iommu), ++ TYPE_SW64_IOMMU_MEMORY_REGION, 
OBJECT(s),
++ "sw64_iommu_dmar",
++ 1UL << 32);
++ memory_region_init_io(&swvt_dev_as->msi, OBJECT(s),
++ &swvt_msi_ops, s, "sw_msi", 1 * 1024 * 1024);
++ memory_region_init(&swvt_dev_as->root, OBJECT(s),
++ "swvt_root", UINT64_MAX);
++ memory_region_add_subregion_overlap(&swvt_dev_as->root,
++ 0x8000fee00000ULL,
++ &swvt_dev_as->msi, 64);
++ address_space_init(&swvt_dev_as->as, &swvt_dev_as->root, name);
++ memory_region_add_subregion_overlap(&swvt_dev_as->root, 0,
++ MEMORY_REGION(&swvt_dev_as->iommu),
++ 1);
++ }
++
++ memory_region_set_enabled(MEMORY_REGION(&swvt_dev_as->iommu), true);
++
++ return swvt_dev_as;
++}
++
++/**
++ * get_pte - Read the 64-bit table entry at @baseaddr into @pte
++ */
++static int get_pte(dma_addr_t baseaddr, uint64_t *pte)
++{
++ int ret;
++
++ /* TODO: guarantee 64-bit single-copy atomicity */
++ ret = dma_memory_read(&address_space_memory, baseaddr,
++ (uint8_t *)pte, sizeof(*pte));
++
++ if (ret != MEMTX_OK) {
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int swvt_do_iommu_translate(SWVTAddressSpace *swvt_as, PCIBus *bus,
++ uint8_t devfn, hwaddr addr, IOMMUTLBEntry *entry)
++{
++ SW64IOMMUState *s = swvt_as->iommu_state;
++ uint8_t bus_num = pci_bus_num(bus);
++ unsigned long dtbbaseaddr, dtbbasecond;
++ unsigned long pdebaseaddr, ptebaseaddr;
++ unsigned long pte;
++ uint16_t source_id;
++ SW64DTIOTLBEntry *dtcached_entry = NULL;
++ SW64DTIOTLBKey dtkey, *new_key;
++
++ source_id = ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
++ dtkey.source_id = source_id;
++ dtcached_entry = g_hash_table_lookup(s->dtiotlb, &dtkey);
++
++ if (unlikely(!dtcached_entry)) {
++ dtbbaseaddr = s->dtbr + (bus_num << 3);
++
++ if (get_pte(dtbbaseaddr, &pte)) {
++ goto error;
++ }
++
++ dtbbasecond = (pte & (~(SW_IOMMU_ENTRY_VALID))) + (devfn << 3);
++ if (get_pte(dtbbasecond, &pte)) {
++ goto error;
++ }
++
++ dtcached_entry = g_new0(SW64DTIOTLBEntry, 1);
++ dtcached_entry->ptbase_addr = pte & (~(SW_IOMMU_ENTRY_VALID));
++ dtcached_entry->source_id = source_id;
++
++ new_key = g_new0(SW64DTIOTLBKey, 1);
++ new_key->source_id = source_id;
++
++ g_hash_table_insert(s->dtiotlb, new_key, dtcached_entry);
++ }
++
++ pdebaseaddr = dtcached_entry->ptbase_addr;
++ pdebaseaddr += ((addr >> 23) & SW_IOMMU_LEVEL1_OFFSET) << 3;
++
++ if (get_pte(pdebaseaddr, &pte)) {
++ goto error;
++ }
++
++ ptebaseaddr = pte & (~(SW_IOMMU_ENTRY_VALID));
++ ptebaseaddr += ((addr >> IOMMU_PAGE_SHIFT) & SW_IOMMU_LEVEL2_OFFSET) << 3;
++
++ if (get_pte(ptebaseaddr, &pte)) {
++ goto error;
++ }
++
++ pte &= ~(SW_IOMMU_ENTRY_VALID | SW_IOMMU_GRN | SW_IOMMU_ENABLE);
++ entry->translated_addr = pte;
++ entry->addr_mask = IOMMU_PAGE_SIZE_8K - 1;
++
++ return 0;
++
++error:
++ entry->perm = IOMMU_NONE;
++ return -EINVAL;
++}
++
++static void swvt_ptiotlb_inv_all(SW64IOMMUState *s)
++{
++ g_hash_table_remove_all(s->ptiotlb);
++}
++
++static IOMMUTLBEntry *swvt_lookup_ptiotlb(SW64IOMMUState *s, uint16_t source_id,
++ hwaddr addr)
++{
++ SW64PTIOTLBKey ptkey;
++ IOMMUTLBEntry *entry = NULL;
++
++ ptkey.source_id = source_id;
++ ptkey.iova = addr;
++ entry = g_hash_table_lookup(s->ptiotlb, &ptkey);
++
++ return entry;
++}
++
++static IOMMUTLBEntry sw64_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr,
++ IOMMUAccessFlags flag, int iommu_idx)
++{
++ SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu);
++ SW64IOMMUState *s = swvt_as->iommu_state;
++ IOMMUTLBEntry *cached_entry = NULL;
++ IOMMUTLBEntry entry = {
++ .target_as = &address_space_memory,
++ .iova = addr,
++ .translated_addr = addr,
++ .addr_mask = ~(hwaddr)0,
++
.perm = IOMMU_NONE,
++ };
++ uint8_t bus_num = pci_bus_num(swvt_as->bus);
++ uint16_t source_id;
++ SW64PTIOTLBKey *new_ptkey;
++ hwaddr aligned_addr;
++
++ source_id = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
++
++ qemu_mutex_lock(&s->iommu_lock);
++
++ aligned_addr = addr & IOMMU_PAGE_MASK_8K;
++
++ cached_entry = swvt_lookup_ptiotlb(s, source_id, aligned_addr);
++
++ if (cached_entry) {
++ goto out;
++ }
++
++ if (g_hash_table_size(s->ptiotlb) >= SW64IOMMU_PTIOTLB_MAX_SIZE) {
++ swvt_ptiotlb_inv_all(s);
++ }
++
++ cached_entry = g_new0(IOMMUTLBEntry, 1);
++
++ if (swvt_do_iommu_translate(swvt_as, swvt_as->bus, swvt_as->devfn,
++ addr, cached_entry)) {
++ g_free(cached_entry);
++ qemu_mutex_unlock(&s->iommu_lock);
++ qemu_log("%s: detected translation failure "
++ "(busnum=%d, devfn=%#x, iova=%#lx)\n",
++ __func__, pci_bus_num(swvt_as->bus), swvt_as->devfn,
++ entry.iova);
++ entry.iova = 0;
++ entry.translated_addr = 0;
++ entry.addr_mask = 0;
++ entry.perm = IOMMU_NONE;
++
++ return entry;
++ } else {
++ new_ptkey = g_new0(SW64PTIOTLBKey, 1);
++ new_ptkey->source_id = source_id;
++ new_ptkey->iova = aligned_addr;
++ g_hash_table_insert(s->ptiotlb, new_ptkey, cached_entry);
++ }
++
++out:
++ qemu_mutex_unlock(&s->iommu_lock);
++ entry.perm = flag;
++ entry.translated_addr = cached_entry->translated_addr +
++ (addr & (IOMMU_PAGE_SIZE_8K - 1));
++ entry.addr_mask = cached_entry->addr_mask;
++
++ return entry;
++}
++
++static void swvt_ptiotlb_inv_iova(SW64IOMMUState *s, uint16_t source_id, dma_addr_t iova)
++{
++ SW64PTIOTLBKey key = {.source_id = source_id, .iova = iova};
++
++ qemu_mutex_lock(&s->iommu_lock);
++ g_hash_table_remove(s->ptiotlb, &key);
++ qemu_mutex_unlock(&s->iommu_lock);
++}
++
++void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val)
++{
++ SWVTAddressSpace *swvt_as;
++ IOMMUNotifier *n;
++ uint16_t source_id;
++ dma_addr_t iova;
++ IOMMUTLBEvent event;
++
++ source_id = val & 0xffff;
++ iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT;
++
++ swvt_ptiotlb_inv_iova(s, source_id, iova);
++
++ QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) {
++ uint8_t bus_num = pci_bus_num(swvt_as->bus);
++ uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
++
++ if (as_sourceid == source_id) {
++ IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) {
++ event.type = IOMMU_NOTIFIER_UNMAP;
++ event.entry.target_as = &address_space_memory;
++ event.entry.iova = iova & IOMMU_PAGE_MASK_8K;
++ event.entry.translated_addr = 0;
++ event.entry.perm = IOMMU_NONE;
++ event.entry.addr_mask = IOMMU_PAGE_SIZE_8K - 1;
++
++ memory_region_notify_iommu(&swvt_as->iommu, 0, event);
++ }
++ }
++ }
++}
++
++/* Unmap the whole range in the notifier's scope.
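++ * Called from the replay hook below so that any stale mappings are
++ * dropped before a notifier's view of the region is rebuilt.
++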
*/
++static void swvt_address_space_unmap(SWVTAddressSpace *as, IOMMUNotifier *n)
++{
++ IOMMUTLBEvent event;
++ hwaddr size;
++ hwaddr start = n->start;
++ hwaddr end = n->end;
++
++ assert(start <= end);
++ size = end - start;
++
++ event.type = IOMMU_NOTIFIER_UNMAP;
++ event.entry.target_as = &address_space_memory;
++ /* Adjust iova for the size */
++ event.entry.iova = n->start & ~(size - 1);
++ /* This field is meaningless for unmap */
++ event.entry.translated_addr = 0;
++ event.entry.perm = IOMMU_NONE;
++ event.entry.addr_mask = size - 1;
++
++ memory_region_notify_iommu_one(n, &event);
++}
++
++void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val)
++{
++ SWVTAddressSpace *swvt_as;
++ IOMMUNotifier *n;
++ uint16_t source_id;
++ dma_addr_t iova;
++ IOMMUTLBEvent event;
++ int ret;
++
++ source_id = val & 0xffff;
++ iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT;
++
++ swvt_ptiotlb_inv_iova(s, source_id, iova);
++
++ QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) {
++ uint8_t bus_num = pci_bus_num(swvt_as->bus);
++ uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL);
++
++ if (as_sourceid == source_id) {
++ IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) {
++ event.type = IOMMU_NOTIFIER_MAP;
++ event.entry.target_as = &address_space_memory;
++ event.entry.iova = iova & IOMMU_PAGE_MASK_8K;
++ event.entry.perm = IOMMU_RW;
++
++ ret = swvt_do_iommu_translate(swvt_as, swvt_as->bus,
++ swvt_as->devfn, iova, &event.entry);
++ if (ret) {
++ goto out;
++ }
++
++ memory_region_notify_iommu(&swvt_as->iommu, 0, event);
++ }
++ }
++ }
++out:
++ return;
++}
++
++void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val)
++{
++ int map_flag;
++
++ map_flag = val >> 36;
++
++ if (map_flag) {
++ swvt_address_space_map_iova(s, val & 0xfffffffff);
++ } else {
++ swvt_address_space_unmap_iova(s, val);
++ }
++}
++
++static AddressSpace *sw64_dma_iommu(PCIBus *bus, void *opaque, int devfn)
++{
++ SW64IOMMUState *s = opaque;
++ SWVTAddressSpace *swvt_as;
++
++ assert(0 <= devfn && devfn < PCI_DEVFN_MAX);
++
++ swvt_as = iommu_find_add_as(s, bus, devfn);
++ return &swvt_as->as;
++}
++
++static uint64_t piu0_read(void *opaque, hwaddr addr, unsigned size)
++{
++ uint64_t ret = 0;
++ switch (addr) {
++ default:
++ break;
++ }
++ return ret;
++}
++
++static void piu0_write(void *opaque, hwaddr addr, uint64_t val,
++ unsigned size)
++{
++ SW64IOMMUState *s = (SW64IOMMUState *)opaque;
++
++ switch (addr) {
++ case 0xb000:
++ /* DTBaseAddr */
++ s->dtbr = val;
++ break;
++ case 0xb280:
++ /* PTLB_FlushVAddr */
++ swvt_address_space_invalidate_iova(s, val);
++ break;
++ default:
++ break;
++ }
++}
++
++const MemoryRegionOps core3_pci_piu0_ops = {
++ .read = piu0_read,
++ .write = piu0_write,
++ .endianness = DEVICE_LITTLE_ENDIAN,
++ .valid = {
++ .min_access_size = 1,
++ .max_access_size = 8,
++ },
++ .impl = {
++ .min_access_size = 1,
++ .max_access_size = 8,
++ },
++};
++
++void sw64_vt_iommu_init(PCIBus *b)
++{
++ DeviceState *dev_iommu;
++ SW64IOMMUState *s;
++ MemoryRegion *io_piu0 = g_new(MemoryRegion, 1);
++
++ dev_iommu = qdev_new(TYPE_SW64_IOMMU);
++ s = SW64_IOMMU(dev_iommu);
++
++ s->pci_bus = b;
++ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev_iommu), &error_fatal);
++
++ pci_setup_iommu(b, sw64_dma_iommu, dev_iommu);
++
++ memory_region_init_io(io_piu0, OBJECT(s), &core3_pci_piu0_ops, s,
++ "pci0-piu0-io", 4 * 1024 * 1024);
++ memory_region_add_subregion(get_system_memory(), 0x880200000000ULL,
++ io_piu0);
++}
++
++static int
swvt_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
++ IOMMUNotifierFlag old,
++ IOMMUNotifierFlag new,
++ Error **errp)
++{
++ SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu);
++ SW64IOMMUState *s = swvt_as->iommu_state;
++
++ /* Update per-address-space notifier flags */
++ swvt_as->notifier_flags = new;
++
++ if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
++ error_setg(errp, "swvt does not support dev-iotlb yet");
++ return -EINVAL;
++ }
++
++ if (old == IOMMU_NOTIFIER_NONE) {
++ QLIST_INSERT_HEAD(&s->swvt_as_with_notifiers, swvt_as, next);
++ } else if (new == IOMMU_NOTIFIER_NONE) {
++ QLIST_REMOVE(swvt_as, next);
++ }
++ return 0;
++}
++
++static void swvt_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
++{
++ SWVTAddressSpace *swvt_as = container_of(iommu_mr, SWVTAddressSpace, iommu);
++
++ /*
++ * The replay can be triggered by either an invalidation or a newly
++ * created entry. No matter what, we release existing mappings
++ * (it means flushing caches for UNMAP-only registers).
++ */
++ swvt_address_space_unmap(swvt_as, n);
++}
++
++/* GHashTable functions */
++static gboolean swvt_uint64_equal(gconstpointer v1, gconstpointer v2)
++{
++ return *((const uint64_t *)v1) == *((const uint64_t *)v2);
++}
++
++static guint swvt_uint64_hash(gconstpointer v)
++{
++ return (guint)*(const uint64_t *)v;
++}
++
++static gboolean swvt_dtiotlb_equal(gconstpointer v1, gconstpointer v2)
++{
++ const SW64DTIOTLBKey *k1 = v1, *k2 = v2;
++
++ return k1->source_id == k2->source_id;
++}
++
++static guint swvt_dtiotlb_hash(gconstpointer v)
++{
++ return (guint)((const SW64DTIOTLBKey *)v)->source_id;
++}
++
++static gboolean swvt_ptiotlb_equal(gconstpointer v1, gconstpointer v2)
++{
++ const SW64PTIOTLBKey *k1 = v1, *k2 = v2;
++
++ return k1->source_id == k2->source_id && k1->iova == k2->iova;
++}
++
++static guint swvt_ptiotlb_hash(gconstpointer v)
++{
++ const SW64PTIOTLBKey *key = v;
++
++ return (guint)(key->iova >> IOMMU_PAGE_SHIFT) ^ key->source_id;
++}
++
++static void iommu_realize(DeviceState *d, Error **errp)
++{
++ SW64IOMMUState *s = SW64_IOMMU(d);
++
++ QLIST_INIT(&s->swvt_as_with_notifiers);
++ qemu_mutex_init(&s->iommu_lock);
++
++ /* Hash the key structs field-wise; raw uint64 reads would overrun them. */
++ s->dtiotlb = g_hash_table_new_full(swvt_dtiotlb_hash, swvt_dtiotlb_equal,
++ g_free, g_free);
++ s->ptiotlb = g_hash_table_new_full(swvt_ptiotlb_hash, swvt_ptiotlb_equal,
++ g_free, g_free);
++
++ s->swvtbus_as_by_busptr = g_hash_table_new(swvt_uint64_hash,
++ swvt_uint64_equal);
++}
++
++static void iommu_reset(DeviceState *d)
++{
++}
++
++static void sw64_iommu_class_init(ObjectClass *klass, void *data)
++{
++ DeviceClass *dc = DEVICE_CLASS(klass);
++
++ dc->reset = iommu_reset;
++ dc->realize = iommu_realize;
++}
++
++static void sw64_iommu_memory_region_class_init(ObjectClass *klass, void *data)
++{
++ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
++
++ imrc->translate = sw64_translate_iommu;
++ imrc->notify_flag_changed = swvt_iommu_notify_flag_changed;
++ imrc->replay = swvt_iommu_replay;
++}
++
++static const TypeInfo sw64_iommu_info = {
++ .name = TYPE_SW64_IOMMU,
++ .parent = TYPE_SYS_BUS_DEVICE,
++ .instance_size = sizeof(SW64IOMMUState),
++ .class_init = sw64_iommu_class_init,
++ .class_size = sizeof(SW64IOMMUClass),
++};
++
++static const TypeInfo sw64_iommu_memory_region_info = {
++ .parent = TYPE_IOMMU_MEMORY_REGION,
++ .name = TYPE_SW64_IOMMU_MEMORY_REGION,
++ .class_init = sw64_iommu_memory_region_class_init,
++};
++
++static void sw64_iommu_register_types(void)
++{
++ type_register_static(&sw64_iommu_info);
++ type_register_static(&sw64_iommu_memory_region_info);
++}
++
++type_init(sw64_iommu_register_types)
+diff --git a/hw/sw64/trace-events b/hw/sw64/trace-events
+new file mode 100644
+index 0000000000..1aa744c984
+--- /dev/null
++++ b/hw/sw64/trace-events
+@@ -0,0 +1,3 @@
++# See docs/devel/tracing.rst for syntax documentation.
++ ++# pci.c +diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h +index 2324f6b1a4..74db69cd1c 100644 +--- a/include/disas/dis-asm.h ++++ b/include/disas/dis-asm.h +@@ -187,6 +187,10 @@ enum bfd_architecture + #define bfd_mach_alpha_ev4 0x10 + #define bfd_mach_alpha_ev5 0x20 + #define bfd_mach_alpha_ev6 0x30 ++ bfd_arch_sw64, /* Sw64 */ ++#define bfd_mach_sw64 1 ++#define bfd_mach_sw64_core3 0x10 ++#define bfd_mach_sw64_core4 0x20 + bfd_arch_arm, /* Advanced Risc Machines ARM */ + #define bfd_mach_arm_unknown 0 + #define bfd_mach_arm_2 1 +@@ -425,6 +429,8 @@ int print_insn_h8500 (bfd_vma, disassemble_info*); + int print_insn_arm_a64 (bfd_vma, disassemble_info*); + int print_insn_alpha (bfd_vma, disassemble_info*); + disassembler_ftype arc_get_disassembler (int, int); ++int print_insn_sw64 (bfd_vma, disassemble_info*); ++int print_insn_arm (bfd_vma, disassemble_info*); + int print_insn_sparc (bfd_vma, disassemble_info*); + int print_insn_big_a29k (bfd_vma, disassemble_info*); + int print_insn_little_a29k (bfd_vma, disassemble_info*); +diff --git a/include/elf.h b/include/elf.h +index e7259ec366..98bd530851 100644 +--- a/include/elf.h ++++ b/include/elf.h +@@ -210,6 +210,8 @@ typedef struct mips_elf_abiflags_v0 { + + #define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */ + ++#define EM_SW64 0x9916 /* SW64 */ ++ + /* This is the info that is needed to parse the dynamic section of the file */ + #define DT_NULL 0 + #define DT_NEEDED 1 +@@ -1448,6 +1450,48 @@ typedef struct { + #define EF_RISCV_RVE 0x0008 + #define EF_RISCV_TSO 0x0010 + ++/* ++ SW_64 ELF relocation types ++ */ ++#define EM_SW_64 0x9916 ++#define R_SW_64_NONE 0 /* No reloc */ ++#define R_SW_64_REFLONG 1 /* Direct 32 bit */ ++#define R_SW_64_REFQUAD 2 /* Direct 64 bit */ ++#define R_SW_64_GPREL32 3 /* GP relative 32 bit */ ++#define R_SW_64_LITERAL 4 /* GP relative 16 bit w/optimization */ ++#define R_SW_64_LITUSE 5 /* Optimization hint for LITERAL */ ++#define R_SW_64_GPDISP 6 /* Add displacement to GP */ ++#define R_SW_64_BRADDR 7 /* PC+4 relative 23 bit shifted */ ++#define R_SW_64_HINT 8 /* PC+4 relative 16 bit shifted */ ++#define R_SW_64_SREL16 9 /* PC relative 16 bit */ ++#define R_SW_64_SREL32 10 /* PC relative 32 bit */ ++#define R_SW_64_SREL64 11 /* PC relative 64 bit */ ++#define R_SW_64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ ++#define R_SW_64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ ++#define R_SW_64_GPREL16 19 /* GP relative 16 bit */ ++#define R_SW_64_COPY 24 /* Copy symbol at runtime */ ++#define R_SW_64_GLOB_DAT 25 /* Create GOT entry */ ++#define R_SW_64_JMP_SLOT 26 /* Create PLT entry */ ++#define R_SW_64_RELATIVE 27 /* Adjust by program base */ ++#define R_SW_64_TLS_GD_HI 28 ++#define R_SW_64_TLSGD 29 ++#define R_SW_64_TLS_LDM 30 ++#define R_SW_64_DTPMOD64 31 ++#define R_SW_64_GOTDTPREL 32 ++#define R_SW_64_DTPREL64 33 ++#define R_SW_64_DTPRELHI 34 ++#define R_SW_64_DTPRELLO 35 ++#define R_SW_64_DTPREL16 36 ++#define R_SW_64_GOTTPREL 37 ++#define R_SW_64_TPREL64 38 ++#define R_SW_64_TPRELHI 39 ++#define R_SW_64_TPRELLO 40 ++#define R_SW_64_TPREL16 41 ++/* Keep this the last entry. */ ++#define R_SW_64_NUM 46 ++/* Legal values for sh_flags field of Elf64_Shdr. 
*/ ++#define SHF_SW_64_GPREL 0x10000000 ++ + typedef struct elf32_rel { + Elf32_Addr r_offset; + Elf32_Word r_info; +diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h +index ff2a310270..472940fcb1 100644 +--- a/include/hw/acpi/aml-build.h ++++ b/include/hw/acpi/aml-build.h +@@ -366,6 +366,11 @@ Aml *aml_dword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, + uint32_t addr_gran, uint32_t addr_min, + uint32_t addr_max, uint32_t addr_trans, + uint32_t len); ++Aml *aml_qword_io(AmlMinFixed min_fixed, AmlMaxFixed max_fixed, ++ AmlDecode dec, AmlISARanges isa_ranges, ++ uint64_t addr_gran, uint64_t addr_min, ++ uint64_t addr_max, uint64_t addr_trans, ++ uint64_t len); + Aml *aml_dword_memory(AmlDecode dec, AmlMinFixed min_fixed, + AmlMaxFixed max_fixed, AmlCacheable cacheable, + AmlReadAndWrite read_and_write, +diff --git a/include/hw/sw64/core.h b/include/hw/sw64/core.h +new file mode 100644 +index 0000000000..25e3f7f90e +--- /dev/null ++++ b/include/hw/sw64/core.h +@@ -0,0 +1,138 @@ ++#ifndef HW_SW64_CORE_H ++#define HW_SW64_CORE_H ++ ++#include "hw/pci/pci_host.h" ++#include "qom/object.h" ++#include "hw/boards.h" ++#include "hw/sw64/pm.h" ++#define TYPE_CORE3_BOARD "core3-board" ++#define CORE3_BOARD(obj) \ ++ OBJECT_CHECK(BoardState, (obj), TYPE_CORE3_BOARD) ++ ++#define TYPE_CORE4_BOARD "core4-board" ++#define CORE4_BOARD(obj) \ ++ OBJECT_CHECK(BoardState, (obj), TYPE_CORE4_BOARD) ++ ++#define SW_ACPI_BUILD_APPNAME6 "SUNWAY" ++#define SW_ACPI_BUILD_APPNAME8 "SUNWAY " ++ ++#define MAX_CPUS_CORE4 256 ++ ++extern unsigned long dtb_start_c4; ++ ++struct SW64MachineClass { ++ MachineClass parent; ++}; ++ ++struct SW64MachineState { ++ MachineState parent; ++ FWCfgState *fw_cfg; ++ DeviceState *acpi_dev; ++ DeviceState *gpio_dev; ++ PCIBus *bus; ++ char *oem_id; ++ char *oem_table_id; ++ MemMapEntry *memmap; ++ const int *irqmap; ++ Notifier powerdown_notifier; ++}; ++ ++#define TYPE_SW64_MACHINE MACHINE_TYPE_NAME("sw64") ++OBJECT_DECLARE_TYPE(SW64MachineState, SW64MachineClass, SW64_MACHINE) ++ ++struct CORE3MachineClass { ++ MachineClass parent; ++}; ++ ++struct CORE3MachineState { ++ MachineState parent; ++ FWCfgState *fw_cfg; ++ DeviceState *acpi_dev; ++ PCIBus *bus; ++ char *oem_id; ++ char *oem_table_id; ++ const MemMapEntry *memmap; ++ const int *irqmap; ++ int fdt_size; ++}; ++ ++#define TYPE_CORE3_MACHINE MACHINE_TYPE_NAME("core3") ++OBJECT_DECLARE_TYPE(CORE3MachineState, CORE3MachineClass, CORE3_MACHINE) ++ ++struct CORE4MachineClass { ++ MachineClass parent; ++}; ++ ++struct CORE4MachineState { ++ MachineState parent; ++ FWCfgState *fw_cfg; ++ DeviceState *acpi_dev; ++ DeviceState *gpio_dev; ++ PCIBus *bus; ++ char *oem_id; ++ char *oem_table_id; ++ const MemMapEntry *memmap; ++ const int *irqmap; ++ int fdt_size; ++ OnOffAuto acpi; ++}; ++ ++#define TYPE_CORE4_MACHINE MACHINE_TYPE_NAME("core4") ++OBJECT_DECLARE_TYPE(CORE4MachineState, CORE4MachineClass, CORE4_MACHINE) ++ ++typedef struct BoardState { ++ PCIHostState parent_obj; ++ MemoryRegion io_mcu; ++ MemoryRegion io_spbu; ++ MemoryRegion io_intpu; ++ MemoryRegion msi_ep; ++ MemoryRegion mem_ep; ++ MemoryRegion mem_ep64; ++ MemoryRegion conf_piu0; ++ MemoryRegion io_piu0; ++ MemoryRegion io_ep; ++ MemoryRegion io_rtc; ++ qemu_irq serial_irq; ++ uint32_t ofw_addr; ++} BoardState; ++ ++typedef struct TimerState { ++ void *opaque; ++ int order; ++} TimerState; ++ ++enum { ++ VIRT_BOOT_FLAG, ++ VIRT_PCIE_MMIO, ++ VIRT_MSI, ++ VIRT_SPBU, ++ VIRT_SUNWAY_GED, ++ VIRT_INTPU, ++ VIRT_RTC, ++ VIRT_FW_CFG, ++ 
VIRT_PCIE_IO_BASE, ++ VIRT_PCIE_PIO, ++ VIRT_UART, ++ VIRT_PCIE_CFG, ++ VIRT_HIGH_PCIE_MMIO, ++ VIRT_MCU, ++ VIRT_GPIO, ++}; ++ ++typedef struct boot_params { ++ unsigned long initrd_start; /* logical address of initrd */ ++ unsigned long initrd_size; /* size of initrd */ ++ unsigned long dtb_start; /* logical address of dtb */ ++ unsigned long efi_systab; /* logical address of EFI system table */ ++ unsigned long efi_memmap; /* logical address of EFI memory map */ ++ unsigned long efi_memmap_size; /* size of EFI memory map */ ++ unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ ++ unsigned long efi_memdesc_version; /* memory descriptor version */ ++ unsigned long cmdline; /* logical address of cmdline */ ++} BOOT_PARAMS; ++ ++void core3_board_init(MachineState *machine); ++void core4_board_init(MachineState *machine); ++void sw64_acpi_setup(SW64MachineState *vms); ++bool sw64_is_acpi_enabled(CORE4MachineState *c4ms); ++#endif +diff --git a/include/hw/sw64/sunway.h b/include/hw/sw64/sunway.h +new file mode 100644 +index 0000000000..4501831c06 +--- /dev/null ++++ b/include/hw/sw64/sunway.h +@@ -0,0 +1,51 @@ ++#ifndef SW64_SUNWAY_H ++#define SW64_SUNWAY_H ++ ++#include "exec/cpu-defs.h" ++#include "hw/pci/pci.h" ++#include "hw/loader.h" ++#include "hw/sw64/core.h" ++ ++extern const MemoryRegionOps rtc_ops; ++extern const MemoryRegionOps sw64_pci_ignore_ops; ++extern const MemoryRegionOps sw64_pci_config_ops; ++extern const MemoryRegionOps msi_ops; ++ ++void sw64_init_rtc_base_info(void); ++ ++uint64_t cpu_sw64_virt_to_phys(void *opaque, uint64_t addr); ++CpuInstanceProperties sw64_cpu_index_to_props(MachineState *ms, ++ unsigned cpu_index); ++int64_t sw64_get_default_cpu_node_id(const MachineState *ms, ++ int idx); ++const CPUArchIdList *sw64_possible_cpu_arch_ids(MachineState *ms); ++void sw64_cpu_reset(void *opaque); ++void sw64_board_reset(MachineState *state, ShutdownCause reason); ++ ++void sw64_set_clocksource(void); ++void sw64_set_ram_size(ram_addr_t ram_size); ++void sw64_clear_uefi_bios(void); ++void sw64_clear_smp_rcb(void); ++void sw64_clear_kernel_print(void); ++void sw64_load_hmcode(const char *hmcode_filename, uint64_t *hmcode_entry); ++void sw64_find_and_load_bios(const char *bios_name); ++void sw64_load_kernel(const char *kernel_filename, uint64_t *kernel_entry, ++ const char *kernel_cmdline); ++void sw64_load_initrd(const char *initrd_filename, ++ BOOT_PARAMS *sunway_boot_params); ++int sw64_load_dtb(MachineState *ms, BOOT_PARAMS *sunway_boot_params); ++void sw64_board_alarm_timer(void *opaque); ++void sw64_create_alarm_timer(MachineState *ms, BoardState *bs); ++uint64_t convert_bit(int n); ++FWCfgState *sw64_create_fw_cfg(hwaddr addr, hwaddr size); ++void sw64_virt_build_smbios(FWCfgState *fw_cfg); ++void sw64_board_set_irq(void *opaque, int irq, int level); ++int sw64_board_map_irq(PCIDevice *d, int irq_num); ++void serial_set_irq(void *opaque, int irq, int level); ++void sw64_new_cpu(const char *name, int64_t arch_id, Error **errp); ++void sw64_create_pcie(BoardState *bs, PCIBus *b, PCIHostState *phb); ++PCIINTxRoute sw64_route_intx_pin_to_irq(void *opaque, int pin); ++MemTxResult msi_write(void *opaque, hwaddr addr, uint64_t value, ++ unsigned size, MemTxAttrs attrs); ++void rtc_get_time(Object *obj, struct tm *current_tm, Error **errp); ++#endif /* SW64_SUNWAY_H */ +diff --git a/include/hw/sw64/sw64_iommu.h b/include/hw/sw64/sw64_iommu.h +new file mode 100644 +index 0000000000..eb59ff107a +--- /dev/null ++++ b/include/hw/sw64/sw64_iommu.h +@@ 
-0,0 +1,103 @@
++/*
++ * Copyright (C) 2021-2025 Wuxi Institute of Advanced Technology
++ * Written by Lu Feifei
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef HW_SW64_IOMMU_H
++#define HW_SW64_IOMMU_H
++
++#include "hw/sysbus.h"
++#include "hw/pci/pci.h"
++
++#define TYPE_SW64_IOMMU_MEMORY_REGION "sw64-iommu-memory-region"
++#define SW_IOMMU_ENTRY_VALID ((1UL) << 63)
++#define SW_IOMMU_LEVEL1_OFFSET 0x1ff
++#define SW_IOMMU_LEVEL2_OFFSET 0x3ff
++#define SW_IOMMU_ENABLE 3
++#define SW_IOMMU_GRN ((0UL) << 4)
++#define SWVT_PCI_BUS_MAX 256
++
++typedef struct SW64IOMMUClass SW64IOMMUClass;
++typedef struct SW64IOMMUState SW64IOMMUState;
++typedef struct SWVTAddressSpace SWVTAddressSpace;
++typedef struct SW64DTIOTLBKey SW64DTIOTLBKey;
++typedef struct SW64PTIOTLBKey SW64PTIOTLBKey;
++typedef struct SW64DTIOTLBEntry SW64DTIOTLBEntry;
++typedef struct SWVTBus SWVTBus;
++
++struct SW64DTIOTLBEntry {
++ uint16_t source_id;
++ unsigned long ptbase_addr;
++};
++
++struct SW64DTIOTLBKey {
++ uint16_t source_id;
++};
++
++struct SW64PTIOTLBKey {
++ uint16_t source_id;
++ dma_addr_t iova;
++};
++
++struct SWVTAddressSpace {
++ PCIBus *bus;
++ uint8_t devfn;
++ AddressSpace as;
++ IOMMUMemoryRegion iommu;
++ MemoryRegion root;
++ MemoryRegion msi; /* Interrupt region: 0xfeeXXXXX */
++ SW64IOMMUState *iommu_state;
++ QLIST_ENTRY(SWVTAddressSpace) next;
++ /* Superset of notifier flags that this address space has */
++ IOMMUNotifierFlag notifier_flags;
++};
++
++struct SWVTBus {
++ PCIBus *bus; /* A reference to the bus to provide translation for */
++ SWVTAddressSpace *dev_as[]; /* A table of SWVTAddressSpace objects indexed by devfn */
++};
++
++struct SW64IOMMUState {
++ SysBusDevice busdev;
++ dma_addr_t dtbr; /* Current root table pointer */
++ GHashTable *dtiotlb; /* IOTLB for device table */
++ GHashTable *ptiotlb; /* IOTLB for page table */
++
++ GHashTable *swvtbus_as_by_busptr;
++ /* list of registered notifiers */
++ QLIST_HEAD(, SWVTAddressSpace) swvt_as_with_notifiers;
++
++ PCIBus *pci_bus;
++ QemuMutex iommu_lock;
++};
++
++struct SW64IOMMUClass {
++ SysBusDeviceClass parent;
++ DeviceRealize realize;
++};
++
++#define TYPE_SW64_IOMMU "sw64-iommu"
++#define SW64_IOMMU(obj) \
++ OBJECT_CHECK(SW64IOMMUState, (obj), TYPE_SW64_IOMMU)
++#define SW64_IOMMU_CLASS(klass) \
++ OBJECT_CLASS_CHECK(SW64IOMMUClass, (klass), TYPE_SW64_IOMMU)
++#define SW64_IOMMU_GET_CLASS(obj) \
++ OBJECT_GET_CLASS(SW64IOMMUClass, (obj), TYPE_SW64_IOMMU)
++extern void sw64_vt_iommu_init(PCIBus *b);
++extern void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val);
++extern void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val);
++extern void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val);
++extern SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn);
++#endif
+diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
+index 8850cb1a14..cb1b36f878 100644
+--- a/include/sysemu/arch_init.h
++++ b/include/sysemu/arch_init.h
+@@ -25,6 +25,7 @@ enum {
+ QEMU_ARCH_AVR = (1 << 21),
+ QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_LOONGARCH = (1 << 23),
++ QEMU_ARCH_SW64 = (1 << 24),
+ };
+
+ extern const uint32_t arch_type;
+diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
+index a02850583b..bd6ad130e7 100644
+--- a/include/tcg/tcg-op.h
++++ b/include/tcg/tcg-op.h
+@@ -102,6 +102,61 @@ tcg_gen_qemu_st_i128(TCGv_i128 v, TCGv a, TCGArg i, MemOp m)
+ tcg_gen_qemu_st_i128_chk(v, tcgv_tl_temp(a), i, m, TCG_TYPE_TL);
+ }
+
++static inline void tcg_gen_qemu_ld8u(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_UB);
++}
++
++static inline void tcg_gen_qemu_ld8s(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_SB);
++}
++
++static inline void tcg_gen_qemu_st8(TCGv arg, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_UB);
++}
++
++static inline void tcg_gen_qemu_st16(TCGv arg, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUW);
++}
++
++static inline void tcg_gen_qemu_ld16u(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUW);
++}
++
++static inline void tcg_gen_qemu_ld16s(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESW);
++}
++
++static inline void tcg_gen_qemu_ld32u(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TEUL);
++}
++
++static inline void tcg_gen_qemu_ld32s(TCGv ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_tl(ret, addr, mem_index, MO_TESL);
++}
++
++static inline void tcg_gen_qemu_st32(TCGv arg, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_st_tl(arg, addr, mem_index, MO_TEUL);
++}
++
++static inline void tcg_gen_qemu_ld64(TCGv_i64 ret, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_ld_i64(ret, addr, mem_index, MO_TEUQ);
++}
++
++static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
++{
++ tcg_gen_qemu_st_i64(arg, addr, mem_index, MO_TEUQ);
++}
++
+ #define DEF_ATOMIC2(N, S) \
+ static inline void N##_##S(TCGv_##S r, TCGv a, TCGv_##S v, \
+ TCGArg i, MemOp m) \
+diff --git a/linux-headers/asm-sw64/kvm.h b/linux-headers/asm-sw64/kvm.h
+new file mode 100644
+index 0000000000..e8b364548c
+--- /dev/null
++++ b/linux-headers/asm-sw64/kvm.h
+@@ -0,0 +1,90 @@
++#ifndef __LINUX_KVM_SW64_H
++#define __LINUX_KVM_SW64_H
++
++#include <linux/types.h>
++
++#define __KVM_HAVE_GUEST_DEBUG
++
++#define KVM_DIRTY_LOG_PAGE_OFFSET 64
++
++/*
++ * for KVM_GET_REGS and KVM_SET_REGS
++ */
++struct user_pt_regs {
++ unsigned long r[31];
++ unsigned long pc;
++ unsigned long pstate;
++};
++
++/* 256 bits aligned for simd */
++struct fpreg {
++ unsigned long v[4] __attribute__((aligned(32)));
++};
++
++struct user_fpsimd_state {
++ struct fpreg fp[31];
++ unsigned long fpcr;
++ unsigned long __reserved[3];
++};
++
++struct kvm_regs {
++ union {
++ struct {
++ unsigned long r[27];
++ unsigned long fpcr;
++
++ unsigned long fp[124];
++ /* These are saved by hmcode: */
++ unsigned long ps;
++ unsigned long pc;
++ unsigned long gp;
++ unsigned long r16;
++ unsigned long r17;
++ unsigned long r18;
++ unsigned long reserved[6];
++ } c3_regs;
++ struct {
++ union {
++ struct user_pt_regs reg;
++ struct {
++ unsigned long r[31];
++ unsigned long pc;
++ unsigned long ps;
++ };
++ };
++ struct user_fpsimd_state fpstate;
++ } c4_regs;
++ };
++};
++
++/*
++ * for KVM_GET_FPU and KVM_SET_FPU
++ */
++struct kvm_fpu {
++};
++
++/*
++ * KVM SW_64
specific structures and definitions ++ */ ++struct kvm_debug_exit_arch { ++ unsigned long epc; ++}; ++ ++/* for KVM_SET_GUEST_DEBUG */ ++struct kvm_guest_debug_arch { ++}; ++ ++/* definition of registers in kvm_run */ ++struct kvm_sync_regs { ++}; ++ ++/* dummy definition */ ++struct kvm_sregs { ++}; ++ ++#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) ++#define KVM_SW64_USE_SLAVE _IO(KVMIO, 0xbb) ++#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) ++#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) ++ ++#endif /* __LINUX_KVM_SW64_H */ +diff --git a/linux-headers/asm-sw64/unistd.h b/linux-headers/asm-sw64/unistd.h +new file mode 100644 +index 0000000000..affe297e73 +--- /dev/null ++++ b/linux-headers/asm-sw64/unistd.h +@@ -0,0 +1,380 @@ ++#ifndef _UAPI_ASM_SW64_UNISTD_64_H ++#define _UAPI_ASM_SW64_UNISTD_64_H ++ ++#define __NR_exit 1 ++#define __NR_fork 2 ++#define __NR_read 3 ++#define __NR_write 4 ++#define __NR_close 6 ++#define __NR_osf_wait4 7 ++#define __NR_link 9 ++#define __NR_unlink 10 ++#define __NR_chdir 12 ++#define __NR_fchdir 13 ++#define __NR_mknod 14 ++#define __NR_chmod 15 ++#define __NR_chown 16 ++#define __NR_brk 17 ++#define __NR_lseek 19 ++#define __NR_getxpid 20 ++#define __NR_osf_mount 21 ++#define __NR_umount2 22 ++#define __NR_setuid 23 ++#define __NR_getxuid 24 ++#define __NR_ptrace 26 ++#define __NR_access 33 ++#define __NR_sync 36 ++#define __NR_kill 37 ++#define __NR_setpgid 39 ++#define __NR_dup 41 ++#define __NR_pipe 42 ++#define __NR_osf_set_program_attributes 43 ++#define __NR_open 45 ++#define __NR_getxgid 47 ++#define __NR_osf_sigprocmask 48 ++#define __NR_acct 51 ++#define __NR_sigpending 52 ++#define __NR_ioctl 54 ++#define __NR_symlink 57 ++#define __NR_readlink 58 ++#define __NR_execve 59 ++#define __NR_umask 60 ++#define __NR_chroot 61 ++#define __NR_getpgrp 63 ++#define __NR_getpagesize 64 ++#define __NR_vfork 66 ++#define __NR_stat 67 ++#define __NR_lstat 68 ++#define __NR_mmap 71 ++#define __NR_munmap 73 ++#define __NR_mprotect 74 ++#define __NR_madvise 75 ++#define __NR_vhangup 76 ++#define __NR_getgroups 79 ++#define __NR_setgroups 80 ++#define __NR_setpgrp 82 ++#define __NR_osf_setitimer 83 ++#define __NR_osf_getitimer 86 ++#define __NR_gethostname 87 ++#define __NR_sethostname 88 ++#define __NR_getdtablesize 89 ++#define __NR_dup2 90 ++#define __NR_fstat 91 ++#define __NR_fcntl 92 ++#define __NR_osf_select 93 ++#define __NR_poll 94 ++#define __NR_fsync 95 ++#define __NR_setpriority 96 ++#define __NR_socket 97 ++#define __NR_connect 98 ++#define __NR_accept 99 ++#define __NR_getpriority 100 ++#define __NR_send 101 ++#define __NR_recv 102 ++#define __NR_sigreturn 103 ++#define __NR_bind 104 ++#define __NR_setsockopt 105 ++#define __NR_listen 106 ++#define __NR_sigsuspend 111 ++#define __NR_osf_sigstack 112 ++#define __NR_recvmsg 113 ++#define __NR_sendmsg 114 ++#define __NR_osf_gettimeofday 116 ++#define __NR_osf_getrusage 117 ++#define __NR_getsockopt 118 ++#define __NR_socketcall 119 ++#define __NR_readv 120 ++#define __NR_writev 121 ++#define __NR_osf_settimeofday 122 ++#define __NR_fchown 123 ++#define __NR_fchmod 124 ++#define __NR_recvfrom 125 ++#define __NR_setreuid 126 ++#define __NR_setregid 127 ++#define __NR_rename 128 ++#define __NR_truncate 129 ++#define __NR_ftruncate 130 ++#define __NR_flock 131 ++#define __NR_setgid 132 ++#define __NR_sendto 133 ++#define __NR_shutdown 134 ++#define __NR_socketpair 135 ++#define __NR_mkdir 136 ++#define __NR_rmdir 137 ++#define __NR_osf_utimes 138 ++#define __NR_getpeername 141 ++#define __NR_getrlimit 144 ++#define 
__NR_setrlimit 145 ++#define __NR_setsid 147 ++#define __NR_quotactl 148 ++#define __NR_getsockname 150 ++#define __NR_sigaction 156 ++#define __NR_osf_getdirentries 159 ++#define __NR_osf_statfs 160 ++#define __NR_osf_fstatfs 161 ++#define __NR_osf_getdomainname 165 ++#define __NR_setdomainname 166 ++#define __NR_bpf 170 ++#define __NR_userfaultfd 171 ++#define __NR_membarrier 172 ++#define __NR_mlock2 173 ++#define __NR_getpid 174 ++#define __NR_getppid 175 ++#define __NR_getuid 176 ++#define __NR_geteuid 177 ++#define __NR_getgid 178 ++#define __NR_getegid 179 ++#define __NR_osf_swapon 199 ++#define __NR_msgctl 200 ++#define __NR_msgget 201 ++#define __NR_msgrcv 202 ++#define __NR_msgsnd 203 ++#define __NR_semctl 204 ++#define __NR_semget 205 ++#define __NR_semop 206 ++#define __NR_osf_utsname 207 ++#define __NR_lchown 208 ++#define __NR_shmat 209 ++#define __NR_shmctl 210 ++#define __NR_shmdt 211 ++#define __NR_shmget 212 ++#define __NR_msync 217 ++#define __NR_osf_stat 224 ++#define __NR_osf_lstat 225 ++#define __NR_osf_fstat 226 ++#define __NR_osf_statfs64 227 ++#define __NR_osf_fstatfs64 228 ++#define __NR_statfs64 229 ++#define __NR_fstatfs64 230 ++#define __NR_getpgid 233 ++#define __NR_getsid 234 ++#define __NR_sigaltstack 235 ++#define __NR_osf_sysinfo 241 ++#define __NR_osf_proplist_syscall 244 ++#define __NR_osf_usleep_thread 251 ++#define __NR_sysfs 254 ++#define __NR_osf_getsysinfo 256 ++#define __NR_osf_setsysinfo 257 ++#define __NR_bdflush 300 ++#define __NR_sethae 301 ++#define __NR_mount 302 ++#define __NR_old_adjtimex 303 ++#define __NR_swapoff 304 ++#define __NR_getdents 305 ++#define __NR_create_module 306 ++#define __NR_init_module 307 ++#define __NR_delete_module 308 ++#define __NR_get_kernel_syms 309 ++#define __NR_syslog 310 ++#define __NR_reboot 311 ++#define __NR_clone 312 ++#define __NR_uselib 313 ++#define __NR_mlock 314 ++#define __NR_munlock 315 ++#define __NR_mlockall 316 ++#define __NR_munlockall 317 ++#define __NR_sysinfo 318 ++#define __NR__sysctl 319 ++#define __NR_oldumount 321 ++#define __NR_swapon 322 ++#define __NR_times 323 ++#define __NR_personality 324 ++#define __NR_setfsuid 325 ++#define __NR_setfsgid 326 ++#define __NR_ustat 327 ++#define __NR_statfs 328 ++#define __NR_fstatfs 329 ++#define __NR_sched_setparam 330 ++#define __NR_sched_getparam 331 ++#define __NR_sched_setscheduler 332 ++#define __NR_sched_getscheduler 333 ++#define __NR_sched_yield 334 ++#define __NR_sched_get_priority_max 335 ++#define __NR_sched_get_priority_min 336 ++#define __NR_sched_rr_get_interval 337 ++#define __NR_afs_syscall 338 ++#define __NR_uname 339 ++#define __NR_nanosleep 340 ++#define __NR_mremap 341 ++#define __NR_nfsservctl 342 ++#define __NR_setresuid 343 ++#define __NR_getresuid 344 ++#define __NR_pciconfig_read 345 ++#define __NR_pciconfig_write 346 ++#define __NR_query_module 347 ++#define __NR_prctl 348 ++#define __NR_pread64 349 ++#define __NR_pwrite64 350 ++#define __NR_rt_sigreturn 351 ++#define __NR_rt_sigaction 352 ++#define __NR_rt_sigprocmask 353 ++#define __NR_rt_sigpending 354 ++#define __NR_rt_sigtimedwait 355 ++#define __NR_rt_sigqueueinfo 356 ++#define __NR_rt_sigsuspend 357 ++#define __NR_select 358 ++#define __NR_gettimeofday 359 ++#define __NR_settimeofday 360 ++#define __NR_getitimer 361 ++#define __NR_setitimer 362 ++#define __NR_utimes 363 ++#define __NR_getrusage 364 ++#define __NR_wait4 365 ++#define __NR_adjtimex 366 ++#define __NR_getcwd 367 ++#define __NR_capget 368 ++#define __NR_capset 369 ++#define __NR_sendfile 370 ++#define 
__NR_setresgid 371 ++#define __NR_getresgid 372 ++#define __NR_dipc 373 ++#define __NR_pivot_root 374 ++#define __NR_mincore 375 ++#define __NR_pciconfig_iobase 376 ++#define __NR_getdents64 377 ++#define __NR_gettid 378 ++#define __NR_readahead 379 ++#define __NR_tkill 381 ++#define __NR_setxattr 382 ++#define __NR_lsetxattr 383 ++#define __NR_fsetxattr 384 ++#define __NR_getxattr 385 ++#define __NR_lgetxattr 386 ++#define __NR_fgetxattr 387 ++#define __NR_listxattr 388 ++#define __NR_llistxattr 389 ++#define __NR_flistxattr 390 ++#define __NR_removexattr 391 ++#define __NR_lremovexattr 392 ++#define __NR_fremovexattr 393 ++#define __NR_futex 394 ++#define __NR_sched_setaffinity 395 ++#define __NR_sched_getaffinity 396 ++#define __NR_tuxcall 397 ++#define __NR_io_setup 398 ++#define __NR_io_destroy 399 ++#define __NR_io_getevents 400 ++#define __NR_io_submit 401 ++#define __NR_io_cancel 402 ++#define __NR_io_pgetevents 403 ++#define __NR_rseq 404 ++#define __NR_exit_group 405 ++#define __NR_lookup_dcookie 406 ++#define __NR_epoll_create 407 ++#define __NR_epoll_ctl 408 ++#define __NR_epoll_wait 409 ++#define __NR_remap_file_pages 410 ++#define __NR_set_tid_address 411 ++#define __NR_restart_syscall 412 ++#define __NR_fadvise64 413 ++#define __NR_timer_create 414 ++#define __NR_timer_settime 415 ++#define __NR_timer_gettime 416 ++#define __NR_timer_getoverrun 417 ++#define __NR_timer_delete 418 ++#define __NR_clock_settime 419 ++#define __NR_clock_gettime 420 ++#define __NR_clock_getres 421 ++#define __NR_clock_nanosleep 422 ++#define __NR_semtimedop 423 ++#define __NR_tgkill 424 ++#define __NR_stat64 425 ++#define __NR_lstat64 426 ++#define __NR_fstat64 427 ++#define __NR_vserver 428 ++#define __NR_mbind 429 ++#define __NR_get_mempolicy 430 ++#define __NR_set_mempolicy 431 ++#define __NR_mq_open 432 ++#define __NR_mq_unlink 433 ++#define __NR_mq_timedsend 434 ++#define __NR_mq_timedreceive 435 ++#define __NR_mq_notify 436 ++#define __NR_mq_getsetattr 437 ++#define __NR_waitid 438 ++#define __NR_add_key 439 ++#define __NR_request_key 440 ++#define __NR_keyctl 441 ++#define __NR_ioprio_set 442 ++#define __NR_ioprio_get 443 ++#define __NR_inotify_init 444 ++#define __NR_inotify_add_watch 445 ++#define __NR_inotify_rm_watch 446 ++#define __NR_fdatasync 447 ++#define __NR_kexec_load 448 ++#define __NR_migrate_pages 449 ++#define __NR_openat 450 ++#define __NR_mkdirat 451 ++#define __NR_mknodat 452 ++#define __NR_fchownat 453 ++#define __NR_futimesat 454 ++#define __NR_fstatat64 455 ++#define __NR_unlinkat 456 ++#define __NR_renameat 457 ++#define __NR_linkat 458 ++#define __NR_symlinkat 459 ++#define __NR_readlinkat 460 ++#define __NR_fchmodat 461 ++#define __NR_faccessat 462 ++#define __NR_pselect6 463 ++#define __NR_ppoll 464 ++#define __NR_unshare 465 ++#define __NR_set_robust_list 466 ++#define __NR_get_robust_list 467 ++#define __NR_splice 468 ++#define __NR_sync_file_range 469 ++#define __NR_tee 470 ++#define __NR_vmsplice 471 ++#define __NR_move_pages 472 ++#define __NR_getcpu 473 ++#define __NR_epoll_pwait 474 ++#define __NR_utimensat 475 ++#define __NR_signalfd 476 ++#define __NR_timerfd 477 ++#define __NR_eventfd 478 ++#define __NR_recvmmsg 479 ++#define __NR_fallocate 480 ++#define __NR_timerfd_create 481 ++#define __NR_timerfd_settime 482 ++#define __NR_timerfd_gettime 483 ++#define __NR_signalfd4 484 ++#define __NR_eventfd2 485 ++#define __NR_epoll_create1 486 ++#define __NR_dup3 487 ++#define __NR_pipe2 488 ++#define __NR_inotify_init1 489 ++#define __NR_preadv 490 ++#define 
__NR_pwritev 491 ++#define __NR_rt_tgsigqueueinfo 492 ++#define __NR_perf_event_open 493 ++#define __NR_fanotify_init 494 ++#define __NR_fanotify_mark 495 ++#define __NR_prlimit64 496 ++#define __NR_name_to_handle_at 497 ++#define __NR_open_by_handle_at 498 ++#define __NR_clock_adjtime 499 ++#define __NR_syncfs 500 ++#define __NR_setns 501 ++#define __NR_accept4 502 ++#define __NR_sendmmsg 503 ++#define __NR_process_vm_readv 504 ++#define __NR_process_vm_writev 505 ++#define __NR_kcmp 506 ++#define __NR_finit_module 507 ++#define __NR_sched_setattr 508 ++#define __NR_sched_getattr 509 ++#define __NR_renameat2 510 ++#define __NR_getrandom 511 ++#define __NR_memfd_create 512 ++#define __NR_execveat 513 ++#define __NR_seccomp 514 ++#define __NR_copy_file_range 515 ++#define __NR_preadv2 516 ++#define __NR_pwritev2 517 ++#define __NR_statx 518 ++ ++#ifdef __KERNEL__ ++#define __NR_syscalls 519 ++#endif ++ ++#endif /* _UAPI_ASM_SW64_UNISTD_64_H */ +diff --git a/linux-headers/asm-sw64/unistd.h.bak b/linux-headers/asm-sw64/unistd.h.bak +new file mode 100644 +index 0000000000..30ff44f4a6 +--- /dev/null ++++ b/linux-headers/asm-sw64/unistd.h.bak +@@ -0,0 +1,481 @@ ++#ifndef _SW_64_UNISTD_H ++#define _SW_64_UNISTD_H ++ ++#define __NR_osf_syscall 0 /* not implemented */ ++#define __NR_exit 1 ++#define __NR_fork 2 ++#define __NR_read 3 ++#define __NR_write 4 ++#define __NR_osf_old_open 5 /* not implemented */ ++#define __NR_close 6 ++#define __NR_osf_wait4 7 ++#define __NR_osf_old_creat 8 /* not implemented */ ++#define __NR_link 9 ++#define __NR_unlink 10 ++#define __NR_osf_execve 11 /* not implemented */ ++#define __NR_chdir 12 ++#define __NR_fchdir 13 ++#define __NR_mknod 14 ++#define __NR_chmod 15 ++#define __NR_chown 16 ++#define __NR_brk 17 ++#define __NR_osf_getfsstat 18 /* not implemented */ ++#define __NR_lseek 19 ++#define __NR_getxpid 20 ++#define __NR_osf_mount 21 ++#define __NR_umount 22 ++#define __NR_setuid 23 ++#define __NR_getxuid 24 ++#define __NR_exec_with_loader 25 /* not implemented */ ++#define __NR_ptrace 26 ++#define __NR_osf_nrecvmsg 27 /* not implemented */ ++#define __NR_osf_nsendmsg 28 /* not implemented */ ++#define __NR_osf_nrecvfrom 29 /* not implemented */ ++#define __NR_osf_naccept 30 /* not implemented */ ++#define __NR_osf_ngetpeername 31 /* not implemented */ ++#define __NR_osf_ngetsockname 32 /* not implemented */ ++#define __NR_access 33 ++#define __NR_osf_chflags 34 /* not implemented */ ++#define __NR_osf_fchflags 35 /* not implemented */ ++#define __NR_sync 36 ++#define __NR_kill 37 ++#define __NR_osf_old_stat 38 /* not implemented */ ++#define __NR_setpgid 39 ++#define __NR_osf_old_lstat 40 /* not implemented */ ++#define __NR_dup 41 ++#define __NR_pipe 42 ++#define __NR_osf_set_program_attributes 43 ++#define __NR_osf_profil 44 /* not implemented */ ++#define __NR_open 45 ++#define __NR_osf_old_sigaction 46 /* not implemented */ ++#define __NR_getxgid 47 ++#define __NR_osf_sigprocmask 48 ++#define __NR_osf_getlogin 49 /* not implemented */ ++#define __NR_osf_setlogin 50 /* not implemented */ ++#define __NR_acct 51 ++#define __NR_sigpending 52 ++ ++#define __NR_ioctl 54 ++#define __NR_osf_reboot 55 /* not implemented */ ++#define __NR_osf_revoke 56 /* not implemented */ ++#define __NR_symlink 57 ++#define __NR_readlink 58 ++#define __NR_execve 59 ++#define __NR_umask 60 ++#define __NR_chroot 61 ++#define __NR_osf_old_fstat 62 /* not implemented */ ++#define __NR_getpgrp 63 ++#define __NR_getpagesize 64 ++#define __NR_osf_mremap 65 /* not implemented */ 
++#define __NR_vfork 66 ++#define __NR_stat 67 ++#define __NR_lstat 68 ++#define __NR_osf_sbrk 69 /* not implemented */ ++#define __NR_osf_sstk 70 /* not implemented */ ++#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */ ++#define __NR_osf_old_vadvise 72 /* not implemented */ ++#define __NR_munmap 73 ++#define __NR_mprotect 74 ++#define __NR_madvise 75 ++#define __NR_vhangup 76 ++#define __NR_osf_kmodcall 77 /* not implemented */ ++#define __NR_osf_mincore 78 /* not implemented */ ++#define __NR_getgroups 79 ++#define __NR_setgroups 80 ++#define __NR_osf_old_getpgrp 81 /* not implemented */ ++#define __NR_setpgrp 82 /* BSD alias for setpgid */ ++#define __NR_osf_setitimer 83 ++#define __NR_osf_old_wait 84 /* not implemented */ ++#define __NR_osf_table 85 /* not implemented */ ++#define __NR_osf_getitimer 86 ++#define __NR_gethostname 87 ++#define __NR_sethostname 88 ++#define __NR_getdtablesize 89 ++#define __NR_dup2 90 ++#define __NR_fstat 91 ++#define __NR_fcntl 92 ++#define __NR_osf_select 93 ++#define __NR_poll 94 ++#define __NR_fsync 95 ++#define __NR_setpriority 96 ++#define __NR_socket 97 ++#define __NR_connect 98 ++#define __NR_accept 99 ++#define __NR_getpriority 100 ++#define __NR_send 101 ++#define __NR_recv 102 ++#define __NR_sigreturn 103 ++#define __NR_bind 104 ++#define __NR_setsockopt 105 ++#define __NR_listen 106 ++#define __NR_osf_plock 107 /* not implemented */ ++#define __NR_osf_old_sigvec 108 /* not implemented */ ++#define __NR_osf_old_sigblock 109 /* not implemented */ ++#define __NR_osf_old_sigsetmask 110 /* not implemented */ ++#define __NR_sigsuspend 111 ++#define __NR_osf_sigstack 112 ++#define __NR_recvmsg 113 ++#define __NR_sendmsg 114 ++#define __NR_osf_old_vtrace 115 /* not implemented */ ++#define __NR_osf_gettimeofday 116 ++#define __NR_osf_getrusage 117 ++#define __NR_getsockopt 118 ++ ++#define __NR_readv 120 ++#define __NR_writev 121 ++#define __NR_osf_settimeofday 122 ++#define __NR_fchown 123 ++#define __NR_fchmod 124 ++#define __NR_recvfrom 125 ++#define __NR_setreuid 126 ++#define __NR_setregid 127 ++#define __NR_rename 128 ++#define __NR_truncate 129 ++#define __NR_ftruncate 130 ++#define __NR_flock 131 ++#define __NR_setgid 132 ++#define __NR_sendto 133 ++#define __NR_shutdown 134 ++#define __NR_socketpair 135 ++#define __NR_mkdir 136 ++#define __NR_rmdir 137 ++#define __NR_osf_utimes 138 ++#define __NR_osf_old_sigreturn 139 /* not implemented */ ++#define __NR_osf_adjtime 140 /* not implemented */ ++#define __NR_getpeername 141 ++#define __NR_osf_gethostid 142 /* not implemented */ ++#define __NR_osf_sethostid 143 /* not implemented */ ++#define __NR_getrlimit 144 ++#define __NR_setrlimit 145 ++#define __NR_osf_old_killpg 146 /* not implemented */ ++#define __NR_setsid 147 ++#define __NR_quotactl 148 ++#define __NR_osf_oldquota 149 /* not implemented */ ++#define __NR_getsockname 150 ++ ++#define __NR_osf_pid_block 153 /* not implemented */ ++#define __NR_osf_pid_unblock 154 /* not implemented */ ++ ++#define __NR_sigaction 156 ++#define __NR_osf_sigwaitprim 157 /* not implemented */ ++#define __NR_osf_nfssvc 158 /* not implemented */ ++#define __NR_osf_getdirentries 159 ++#define __NR_osf_statfs 160 ++#define __NR_osf_fstatfs 161 ++ ++#define __NR_osf_asynch_daemon 163 /* not implemented */ ++#define __NR_osf_getfh 164 /* not implemented */ ++#define __NR_osf_getdomainname 165 ++#define __NR_setdomainname 166 ++ ++#define __NR_osf_exportfs 169 /* not implemented */ ++ ++#define __NR_userfaultfd 171 ++ ++#define __NR_osf_alt_plock 181 /* 
not implemented */ ++ ++#define __NR_osf_getmnt 184 /* not implemented */ ++ ++#define __NR_osf_alt_sigpending 187 /* not implemented */ ++#define __NR_osf_alt_setsid 188 /* not implemented */ ++ ++#define __NR_osf_swapon 199 ++#define __NR_msgctl 200 ++#define __NR_msgget 201 ++#define __NR_msgrcv 202 ++#define __NR_msgsnd 203 ++#define __NR_semctl 204 ++#define __NR_semget 205 ++#define __NR_semop 206 ++#define __NR_osf_utsname 207 ++#define __NR_lchown 208 ++#define __NR_osf_shmat 209 ++#define __NR_shmctl 210 ++#define __NR_shmdt 211 ++#define __NR_shmget 212 ++#define __NR_osf_mvalid 213 /* not implemented */ ++#define __NR_osf_getaddressconf 214 /* not implemented */ ++#define __NR_osf_msleep 215 /* not implemented */ ++#define __NR_osf_mwakeup 216 /* not implemented */ ++#define __NR_msync 217 ++#define __NR_osf_signal 218 /* not implemented */ ++#define __NR_osf_utc_gettime 219 /* not implemented */ ++#define __NR_osf_utc_adjtime 220 /* not implemented */ ++ ++#define __NR_osf_security 222 /* not implemented */ ++#define __NR_osf_kloadcall 223 /* not implemented */ ++ ++#define __NR_osf_stat 224 ++#define __NR_osf_lstat 225 ++#define __NR_osf_fstat 226 ++#define __NR_osf_statfs64 227 ++#define __NR_osf_fstatfs64 228 ++ ++#define __NR_getpgid 233 ++#define __NR_getsid 234 ++#define __NR_sigaltstack 235 ++#define __NR_osf_waitid 236 /* not implemented */ ++#define __NR_osf_priocntlset 237 /* not implemented */ ++#define __NR_osf_sigsendset 238 /* not implemented */ ++#define __NR_osf_set_speculative 239 /* not implemented */ ++#define __NR_osf_msfs_syscall 240 /* not implemented */ ++#define __NR_osf_sysinfo 241 ++#define __NR_osf_uadmin 242 /* not implemented */ ++#define __NR_osf_fuser 243 /* not implemented */ ++#define __NR_osf_proplist_syscall 244 ++#define __NR_osf_ntp_adjtime 245 /* not implemented */ ++#define __NR_osf_ntp_gettime 246 /* not implemented */ ++#define __NR_osf_pathconf 247 /* not implemented */ ++#define __NR_osf_fpathconf 248 /* not implemented */ ++ ++#define __NR_osf_uswitch 250 /* not implemented */ ++#define __NR_osf_usleep_thread 251 ++#define __NR_osf_audcntl 252 /* not implemented */ ++#define __NR_osf_audgen 253 /* not implemented */ ++#define __NR_sysfs 254 ++#define __NR_osf_subsys_info 255 /* not implemented */ ++#define __NR_osf_getsysinfo 256 ++#define __NR_osf_setsysinfo 257 ++#define __NR_osf_afs_syscall 258 /* not implemented */ ++#define __NR_osf_swapctl 259 /* not implemented */ ++#define __NR_osf_memcntl 260 /* not implemented */ ++#define __NR_osf_fdatasync 261 /* not implemented */ ++ ++/* ++ * Ignore legacy syscalls that we don't use. 
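++ * (Defining __IGNORE_<name> tells the kernel's scripts/checksyscalls.sh not to warn that these are unimplemented.)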
++ */ ++#define __IGNORE_alarm ++#define __IGNORE_creat ++#define __IGNORE_getegid ++#define __IGNORE_geteuid ++#define __IGNORE_getgid ++#define __IGNORE_getpid ++#define __IGNORE_getppid ++#define __IGNORE_getuid ++#define __IGNORE_pause ++#define __IGNORE_time ++#define __IGNORE_utime ++#define __IGNORE_umount2 ++ ++/* ++ * Linux-specific system calls begin at 300 ++ */ ++#define __NR_bdflush 300 ++#define __NR_sethae 301 ++#define __NR_mount 302 ++#define __NR_old_adjtimex 303 ++#define __NR_swapoff 304 ++#define __NR_getdents 305 ++#define __NR_create_module 306 ++#define __NR_init_module 307 ++#define __NR_delete_module 308 ++#define __NR_get_kernel_syms 309 ++#define __NR_syslog 310 ++#define __NR_reboot 311 ++#define __NR_clone 312 ++#define __NR_uselib 313 ++#define __NR_mlock 314 ++#define __NR_munlock 315 ++#define __NR_mlockall 316 ++#define __NR_munlockall 317 ++#define __NR_sysinfo 318 ++#define __NR__sysctl 319 ++/* 320 was sys_idle. */ ++#define __NR_oldumount 321 ++#define __NR_swapon 322 ++#define __NR_times 323 ++#define __NR_personality 324 ++#define __NR_setfsuid 325 ++#define __NR_setfsgid 326 ++#define __NR_ustat 327 ++#define __NR_statfs 328 ++#define __NR_fstatfs 329 ++#define __NR_sched_setparam 330 ++#define __NR_sched_getparam 331 ++#define __NR_sched_setscheduler 332 ++#define __NR_sched_getscheduler 333 ++#define __NR_sched_yield 334 ++#define __NR_sched_get_priority_max 335 ++#define __NR_sched_get_priority_min 336 ++#define __NR_sched_rr_get_interval 337 ++#define __NR_afs_syscall 338 ++#define __NR_uname 339 ++#define __NR_nanosleep 340 ++#define __NR_mremap 341 ++#define __NR_nfsservctl 342 ++#define __NR_setresuid 343 ++#define __NR_getresuid 344 ++#define __NR_pciconfig_read 345 ++#define __NR_pciconfig_write 346 ++#define __NR_query_module 347 ++#define __NR_prctl 348 ++#define __NR_pread64 349 ++#define __NR_pwrite64 350 ++#define __NR_rt_sigreturn 351 ++#define __NR_rt_sigaction 352 ++#define __NR_rt_sigprocmask 353 ++#define __NR_rt_sigpending 354 ++#define __NR_rt_sigtimedwait 355 ++#define __NR_rt_sigqueueinfo 356 ++#define __NR_rt_sigsuspend 357 ++#define __NR_select 358 ++#define __NR_gettimeofday 359 ++#define __NR_settimeofday 360 ++#define __NR_getitimer 361 ++#define __NR_setitimer 362 ++#define __NR_utimes 363 ++#define __NR_getrusage 364 ++#define __NR_wait4 365 ++#define __NR_adjtimex 366 ++#define __NR_getcwd 367 ++#define __NR_capget 368 ++#define __NR_capset 369 ++#define __NR_sendfile 370 ++#define __NR_setresgid 371 ++#define __NR_getresgid 372 ++#define __NR_dipc 373 ++#define __NR_pivot_root 374 ++#define __NR_mincore 375 ++#define __NR_pciconfig_iobase 376 ++#define __NR_getdents64 377 ++#define __NR_gettid 378 ++#define __NR_readahead 379 ++/* 380 is unused */ ++#define __NR_tkill 381 ++#define __NR_setxattr 382 ++#define __NR_lsetxattr 383 ++#define __NR_fsetxattr 384 ++#define __NR_getxattr 385 ++#define __NR_lgetxattr 386 ++#define __NR_fgetxattr 387 ++#define __NR_listxattr 388 ++#define __NR_llistxattr 389 ++#define __NR_flistxattr 390 ++#define __NR_removexattr 391 ++#define __NR_lremovexattr 392 ++#define __NR_fremovexattr 393 ++#define __NR_futex 394 ++#define __NR_sched_setaffinity 395 ++#define __NR_sched_getaffinity 396 ++#define __NR_tuxcall 397 ++#define __NR_io_setup 398 ++#define __NR_io_destroy 399 ++#define __NR_io_getevents 400 ++#define __NR_io_submit 401 ++#define __NR_io_cancel 402 ++#define __NR_exit_group 405 ++#define __NR_lookup_dcookie 406 ++#define __NR_epoll_create 407 ++#define __NR_epoll_ctl 408 
++#define __NR_epoll_wait 409 ++/* Feb 2007: These three sys_epoll defines shouldn't be here but culling ++ * them would break userspace apps ... we'll kill them off in 2010 :) */ ++#define __NR_sys_epoll_create __NR_epoll_create ++#define __NR_sys_epoll_ctl __NR_epoll_ctl ++#define __NR_sys_epoll_wait __NR_epoll_wait ++#define __NR_remap_file_pages 410 ++#define __NR_set_tid_address 411 ++#define __NR_restart_syscall 412 ++#define __NR_fadvise64 413 ++#define __NR_timer_create 414 ++#define __NR_timer_settime 415 ++#define __NR_timer_gettime 416 ++#define __NR_timer_getoverrun 417 ++#define __NR_timer_delete 418 ++#define __NR_clock_settime 419 ++#define __NR_clock_gettime 420 ++#define __NR_clock_getres 421 ++#define __NR_clock_nanosleep 422 ++#define __NR_semtimedop 423 ++#define __NR_tgkill 424 ++#define __NR_stat64 425 ++#define __NR_lstat64 426 ++#define __NR_fstat64 427 ++#define __NR_vserver 428 ++#define __NR_mbind 429 ++#define __NR_get_mempolicy 430 ++#define __NR_set_mempolicy 431 ++#define __NR_mq_open 432 ++#define __NR_mq_unlink 433 ++#define __NR_mq_timedsend 434 ++#define __NR_mq_timedreceive 435 ++#define __NR_mq_notify 436 ++#define __NR_mq_getsetattr 437 ++#define __NR_waitid 438 ++#define __NR_add_key 439 ++#define __NR_request_key 440 ++#define __NR_keyctl 441 ++#define __NR_ioprio_set 442 ++#define __NR_ioprio_get 443 ++#define __NR_inotify_init 444 ++#define __NR_inotify_add_watch 445 ++#define __NR_inotify_rm_watch 446 ++#define __NR_fdatasync 447 ++#define __NR_kexec_load 448 ++#define __NR_migrate_pages 449 ++#define __NR_openat 450 ++#define __NR_mkdirat 451 ++#define __NR_mknodat 452 ++#define __NR_fchownat 453 ++#define __NR_futimesat 454 ++#define __NR_fstatat64 455 ++#define __NR_unlinkat 456 ++#define __NR_renameat 457 ++#define __NR_linkat 458 ++#define __NR_symlinkat 459 ++#define __NR_readlinkat 460 ++#define __NR_fchmodat 461 ++#define __NR_faccessat 462 ++#define __NR_pselect6 463 ++#define __NR_ppoll 464 ++#define __NR_unshare 465 ++#define __NR_set_robust_list 466 ++#define __NR_get_robust_list 467 ++#define __NR_splice 468 ++#define __NR_sync_file_range 469 ++#define __NR_tee 470 ++#define __NR_vmsplice 471 ++#define __NR_move_pages 472 ++#define __NR_getcpu 473 ++#define __NR_epoll_pwait 474 ++#define __NR_utimensat 475 ++#define __NR_signalfd 476 ++#define __NR_timerfd 477 ++#define __NR_eventfd 478 ++#define __NR_recvmmsg 479 ++#define __NR_fallocate 480 ++#define __NR_timerfd_create 481 ++#define __NR_timerfd_settime 482 ++#define __NR_timerfd_gettime 483 ++#define __NR_signalfd4 484 ++#define __NR_eventfd2 485 ++#define __NR_epoll_create1 486 ++#define __NR_dup3 487 ++#define __NR_pipe2 488 ++#define __NR_inotify_init1 489 ++#define __NR_preadv 490 ++#define __NR_pwritev 491 ++#define __NR_rt_tgsigqueueinfo 492 ++#define __NR_perf_event_open 493 ++#define __NR_fanotify_init 494 ++#define __NR_fanotify_mark 495 ++#define __NR_prlimit64 496 ++#define __NR_name_to_handle_at 497 ++#define __NR_open_by_handle_at 498 ++#define __NR_clock_adjtime 499 ++#define __NR_syncfs 500 ++#define __NR_setns 501 ++#define __NR_accept4 502 ++#define __NR_sendmmsg 503 ++#define __NR_process_vm_readv 504 ++#define __NR_process_vm_writev 505 ++#define __NR_kcmp 506 ++#define __NR_finit_module 507 ++#define __NR_sched_setattr 508 ++#define __NR_sched_getattr 509 ++#define __NR_renameat2 510 ++#define __NR_getrandom 511 ++#define __NR_memfd_create 512 ++#define __NR_execveat 513 ++ ++#endif /* _SW_64_UNISTD_H */ +diff --git a/linux-user/elfload.c b/linux-user/elfload.c 
+index cf9e74468b..bada645bf1 100644 +--- a/linux-user/elfload.c ++++ b/linux-user/elfload.c +@@ -1995,6 +1995,24 @@ static bool init_guest_commpage(void) + + #endif /* TARGET_HPPA */ + ++#ifdef TARGET_SW64 ++ ++#define ELF_CLASS ELFCLASS64 ++#define ELF_ARCH EM_SW64 ++ ++#define ELF_START_MMAP (0x30000000000ULL) ++ ++/* TODO: only pc and usp are set up here; the remaining thread state (e.g. ps) still needs to be initialized */ ++static inline void init_thread(struct target_pt_regs *regs, ++ struct image_info *infop) ++{ ++ regs->pc = infop->entry; ++ /* regs->ps = 8; */ ++ regs->usp = infop->start_stack; ++} ++ ++#endif /* TARGET_SW64 */ ++ + #ifdef TARGET_XTENSA + + #define ELF_CLASS ELFCLASS32 +diff --git a/linux-user/host/sw64/host-signal.h b/linux-user/host/sw64/host-signal.h +new file mode 100644 +index 0000000000..6aa981df03 +--- /dev/null ++++ b/linux-user/host/sw64/host-signal.h +@@ -0,0 +1,46 @@ ++/* ++ * host-signal.h: signal info dependent on the host architecture ++ * ++ * Copyright (c) 2022 wxiat ++ * ++ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#ifndef SW64_HOST_SIGNAL_H ++#define SW64_HOST_SIGNAL_H ++ ++static inline uintptr_t host_signal_pc(ucontext_t *uc) ++{ ++ return uc->uc_mcontext.sc_pc; ++} ++ ++static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc) ++{ ++ uc->uc_mcontext.sc_pc = pc; ++} ++ ++static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) ++{ ++ uint32_t *pc = (uint32_t *)host_signal_pc(uc); ++ uint32_t insn = *pc; ++ ++ /* XXX: need kernel patch to get write flag faster */ ++ switch (insn >> 26) { ++ case 0x0d: /* stw */ ++ case 0x0e: /* stb */ ++ case 0x0f: /* stq_u */ ++ case 0x24: /* stf */ ++ case 0x25: /* stg */ ++ case 0x26: /* sts */ ++ case 0x27: /* stt */ ++ case 0x2c: /* stl */ ++ case 0x2d: /* stq */ ++ case 0x2e: /* stl_c */ ++ case 0x2f: /* stq_c */ ++ return true; ++ } ++ return false; ++} ++ ++#endif +diff --git a/linux-user/host/sw64/hostdep.h b/linux-user/host/sw64/hostdep.h +new file mode 100755 +index 0000000000..6493919f26 +--- /dev/null ++++ b/linux-user/host/sw64/hostdep.h +@@ -0,0 +1,14 @@ ++/* ++ * hostdep.h : things which are dependent on the host architecture ++ * ++ * Written by Wang Yuanheng ++ * ++ * Copyright (C) 2022 wxiat ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#ifndef SW_64_HOSTDEP_H ++#define SW_64_HOSTDEP_H ++#endif +diff --git a/linux-user/meson.build b/linux-user/meson.build +index bc41e8c3bc..9e2ccef51b 100644 +--- a/linux-user/meson.build ++++ b/linux-user/meson.build +@@ -50,6 +50,7 @@ subdir('riscv') + subdir('s390x') + subdir('sh4') + subdir('sparc') ++subdir('sw64') + subdir('x86_64') + subdir('xtensa') + +diff --git a/linux-user/sw64/cpu_loop.c b/linux-user/sw64/cpu_loop.c +new file mode 100644 +index 0000000000..ca00e8fdb4 +--- /dev/null ++++ b/linux-user/sw64/cpu_loop.c +@@ -0,0 +1,110 @@ ++/* ++ * qemu user cpu loop ++ * ++ * Copyright (c) 2003-2008 Fabrice Bellard ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++#include "qemu/osdep.h" ++#include "qemu.h" ++#include "user-internals.h" ++#include "cpu_loop-common.h" ++#include "signal-common.h" ++ ++void cpu_loop(CPUSW64State *env) ++{ ++ CPUState *cs = CPU(sw64_env_get_cpu(env)); ++ int trapnr; ++ target_siginfo_t info; ++ abi_long sysret; ++ ++ while (1) { ++ cpu_exec_start(cs); ++ trapnr = cpu_exec(cs); ++ cpu_exec_end(cs); ++ process_queued_cpu_work(cs); ++ ++ switch (trapnr) { ++ case EXCP_OPCDEC: ++ cpu_abort(cs, "ILLEGAL SW64 insn at line %d!", __LINE__); ++ case EXCP_CALL_SYS: ++ switch (env->error_code) { ++ case 0x83: ++ /* CALLSYS */ ++ trapnr = env->ir[IDX_V0]; ++ sysret = do_syscall(env, trapnr, ++ env->ir[IDX_A0], env->ir[IDX_A1], ++ env->ir[IDX_A2], env->ir[IDX_A3], ++ env->ir[IDX_A4], env->ir[IDX_A5], ++ 0, 0); ++ if (sysret == -QEMU_ERESTARTSYS) { ++ env->pc -= 4; ++ break; ++ } ++ if (sysret == -QEMU_ESIGRETURN) { ++ break; ++ } ++ /* Syscall writes 0 to V0 to bypass error check, similar ++ to how this is handled inside the Linux kernel. ++ (Ab)use trapnr temporarily as a boolean indicating error. */ ++ trapnr = (env->ir[IDX_V0] != 0 && sysret < 0); ++ env->ir[IDX_V0] = (trapnr ? -sysret : sysret); ++ env->ir[IDX_A3] = trapnr; ++ break; ++ default: ++ printf("unhandled CALL_SYS entry %lx\n", env->error_code); ++ exit(-1); ++ } ++ break; ++ case EXCP_MMFAULT: ++ info.si_signo = TARGET_SIGSEGV; ++ info.si_errno = 0; ++ info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID ++ ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); ++ info._sifields._sigfault._addr = env->trap_arg0; ++ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); ++ break; ++ case EXCP_ARITH: ++ info.si_signo = TARGET_SIGFPE; ++ info.si_errno = 0; ++ info.si_code = TARGET_FPE_FLTINV; ++ info._sifields._sigfault._addr = env->pc; ++ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); ++ break; ++ case EXCP_INTERRUPT: ++ /* just indicate that signals should be handled asap */ ++ break; ++ default: ++ cpu_abort(cs, "unhandled trap %d", trapnr); ++ } ++ process_pending_signals(env); ++ ++ /* Most of the traps imply a transition through hmcode, which ++ implies an REI instruction has been executed, which means ++ that RX and LOCK_ADDR should be cleared. But there are a ++ few exceptions for traps internal to QEMU.
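++ (hmcode, the SW64 hardware-mode firmware, plays the role that PALcode does on Alpha.)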
*/ ++} ++ ++void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) ++{ ++ int i; ++ ++ for (i = 0; i < 28; i++) { ++ env->ir[i] = ((abi_ulong *)regs)[i]; ++ } ++ env->ir[IDX_SP] = regs->usp; ++ env->pc = regs->pc; ++} +diff --git a/linux-user/sw64/meson.build b/linux-user/sw64/meson.build +new file mode 100644 +index 0000000000..181fe002f2 +--- /dev/null ++++ b/linux-user/sw64/meson.build +@@ -0,0 +1,5 @@ ++syscall_nr_generators += { ++ 'sw64': generator(sh, ++ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], ++ output: '@BASENAME@_nr.h') ++} +diff --git a/linux-user/sw64/signal.c b/linux-user/sw64/signal.c +new file mode 100644 +index 0000000000..31d8204d6b +--- /dev/null ++++ b/linux-user/sw64/signal.c +@@ -0,0 +1,288 @@ ++/* ++ * Emulation of Linux signals ++ * ++ * Copyright (c) 2003 Fabrice Bellard ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, see <http://www.gnu.org/licenses/>. ++ */ ++#include "qemu/osdep.h" ++#include "qemu.h" ++#include "user-internals.h" ++#include "signal-common.h" ++#include "linux-user/trace.h" ++ ++struct target_sigcontext { ++ abi_long sc_onstack; ++ abi_long sc_mask; ++ abi_long sc_pc; ++ abi_long sc_ps; ++ abi_long sc_regs[32]; ++ abi_long sc_ownedfp; ++ abi_long sc_fpregs[32]; ++ abi_ulong sc_fpcr; ++ abi_ulong sc_fp_control; ++ abi_ulong sc_reserved1; ++ abi_ulong sc_reserved2; ++ abi_ulong sc_ssize; ++ abi_ulong sc_sbase; ++ abi_ulong sc_traparg_a0; ++ abi_ulong sc_traparg_a1; ++ abi_ulong sc_traparg_a2; ++ abi_ulong sc_fp_trap_pc; ++ abi_ulong sc_fp_trigger_sum; ++ abi_ulong sc_fp_trigger_inst; ++}; ++ ++struct target_ucontext { ++ abi_ulong tuc_flags; ++ abi_ulong tuc_link; ++ abi_ulong tuc_osf_sigmask; ++ target_stack_t tuc_stack; ++ struct target_sigcontext tuc_mcontext; ++ target_sigset_t tuc_sigmask; ++}; ++ ++struct target_sigframe { ++ struct target_sigcontext sc; ++ unsigned int retcode[3]; ++}; ++ ++struct target_rt_sigframe { ++ target_siginfo_t info; ++ struct target_ucontext uc; ++ unsigned int retcode[3]; ++}; ++ ++#define INSN_MOV_R30_R16 0x47fe0410 ++#define INSN_LDI_R0 0x201f0000 ++#define INSN_CALLSYS 0x00000083 ++ ++static void setup_sigcontext(struct target_sigcontext *sc, CPUSW64State *env, ++ abi_ulong frame_addr, target_sigset_t *set) ++{ ++ int i; ++ ++ __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); ++ __put_user(set->sig[0], &sc->sc_mask); ++ __put_user(env->pc, &sc->sc_pc); ++ __put_user(8, &sc->sc_ps); ++ ++ for (i = 0; i < 31; ++i) { ++ __put_user(env->ir[i], &sc->sc_regs[i]); ++ } ++ __put_user(0, &sc->sc_regs[31]); ++ ++ for (i = 0; i < 31; ++i) { ++ __put_user(env->fr[i], &sc->sc_fpregs[i]); ++ } ++ __put_user(0, &sc->sc_fpregs[31]); ++ __put_user(cpu_sw64_load_fpcr(env), &sc->sc_fpcr); ++ ++ __put_user(0, &sc->sc_traparg_a0); /* FIXME */ ++ __put_user(0, &sc->sc_traparg_a1); /* FIXME */ ++ __put_user(0, &sc->sc_traparg_a2); /* FIXME */ ++} ++ ++static void restore_sigcontext(CPUSW64State
*env, ++ struct target_sigcontext *sc) ++{ ++ uint64_t fpcr; ++ int i; ++ ++ __get_user(env->pc, &sc->sc_pc); ++ ++ for (i = 0; i < 31; ++i) { ++ __get_user(env->ir[i], &sc->sc_regs[i]); ++ } ++ for (i = 0; i < 31; ++i) { ++ __get_user(env->fr[i], &sc->sc_fpregs[i]); ++ } ++ ++ __get_user(fpcr, &sc->sc_fpcr); ++ cpu_sw64_store_fpcr(env, fpcr); ++} ++ ++static inline abi_ulong get_sigframe(struct target_sigaction *sa, ++ CPUSW64State *env, ++ unsigned long framesize) ++{ ++ abi_ulong sp; ++ ++ sp = target_sigsp(get_sp_from_cpustate(env), sa); ++ ++ return (sp - framesize) & -32; ++} ++ ++void setup_frame(int sig, struct target_sigaction *ka, ++ target_sigset_t *set, CPUSW64State *env) ++{ ++ abi_ulong frame_addr, r26; ++ struct target_sigframe *frame; ++ int err = 0; ++ ++ frame_addr = get_sigframe(ka, env, sizeof(*frame)); ++ trace_user_setup_frame(env, frame_addr); ++ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { ++ goto give_sigsegv; ++ } ++ ++ setup_sigcontext(&frame->sc, env, frame_addr, set); ++ ++ if (ka->ka_restorer) { ++ r26 = ka->ka_restorer; ++ } else { ++ __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); ++ __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, ++ &frame->retcode[1]); ++ __put_user(INSN_CALLSYS, &frame->retcode[2]); ++ /* imb() */ ++ r26 = frame_addr + offsetof(struct target_sigframe, retcode); ++ } ++ ++ unlock_user_struct(frame, frame_addr, 1); ++ ++ if (err) { ++give_sigsegv: ++ force_sigsegv(sig); ++ return; ++ } ++ ++ env->ir[IDX_RA] = r26; ++ env->ir[IDX_PV] = env->pc = ka->_sa_handler; ++ env->ir[IDX_A0] = sig; ++ env->ir[IDX_A1] = 0; ++ env->ir[IDX_A2] = frame_addr + offsetof(struct target_sigframe, sc); ++ env->ir[IDX_SP] = frame_addr; ++} ++ ++void setup_rt_frame(int sig, struct target_sigaction *ka, ++ target_siginfo_t *info, ++ target_sigset_t *set, CPUSW64State *env) ++{ ++ abi_ulong frame_addr, r26; ++ struct target_rt_sigframe *frame; ++ int i, err = 0; ++ ++ frame_addr = get_sigframe(ka, env, sizeof(*frame)); ++ trace_user_setup_rt_frame(env, frame_addr); ++ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { ++ goto give_sigsegv; ++ } ++ ++ tswap_siginfo(&frame->info, info); ++ ++ __put_user(0, &frame->uc.tuc_flags); ++ __put_user(0, &frame->uc.tuc_link); ++ __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); ++ ++ target_save_altstack(&frame->uc.tuc_stack, env); ++ ++ setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); ++ for (i = 0; i < TARGET_NSIG_WORDS; ++i) { ++ __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); ++ } ++ ++ if (ka->ka_restorer) { ++ r26 = ka->ka_restorer; ++ } else { ++ __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); ++ __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, ++ &frame->retcode[1]); ++ __put_user(INSN_CALLSYS, &frame->retcode[2]); ++ r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode); ++ } ++ ++ unlock_user_struct(frame, frame_addr, 1); ++ ++ if (err) { ++give_sigsegv: ++ force_sigsegv(sig); ++ return; ++ } ++ ++ env->ir[IDX_RA] = r26; ++ env->ir[IDX_PV] = env->pc = ka->_sa_handler; ++ env->ir[IDX_A0] = sig; ++ env->ir[IDX_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); ++ env->ir[IDX_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); ++ env->ir[IDX_SP] = frame_addr; ++} ++ ++long do_sigreturn(CPUSW64State *env) ++{ ++ struct target_sigcontext *sc; ++ abi_ulong sc_addr = env->ir[IDX_A0]; ++ target_sigset_t target_set; ++ sigset_t set; ++ ++ if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { ++ goto badframe; ++ } ++ ++ target_sigemptyset(&target_set); ++ __get_user(target_set.sig[0],
&sc->sc_mask); ++ ++ target_to_host_sigset_internal(&set, &target_set); ++ set_sigmask(&set); ++ ++ restore_sigcontext(env, sc); ++ unlock_user_struct(sc, sc_addr, 0); ++ return -QEMU_ESIGRETURN; ++ ++badframe: ++ force_sig(TARGET_SIGSEGV); ++ return -QEMU_ESIGRETURN; ++} ++ ++long do_rt_sigreturn(CPUSW64State *env) ++{ ++ abi_ulong frame_addr = env->ir[IDX_A0]; ++ struct target_rt_sigframe *frame; ++ sigset_t set; ++ ++ trace_user_do_rt_sigreturn(env, frame_addr); ++ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { ++ goto badframe; ++ } ++ target_to_host_sigset(&set, &frame->uc.tuc_sigmask); ++ set_sigmask(&set); ++ ++ restore_sigcontext(env, &frame->uc.tuc_mcontext); ++ target_restore_altstack(&frame->uc.tuc_stack, env); ++ ++ unlock_user_struct(frame, frame_addr, 0); ++ return -QEMU_ESIGRETURN; ++ ++badframe: ++ unlock_user_struct(frame, frame_addr, 0); ++ force_sig(TARGET_SIGSEGV); ++ return -QEMU_ESIGRETURN; ++} ++ ++void setup_sigtramp(abi_ulong sigtramp_page) ++{ ++ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0); ++ assert(tramp != NULL); ++ ++ default_sigreturn = sigtramp_page; ++ __put_user(INSN_MOV_R30_R16, &tramp[0]); ++ __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]); ++ __put_user(INSN_CALLSYS, &tramp[2]); ++ ++ default_rt_sigreturn = sigtramp_page + 3 * 4; ++ __put_user(INSN_MOV_R30_R16, &tramp[3]); ++ __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &tramp[4]); ++ __put_user(INSN_CALLSYS, &tramp[5]); ++ ++ unlock_user(tramp, sigtramp_page, 6 * 4); ++} +diff --git a/linux-user/sw64/sockbits.h b/linux-user/sw64/sockbits.h +new file mode 100644 +index 0000000000..0e4c8f012d +--- /dev/null ++++ b/linux-user/sw64/sockbits.h +@@ -0,0 +1 @@ ++#include "../generic/sockbits.h" +diff --git a/linux-user/sw64/syscall.tbl b/linux-user/sw64/syscall.tbl +new file mode 100644 +index 0000000000..d007c7bb07 +--- /dev/null ++++ b/linux-user/sw64/syscall.tbl +@@ -0,0 +1,488 @@ ++# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note ++# ++# system call numbers and entry vectors for sw64 ++# ++# The format is: ++# <number> <abi> <name> <entry point> ++# ++# The <abi> is always "common" for this file ++0 common osf_syscall sw64_syscall_zero ++1 common exit sys_exit ++2 common fork sw64_fork ++3 common read sys_read ++4 common write sys_write ++5 common osf_old_open sys_ni_syscall ++6 common close sys_close ++7 common osf_wait4 sys_osf_wait4 ++8 common osf_old_creat sys_ni_syscall ++9 common link sys_link ++10 common unlink sys_unlink ++11 common osf_execve sys_ni_syscall ++12 common chdir sys_chdir ++13 common fchdir sys_fchdir ++14 common mknod sys_mknod ++15 common chmod sys_chmod ++16 common chown sys_chown ++17 common brk sys_osf_brk ++18 common osf_getfsstat sys_ni_syscall ++19 common lseek sys_lseek ++20 common getxpid sys_getxpid ++21 common osf_mount sys_osf_mount ++22 common umount2 sys_umount ++23 common setuid sys_setuid ++24 common getxuid sys_getxuid ++25 common exec_with_loader sys_ni_syscall ++26 common ptrace sys_ptrace ++27 common osf_nrecvmsg sys_ni_syscall ++28 common osf_nsendmsg sys_ni_syscall ++29 common osf_nrecvfrom sys_ni_syscall ++30 common osf_naccept sys_ni_syscall ++31 common osf_ngetpeername sys_ni_syscall ++32 common osf_ngetsockname sys_ni_syscall ++33 common access sys_access ++34 common osf_chflags sys_ni_syscall ++35 common osf_fchflags sys_ni_syscall ++36 common sync sys_sync ++37 common kill sys_kill ++38 common osf_old_stat sys_ni_syscall ++39 common setpgid sys_setpgid ++40 common osf_old_lstat sys_ni_syscall ++41 common dup sys_dup ++42 common
pipe sys_sw64_pipe ++43 common osf_set_program_attributes sys_osf_set_program_attributes ++44 common osf_profil sys_ni_syscall ++45 common open sys_open ++46 common osf_old_sigaction sys_ni_syscall ++47 common getxgid sys_getxgid ++48 common osf_sigprocmask sys_osf_sigprocmask ++49 common osf_getlogin sys_ni_syscall ++50 common osf_setlogin sys_ni_syscall ++51 common acct sys_acct ++52 common sigpending sys_sigpending ++54 common ioctl sys_ioctl ++55 common osf_reboot sys_ni_syscall ++56 common osf_revoke sys_ni_syscall ++57 common symlink sys_symlink ++58 common readlink sys_readlink ++59 common execve sys_execve ++60 common umask sys_umask ++61 common chroot sys_chroot ++62 common osf_old_fstat sys_ni_syscall ++63 common getpgrp sys_getpgrp ++64 common getpagesize sys_getpagesize ++65 common osf_mremap sys_ni_syscall ++66 common vfork sw64_vfork ++67 common stat sys_newstat ++68 common lstat sys_newlstat ++69 common osf_sbrk sys_ni_syscall ++70 common osf_sstk sys_ni_syscall ++71 common mmap sys_osf_mmap ++72 common osf_old_vadvise sys_ni_syscall ++73 common munmap sys_munmap ++74 common mprotect sys_mprotect ++75 common madvise sys_madvise ++76 common vhangup sys_vhangup ++77 common osf_kmodcall sys_ni_syscall ++78 common osf_mincore sys_ni_syscall ++79 common getgroups sys_getgroups ++80 common setgroups sys_setgroups ++81 common osf_old_getpgrp sys_ni_syscall ++82 common setpgrp sys_setpgid ++83 common osf_setitimer compat_sys_setitimer ++84 common osf_old_wait sys_ni_syscall ++85 common osf_table sys_ni_syscall ++86 common osf_getitimer compat_sys_getitimer ++87 common gethostname sys_gethostname ++88 common sethostname sys_sethostname ++89 common getdtablesize sys_getdtablesize ++90 common dup2 sys_dup2 ++91 common fstat sys_newfstat ++92 common fcntl sys_fcntl ++93 common osf_select sys_osf_select ++94 common poll sys_poll ++95 common fsync sys_fsync ++96 common setpriority sys_setpriority ++97 common socket sys_socket ++98 common connect sys_connect ++99 common accept sys_accept ++100 common getpriority sys_osf_getpriority ++101 common send sys_send ++102 common recv sys_recv ++103 common sigreturn sys_sigreturn ++104 common bind sys_bind ++105 common setsockopt sys_setsockopt ++106 common listen sys_listen ++107 common osf_plock sys_ni_syscall ++108 common osf_old_sigvec sys_ni_syscall ++109 common osf_old_sigblock sys_ni_syscall ++110 common osf_old_sigsetmask sys_ni_syscall ++111 common sigsuspend sys_sigsuspend ++112 common osf_sigstack sys_osf_sigstack ++113 common recvmsg sys_recvmsg ++114 common sendmsg sys_sendmsg ++115 common osf_old_vtrace sys_ni_syscall ++116 common osf_gettimeofday sys_osf_gettimeofday ++117 common osf_getrusage sys_osf_getrusage ++118 common getsockopt sys_getsockopt ++120 common readv sys_osf_readv ++121 common writev sys_osf_writev ++122 common osf_settimeofday sys_osf_settimeofday ++123 common fchown sys_fchown ++124 common fchmod sys_fchmod ++125 common recvfrom sys_recvfrom ++126 common setreuid sys_setreuid ++127 common setregid sys_setregid ++128 common rename sys_rename ++129 common truncate sys_truncate ++130 common ftruncate sys_ftruncate ++131 common flock sys_flock ++132 common setgid sys_setgid ++133 common sendto sys_sendto ++134 common shutdown sys_shutdown ++135 common socketpair sys_socketpair ++136 common mkdir sys_mkdir ++137 common rmdir sys_rmdir ++138 common osf_utimes sys_osf_utimes ++139 common osf_old_sigreturn sys_ni_syscall ++140 common osf_adjtime sys_ni_syscall ++141 common getpeername sys_getpeername ++142 common 
osf_gethostid sys_ni_syscall ++143 common osf_sethostid sys_ni_syscall ++144 common getrlimit sys_getrlimit ++145 common setrlimit sys_setrlimit ++146 common osf_old_killpg sys_ni_syscall ++147 common setsid sys_setsid ++148 common quotactl sys_quotactl ++149 common osf_oldquota sys_ni_syscall ++150 common getsockname sys_getsockname ++153 common osf_pid_block sys_ni_syscall ++154 common osf_pid_unblock sys_ni_syscall ++156 common sigaction sys_osf_sigaction ++157 common osf_sigwaitprim sys_ni_syscall ++158 common osf_nfssvc sys_ni_syscall ++159 common osf_getdirentries sys_osf_getdirentries ++160 common osf_statfs sys_osf_statfs ++161 common osf_fstatfs sys_osf_fstatfs ++163 common osf_asynch_daemon sys_ni_syscall ++164 common osf_getfh sys_ni_syscall ++165 common osf_getdomainname sys_osf_getdomainname ++166 common setdomainname sys_setdomainname ++169 common osf_exportfs sys_ni_syscall ++181 common osf_alt_plock sys_ni_syscall ++184 common osf_getmnt sys_ni_syscall ++187 common osf_alt_sigpending sys_ni_syscall ++188 common osf_alt_setsid sys_ni_syscall ++199 common osf_swapon sys_swapon ++200 common msgctl sys_old_msgctl ++201 common msgget sys_msgget ++202 common msgrcv sys_msgrcv ++203 common msgsnd sys_msgsnd ++204 common semctl sys_old_semctl ++205 common semget sys_semget ++206 common semop sys_semop ++207 common osf_utsname sys_osf_utsname ++208 common lchown sys_lchown ++209 common shmat sys_shmat ++210 common shmctl sys_old_shmctl ++211 common shmdt sys_shmdt ++212 common shmget sys_shmget ++213 common osf_mvalid sys_ni_syscall ++214 common osf_getaddressconf sys_ni_syscall ++215 common osf_msleep sys_ni_syscall ++216 common osf_mwakeup sys_ni_syscall ++217 common msync sys_msync ++218 common osf_signal sys_ni_syscall ++219 common osf_utc_gettime sys_ni_syscall ++220 common osf_utc_adjtime sys_ni_syscall ++222 common osf_security sys_ni_syscall ++223 common osf_kloadcall sys_ni_syscall ++224 common osf_stat sys_osf_stat ++225 common osf_lstat sys_osf_lstat ++226 common osf_fstat sys_osf_fstat ++227 common osf_statfs64 sys_osf_statfs64 ++228 common osf_fstatfs64 sys_osf_fstatfs64 ++233 common getpgid sys_getpgid ++234 common getsid sys_getsid ++235 common sigaltstack sys_sigaltstack ++236 common osf_waitid sys_ni_syscall ++237 common osf_priocntlset sys_ni_syscall ++238 common osf_sigsendset sys_ni_syscall ++239 common osf_set_speculative sys_ni_syscall ++240 common osf_msfs_syscall sys_ni_syscall ++241 common osf_sysinfo sys_osf_sysinfo ++242 common osf_uadmin sys_ni_syscall ++243 common osf_fuser sys_ni_syscall ++244 common osf_proplist_syscall sys_osf_proplist_syscall ++245 common osf_ntp_adjtime sys_ni_syscall ++246 common osf_ntp_gettime sys_ni_syscall ++247 common osf_pathconf sys_ni_syscall ++248 common osf_fpathconf sys_ni_syscall ++250 common osf_uswitch sys_ni_syscall ++251 common osf_usleep_thread sys_osf_usleep_thread ++252 common osf_audcntl sys_ni_syscall ++253 common osf_audgen sys_ni_syscall ++254 common sysfs sys_sysfs ++255 common osf_subsys_info sys_ni_syscall ++256 common osf_getsysinfo sys_osf_getsysinfo ++257 common osf_setsysinfo sys_osf_setsysinfo ++258 common osf_afs_syscall sys_ni_syscall ++259 common osf_swapctl sys_ni_syscall ++260 common osf_memcntl sys_ni_syscall ++261 common osf_fdatasync sys_ni_syscall ++300 common bdflush sys_bdflush ++301 common sethae sys_sethae ++302 common mount sys_mount ++303 common old_adjtimex sys_old_adjtimex ++304 common swapoff sys_swapoff ++305 common getdents sys_getdents ++306 common create_module sys_ni_syscall 
++307 common init_module sys_init_module ++308 common delete_module sys_delete_module ++309 common get_kernel_syms sys_ni_syscall ++310 common syslog sys_syslog ++311 common reboot sys_reboot ++312 common clone sw64_clone ++313 common uselib sys_uselib ++314 common mlock sys_mlock ++315 common munlock sys_munlock ++316 common mlockall sys_mlockall ++317 common munlockall sys_munlockall ++318 common sysinfo sys_sysinfo ++319 common _sysctl sys_ni_syscall ++# 320 was sys_idle ++321 common oldumount sys_oldumount ++322 common swapon sys_swapon ++323 common times sys_times ++324 common personality sys_personality ++325 common setfsuid sys_setfsuid ++326 common setfsgid sys_setfsgid ++327 common ustat sys_ustat ++328 common statfs sys_statfs ++329 common fstatfs sys_fstatfs ++330 common sched_setparam sys_sched_setparam ++331 common sched_getparam sys_sched_getparam ++332 common sched_setscheduler sys_sched_setscheduler ++333 common sched_getscheduler sys_sched_getscheduler ++334 common sched_yield sys_sched_yield ++335 common sched_get_priority_max sys_sched_get_priority_max ++336 common sched_get_priority_min sys_sched_get_priority_min ++337 common sched_rr_get_interval sys_sched_rr_get_interval ++338 common afs_syscall sys_ni_syscall ++339 common uname sys_newuname ++340 common nanosleep sys_nanosleep ++341 common mremap sys_mremap ++342 common nfsservctl sys_ni_syscall ++343 common setresuid sys_setresuid ++344 common getresuid sys_getresuid ++345 common pciconfig_read sys_pciconfig_read ++346 common pciconfig_write sys_pciconfig_write ++347 common query_module sys_ni_syscall ++348 common prctl sys_prctl ++349 common pread64 sys_pread64 ++350 common pwrite64 sys_pwrite64 ++351 common rt_sigreturn sys_rt_sigreturn ++352 common rt_sigaction sys_rt_sigaction ++353 common rt_sigprocmask sys_rt_sigprocmask ++354 common rt_sigpending sys_rt_sigpending ++355 common rt_sigtimedwait sys_rt_sigtimedwait ++356 common rt_sigqueueinfo sys_rt_sigqueueinfo ++357 common rt_sigsuspend sys_rt_sigsuspend ++358 common select sys_select ++359 common gettimeofday sys_gettimeofday ++360 common settimeofday sys_settimeofday ++361 common getitimer sys_getitimer ++362 common setitimer sys_setitimer ++363 common utimes sys_utimes ++364 common getrusage sys_getrusage ++365 common wait4 sys_wait4 ++366 common adjtimex sys_adjtimex ++367 common getcwd sys_getcwd ++368 common capget sys_capget ++369 common capset sys_capset ++370 common sendfile sys_sendfile64 ++371 common setresgid sys_setresgid ++372 common getresgid sys_getresgid ++373 common dipc sys_ni_syscall ++374 common pivot_root sys_pivot_root ++375 common mincore sys_mincore ++376 common pciconfig_iobase sys_pciconfig_iobase ++377 common getdents64 sys_getdents64 ++378 common gettid sys_gettid ++379 common readahead sys_readahead ++# 380 is unused ++381 common tkill sys_tkill ++382 common setxattr sys_setxattr ++383 common lsetxattr sys_lsetxattr ++384 common fsetxattr sys_fsetxattr ++385 common getxattr sys_getxattr ++386 common lgetxattr sys_lgetxattr ++387 common fgetxattr sys_fgetxattr ++388 common listxattr sys_listxattr ++389 common llistxattr sys_llistxattr ++390 common flistxattr sys_flistxattr ++391 common removexattr sys_removexattr ++392 common lremovexattr sys_lremovexattr ++393 common fremovexattr sys_fremovexattr ++394 common futex sys_futex ++395 common sched_setaffinity sys_sched_setaffinity ++396 common sched_getaffinity sys_sched_getaffinity ++397 common tuxcall sys_ni_syscall ++398 common io_setup sys_io_setup ++399 common io_destroy 
sys_io_destroy ++400 common io_getevents sys_io_getevents ++401 common io_submit sys_io_submit ++402 common io_cancel sys_io_cancel ++405 common exit_group sys_exit_group ++406 common lookup_dcookie sys_lookup_dcookie ++407 common epoll_create sys_epoll_create ++408 common epoll_ctl sys_epoll_ctl ++409 common epoll_wait sys_epoll_wait ++410 common remap_file_pages sys_remap_file_pages ++411 common set_tid_address sys_set_tid_address ++412 common restart_syscall sys_restart_syscall ++413 common fadvise64 sys_fadvise64 ++414 common timer_create sys_timer_create ++415 common timer_settime sys_timer_settime ++416 common timer_gettime sys_timer_gettime ++417 common timer_getoverrun sys_timer_getoverrun ++418 common timer_delete sys_timer_delete ++419 common clock_settime sys_clock_settime ++420 common clock_gettime sys_clock_gettime ++421 common clock_getres sys_clock_getres ++422 common clock_nanosleep sys_clock_nanosleep ++423 common semtimedop sys_semtimedop ++424 common tgkill sys_tgkill ++425 common stat64 sys_stat64 ++426 common lstat64 sys_lstat64 ++427 common fstat64 sys_fstat64 ++428 common vserver sys_ni_syscall ++429 common mbind sys_ni_syscall ++430 common get_mempolicy sys_ni_syscall ++431 common set_mempolicy sys_ni_syscall ++432 common mq_open sys_mq_open ++433 common mq_unlink sys_mq_unlink ++434 common mq_timedsend sys_mq_timedsend ++435 common mq_timedreceive sys_mq_timedreceive ++436 common mq_notify sys_mq_notify ++437 common mq_getsetattr sys_mq_getsetattr ++438 common waitid sys_waitid ++439 common add_key sys_add_key ++440 common request_key sys_request_key ++441 common keyctl sys_keyctl ++442 common ioprio_set sys_ioprio_set ++443 common ioprio_get sys_ioprio_get ++444 common inotify_init sys_inotify_init ++445 common inotify_add_watch sys_inotify_add_watch ++446 common inotify_rm_watch sys_inotify_rm_watch ++447 common fdatasync sys_fdatasync ++448 common kexec_load sys_kexec_load ++449 common migrate_pages sys_migrate_pages ++450 common openat sys_openat ++451 common mkdirat sys_mkdirat ++452 common mknodat sys_mknodat ++453 common fchownat sys_fchownat ++454 common futimesat sys_futimesat ++455 common fstatat64 sys_fstatat64 ++456 common unlinkat sys_unlinkat ++457 common renameat sys_renameat ++458 common linkat sys_linkat ++459 common symlinkat sys_symlinkat ++460 common readlinkat sys_readlinkat ++461 common fchmodat sys_fchmodat ++462 common faccessat sys_faccessat ++463 common pselect6 sys_pselect6 ++464 common ppoll sys_ppoll ++465 common unshare sys_unshare ++466 common set_robust_list sys_set_robust_list ++467 common get_robust_list sys_get_robust_list ++468 common splice sys_splice ++469 common sync_file_range sys_sync_file_range ++470 common tee sys_tee ++471 common vmsplice sys_vmsplice ++472 common move_pages sys_move_pages ++473 common getcpu sys_getcpu ++474 common epoll_pwait sys_epoll_pwait ++475 common utimensat sys_utimensat ++476 common signalfd sys_signalfd ++477 common timerfd sys_ni_syscall ++478 common eventfd sys_eventfd ++479 common recvmmsg sys_recvmmsg ++480 common fallocate sys_fallocate ++481 common timerfd_create sys_timerfd_create ++482 common timerfd_settime sys_timerfd_settime ++483 common timerfd_gettime sys_timerfd_gettime ++484 common signalfd4 sys_signalfd4 ++485 common eventfd2 sys_eventfd2 ++486 common epoll_create1 sys_epoll_create1 ++487 common dup3 sys_dup3 ++488 common pipe2 sys_pipe2 ++489 common inotify_init1 sys_inotify_init1 ++490 common preadv sys_preadv ++491 common pwritev sys_pwritev ++492 common rt_tgsigqueueinfo 
sys_rt_tgsigqueueinfo ++493 common perf_event_open sys_perf_event_open ++494 common fanotify_init sys_fanotify_init ++495 common fanotify_mark sys_fanotify_mark ++496 common prlimit64 sys_prlimit64 ++497 common name_to_handle_at sys_name_to_handle_at ++498 common open_by_handle_at sys_open_by_handle_at ++499 common clock_adjtime sys_clock_adjtime ++500 common syncfs sys_syncfs ++501 common setns sys_setns ++502 common accept4 sys_accept4 ++503 common sendmmsg sys_sendmmsg ++504 common process_vm_readv sys_process_vm_readv ++505 common process_vm_writev sys_process_vm_writev ++506 common kcmp sys_kcmp ++507 common finit_module sys_finit_module ++508 common sched_setattr sys_sched_setattr ++509 common sched_getattr sys_sched_getattr ++510 common renameat2 sys_renameat2 ++511 common getrandom sys_getrandom ++512 common memfd_create sys_memfd_create ++513 common execveat sys_execveat ++514 common seccomp sys_seccomp ++515 common bpf sys_bpf ++516 common userfaultfd sys_userfaultfd ++517 common membarrier sys_membarrier ++518 common mlock2 sys_mlock2 ++519 common copy_file_range sys_copy_file_range ++520 common preadv2 sys_preadv2 ++521 common pwritev2 sys_pwritev2 ++522 common statx sys_statx ++523 common io_pgetevents sys_io_pgetevents ++524 common pkey_mprotect sys_pkey_mprotect ++525 common pkey_alloc sys_pkey_alloc ++526 common pkey_free sys_pkey_free ++527 common rseq sys_rseq ++528 common statfs64 sys_statfs64 ++529 common fstatfs64 sys_fstatfs64 ++530 common getegid sys_getegid ++531 common geteuid sys_geteuid ++532 common getppid sys_getppid ++# all other architectures have common numbers for new syscalls; sw64 ++# is the exception. ++534 common pidfd_send_signal sys_pidfd_send_signal ++535 common io_uring_setup sys_io_uring_setup ++536 common io_uring_enter sys_io_uring_enter ++537 common io_uring_register sys_io_uring_register ++538 common open_tree sys_open_tree ++539 common move_mount sys_move_mount ++540 common fsopen sys_fsopen ++541 common fsconfig sys_fsconfig ++542 common fsmount sys_fsmount ++543 common fspick sys_fspick ++544 common pidfd_open sys_pidfd_open ++# 545 reserved for clone3 ++546 common close_range sys_close_range ++547 common openat2 sys_openat2 ++548 common pidfd_getfd sys_pidfd_getfd ++549 common faccessat2 sys_faccessat2 ++550 common process_madvise sys_process_madvise ++551 common epoll_pwait2 sys_epoll_pwait2 ++552 common mount_setattr sys_mount_setattr ++# 553 reserved for quotactl_path ++554 common landlock_create_ruleset sys_landlock_create_ruleset ++555 common landlock_add_rule sys_landlock_add_rule ++556 common landlock_restrict_self sys_landlock_restrict_self +diff --git a/linux-user/sw64/syscall_nr.h b/linux-user/sw64/syscall_nr.h +new file mode 100644 +index 0000000000..91737af322 +--- /dev/null ++++ b/linux-user/sw64/syscall_nr.h +@@ -0,0 +1,471 @@ ++/* ++ * This file contains the system call numbers.
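++ * They follow the sw64 kernel's numbering; see syscall.tbl in this directory for the corresponding entry points.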
++ */ ++#define TARGET_NR_osf_syscall 0 /* not implemented */ ++#define TARGET_NR_exit 1 ++#define TARGET_NR_fork 2 ++#define TARGET_NR_read 3 ++#define TARGET_NR_write 4 ++#define TARGET_NR_osf_old_open 5 /* not implemented */ ++#define TARGET_NR_close 6 ++#define TARGET_NR_osf_wait4 7 ++#define TARGET_NR_osf_old_creat 8 /* not implemented */ ++#define TARGET_NR_link 9 ++#define TARGET_NR_unlink 10 ++#define TARGET_NR_osf_execve 11 /* not implemented */ ++#define TARGET_NR_chdir 12 ++#define TARGET_NR_fchdir 13 ++#define TARGET_NR_mknod 14 ++#define TARGET_NR_chmod 15 ++#define TARGET_NR_chown 16 ++#define TARGET_NR_brk 17 ++#define TARGET_NR_osf_getfsstat 18 /* not implemented */ ++#define TARGET_NR_lseek 19 ++#define TARGET_NR_getxpid 20 ++#define TARGET_NR_osf_mount 21 ++#define TARGET_NR_umount 22 ++#define TARGET_NR_setuid 23 ++#define TARGET_NR_getxuid 24 ++#define TARGET_NR_exec_with_loader 25 /* not implemented */ ++#define TARGET_NR_ptrace 26 ++#define TARGET_NR_osf_nrecvmsg 27 /* not implemented */ ++#define TARGET_NR_osf_nsendmsg 28 /* not implemented */ ++#define TARGET_NR_osf_nrecvfrom 29 /* not implemented */ ++#define TARGET_NR_osf_naccept 30 /* not implemented */ ++#define TARGET_NR_osf_ngetpeername 31 /* not implemented */ ++#define TARGET_NR_osf_ngetsockname 32 /* not implemented */ ++#define TARGET_NR_access 33 ++#define TARGET_NR_osf_chflags 34 /* not implemented */ ++#define TARGET_NR_osf_fchflags 35 /* not implemented */ ++#define TARGET_NR_sync 36 ++#define TARGET_NR_kill 37 ++#define TARGET_NR_osf_old_stat 38 /* not implemented */ ++#define TARGET_NR_setpgid 39 ++#define TARGET_NR_osf_old_lstat 40 /* not implemented */ ++#define TARGET_NR_dup 41 ++#define TARGET_NR_pipe 42 ++#define TARGET_NR_osf_set_program_attributes 43 ++#define TARGET_NR_osf_profil 44 /* not implemented */ ++#define TARGET_NR_open 45 ++#define TARGET_NR_osf_old_sigaction 46 /* not implemented */ ++#define TARGET_NR_getxgid 47 ++#define TARGET_NR_osf_sigprocmask 48 ++#define TARGET_NR_osf_getlogin 49 /* not implemented */ ++#define TARGET_NR_osf_setlogin 50 /* not implemented */ ++#define TARGET_NR_acct 51 ++#define TARGET_NR_sigpending 52 ++ ++#define TARGET_NR_ioctl 54 ++#define TARGET_NR_osf_reboot 55 /* not implemented */ ++#define TARGET_NR_osf_revoke 56 /* not implemented */ ++#define TARGET_NR_symlink 57 ++#define TARGET_NR_readlink 58 ++#define TARGET_NR_execve 59 ++#define TARGET_NR_umask 60 ++#define TARGET_NR_chroot 61 ++#define TARGET_NR_osf_old_fstat 62 /* not implemented */ ++#define TARGET_NR_getpgrp 63 ++#define TARGET_NR_getpagesize 64 ++#define TARGET_NR_osf_mremap 65 /* not implemented */ ++#define TARGET_NR_vfork 66 ++#define TARGET_NR_stat 67 ++#define TARGET_NR_lstat 68 ++#define TARGET_NR_osf_sbrk 69 /* not implemented */ ++#define TARGET_NR_osf_sstk 70 /* not implemented */ ++#define TARGET_NR_mmap 71 /* OSF/1 mmap is superset of Linux */ ++#define TARGET_NR_osf_old_vadvise 72 /* not implemented */ ++#define TARGET_NR_munmap 73 ++#define TARGET_NR_mprotect 74 ++#define TARGET_NR_madvise 75 ++#define TARGET_NR_vhangup 76 ++#define TARGET_NR_osf_kmodcall 77 /* not implemented */ ++#define TARGET_NR_osf_mincore 78 /* not implemented */ ++#define TARGET_NR_getgroups 79 ++#define TARGET_NR_setgroups 80 ++#define TARGET_NR_osf_old_getpgrp 81 /* not implemented */ ++#define TARGET_NR_setpgrp 82 /* BSD alias for setpgid */ ++#define TARGET_NR_osf_setitimer 83 ++#define TARGET_NR_osf_old_wait 84 /* not implemented */ ++#define TARGET_NR_osf_table 85 /* not implemented */ ++#define 
TARGET_NR_osf_getitimer 86 ++#define TARGET_NR_gethostname 87 ++#define TARGET_NR_sethostname 88 ++#define TARGET_NR_getdtablesize 89 ++#define TARGET_NR_dup2 90 ++#define TARGET_NR_fstat 91 ++#define TARGET_NR_fcntl 92 ++#define TARGET_NR_osf_select 93 ++#define TARGET_NR_poll 94 ++#define TARGET_NR_fsync 95 ++#define TARGET_NR_setpriority 96 ++#define TARGET_NR_socket 97 ++#define TARGET_NR_connect 98 ++#define TARGET_NR_accept 99 ++#define TARGET_NR_getpriority 100 ++#define TARGET_NR_send 101 ++#define TARGET_NR_recv 102 ++#define TARGET_NR_sigreturn 103 ++#define TARGET_NR_bind 104 ++#define TARGET_NR_setsockopt 105 ++#define TARGET_NR_listen 106 ++#define TARGET_NR_osf_plock 107 /* not implemented */ ++#define TARGET_NR_osf_old_sigvec 108 /* not implemented */ ++#define TARGET_NR_osf_old_sigblock 109 /* not implemented */ ++#define TARGET_NR_osf_old_sigsetmask 110 /* not implemented */ ++#define TARGET_NR_sigsuspend 111 ++#define TARGET_NR_osf_sigstack 112 ++#define TARGET_NR_recvmsg 113 ++#define TARGET_NR_sendmsg 114 ++#define TARGET_NR_osf_old_vtrace 115 /* not implemented */ ++#define TARGET_NR_osf_gettimeofday 116 ++#define TARGET_NR_osf_getrusage 117 ++#define TARGET_NR_getsockopt 118 ++ ++#define TARGET_NR_readv 120 ++#define TARGET_NR_writev 121 ++#define TARGET_NR_osf_settimeofday 122 ++#define TARGET_NR_fchown 123 ++#define TARGET_NR_fchmod 124 ++#define TARGET_NR_recvfrom 125 ++#define TARGET_NR_setreuid 126 ++#define TARGET_NR_setregid 127 ++#define TARGET_NR_rename 128 ++#define TARGET_NR_truncate 129 ++#define TARGET_NR_ftruncate 130 ++#define TARGET_NR_flock 131 ++#define TARGET_NR_setgid 132 ++#define TARGET_NR_sendto 133 ++#define TARGET_NR_shutdown 134 ++#define TARGET_NR_socketpair 135 ++#define TARGET_NR_mkdir 136 ++#define TARGET_NR_rmdir 137 ++#define TARGET_NR_osf_utimes 138 ++#define TARGET_NR_osf_old_sigreturn 139 /* not implemented */ ++#define TARGET_NR_osf_adjtime 140 /* not implemented */ ++#define TARGET_NR_getpeername 141 ++#define TARGET_NR_osf_gethostid 142 /* not implemented */ ++#define TARGET_NR_osf_sethostid 143 /* not implemented */ ++#define TARGET_NR_getrlimit 144 ++#define TARGET_NR_setrlimit 145 ++#define TARGET_NR_osf_old_killpg 146 /* not implemented */ ++#define TARGET_NR_setsid 147 ++#define TARGET_NR_quotactl 148 ++#define TARGET_NR_osf_oldquota 149 /* not implemented */ ++#define TARGET_NR_getsockname 150 ++ ++#define TARGET_NR_osf_pid_block 153 /* not implemented */ ++#define TARGET_NR_osf_pid_unblock 154 /* not implemented */ ++ ++#define TARGET_NR_sigaction 156 ++#define TARGET_NR_osf_sigwaitprim 157 /* not implemented */ ++#define TARGET_NR_osf_nfssvc 158 /* not implemented */ ++#define TARGET_NR_osf_getdirentries 159 ++#define TARGET_NR_osf_statfs 160 ++#define TARGET_NR_osf_fstatfs 161 ++ ++#define TARGET_NR_osf_asynch_daemon 163 /* not implemented */ ++#define TARGET_NR_osf_getfh 164 /* not implemented */ ++#define TARGET_NR_osf_getdomainname 165 ++#define TARGET_NR_setdomainname 166 ++ ++#define TARGET_NR_osf_exportfs 169 /* not implemented */ ++ ++#define TARGET_NR_osf_alt_plock 181 /* not implemented */ ++ ++#define TARGET_NR_osf_getmnt 184 /* not implemented */ ++ ++#define TARGET_NR_osf_alt_sigpending 187 /* not implemented */ ++#define TARGET_NR_osf_alt_setsid 188 /* not implemented */ ++ ++#define TARGET_NR_osf_swapon 199 ++#define TARGET_NR_msgctl 200 ++#define TARGET_NR_msgget 201 ++#define TARGET_NR_msgrcv 202 ++#define TARGET_NR_msgsnd 203 ++#define TARGET_NR_semctl 204 ++#define TARGET_NR_semget 205 ++#define 
TARGET_NR_semop 206 ++#define TARGET_NR_osf_utsname 207 ++#define TARGET_NR_lchown 208 ++#define TARGET_NR_osf_shmat 209 ++#define TARGET_NR_shmctl 210 ++#define TARGET_NR_shmdt 211 ++#define TARGET_NR_shmget 212 ++#define TARGET_NR_osf_mvalid 213 /* not implemented */ ++#define TARGET_NR_osf_getaddressconf 214 /* not implemented */ ++#define TARGET_NR_osf_msleep 215 /* not implemented */ ++#define TARGET_NR_osf_mwakeup 216 /* not implemented */ ++#define TARGET_NR_msync 217 ++#define TARGET_NR_osf_signal 218 /* not implemented */ ++#define TARGET_NR_osf_utc_gettime 219 /* not implemented */ ++#define TARGET_NR_osf_utc_adjtime 220 /* not implemented */ ++ ++#define TARGET_NR_osf_security 222 /* not implemented */ ++#define TARGET_NR_osf_kloadcall 223 /* not implemented */ ++ ++#define TARGET_NR_osf_stat 224 ++#define TARGET_NR_osf_lstat 225 ++#define TARGET_NR_osf_fstat 226 ++#define TARGET_NR_osf_statfs64 227 ++#define TARGET_NR_osf_fstatfs64 228 ++ ++#define TARGET_NR_getpgid 233 ++#define TARGET_NR_getsid 234 ++#define TARGET_NR_sigaltstack 235 ++#define TARGET_NR_osf_waitid 236 /* not implemented */ ++#define TARGET_NR_osf_priocntlset 237 /* not implemented */ ++#define TARGET_NR_osf_sigsendset 238 /* not implemented */ ++#define TARGET_NR_osf_set_speculative 239 /* not implemented */ ++#define TARGET_NR_osf_msfs_syscall 240 /* not implemented */ ++#define TARGET_NR_osf_sysinfo 241 ++#define TARGET_NR_osf_uadmin 242 /* not implemented */ ++#define TARGET_NR_osf_fuser 243 /* not implemented */ ++#define TARGET_NR_osf_proplist_syscall 244 ++#define TARGET_NR_osf_ntp_adjtime 245 /* not implemented */ ++#define TARGET_NR_osf_ntp_gettime 246 /* not implemented */ ++#define TARGET_NR_osf_pathconf 247 /* not implemented */ ++#define TARGET_NR_osf_fpathconf 248 /* not implemented */ ++ ++#define TARGET_NR_osf_uswitch 250 /* not implemented */ ++#define TARGET_NR_osf_usleep_thread 251 ++#define TARGET_NR_osf_audcntl 252 /* not implemented */ ++#define TARGET_NR_osf_audgen 253 /* not implemented */ ++#define TARGET_NR_sysfs 254 ++#define TARGET_NR_osf_subsys_info 255 /* not implemented */ ++#define TARGET_NR_osf_getsysinfo 256 ++#define TARGET_NR_osf_setsysinfo 257 ++#define TARGET_NR_osf_afs_syscall 258 /* not implemented */ ++#define TARGET_NR_osf_swapctl 259 /* not implemented */ ++#define TARGET_NR_osf_memcntl 260 /* not implemented */ ++#define TARGET_NR_osf_fdatasync 261 /* not implemented */ ++ ++/* ++ * Ignore legacy syscalls that we don't use. 
++ */ ++#define TARGET_IGNORE_alarm ++#define TARGET_IGNORE_creat ++#define TARGET_IGNORE_getegid ++#define TARGET_IGNORE_geteuid ++#define TARGET_IGNORE_getgid ++#define TARGET_IGNORE_getpid ++#define TARGET_IGNORE_getppid ++#define TARGET_IGNORE_getuid ++#define TARGET_IGNORE_pause ++#define TARGET_IGNORE_time ++#define TARGET_IGNORE_utime ++#define TARGET_IGNORE_umount2 ++ ++/* ++ * Linux-specific system calls begin at 300 ++ */ ++#define TARGET_NR_bdflush 300 ++#define TARGET_NR_sethae 301 ++#define TARGET_NR_mount 302 ++#define TARGET_NR_old_adjtimex 303 ++#define TARGET_NR_swapoff 304 ++#define TARGET_NR_getdents 305 ++#define TARGET_NR_create_module 306 ++#define TARGET_NR_init_module 307 ++#define TARGET_NR_delete_module 308 ++#define TARGET_NR_get_kernel_syms 309 ++#define TARGET_NR_syslog 310 ++#define TARGET_NR_reboot 311 ++#define TARGET_NR_clone 312 ++#define TARGET_NR_uselib 313 ++#define TARGET_NR_mlock 314 ++#define TARGET_NR_munlock 315 ++#define TARGET_NR_mlockall 316 ++#define TARGET_NR_munlockall 317 ++#define TARGET_NR_sysinfo 318 ++#define TARGET_NR__sysctl 319 ++/* 320 was sysTARGETidle. */ ++#define TARGET_NR_oldumount 321 ++#define TARGET_NR_swapon 322 ++#define TARGET_NR_times 323 ++#define TARGET_NR_personality 324 ++#define TARGET_NR_setfsuid 325 ++#define TARGET_NR_setfsgid 326 ++#define TARGET_NR_ustat 327 ++#define TARGET_NR_statfs 328 ++#define TARGET_NR_fstatfs 329 ++#define TARGET_NR_sched_setparam 330 ++#define TARGET_NR_sched_getparam 331 ++#define TARGET_NR_sched_setscheduler 332 ++#define TARGET_NR_sched_getscheduler 333 ++#define TARGET_NR_sched_yield 334 ++#define TARGET_NR_sched_get_priority_max 335 ++#define TARGET_NR_sched_get_priority_min 336 ++#define TARGET_NR_sched_rr_get_interval 337 ++#define TARGET_NR_afs_syscall 338 ++#define TARGET_NR_uname 339 ++#define TARGET_NR_nanosleep 340 ++#define TARGET_NR_mremap 341 ++#define TARGET_NR_nfsservctl 342 ++#define TARGET_NR_setresuid 343 ++#define TARGET_NR_getresuid 344 ++#define TARGET_NR_pciconfig_read 345 ++#define TARGET_NR_pciconfig_write 346 ++#define TARGET_NR_query_module 347 ++#define TARGET_NR_prctl 348 ++#define TARGET_NR_pread64 349 ++#define TARGET_NR_pwrite64 350 ++#define TARGET_NR_rt_sigreturn 351 ++#define TARGET_NR_rt_sigaction 352 ++#define TARGET_NR_rt_sigprocmask 353 ++#define TARGET_NR_rt_sigpending 354 ++#define TARGET_NR_rt_sigtimedwait 355 ++#define TARGET_NR_rt_sigqueueinfo 356 ++#define TARGET_NR_rt_sigsuspend 357 ++#define TARGET_NR_select 358 ++#define TARGET_NR_gettimeofday 359 ++#define TARGET_NR_settimeofday 360 ++#define TARGET_NR_getitimer 361 ++#define TARGET_NR_setitimer 362 ++#define TARGET_NR_utimes 363 ++#define TARGET_NR_getrusage 364 ++#define TARGET_NR_wait4 365 ++#define TARGET_NR_adjtimex 366 ++#define TARGET_NR_getcwd 367 ++#define TARGET_NR_capget 368 ++#define TARGET_NR_capset 369 ++#define TARGET_NR_sendfile 370 ++#define TARGET_NR_setresgid 371 ++#define TARGET_NR_getresgid 372 ++#define TARGET_NR_dipc 373 ++#define TARGET_NR_pivot_root 374 ++#define TARGET_NR_mincore 375 ++#define TARGET_NR_pciconfig_iobase 376 ++#define TARGET_NR_getdents64 377 ++#define TARGET_NR_gettid 378 ++#define TARGET_NR_readahead 379 ++/* 380 is unused */ ++#define TARGET_NR_tkill 381 ++#define TARGET_NR_setxattr 382 ++#define TARGET_NR_lsetxattr 383 ++#define TARGET_NR_fsetxattr 384 ++#define TARGET_NR_getxattr 385 ++#define TARGET_NR_lgetxattr 386 ++#define TARGET_NR_fgetxattr 387 ++#define TARGET_NR_listxattr 388 ++#define TARGET_NR_llistxattr 389 ++#define 
TARGET_NR_flistxattr 390 ++#define TARGET_NR_removexattr 391 ++#define TARGET_NR_lremovexattr 392 ++#define TARGET_NR_fremovexattr 393 ++#define TARGET_NR_futex 394 ++#define TARGET_NR_sched_setaffinity 395 ++#define TARGET_NR_sched_getaffinity 396 ++#define TARGET_NR_tuxcall 397 ++#define TARGET_NR_io_setup 398 ++#define TARGET_NR_io_destroy 399 ++#define TARGET_NR_io_getevents 400 ++#define TARGET_NR_io_submit 401 ++#define TARGET_NR_io_cancel 402 ++#define TARGET_NR_exit_group 405 ++#define TARGET_NR_lookup_dcookie 406 ++#define TARGET_NR_epoll_create 407 ++#define TARGET_NR_epoll_ctl 408 ++#define TARGET_NR_epoll_wait 409 ++/* Feb 2007: These three sysTARGETepoll defines shouldn't be here but culling ++ * them would break userspace apps ... we'll kill them off in 2010 :) */ ++#define TARGET_NR_sys_epoll_create TARGET_NR_epoll_create ++#define TARGET_NR_sys_epoll_ctl TARGET_NR_epoll_ctl ++#define TARGET_NR_sys_epoll_wait TARGET_NR_epoll_wait ++#define TARGET_NR_remap_file_pages 410 ++#define TARGET_NR_set_tid_address 411 ++#define TARGET_NR_restart_syscall 412 ++#define TARGET_NR_fadvise64 413 ++#define TARGET_NR_timer_create 414 ++#define TARGET_NR_timer_settime 415 ++#define TARGET_NR_timer_gettime 416 ++#define TARGET_NR_timer_getoverrun 417 ++#define TARGET_NR_timer_delete 418 ++#define TARGET_NR_clock_settime 419 ++#define TARGET_NR_clock_gettime 420 ++#define TARGET_NR_clock_getres 421 ++#define TARGET_NR_clock_nanosleep 422 ++#define TARGET_NR_semtimedop 423 ++#define TARGET_NR_tgkill 424 ++#define TARGET_NR_stat64 425 ++#define TARGET_NR_lstat64 426 ++#define TARGET_NR_fstat64 427 ++#define TARGET_NR_vserver 428 ++#define TARGET_NR_mbind 429 ++#define TARGET_NR_get_mempolicy 430 ++#define TARGET_NR_set_mempolicy 431 ++#define TARGET_NR_mq_open 432 ++#define TARGET_NR_mq_unlink 433 ++#define TARGET_NR_mq_timedsend 434 ++#define TARGET_NR_mq_timedreceive 435 ++#define TARGET_NR_mq_notify 436 ++#define TARGET_NR_mq_getsetattr 437 ++#define TARGET_NR_waitid 438 ++#define TARGET_NR_add_key 439 ++#define TARGET_NR_request_key 440 ++#define TARGET_NR_keyctl 441 ++#define TARGET_NR_ioprio_set 442 ++#define TARGET_NR_ioprio_get 443 ++#define TARGET_NR_inotify_init 444 ++#define TARGET_NR_inotify_add_watch 445 ++#define TARGET_NR_inotify_rm_watch 446 ++#define TARGET_NR_fdatasync 447 ++#define TARGET_NR_kexec_load 448 ++#define TARGET_NR_migrate_pages 449 ++#define TARGET_NR_openat 450 ++#define TARGET_NR_mkdirat 451 ++#define TARGET_NR_mknodat 452 ++#define TARGET_NR_fchownat 453 ++#define TARGET_NR_futimesat 454 ++#define TARGET_NR_fstatat64 455 ++#define TARGET_NR_unlinkat 456 ++#define TARGET_NR_renameat 457 ++#define TARGET_NR_linkat 458 ++#define TARGET_NR_symlinkat 459 ++#define TARGET_NR_readlinkat 460 ++#define TARGET_NR_fchmodat 461 ++#define TARGET_NR_faccessat 462 ++#define TARGET_NR_pselect6 463 ++#define TARGET_NR_ppoll 464 ++#define TARGET_NR_unshare 465 ++#define TARGET_NR_set_robust_list 466 ++#define TARGET_NR_get_robust_list 467 ++#define TARGET_NR_splice 468 ++#define TARGET_NR_sync_file_range 469 ++#define TARGET_NR_tee 470 ++#define TARGET_NR_vmsplice 471 ++#define TARGET_NR_move_pages 472 ++#define TARGET_NR_getcpu 473 ++#define TARGET_NR_epoll_pwait 474 ++#define TARGET_NR_utimensat 475 ++#define TARGET_NR_signalfd 476 ++#define TARGET_NR_timerfd 477 ++#define TARGET_NR_eventfd 478 ++#define TARGET_NR_recvmmsg 479 ++#define TARGET_NR_fallocate 480 ++#define TARGET_NR_timerfd_create 481 ++#define TARGET_NR_timerfd_settime 482 ++#define TARGET_NR_timerfd_gettime 483 
++#define TARGET_NR_signalfd4 484 ++#define TARGET_NR_eventfd2 485 ++#define TARGET_NR_epoll_create1 486 ++#define TARGET_NR_dup3 487 ++#define TARGET_NR_pipe2 488 ++#define TARGET_NR_inotify_init1 489 ++#define TARGET_NR_preadv 490 ++#define TARGET_NR_pwritev 491 ++#define TARGET_NR_rt_tgsigqueueinfo 492 ++#define TARGET_NR_perf_event_open 493 ++#define TARGET_NR_fanotify_init 494 ++#define TARGET_NR_fanotify_mark 495 ++#define TARGET_NR_prlimit64 496 ++#define TARGET_NR_name_to_handle_at 497 ++#define TARGET_NR_open_by_handle_at 498 ++#define TARGET_NR_clock_adjtime 499 ++#define TARGET_NR_syncfs 500 ++#define TARGET_NR_setns 501 ++#define TARGET_NR_accept4 502 ++#define TARGET_NR_sendmmsg 503 ++#define TARGET_NR_process_vm_readv 504 ++#define TARGET_NR_process_vm_writev 505 ++#define TARGET_NR_sw_slave_rwperfmons 506 ++#define TARGET_NR_sys_get_vmflags 507 +diff --git a/linux-user/sw64/syscallhdr.sh b/linux-user/sw64/syscallhdr.sh +new file mode 100644 +index 0000000000..46c166d8ae +--- /dev/null ++++ b/linux-user/sw64/syscallhdr.sh +@@ -0,0 +1,32 @@ ++#!/bin/sh ++# SPDX-License-Identifier: GPL-2.0 ++ ++in="$1" ++out="$2" ++my_abis=`echo "($3)" | tr ',' '|'` ++prefix="$4" ++offset="$5" ++ ++fileguard=LINUX_USER_SW64_`basename "$out" | sed \ ++ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ ++ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` ++grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( ++ printf "#ifndef %s\n" "${fileguard}" ++ printf "#define %s\n" "${fileguard}" ++ printf "\n" ++ ++ nxt=0 ++ while read nr abi name entry ; do ++ if [ -z "$offset" ]; then ++ printf "#define TARGET_NR_%s%s\t%s\n" \ ++ "${prefix}" "${name}" "${nr}" ++ else ++ printf "#define TARGET_NR_%s%s\t(%s + %s)\n" \ ++ "${prefix}" "${name}" "${offset}" "${nr}" ++ fi ++ nxt=$((nr+1)) ++ done ++ ++ printf "\n" ++ printf "#endif /* %s */" "${fileguard}" ++) > "$out" +diff --git a/linux-user/sw64/target_cpu.h b/linux-user/sw64/target_cpu.h +new file mode 100644 +index 0000000000..4553ea337d +--- /dev/null ++++ b/linux-user/sw64/target_cpu.h +@@ -0,0 +1,52 @@ ++/* ++ * SW64 specific CPU ABI and functions for linux-user ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with this library; if not, see . ++ */ ++#ifndef SW64_TARGET_CPU_H ++#define SW64_TARGET_CPU_H ++ ++static inline void cpu_clone_regs_child(CPUSW64State *env, target_ulong newsp, ++ unsigned flags) ++{ ++ if (newsp) { ++ env->ir[IDX_SP] = newsp; ++ } ++ env->ir[IDX_V0] = 0; ++ env->ir[IDX_A3] = 0; ++ env->ir[IDX_A4] = 1; /* OSF/1 secondary return: child */ ++} ++ ++static inline void cpu_clone_regs_parent(CPUSW64State *env, unsigned flags) ++{ ++ /* ++ * OSF/1 secondary return: parent ++ * Note that the kernel does not do this if SETTLS, because the ++ * settls argument register is still live after copy_thread. 
++ */ ++ if (!(flags & CLONE_SETTLS)) { ++ env->ir[IDX_A4] = 0; ++ } ++} ++ ++static inline void cpu_set_tls(CPUSW64State *env, target_ulong newtls) ++{ ++ env->unique = newtls; ++} ++ ++static inline abi_ulong get_sp_from_cpustate(CPUSW64State *state) ++{ ++ return state->ir[IDX_SP]; ++} ++#endif +diff --git a/linux-user/sw64/target_elf.h b/linux-user/sw64/target_elf.h +new file mode 100644 +index 0000000000..be48b6dee3 +--- /dev/null ++++ b/linux-user/sw64/target_elf.h +@@ -0,0 +1,14 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation, or (at your option) any ++ * later version. See the COPYING file in the top-level directory. ++ */ ++ ++#ifndef SW64_TARGET_ELF_H ++#define SW64_TARGET_ELF_H ++static inline const char *cpu_get_model(uint32_t eflags) ++{ ++ return "any"; ++} ++#endif +diff --git a/linux-user/sw64/target_errno_defs.h b/linux-user/sw64/target_errno_defs.h +new file mode 100644 +index 0000000000..fd637f5bc9 +--- /dev/null ++++ b/linux-user/sw64/target_errno_defs.h +@@ -0,0 +1,204 @@ ++#ifndef sw64_TARGET_ERRNO_DEFS_H ++#define sw64_TARGET_ERRNO_DEFS_H ++ ++#include "../generic/target_errno_defs.h" ++ ++/* ++ * Generic target errno overridden with definitions taken ++ * from asm-sw64/errno.h ++ */ ++#undef TARGET_EWOULDBLOCK ++#define TARGET_EWOULDBLOCK TARGET_EAGAIN ++#undef TARGET_EDEADLK ++#define TARGET_EDEADLK 11 ++#undef TARGET_EAGAIN ++#define TARGET_EAGAIN 35 ++#undef TARGET_EINPROGRESS ++#define TARGET_EINPROGRESS 36 ++#undef TARGET_EALREADY ++#define TARGET_EALREADY 37 ++#undef TARGET_ENOTSOCK ++#define TARGET_ENOTSOCK 38 ++#undef TARGET_EDESTADDRREQ ++#define TARGET_EDESTADDRREQ 39 ++#undef TARGET_EMSGSIZE ++#define TARGET_EMSGSIZE 40 ++#undef TARGET_EPROTOTYPE ++#define TARGET_EPROTOTYPE 41 ++#undef TARGET_ENOPROTOOPT ++#define TARGET_ENOPROTOOPT 42 ++#undef TARGET_EPROTONOSUPPORT ++#define TARGET_EPROTONOSUPPORT 43 ++#undef TARGET_ESOCKTNOSUPPORT ++#define TARGET_ESOCKTNOSUPPORT 44 ++#undef TARGET_EOPNOTSUPP ++#define TARGET_EOPNOTSUPP 45 ++#undef TARGET_EPFNOSUPPORT ++#define TARGET_EPFNOSUPPORT 46 ++#undef TARGET_EAFNOSUPPORT ++#define TARGET_EAFNOSUPPORT 47 ++#undef TARGET_EADDRINUSE ++#define TARGET_EADDRINUSE 48 ++#undef TARGET_EADDRNOTAVAIL ++#define TARGET_EADDRNOTAVAIL 49 ++#undef TARGET_ENETDOWN ++#define TARGET_ENETDOWN 50 ++#undef TARGET_ENETUNREACH ++#define TARGET_ENETUNREACH 51 ++#undef TARGET_ENETRESET ++#define TARGET_ENETRESET 52 ++#undef TARGET_ECONNABORTED ++#define TARGET_ECONNABORTED 53 ++#undef TARGET_ECONNRESET ++#define TARGET_ECONNRESET 54 ++#undef TARGET_ENOBUFS ++#define TARGET_ENOBUFS 55 ++#undef TARGET_EISCONN ++#define TARGET_EISCONN 56 ++#undef TARGET_ENOTCONN ++#define TARGET_ENOTCONN 57 ++#undef TARGET_ESHUTDOWN ++#define TARGET_ESHUTDOWN 58 ++#undef TARGET_ETOOMANYREFS ++#define TARGET_ETOOMANYREFS 59 ++#undef TARGET_ETIMEDOUT ++#define TARGET_ETIMEDOUT 60 ++#undef TARGET_ECONNREFUSED ++#define TARGET_ECONNREFUSED 61 ++#undef TARGET_ELOOP ++#define TARGET_ELOOP 62 ++#undef TARGET_ENAMETOOLONG ++#define TARGET_ENAMETOOLONG 63 ++#undef TARGET_EHOSTDOWN ++#define TARGET_EHOSTDOWN 64 ++#undef TARGET_EHOSTUNREACH ++#define TARGET_EHOSTUNREACH 65 ++#undef TARGET_ENOTEMPTY ++#define TARGET_ENOTEMPTY 66 ++/* Unused 67 */ ++#undef TARGET_EUSERS ++#define TARGET_EUSERS 68 ++#undef TARGET_EDQUOT ++#define TARGET_EDQUOT 69 ++#undef TARGET_ESTALE ++#define TARGET_ESTALE 70 ++#undef TARGET_EREMOTE 
++#define TARGET_EREMOTE 71
++/* Unused 72-76 */
++#undef TARGET_ENOLCK
++#define TARGET_ENOLCK 77
++#undef TARGET_ENOSYS
++#define TARGET_ENOSYS 78
++/* Unused 79 */
++#undef TARGET_ENOMSG
++#define TARGET_ENOMSG 80
++#undef TARGET_EIDRM
++#define TARGET_EIDRM 81
++#undef TARGET_ENOSR
++#define TARGET_ENOSR 82
++#undef TARGET_ETIME
++#define TARGET_ETIME 83
++#undef TARGET_EBADMSG
++#define TARGET_EBADMSG 84
++#undef TARGET_EPROTO
++#define TARGET_EPROTO 85
++#undef TARGET_ENODATA
++#define TARGET_ENODATA 86
++#undef TARGET_ENOSTR
++#define TARGET_ENOSTR 87
++#undef TARGET_ECHRNG
++#define TARGET_ECHRNG 88
++#undef TARGET_EL2NSYNC
++#define TARGET_EL2NSYNC 89
++#undef TARGET_EL3HLT
++#define TARGET_EL3HLT 90
++#undef TARGET_EL3RST
++#define TARGET_EL3RST 91
++#undef TARGET_ENOPKG
++#define TARGET_ENOPKG 92
++#undef TARGET_ELNRNG
++#define TARGET_ELNRNG 93
++#undef TARGET_EUNATCH
++#define TARGET_EUNATCH 94
++#undef TARGET_ENOCSI
++#define TARGET_ENOCSI 95
++#undef TARGET_EL2HLT
++#define TARGET_EL2HLT 96
++#undef TARGET_EBADE
++#define TARGET_EBADE 97
++#undef TARGET_EBADR
++#define TARGET_EBADR 98
++#undef TARGET_EXFULL
++#define TARGET_EXFULL 99
++#undef TARGET_ENOANO
++#define TARGET_ENOANO 100
++#undef TARGET_EBADRQC
++#define TARGET_EBADRQC 101
++#undef TARGET_EBADSLT
++#define TARGET_EBADSLT 102
++/* Unused 103 */
++#undef TARGET_EBFONT
++#define TARGET_EBFONT 104
++#undef TARGET_ENONET
++#define TARGET_ENONET 105
++#undef TARGET_ENOLINK
++#define TARGET_ENOLINK 106
++#undef TARGET_EADV
++#define TARGET_EADV 107
++#undef TARGET_ESRMNT
++#define TARGET_ESRMNT 108
++#undef TARGET_ECOMM
++#define TARGET_ECOMM 109
++#undef TARGET_EMULTIHOP
++#define TARGET_EMULTIHOP 110
++#undef TARGET_EDOTDOT
++#define TARGET_EDOTDOT 111
++#undef TARGET_EOVERFLOW
++#define TARGET_EOVERFLOW 112
++#undef TARGET_ENOTUNIQ
++#define TARGET_ENOTUNIQ 113
++#undef TARGET_EBADFD
++#define TARGET_EBADFD 114
++#undef TARGET_EREMCHG
++#define TARGET_EREMCHG 115
++#undef TARGET_EILSEQ
++#define TARGET_EILSEQ 116
++/* Same as default 117-121 */
++#undef TARGET_ELIBACC
++#define TARGET_ELIBACC 122
++#undef TARGET_ELIBBAD
++#define TARGET_ELIBBAD 123
++#undef TARGET_ELIBSCN
++#define TARGET_ELIBSCN 124
++#undef TARGET_ELIBMAX
++#define TARGET_ELIBMAX 125
++#undef TARGET_ELIBEXEC
++#define TARGET_ELIBEXEC 126
++#undef TARGET_ERESTART
++#define TARGET_ERESTART 127
++#undef TARGET_ESTRPIPE
++#define TARGET_ESTRPIPE 128
++#undef TARGET_ENOMEDIUM
++#define TARGET_ENOMEDIUM 129
++#undef TARGET_EMEDIUMTYPE
++#define TARGET_EMEDIUMTYPE 130
++#undef TARGET_ECANCELED
++#define TARGET_ECANCELED 131
++#undef TARGET_ENOKEY
++#define TARGET_ENOKEY 132
++#undef TARGET_EKEYEXPIRED
++#define TARGET_EKEYEXPIRED 133
++#undef TARGET_EKEYREVOKED
++#define TARGET_EKEYREVOKED 134
++#undef TARGET_EKEYREJECTED
++#define TARGET_EKEYREJECTED 135
++#undef TARGET_EOWNERDEAD
++#define TARGET_EOWNERDEAD 136
++#undef TARGET_ENOTRECOVERABLE
++#define TARGET_ENOTRECOVERABLE 137
++#undef TARGET_ERFKILL
++#define TARGET_ERFKILL 138
++#undef TARGET_EHWPOISON
++#define TARGET_EHWPOISON 139
++
++#endif
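The override block above exists because SW64, like Alpha, keeps the OSF/1-style errno layout, so most values past the classic Unix set differ from the generic Linux numbering. Below is a minimal standalone sketch of the host-to-target translation that linux-user must apply on the syscall return path; the helper name is hypothetical and the host values assume an x86-64 Linux host, so this illustrates the idea rather than QEMU's actual table.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the TARGET_E* values defined above. */
#define TARGET_EAGAIN        35
#define TARGET_EDEADLK       11
#define TARGET_ENAMETOOLONG  63

/* Hypothetical sketch: convert a host errno to the SW64 guest value. */
static int host_to_target_errno_sketch(int host_errno)
{
    switch (host_errno) {
    case EAGAIN:       return TARGET_EAGAIN;       /* 11 -> 35 */
    case EDEADLK:      return TARGET_EDEADLK;      /* 35 -> 11 */
    case ENAMETOOLONG: return TARGET_ENAMETOOLONG; /* 36 -> 63 */
    default:           return host_errno;          /* identity elsewhere */
    }
}

int main(void)
{
    printf("EAGAIN: host %d -> guest %d\n",
           EAGAIN, host_to_target_errno_sketch(EAGAIN));
    return 0;
}

Note how EAGAIN and EDEADLK effectively trade places between the two ABIs, which is exactly why the generic definitions have to be #undef'd first.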
+diff --git a/linux-user/sw64/target_fcntl.h b/linux-user/sw64/target_fcntl.h
+new file mode 100644
+index 0000000000..9721e3de39
+--- /dev/null
++++ b/linux-user/sw64/target_fcntl.h
+@@ -0,0 +1,11 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation, or (at your option) any
++ * later version. See the COPYING file in the top-level directory.
++ */
++
++#ifndef SW64_TARGET_FCNTL_H
++#define SW64_TARGET_FCNTL_H
++#include "../generic/fcntl.h"
++#endif
+diff --git a/linux-user/sw64/target_mman.h b/linux-user/sw64/target_mman.h
+new file mode 100644
+index 0000000000..ed222080c7
+--- /dev/null
++++ b/linux-user/sw64/target_mman.h
+@@ -0,0 +1,12 @@
++/*
++ * arch/sw64/include/asm/processor.h:
++ *
++ * TASK_UNMAPPED_BASE TASK_SIZE / 2
++ * TASK_SIZE 0x40000000000UL
++ */
++#define TASK_UNMAPPED_BASE 0x20000000000ull
++
++/* arch/sw64/include/asm/elf.h */
++#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
++
++#include "../generic/target_mman.h"
+diff --git a/linux-user/sw64/target_prctl.h b/linux-user/sw64/target_prctl.h
+new file mode 100644
+index 0000000000..eb53b31ad5
+--- /dev/null
++++ b/linux-user/sw64/target_prctl.h
+@@ -0,0 +1 @@
++/* No special prctl support required. */
+diff --git a/linux-user/sw64/target_proc.h b/linux-user/sw64/target_proc.h
+new file mode 100644
+index 0000000000..43fe29ca72
+--- /dev/null
++++ b/linux-user/sw64/target_proc.h
+@@ -0,0 +1 @@
++/* No target-specific /proc support */
+diff --git a/linux-user/sw64/target_resource.h b/linux-user/sw64/target_resource.h
+new file mode 100644
+index 0000000000..227259594c
+--- /dev/null
++++ b/linux-user/sw64/target_resource.h
+@@ -0,0 +1 @@
++#include "../generic/target_resource.h"
+diff --git a/linux-user/sw64/target_signal.h b/linux-user/sw64/target_signal.h
+new file mode 100644
+index 0000000000..8cc1693b05
+--- /dev/null
++++ b/linux-user/sw64/target_signal.h
+@@ -0,0 +1,100 @@
++#ifndef SW64_TARGET_SIGNAL_H
++#define SW64_TARGET_SIGNAL_H
++
++#include "cpu.h"
++
++#define TARGET_SIGHUP 1
++#define TARGET_SIGINT 2
++#define TARGET_SIGQUIT 3
++#define TARGET_SIGILL 4
++#define TARGET_SIGTRAP 5
++#define TARGET_SIGABRT 6
++#define TARGET_SIGSTKFLT 7 /* actually SIGEMT */
++#define TARGET_SIGFPE 8
++#define TARGET_SIGKILL 9
++#define TARGET_SIGBUS 10
++#define TARGET_SIGSEGV 11
++#define TARGET_SIGSYS 12
++#define TARGET_SIGPIPE 13
++#define TARGET_SIGALRM 14
++#define TARGET_SIGTERM 15
++#define TARGET_SIGURG 16
++#define TARGET_SIGSTOP 17
++#define TARGET_SIGTSTP 18
++#define TARGET_SIGCONT 19
++#define TARGET_SIGCHLD 20
++#define TARGET_SIGTTIN 21
++#define TARGET_SIGTTOU 22
++#define TARGET_SIGIO 23
++#define TARGET_SIGXCPU 24
++#define TARGET_SIGXFSZ 25
++#define TARGET_SIGVTALRM 26
++#define TARGET_SIGPROF 27
++#define TARGET_SIGWINCH 28
++#define TARGET_SIGPWR 29 /* actually SIGINFO */
++#define TARGET_SIGUSR1 30
++#define TARGET_SIGUSR2 31
++#define TARGET_SIGRTMIN 32
++
++#define TARGET_SIG_BLOCK 1
++#define TARGET_SIG_UNBLOCK 2
++#define TARGET_SIG_SETMASK 3
++
++/* this struct defines a stack used during syscall handling */
++
++typedef struct target_sigaltstack {
++    abi_ulong ss_sp;
++    int32_t ss_flags;
++    int32_t dummy;
++    abi_ulong ss_size;
++} target_stack_t;
++
++
++/*
++ * sigaltstack controls
++ */
++#define TARGET_SS_ONSTACK 1
++#define TARGET_SS_DISABLE 2
++
++#define TARGET_SA_ONSTACK 0x00000001
++#define TARGET_SA_RESTART 0x00000002
++#define TARGET_SA_NOCLDSTOP 0x00000004
++#define TARGET_SA_NODEFER 0x00000008
++#define TARGET_SA_RESETHAND 0x00000010
++#define TARGET_SA_NOCLDWAIT 0x00000020 /* not supported yet */
++#define TARGET_SA_SIGINFO 0x00000040
++
++#define TARGET_MINSIGSTKSZ 4096
++#define TARGET_SIGSTKSZ 16384
++
++/* From <asm/gentrap.h>. */
++#define TARGET_GEN_INTOVF -1 /* integer overflow */
++#define TARGET_GEN_INTDIV -2 /* integer division by zero */
++#define TARGET_GEN_FLTOVF -3 /* fp overflow */
++#define TARGET_GEN_FLTDIV -4 /* fp division by zero */
++#define TARGET_GEN_FLTUND -5 /* fp underflow */
++#define TARGET_GEN_FLTINV -6 /* invalid fp operand */
++#define TARGET_GEN_FLTINE -7 /* inexact fp operand */
++#define TARGET_GEN_DECOVF -8 /* decimal overflow (for COBOL??) */
++#define TARGET_GEN_DECDIV -9 /* decimal division by zero */
++#define TARGET_GEN_DECINV -10 /* invalid decimal operand */
++#define TARGET_GEN_ROPRAND -11 /* reserved operand */
++#define TARGET_GEN_ASSERTERR -12 /* assertion error */
++#define TARGET_GEN_NULPTRERR -13 /* null pointer error */
++#define TARGET_GEN_STKOVF -14 /* stack overflow */
++#define TARGET_GEN_STRLENERR -15 /* string length error */
++#define TARGET_GEN_SUBSTRERR -16 /* substring error */
++#define TARGET_GEN_RANGERR -17 /* range error */
++#define TARGET_GEN_SUBRNG -18
++#define TARGET_GEN_SUBRNG1 -19
++#define TARGET_GEN_SUBRNG2 -20
++#define TARGET_GEN_SUBRNG3 -21
++#define TARGET_GEN_SUBRNG4 -22
++#define TARGET_GEN_SUBRNG5 -23
++#define TARGET_GEN_SUBRNG6 -24
++#define TARGET_GEN_SUBRNG7 -25
++
++#define TARGET_ARCH_HAS_SETUP_FRAME
++#define TARGET_ARCH_HAS_KA_RESTORER
++#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
++#endif /* SW64_TARGET_SIGNAL_H */
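As with errno, the signal numbers above follow the OSF/1 (BSD-style) layout rather than the generic Linux one, so linux-user has to translate between host and guest numbering whenever it delivers a signal. Below is a hand-picked illustrative table assuming an x86-64 Linux host; real linux-user builds a complete bidirectional table at startup, so treat this as a sketch.

#include <signal.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative host (x86-64 Linux) -> SW64 guest signal pairs, with the
 * guest values taken from the TARGET_SIG* definitions above. */
static const struct { int host; int target; const char *name; } sig_map[] = {
    { SIGBUS,  10, "SIGBUS"  }, /* host 7  */
    { SIGUSR1, 30, "SIGUSR1" }, /* host 10 */
    { SIGUSR2, 31, "SIGUSR2" }, /* host 12 */
    { SIGCHLD, 20, "SIGCHLD" }, /* host 17 */
    { SIGSTOP, 17, "SIGSTOP" }, /* host 19 */
};

int main(void)
{
    for (size_t i = 0; i < sizeof(sig_map) / sizeof(sig_map[0]); i++) {
        printf("%-8s host %2d -> guest %2d\n",
               sig_map[i].name, sig_map[i].host, sig_map[i].target);
    }
    return 0;
}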
+diff --git a/linux-user/sw64/target_structs.h b/linux-user/sw64/target_structs.h
+new file mode 100644
+index 0000000000..7c13dc4bac
+--- /dev/null
++++ b/linux-user/sw64/target_structs.h
+@@ -0,0 +1,47 @@
++/*
++ * SW64 specific structures for linux-user
++ *
++ * Copyright (c) 2018 Lin Hainan
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ */
++#ifndef SW64_TARGET_STRUCTS_H
++#define SW64_TARGET_STRUCTS_H
++
++/* TODO: maybe this should be updated; it currently differs from other arches. */
++struct target_ipc_perm {
++    abi_int __key;     /* Key. */
++    abi_uint uid;      /* Owner's user ID. */
++    abi_uint gid;      /* Owner's group ID. */
++    abi_uint cuid;     /* Creator's user ID. */
++    abi_uint cgid;     /* Creator's group ID. */
++    abi_uint mode;     /* Read/write permission. */
++    abi_ushort __seq;  /* Sequence number. */
++    abi_ushort __pad1;
++    abi_ulong __unused1;
++    abi_ulong __unused2;
++};
++
++struct target_shmid_ds {
++    struct target_ipc_perm shm_perm; /* operation permission struct */
++    abi_long shm_segsz;              /* size of segment in bytes */
++    abi_ulong shm_atime;             /* time of last shmat() */
++    abi_ulong shm_dtime;             /* time of last shmdt() */
++    abi_ulong shm_ctime;             /* time of last change by shmctl() */
++    abi_int shm_cpid;                /* pid of creator */
++    abi_int shm_lpid;                /* pid of last shmop */
++    abi_ulong shm_nattch;            /* number of current attaches */
++    abi_ulong __unused1;
++    abi_ulong __unused2;
++};
++
++#endif
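Because these structures are copied field by field between host and guest memory, their exact layout matters. Here is a compile-time sketch of the layout implied above, under the assumption that the 64-bit SW64 ABI maps abi_int/abi_uint to 32 bits and abi_long/abi_ulong to 64 bits; the mirror types and the computed offsets are illustrative, not taken from the patch.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct sketch_ipc_perm {            /* mirrors target_ipc_perm above */
    int32_t  key;
    uint32_t uid, gid, cuid, cgid, mode;
    uint16_t seq, pad1;
    uint64_t unused1, unused2;
};

struct sketch_shmid_ds {            /* mirrors target_shmid_ds above */
    struct sketch_ipc_perm shm_perm;
    int64_t  shm_segsz;
    uint64_t shm_atime, shm_dtime, shm_ctime;
    int32_t  shm_cpid, shm_lpid;
    uint64_t shm_nattch;
    uint64_t unused1, unused2;
};

/* The compiler inserts 4 bytes of padding after pad1 to align unused1;
 * the explicit __pad1/__unused fields cover the rest of the slack. */
_Static_assert(sizeof(struct sketch_ipc_perm) == 48, "ipc_perm size");
_Static_assert(offsetof(struct sketch_shmid_ds, shm_nattch) == 88, "nattch offset");
_Static_assert(sizeof(struct sketch_shmid_ds) == 112, "shmid_ds size");

int main(void)
{
    printf("sketch shmid_ds is %zu bytes\n", sizeof(struct sketch_shmid_ds));
    return 0;
}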
+diff --git a/linux-user/sw64/target_syscall.h b/linux-user/sw64/target_syscall.h
+new file mode 100644
+index 0000000000..418905110c
+--- /dev/null
++++ b/linux-user/sw64/target_syscall.h
+@@ -0,0 +1,125 @@
++#ifndef SW64_TARGET_SYSCALL_H
++#define SW64_TARGET_SYSCALL_H
++
++/* TODO */
++struct target_pt_regs {
++    abi_ulong r0;
++    abi_ulong r1;
++    abi_ulong r2;
++    abi_ulong r3;
++    abi_ulong r4;
++    abi_ulong r5;
++    abi_ulong r6;
++    abi_ulong r7;
++    abi_ulong r8;
++    abi_ulong r19;
++    abi_ulong r20;
++    abi_ulong r21;
++    abi_ulong r22;
++    abi_ulong r23;
++    abi_ulong r24;
++    abi_ulong r25;
++    abi_ulong r26;
++    abi_ulong r27;
++    abi_ulong r28;
++    abi_ulong hae;
++/* JRP - These are the values provided to a0-a2 by hmcode */
++    abi_ulong trap_a0;
++    abi_ulong trap_a1;
++    abi_ulong trap_a2;
++/* These are saved by hmcode: */
++    abi_ulong ps;
++    abi_ulong pc;
++    abi_ulong gp;
++    abi_ulong r16;
++    abi_ulong r17;
++    abi_ulong r18;
++/* These are needed by QEMU to temporarily store the user stack pointer */
++    abi_ulong usp;
++    abi_ulong unique;
++};
++
++
++#define TARGET_MCL_CURRENT 0x2000
++#define TARGET_MCL_FUTURE 0x4000
++#define TARGET_MCL_ONFAULT 0x8000
++
++#define UNAME_MACHINE "sw64"
++#define UNAME_MINIMUM_RELEASE "2.6.32"
++#undef TARGET_EOPNOTSUPP
++#define TARGET_EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
++#define SWCR_STATUS_INV0 (1UL<<17)
++#define SWCR_STATUS_DZE0 (1UL<<18)
++#define SWCR_STATUS_OVF0 (1UL<<19)
++#define SWCR_STATUS_UNF0 (1UL<<20)
++#define SWCR_STATUS_INE0 (1UL<<21)
++#define SWCR_STATUS_DNO0 (1UL<<22)
++
++#define SWCR_STATUS_MASK0 (SWCR_STATUS_INV0 | SWCR_STATUS_DZE0 | \
++                           SWCR_STATUS_OVF0 | SWCR_STATUS_UNF0 | \
++                           SWCR_STATUS_INE0 | SWCR_STATUS_DNO0)
++
++#define SWCR_STATUS0_TO_EXCSUM_SHIFT 16
++
++#define SWCR_STATUS_INV1 (1UL<<23)
++#define SWCR_STATUS_DZE1 (1UL<<24)
++#define SWCR_STATUS_OVF1 (1UL<<25)
++#define SWCR_STATUS_UNF1 (1UL<<26)
++#define SWCR_STATUS_INE1 (1UL<<27)
++#define SWCR_STATUS_DNO1 (1UL<<28)
++
++#define SWCR_STATUS_MASK1 (SWCR_STATUS_INV1 | SWCR_STATUS_DZE1 | \
++                           SWCR_STATUS_OVF1 | SWCR_STATUS_UNF1 | \
++                           SWCR_STATUS_INE1 | SWCR_STATUS_DNO1)
++
++#define SWCR_STATUS1_TO_EXCSUM_SHIFT 22
++#define SWCR_STATUS_INV2 (1UL<<34)
++#define SWCR_STATUS_DZE2 (1UL<<35)
++#define SWCR_STATUS_OVF2 (1UL<<36)
++#define SWCR_STATUS_UNF2 (1UL<<37)
++#define SWCR_STATUS_INE2 (1UL<<38)
++#define SWCR_STATUS_DNO2 (1UL<<39)
++
++#define SWCR_STATUS_MASK2 (SWCR_STATUS_INV2 | SWCR_STATUS_DZE2 | \
++                           SWCR_STATUS_OVF2 | SWCR_STATUS_UNF2 | \
++                           SWCR_STATUS_INE2 | SWCR_STATUS_DNO2)
++#define SWCR_STATUS_INV3 (1UL<<40)
++#define SWCR_STATUS_DZE3 (1UL<<41)
++#define SWCR_STATUS_OVF3 (1UL<<42)
++#define SWCR_STATUS_UNF3 (1UL<<43)
++#define SWCR_STATUS_INE3 (1UL<<44)
++#define SWCR_STATUS_DNO3 (1UL<<45)
++
++#define SWCR_STATUS_MASK3 (SWCR_STATUS_INV3 | SWCR_STATUS_DZE3 | \
++                           SWCR_STATUS_OVF3 | SWCR_STATUS_UNF3 | \
++                           SWCR_STATUS_INE3 | 
SWCR_STATUS_DNO3) ++#define SWCR_TRAP_ENABLE_INV (1UL<<1) /* invalid op */ ++#define SWCR_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */ ++#define SWCR_TRAP_ENABLE_OVF (1UL<<3) /* overflow */ ++#define SWCR_TRAP_ENABLE_UNF (1UL<<4) /* underflow */ ++#define SWCR_TRAP_ENABLE_INE (1UL<<5) /* inexact */ ++#define SWCR_TRAP_ENABLE_DNO (1UL<<6) /* denorm */ ++#define SWCR_TRAP_ENABLE_MASK (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | \ ++ SWCR_TRAP_ENABLE_OVF | SWCR_TRAP_ENABLE_UNF | \ ++ SWCR_TRAP_ENABLE_INE | SWCR_TRAP_ENABLE_DNO) ++ ++/* Denorm and Underflow flushing */ ++#define SWCR_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */ ++#define SWCR_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */ ++ ++#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ) ++ ++/* status bits coming from fpcr: */ ++#define SWCR_STATUS_INV (1UL<<17) ++#define SWCR_STATUS_DZE (1UL<<18) ++#define SWCR_STATUS_OVF (1UL<<19) ++#define SWCR_STATUS_UNF (1UL<<20) ++#define SWCR_STATUS_INE (1UL<<21) ++#define SWCR_STATUS_DNO (1UL<<22) ++ ++#define SWCR_STATUS_MASK (SWCR_STATUS_INV | SWCR_STATUS_DZE | \ ++ SWCR_STATUS_OVF | SWCR_STATUS_UNF | \ ++ SWCR_STATUS_INE | SWCR_STATUS_DNO) ++#define TARGET_GSI_IEEE_FP_CONTROL 45 ++#define TARGET_SSI_IEEE_FP_CONTROL 14 ++#endif +diff --git a/linux-user/sw64/termbits.h b/linux-user/sw64/termbits.h +new file mode 100644 +index 0000000000..5c40efcb20 +--- /dev/null ++++ b/linux-user/sw64/termbits.h +@@ -0,0 +1,266 @@ ++typedef unsigned char target_cc_t; ++typedef unsigned int target_speed_t; ++typedef unsigned int target_tcflag_t; ++ ++#define TARGET_NCCS 19 ++struct target_termios { ++ target_tcflag_t c_iflag; /* input mode flags */ ++ target_tcflag_t c_oflag; /* output mode flags */ ++ target_tcflag_t c_cflag; /* control mode flags */ ++ target_tcflag_t c_lflag; /* local mode flags */ ++ target_cc_t c_cc[TARGET_NCCS]; /* control characters */ ++ target_cc_t c_line; /* line discipline (== c_cc[19]) */ ++ target_speed_t c_ispeed; /* input speed */ ++ target_speed_t c_ospeed; /* output speed */ ++}; ++ ++/* c_cc characters */ ++#define TARGET_VEOF 0 ++#define TARGET_VEOL 1 ++#define TARGET_VEOL2 2 ++#define TARGET_VERASE 3 ++#define TARGET_VWERASE 4 ++#define TARGET_VKILL 5 ++#define TARGET_VREPRINT 6 ++#define TARGET_VSWTC 7 ++#define TARGET_VINTR 8 ++#define TARGET_VQUIT 9 ++#define TARGET_VSUSP 10 ++#define TARGET_VSTART 12 ++#define TARGET_VSTOP 13 ++#define TARGET_VLNEXT 14 ++#define TARGET_VDISCARD 15 ++#define TARGET_VMIN 16 ++#define TARGET_VTIME 17 ++ ++/* c_iflag bits */ ++#define TARGET_IGNBRK 0000001 ++#define TARGET_BRKINT 0000002 ++#define TARGET_IGNPAR 0000004 ++#define TARGET_PARMRK 0000010 ++#define TARGET_INPCK 0000020 ++#define TARGET_ISTRIP 0000040 ++#define TARGET_INLCR 0000100 ++#define TARGET_IGNCR 0000200 ++#define TARGET_ICRNL 0000400 ++#define TARGET_IXON 0001000 ++#define TARGET_IXOFF 0002000 ++#define TARGET_IXANY 0004000 ++#define TARGET_IUCLC 0010000 ++#define TARGET_IMAXBEL 0020000 ++#define TARGET_IUTF8 0040000 ++ ++/* c_oflag bits */ ++#define TARGET_OPOST 0000001 ++#define TARGET_ONLCR 0000002 ++#define TARGET_OLCUC 0000004 ++ ++#define TARGET_OCRNL 0000010 ++#define TARGET_ONOCR 0000020 ++#define TARGET_ONLRET 0000040 ++ ++#define TARGET_OFILL 00000100 ++#define TARGET_OFDEL 00000200 ++#define TARGET_NLDLY 00001400 ++#define TARGET_NL0 00000000 ++#define TARGET_NL1 00000400 ++#define TARGET_NL2 00001000 ++#define TARGET_NL3 00001400 ++#define TARGET_TABDLY 00006000 ++#define TARGET_TAB0 00000000 ++#define TARGET_TAB1 00002000 ++#define 
TARGET_TAB2 00004000 ++#define TARGET_TAB3 00006000 ++#define TARGET_CRDLY 00030000 ++#define TARGET_CR0 00000000 ++#define TARGET_CR1 00010000 ++#define TARGET_CR2 00020000 ++#define TARGET_CR3 00030000 ++#define TARGET_FFDLY 00040000 ++#define TARGET_FF0 00000000 ++#define TARGET_FF1 00040000 ++#define TARGET_BSDLY 00100000 ++#define TARGET_BS0 00000000 ++#define TARGET_BS1 00100000 ++#define TARGET_VTDLY 00200000 ++#define TARGET_VT0 00000000 ++#define TARGET_VT1 00200000 ++#define TARGET_XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */ ++ ++/* c_cflag bit meaning */ ++#define TARGET_CBAUD 0000037 ++#define TARGET_B0 0000000 /* hang up */ ++#define TARGET_B50 0000001 ++#define TARGET_B75 0000002 ++#define TARGET_B110 0000003 ++#define TARGET_B134 0000004 ++#define TARGET_B150 0000005 ++#define TARGET_B200 0000006 ++#define TARGET_B300 0000007 ++#define TARGET_B600 0000010 ++#define TARGET_B1200 0000011 ++#define TARGET_B1800 0000012 ++#define TARGET_B2400 0000013 ++#define TARGET_B4800 0000014 ++#define TARGET_B9600 0000015 ++#define TARGET_B19200 0000016 ++#define TARGET_B38400 0000017 ++#define TARGET_EXTA B19200 ++#define TARGET_EXTB B38400 ++#define TARGET_CBAUDEX 0000000 ++#define TARGET_B57600 00020 ++#define TARGET_B115200 00021 ++#define TARGET_B230400 00022 ++#define TARGET_B460800 00023 ++#define TARGET_B500000 00024 ++#define TARGET_B576000 00025 ++#define TARGET_B921600 00026 ++#define TARGET_B1000000 00027 ++#define TARGET_B1152000 00030 ++#define TARGET_B1500000 00031 ++#define TARGET_B2000000 00032 ++#define TARGET_B2500000 00033 ++#define TARGET_B3000000 00034 ++#define TARGET_B3500000 00035 ++#define TARGET_B4000000 00036 ++ ++#define TARGET_CSIZE 00001400 ++#define TARGET_CS5 00000000 ++#define TARGET_CS6 00000400 ++#define TARGET_CS7 00001000 ++#define TARGET_CS8 00001400 ++ ++#define TARGET_CSTOPB 00002000 ++#define TARGET_CREAD 00004000 ++#define TARGET_PARENB 00010000 ++#define TARGET_PARODD 00020000 ++#define TARGET_HUPCL 00040000 ++ ++#define TARGET_CLOCAL 00100000 ++#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ ++#define TARGET_CRTSCTS 020000000000 /* flow control */ ++ ++/* c_lflag bits */ ++#define TARGET_ISIG 0x00000080 ++#define TARGET_ICANON 0x00000100 ++#define TARGET_XCASE 0x00004000 ++#define TARGET_ECHO 0x00000008 ++#define TARGET_ECHOE 0x00000002 ++#define TARGET_ECHOK 0x00000004 ++#define TARGET_ECHONL 0x00000010 ++#define TARGET_NOFLSH 0x80000000 ++#define TARGET_TOSTOP 0x00400000 ++#define TARGET_ECHOCTL 0x00000040 ++#define TARGET_ECHOPRT 0x00000020 ++#define TARGET_ECHOKE 0x00000001 ++#define TARGET_FLUSHO 0x00800000 ++#define TARGET_PENDIN 0x20000000 ++#define TARGET_IEXTEN 0x00000400 ++#define TARGET_EXTPROC 0x10000000 ++ ++#define TARGET_FIOCLEX TARGET_IO('f', 1) ++#define TARGET_FIONCLEX TARGET_IO('f', 2) ++#define TARGET_FIOASYNC TARGET_IOW('f', 125, int) ++#define TARGET_FIONBIO TARGET_IOW('f', 126, int) ++#define TARGET_FIONREAD TARGET_IOR('f', 127, int) ++#define TARGET_TIOCINQ FIONREAD ++#define TARGET_FIOQSIZE TARGET_IOR('f', 128, loff_t) ++ ++#define TARGET_TIOCGETP TARGET_IOR('t', 8, struct target_sgttyb) ++#define TARGET_TIOCSETP TARGET_IOW('t', 9, struct target_sgttyb) ++#define TARGET_TIOCSETN TARGET_IOW('t', 10, struct target_sgttyb) /* TIOCSETP wo flush */ ++ ++#define TARGET_TIOCSETC TARGET_IOW('t', 17, struct target_tchars) ++#define TARGET_TIOCGETC TARGET_IOR('t', 18, struct target_tchars) ++#define TARGET_TCGETS TARGET_IOR('t', 19, struct target_termios) ++#define TARGET_TCSETS 
TARGET_IOW('t', 20, struct target_termios) ++#define TARGET_TCSETSW TARGET_IOW('t', 21, struct target_termios) ++#define TARGET_TCSETSF TARGET_IOW('t', 22, struct target_termios) ++ ++#define TARGET_TCGETA TARGET_IOR('t', 23, struct target_termio) ++#define TARGET_TCSETA TARGET_IOW('t', 24, struct target_termio) ++#define TARGET_TCSETAW TARGET_IOW('t', 25, struct target_termio) ++#define TARGET_TCSETAF TARGET_IOW('t', 28, struct target_termio) ++ ++#define TARGET_TCSBRK TARGET_IO('t', 29) ++#define TARGET_TCXONC TARGET_IO('t', 30) ++#define TARGET_TCFLSH TARGET_IO('t', 31) ++ ++#define TARGET_TIOCSWINSZ TARGET_IOW('t', 103, struct target_winsize) ++#define TARGET_TIOCGWINSZ TARGET_IOR('t', 104, struct target_winsize) ++#define TARGET_TIOCSTART TARGET_IO('t', 110) /* start output, like ^Q */ ++#define TARGET_TIOCSTOP TARGET_IO('t', 111) /* stop output, like ^S */ ++#define TARGET_TIOCOUTQ TARGET_IOR('t', 115, int) /* output queue size */ ++ ++#define TARGET_TIOCGLTC TARGET_IOR('t', 116, struct target_ltchars) ++#define TARGET_TIOCSLTC TARGET_IOW('t', 117, struct target_ltchars) ++#define TARGET_TIOCSPGRP TARGET_IOW('t', 118, int) ++#define TARGET_TIOCGPGRP TARGET_IOR('t', 119, int) ++ ++#define TARGET_TIOCEXCL 0x540C ++#define TARGET_TIOCNXCL 0x540D ++#define TARGET_TIOCSCTTY 0x540E ++ ++#define TARGET_TIOCSTI 0x5412 ++#define TARGET_TIOCMGET 0x5415 ++#define TARGET_TIOCMBIS 0x5416 ++#define TARGET_TIOCMBIC 0x5417 ++#define TARGET_TIOCMSET 0x5418 ++# define TARGET_TIOCM_LE 0x001 ++# define TARGET_TIOCM_DTR 0x002 ++# define TARGET_TIOCM_RTS 0x004 ++# define TARGET_TIOCM_ST 0x008 ++# define TARGET_TIOCM_SR 0x010 ++# define TARGET_TIOCM_CTS 0x020 ++# define TARGET_TIOCM_CAR 0x040 ++# define TARGET_TIOCM_RNG 0x080 ++# define TARGET_TIOCM_DSR 0x100 ++# define TARGET_TIOCM_CD TIOCM_CAR ++# define TARGET_TIOCM_RI TIOCM_RNG ++# define TARGET_TIOCM_OUT1 0x2000 ++# define TARGET_TIOCM_OUT2 0x4000 ++# define TARGET_TIOCM_LOOP 0x8000 ++ ++#define TARGET_TIOCGSOFTCAR 0x5419 ++#define TARGET_TIOCSSOFTCAR 0x541A ++#define TARGET_TIOCLINUX 0x541C ++#define TARGET_TIOCCONS 0x541D ++#define TARGET_TIOCGSERIAL 0x541E ++#define TARGET_TIOCSSERIAL 0x541F ++#define TARGET_TIOCPKT 0x5420 ++# define TARGET_TIOCPKT_DATA 0 ++# define TARGET_TIOCPKT_FLUSHREAD 1 ++# define TARGET_TIOCPKT_FLUSHWRITE 2 ++# define TARGET_TIOCPKT_STOP 4 ++# define TARGET_TIOCPKT_START 8 ++# define TARGET_TIOCPKT_NOSTOP 16 ++# define TARGET_TIOCPKT_DOSTOP 32 ++ ++ ++#define TARGET_TIOCNOTTY 0x5422 ++#define TARGET_TIOCSETD 0x5423 ++#define TARGET_TIOCGETD 0x5424 ++#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ ++#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ ++#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ ++#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ ++#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ ++#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int) /* Lock/unlock Pty */ ++#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) /* Safely open the slave */ ++ ++#define TARGET_TIOCSERCONFIG 0x5453 ++#define TARGET_TIOCSERGWILD 0x5454 ++#define TARGET_TIOCSERSWILD 0x5455 ++#define TARGET_TIOCGLCKTRMIOS 0x5456 ++#define TARGET_TIOCSLCKTRMIOS 0x5457 ++#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ ++#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ ++ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ ++# define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ ++#define 
TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ ++#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ ++ ++#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ ++#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ ++#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ ++#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h +index 77ba343c85..bd1b1d2569 100644 +--- a/linux-user/syscall_defs.h ++++ b/linux-user/syscall_defs.h +@@ -85,7 +85,7 @@ + + #elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ + defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \ +- defined(TARGET_MIPS) ++ defined(TARGET_MIPS) || defined(TARGET_SW64) + + #define TARGET_IOC_SIZEBITS 13 + #define TARGET_IOC_DIRBITS 3 +@@ -2087,6 +2087,50 @@ struct target_stat64 { + + /* LoongArch no newfstatat/fstat syscall. */ + ++#elif defined(TARGET_SW64) ++ ++struct target_stat { ++ unsigned int st_dev; ++ unsigned int st_ino; ++ unsigned int st_mode; ++ unsigned int st_nlink; ++ unsigned int st_uid; ++ unsigned int st_gid; ++ unsigned int st_rdev; ++ abi_long st_size; ++ abi_ulong target_st_atime; ++ abi_ulong target_st_mtime; ++ abi_ulong target_st_ctime; ++ unsigned int st_blksize; ++ unsigned int st_blocks; ++ unsigned int st_flags; ++ unsigned int st_gen; ++}; ++ ++#define TARGET_HAS_STRUCT_STAT64 ++struct target_stat64 { ++ abi_ulong st_dev; ++ abi_ulong st_ino; ++ abi_ulong st_rdev; ++ abi_long st_size; ++ abi_ulong st_blocks; ++ ++ unsigned int st_mode; ++ unsigned int st_uid; ++ unsigned int st_gid; ++ unsigned int st_blksize; ++ unsigned int st_nlink; ++ unsigned int __pad0; ++ ++ abi_ulong target_st_atime; ++ abi_ulong target_st_atime_nsec; ++ abi_ulong target_st_mtime; ++ abi_ulong target_st_mtime_nsec; ++ abi_ulong target_st_ctime; ++ abi_ulong target_st_ctime_nsec; ++ abi_long __unused[3]; ++}; ++ + #else + #error unsupported CPU + #endif +diff --git a/meson.build b/meson.build +index 0c62b4156d..d92384c23a 100644 +--- a/meson.build ++++ b/meson.build +@@ -56,10 +56,14 @@ qapi_trace_events = [] + bsd_oses = ['gnu/kfreebsd', 'freebsd', 'netbsd', 'openbsd', 'dragonfly', 'darwin'] + supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] + supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64', +- 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64'] ++ 'arm', 'aarch64', 'loongarch64', 'mips', 'mips64', 'sparc64', 'sw64'] + + cpu = host_machine.cpu_family() + ++if cpu == 'sw_64' ++ cpu = 'sw64' ++endif ++ + target_dirs = config_host['TARGET_DIRS'].split() + have_linux_user = false + have_bsd_user = false +@@ -116,6 +120,8 @@ elif cpu in ['riscv64'] + kvm_targets = ['riscv64-softmmu'] + elif cpu in ['loongarch64'] + kvm_targets = ['loongarch64-softmmu'] ++elif cpu in ['sw64'] ++ kvm_targets = ['sw64-softmmu'] + else + kvm_targets = [] + endif +@@ -682,6 +688,8 @@ if get_option('tcg').allowed() + tcg_arch = 'i386' + elif host_arch == 'ppc64' + tcg_arch = 'ppc' ++ elif host_arch == 'sw64' ++ tcg_arch = 'sw64' + endif + add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch, + language: all_languages) +@@ -2883,6 +2891,7 @@ disassemblers = { + 'sparc' : ['CONFIG_SPARC_DIS'], + 'xtensa' : ['CONFIG_XTENSA_DIS'], + 'loongarch' : ['CONFIG_LOONGARCH_DIS'], ++ 'sw64' : ['CONFIG_SW64_DIS'], + } + + have_ivshmem = config_host_data.get('CONFIG_EVENTFD') +@@ 
-3335,6 +3344,7 @@ if have_system + 'hw/sparc', + 'hw/sparc64', + 'hw/ssi', ++ 'hw/sw64', + 'hw/timer', + 'hw/tpm', + 'hw/ufs', +@@ -4119,6 +4129,7 @@ if 'simple' in get_option('trace_backends') + summary_info += {'Trace output file': get_option('trace_file') + '-'} + endif + summary_info += {'D-Bus display': dbus_display} ++summary_info += {'vt-iommu': config_host.has_key('CONFIG_SW64_VT_IOMMU')} + summary_info += {'QOM debugging': get_option('qom_cast_debug')} + summary_info += {'Relocatable install': get_option('relocatable')} + summary_info += {'vhost-kernel support': have_vhost_kernel} +diff --git a/pc-bios/c3-uefi-bios-sw b/pc-bios/c3-uefi-bios-sw +new file mode 100755 +index 0000000000..095cc83a26 +Binary files /dev/null and b/pc-bios/c3-uefi-bios-sw differ +diff --git a/pc-bios/c4-uefi-bios-sw b/pc-bios/c4-uefi-bios-sw +new file mode 100644 +index 0000000000..3dd29837b7 +Binary files /dev/null and b/pc-bios/c4-uefi-bios-sw differ +diff --git a/pc-bios/core3-hmcode b/pc-bios/core3-hmcode +new file mode 100644 +index 0000000000..94bb0c2214 +Binary files /dev/null and b/pc-bios/core3-hmcode differ +diff --git a/pc-bios/core3-reset b/pc-bios/core3-reset +new file mode 100755 +index 0000000000..5880780b36 +Binary files /dev/null and b/pc-bios/core3-reset differ +diff --git a/pc-bios/core4-hmcode b/pc-bios/core4-hmcode +new file mode 100755 +index 0000000000..e2e47b9c17 +Binary files /dev/null and b/pc-bios/core4-hmcode differ +diff --git a/pc-bios/core4-reset b/pc-bios/core4-reset +new file mode 100755 +index 0000000000..ab5220ef7e +Binary files /dev/null and b/pc-bios/core4-reset differ +diff --git a/pc-bios/meson.build b/pc-bios/meson.build +index e67fa433a1..2001a4f4bc 100644 +--- a/pc-bios/meson.build ++++ b/pc-bios/meson.build +@@ -37,6 +37,12 @@ blobs = [ + 'vgabios-ramfb.bin', + 'vgabios-bochs-display.bin', + 'vgabios-ati.bin', ++ 'c3-uefi-bios-sw', ++ 'core3-reset', ++ 'core3-hmcode', ++ 'c4-uefi-bios-sw', ++ 'core4-reset', ++ 'core4-hmcode', + 'openbios-sparc32', + 'openbios-sparc64', + 'openbios-ppc', +diff --git a/pc-bios/uefi-bios-sw-old b/pc-bios/uefi-bios-sw-old +new file mode 100644 +index 0000000000..8be24e6d73 +Binary files /dev/null and b/pc-bios/uefi-bios-sw-old differ +diff --git a/qapi/machine-target.json b/qapi/machine-target.json +index 7b7149f81c..c959a39dab 100644 +--- a/qapi/machine-target.json ++++ b/qapi/machine-target.json +@@ -348,7 +348,8 @@ + 'TARGET_S390X', + 'TARGET_MIPS', + 'TARGET_LOONGARCH64', +- 'TARGET_RISCV' ] } } ++ 'TARGET_RISCV', ++ 'TARGET_SW64'] } } + + ## + # @query-cpu-definitions: +@@ -364,6 +365,7 @@ + 'TARGET_ARM', + 'TARGET_I386', + 'TARGET_S390X', ++ 'TARGET_SW64', + 'TARGET_MIPS', + 'TARGET_LOONGARCH64', + 'TARGET_RISCV' ] } } +diff --git a/qapi/machine.json b/qapi/machine.json +index b6d634b30d..82b5ff7c36 100644 +--- a/qapi/machine.json ++++ b/qapi/machine.json +@@ -35,7 +35,7 @@ + 'loongarch64', 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', + 'mips64el', 'mipsel', 'nios2', 'or1k', 'ppc', + 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4', +- 'sh4eb', 'sparc', 'sparc64', 'tricore', ++ 'sh4eb', 'sparc', 'sparc64', 'sw64', 'tricore', + 'x86_64', 'xtensa', 'xtensaeb' ] } + + ## +diff --git a/qemu-options.hx b/qemu-options.hx +index caeca1d9bd..f64cf2b556 100644 +--- a/qemu-options.hx ++++ b/qemu-options.hx +@@ -2631,7 +2631,7 @@ SRST + ERST + + DEF("no-acpi", 0, QEMU_OPTION_no_acpi, +- "-no-acpi disable ACPI\n", QEMU_ARCH_I386 | QEMU_ARCH_ARM) ++ "-no-acpi disable ACPI\n", QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_SW64) + 
SRST + ``-no-acpi`` + Disable ACPI (Advanced Configuration and Power Interface) support. +@@ -2690,7 +2690,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, + " specify SMBIOS type 17 fields\n" + "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" + " specify SMBIOS type 41 fields\n", +- QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH) ++ QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH | QEMU_ARCH_SW64) + SRST + ``-smbios file=binary`` + Load SMBIOS entry from binary file. +@@ -4511,7 +4511,7 @@ ERST + DEF("enable-kvm", 0, QEMU_OPTION_enable_kvm, \ + "-enable-kvm enable KVM full virtualization support\n", + QEMU_ARCH_ARM | QEMU_ARCH_I386 | QEMU_ARCH_MIPS | QEMU_ARCH_PPC | +- QEMU_ARCH_RISCV | QEMU_ARCH_S390X) ++ QEMU_ARCH_RISCV | QEMU_ARCH_S390X | QEMU_ARCH_SW64) + SRST + ``-enable-kvm`` + Enable KVM full virtualization support. This option is only +diff --git a/scripts/qemu-version.sh b/scripts/qemu-version.sh +index 3f6e7e6d41..31642ff3dd 100755 +--- a/scripts/qemu-version.sh ++++ b/scripts/qemu-version.sh +@@ -9,7 +9,7 @@ version="$3" + if [ -z "$pkgversion" ]; then + cd "$dir" + if [ -e .git ]; then +- pkgversion=$(git describe --match 'v*' --dirty) || : ++ pkgversion=$(git describe --tag --match 'v*' --dirty) || : + fi + fi + +diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c +index a13db763e5..5b3cf22116 100644 +--- a/system/qdev-monitor.c ++++ b/system/qdev-monitor.c +@@ -61,7 +61,7 @@ typedef struct QDevAlias + QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \ + QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \ + QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \ +- QEMU_ARCH_LOONGARCH) ++ QEMU_ARCH_LOONGARCH | QEMU_ARCH_SW64) + #define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X) + #define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K) + +diff --git a/target/Kconfig b/target/Kconfig +index 83da0bd293..2758c79c52 100644 +--- a/target/Kconfig ++++ b/target/Kconfig +@@ -18,3 +18,4 @@ source sh4/Kconfig + source sparc/Kconfig + source tricore/Kconfig + source xtensa/Kconfig ++source sw64/Kconfig +diff --git a/target/meson.build b/target/meson.build +index a53a60486f..cf4d7db8c5 100644 +--- a/target/meson.build ++++ b/target/meson.build +@@ -17,5 +17,6 @@ subdir('rx') + subdir('s390x') + subdir('sh4') + subdir('sparc') ++subdir('sw64') + subdir('tricore') + subdir('xtensa') +diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h +index dedeb89f8e..15e1fef1d3 100644 +--- a/target/openrisc/cpu.h ++++ b/target/openrisc/cpu.h +@@ -73,7 +73,7 @@ enum { + UPR_PMP = (1 << 8), + UPR_PICP = (1 << 9), + UPR_TTP = (1 << 10), +- UPR_CUP = (255 << 24), ++ UPR_CUP = ((uint32_t)255 << 24), + }; + + /* CPU configure register */ +@@ -185,15 +185,15 @@ enum { + TTMR_TP = (0xfffffff), + TTMR_IP = (1 << 28), + TTMR_IE = (1 << 29), +- TTMR_M = (3 << 30), ++ TTMR_M = ((uint32_t)3 << 30), + }; + + /* Timer Mode */ + enum { + TIMER_NONE = (0 << 30), + TIMER_INTR = (1 << 30), +- TIMER_SHOT = (2 << 30), +- TIMER_CONT = (3 << 30), ++ TIMER_SHOT = ((uint32_t)2 << 30), ++ TIMER_CONT = ((uint32_t)3 << 30), + }; + + /* TLB size */ +diff --git a/target/sw64/Kconfig b/target/sw64/Kconfig +new file mode 100644 +index 0000000000..ad50b9677e +--- /dev/null ++++ b/target/sw64/Kconfig +@@ -0,0 +1,2 @@ ++config SW64 ++ bool +diff --git a/target/sw64/cpu-param.h b/target/sw64/cpu-param.h +new file mode 100644 +index 0000000000..db25eb1970 +--- /dev/null ++++ b/target/sw64/cpu-param.h +@@ -0,0 +1,16 @@ ++/* ++ * SW64 cpu parameters for qemu. 
++ * ++ * Copyright (c) 2018 Lin Hainan ++ */ ++ ++#ifndef SW64_CPU_PARAM_H ++#define SW64_CPU_PARAM_H 1 ++ ++#define TARGET_LONG_BITS 64 ++#define TARGET_PAGE_BITS 13 ++ ++#define TARGET_VIRT_ADDR_SPACE_BITS 64 ++#define TARGET_PHYS_ADDR_SPACE_BITS 48 ++ ++#endif +diff --git a/target/sw64/cpu-qom.h b/target/sw64/cpu-qom.h +new file mode 100644 +index 0000000000..fb8e63ddb6 +--- /dev/null ++++ b/target/sw64/cpu-qom.h +@@ -0,0 +1,29 @@ ++/* ++ * QEMU SW64 CPU ++ * ++ * Copyright (c) 2018 Lin Hainan ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2 ++ * of the License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++#ifndef QEMU_SW64_CPU_QOM_H ++#define QEMU_SW64_CPU_QOM_H ++ ++#include "hw/core/cpu.h" ++ ++#define TYPE_SW64_CPU "sw64-cpu" ++ ++OBJECT_DECLARE_CPU_TYPE(SW64CPU, SW64CPUClass, SW64_CPU) ++ ++#define SW64_CPU_TYPE_SUFFIX "-" TYPE_SW64_CPU ++#define SW64_CPU_TYPE_NAME(model) model SW64_CPU_TYPE_SUFFIX ++ ++#endif +diff --git a/target/sw64/cpu.c b/target/sw64/cpu.c +new file mode 100644 +index 0000000000..2cf37e8a72 +--- /dev/null ++++ b/target/sw64/cpu.c +@@ -0,0 +1,462 @@ ++/* ++ * QEMU SW64 CPU ++ * ++ * Copyright (c) 2018 Lin Hainan ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ */ ++ ++#include "qemu/osdep.h" ++#include "qapi/error.h" ++#include "qemu/qemu-print.h" ++#include "cpu.h" ++#include "exec/exec-all.h" ++#include "exec/cpu-common.h" ++#include "sysemu/kvm.h" ++#include "disas/dis-asm.h" ++#include "kvm_sw64.h" ++#include "sysemu/reset.h" ++#include "hw/qdev-properties.h" ++#include "sysemu/arch_init.h" ++#include "qapi/qapi-commands-machine-target.h" ++#if !defined(CONFIG_USER_ONLY) ++#include "hw/sw64/pm.h" ++#endif ++ ++static void sw64_cpu_set_pc(CPUState *cs, vaddr value) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ ++ cpu->env.pc = value; ++} ++ ++static void sw64_restore_state_to_opc(CPUState *cs, ++ const TranslationBlock *tb, ++ const uint64_t *data) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ ++ cpu->env.pc = data[0]; ++} ++ ++static void sw64_cpu_dump_state(CPUState *cs, FILE *f, int flags) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ int i; ++ ++ static const char ireg_names[31][4] = { ++ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", ++ "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5", ++ "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"}; ++ static const char freg_names[128][4] = { ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", ++ "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", ++ "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", ++ "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", ++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", ++ "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", ++ "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", ++ "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", ++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", ++ "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", ++ "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", ++ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", ++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; ++ qemu_fprintf(f, "PC=%016" PRIx64 " SP=%016" PRIx64 "\n", env->pc, ++ env->ir[IDX_SP]); ++ for (i = 0; i < 31; i++) { ++ qemu_fprintf(f, "%s=%016" PRIx64, ireg_names[i], env->ir[i]); ++ if ((i % 4) == 3) { ++ qemu_fprintf(f, "\n"); ++ } else { ++ qemu_fprintf(f, " "); ++ } ++ } ++ qemu_fprintf(f, "\n"); ++#ifndef CONFIG_USER_ONLY ++ static const char sreg_names[10][4] = {"p1", "p2", "p4", "p5", "p6", ++ "p7", "p20", "p21", "p22", "p23"}; ++ for (i = 0; i < 10; i++) { ++ qemu_fprintf(f, "%s=%016" PRIx64, sreg_names[i], env->sr[i]); ++ if ((i % 4) == 3) { ++ qemu_fprintf(f, "\n"); ++ } else { ++ qemu_fprintf(f, " "); ++ } ++ } ++ qemu_fprintf(f, "\n"); ++#endif ++ for (i = 0; i < 32; i++) { ++ qemu_fprintf(f, "%s=%016" PRIx64, freg_names[i + 96], env->fr[i + 96]); ++ qemu_fprintf(f, " %016" PRIx64, env->fr[i + 64]); ++ qemu_fprintf(f, " %016" PRIx64, env->fr[i + 32]); ++ qemu_fprintf(f, " %016" PRIx64, env->fr[i]); ++ qemu_fprintf(f, "\n"); ++ } ++ qemu_fprintf(f, "\n"); ++} ++ ++#ifndef CONFIG_USER_ONLY ++static void sw64_machine_cpu_reset(void *opaque) ++{ ++ SW64CPU *cpu = opaque; ++ ++ cpu_reset(CPU(cpu)); ++} ++#endif ++ ++static void sw64_cpu_realizefn(DeviceState *dev, Error **errp) ++{ ++ CPUState *cs = CPU(dev); ++ SW64CPUClass *scc = SW64_CPU_GET_CLASS(dev); ++ Error *local_err = NULL; ++#ifndef CONFIG_USER_ONLY ++ SW64CPU *cpu = SW64_CPU(dev); ++ MachineState *ms = MACHINE(qdev_get_machine()); ++ unsigned int 
max_cpus = ms->smp.max_cpus;
++
++    if (cpu->core_id > 0) {
++        cpu_hot_id = cpu->core_id;
++        if (!cpu_hot_id || cpu_hot_id > max_cpus - 1) {
++            error_setg(&local_err,
++                       "CPU core-id must be greater than 0 and less than max_cpus");
++            error_propagate(errp, local_err);
++            return;
++        }
++        if (get_state_cpumask(cpu_hot_id)) {
++            error_setg(&local_err, "unable to add an already online CPU");
++            error_propagate(errp, local_err);
++            return;
++        }
++    }
++#endif
++
++    cpu_exec_realizefn(cs, &local_err);
++    if (local_err != NULL) {
++        error_propagate(errp, local_err);
++        return;
++    }
++#ifndef CONFIG_USER_ONLY
++    qemu_register_reset(sw64_machine_cpu_reset, cs);
++#endif
++
++    qemu_init_vcpu(cs);
++
++    scc->parent_realize(dev, errp);
++}
++
++static void sw64_cpu_unrealizefn(DeviceState *dev)
++{
++    SW64CPUClass *scc = SW64_CPU_GET_CLASS(dev);
++#ifndef CONFIG_USER_ONLY
++    CPUState *cs = CPU(dev);
++    cpu_remove_sync(CPU(dev));
++    qemu_unregister_reset(sw64_machine_cpu_reset, cs);
++#endif
++    scc->parent_unrealize(dev);
++}
++
++static char *sw64_cpu_class_get_model_name(SW64CPUClass *cc)
++{
++    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
++    assert(g_str_has_suffix(class_name, SW64_CPU_TYPE_SUFFIX));
++    return g_strndup(class_name,
++                     strlen(class_name) - strlen(SW64_CPU_TYPE_SUFFIX));
++}
++
++static void sw64_cpu_list_entry(gpointer data, gpointer user_data)
++{
++    ObjectClass *oc = data;
++    SW64CPUClass *cc = SW64_CPU_CLASS(oc);
++
++    char *name = sw64_cpu_class_get_model_name(cc);
++    qemu_printf("sw64 %s\n", name);
++    g_free(name);
++}
++
++void sw64_cpu_list(void)
++{
++    GSList *list;
++
++    list = object_class_get_list_sorted(TYPE_SW64_CPU, false);
++    qemu_printf("Available CPUs:\n");
++    g_slist_foreach(list, sw64_cpu_list_entry, NULL);
++    g_slist_free(list);
++}
++
++static void sw64_cpu_add_definition(gpointer data, gpointer user_data)
++{
++    ObjectClass *oc = data;
++    CpuDefinitionInfoList **cpu_list = user_data;
++    CpuDefinitionInfoList *entry;
++    CpuDefinitionInfo *info;
++    const char *typename;
++
++    typename = object_class_get_name(oc);
++    info = g_malloc0(sizeof(*info));
++    info->name = g_strndup(typename,
++                           strlen(typename) - strlen("-" TYPE_SW64_CPU));
++    info->q_typename = g_strdup(typename);
++
++    entry = g_malloc0(sizeof(*entry));
++    entry->value = info;
++    entry->next = *cpu_list;
++    *cpu_list = entry;
++}
++
++CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
++{
++    CpuDefinitionInfoList *cpu_list = NULL;
++    GSList *list;
++
++    list = object_class_get_list(TYPE_SW64_CPU, false);
++    g_slist_foreach(list, sw64_cpu_add_definition, &cpu_list);
++    g_slist_free(list);
++
++    return cpu_list;
++}
++
++static void sw64_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
++{
++    CPUSW64State *env = cpu_env(cs);
++    if (test_feature(env, SW64_FEATURE_CORE3))
++        info->mach = bfd_mach_sw64_core3;
++    else if (test_feature(env, SW64_FEATURE_CORE4))
++        info->mach = bfd_mach_sw64_core4;
++    info->print_insn = print_insn_sw64;
++}
++
++#include "fpu/softfloat.h"
++
++static void core3_init(Object *obj)
++{
++    CPUState *cs = CPU(obj);
++    CPUSW64State *env = cpu_env(cs);
++#ifdef CONFIG_USER_ONLY
++    env->fpcr = 0x680e800000000000;
++#endif
++    set_feature(env, SW64_FEATURE_CORE3);
++}
++
++static void core4_init(Object *obj)
++{
++    CPUState *cs = CPU(obj);
++    CPUSW64State *env = cpu_env(cs);
++#ifdef CONFIG_USER_ONLY
++    env->fpcr = 0x680e800000000000;
++#endif
++    set_feature(env, SW64_FEATURE_CORE4);
++}
++
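++/*
++ * Resolve a "-cpu" model name (e.g. "core3") to its QOM class.  The
++ * lookup appends SW64_CPU_TYPE_SUFFIX to the model name and rejects
++ * classes that are abstract or are not SW64 CPU subtypes.
++ */
++static ObjectClass *sw64_cpu_class_by_name(const char *cpu_model)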
++{
++    ObjectClass *oc;
++    char *typename;
++    char **cpuname;
++
++    cpuname = g_strsplit(cpu_model, ",", 1);
++    typename = g_strdup_printf(SW64_CPU_TYPE_NAME("%s"), cpu_model);
++
++    oc = object_class_by_name(typename);
++    g_strfreev(cpuname);
++    g_free(typename);
++
++    if (oc && object_class_dynamic_cast(oc, TYPE_SW64_CPU) &&
++        !object_class_is_abstract(oc)) {
++        return oc;
++    }
++    return NULL;
++}
++
++bool sw64_cpu_has_work(CPUState *cs)
++{
++    /* Once the CPU has halted (gone to sleep), it can be woken by a
++     * hard interrupt, timer, inter-core interrupt (II), mailbox or
++     * machine check (MCHK).
++     */
++    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER |
++           CPU_INTERRUPT_II0 | CPU_INTERRUPT_MCHK | CPU_INTERRUPT_IINM);
++}
++
++static void sw64_cpu_initfn(Object *obj)
++{
++    CPUState *cs = CPU(obj);
++    SW64CPU *cpu = SW64_CPU(obj);
++    CPUSW64State *env = &cpu->env;
++
++#ifndef CONFIG_USER_ONLY
++    env->flags = ENV_FLAG_HM_MODE;
++#else
++    env->flags = ENV_FLAG_PS_USER;
++#endif
++    tlb_flush(cs);
++}
++
++#ifndef CONFIG_USER_ONLY
++static void sw64_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
++                                           unsigned size, MMUAccessType access_type,
++                                           int mmu_idx, MemTxAttrs attrs,
++                                           MemTxResult response, uintptr_t retaddr)
++{
++#ifdef DEBUG_TRANS
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++
++    if (retaddr) {
++        cpu_restore_state(cs, retaddr);
++    }
++    fprintf(stderr, "PC = %lx, Wrong IO addr. Hwaddr = %lx, vaddr = %lx, access_type = %d\n",
++            env->pc, physaddr, addr, access_type);
++#endif
++}
++#endif
++
++static void sw64_cpu_reset(DeviceState *dev)
++{
++    CPUState *s = CPU(dev);
++    SW64CPU *cpu = SW64_CPU(s);
++    SW64CPUClass *scc = SW64_CPU_GET_CLASS(cpu);
++
++    scc->parent_reset(dev);
++
++#ifndef CONFIG_USER_ONLY
++    if (kvm_enabled()) {
++        kvm_sw64_reset_vcpu(cpu);
++    }
++#endif
++}
++
++static Property sw64_cpu_properties[] = {
++#ifdef CONFIG_USER_ONLY
++    /* apic_id = 0 by default for *-user, see commit 9886e834 */
++    DEFINE_PROP_UINT32("thread-id", SW64CPU, thread_id, 0),
++    DEFINE_PROP_UINT32("core-id", SW64CPU, core_id, 0),
++    DEFINE_PROP_UINT32("socket-id", SW64CPU, socket_id, 0),
++#else
++    DEFINE_PROP_UINT32("thread-id", SW64CPU, thread_id, 0),
++    DEFINE_PROP_UINT32("core-id", SW64CPU, core_id, 0xFFFFFFFF),
++    DEFINE_PROP_UINT32("socket-id", SW64CPU, socket_id, 0),
++#endif
++    DEFINE_PROP_UINT32("node-id", SW64CPU, node_id, -1),
++    DEFINE_PROP_END_OF_LIST()
++};
++
++#ifndef CONFIG_USER_ONLY
++#include "hw/core/sysemu-cpu-ops.h"
++
++static const struct SysemuCPUOps sw64_sysemu_ops = {
++    .get_phys_page_debug = sw64_cpu_get_phys_page_debug,
++};
++#endif
++
++#include "hw/core/tcg-cpu-ops.h"
++
++static const struct TCGCPUOps sw64_tcg_ops = {
++    .initialize = sw64_translate_init,
++    .restore_state_to_opc = sw64_restore_state_to_opc,
++
++#ifndef CONFIG_USER_ONLY
++    .tlb_fill = sw64_cpu_tlb_fill,
++    .do_unaligned_access = sw64_cpu_do_unaligned_access,
++    .cpu_exec_interrupt = sw64_cpu_exec_interrupt,
++    .do_transaction_failed = sw64_cpu_do_transaction_failed,
++    .do_interrupt = sw64_cpu_do_interrupt,
++#endif /* !CONFIG_USER_ONLY */
++};
++
++static void sw64_cpu_class_init(ObjectClass *oc, void *data)
++{
++    DeviceClass *dc = DEVICE_CLASS(oc);
++    CPUClass *cc = CPU_CLASS(oc);
++    SW64CPUClass *scc = SW64_CPU_CLASS(oc);
++
++    device_class_set_parent_realize(dc, sw64_cpu_realizefn,
++                                    &scc->parent_realize);
++    device_class_set_parent_reset(dc, sw64_cpu_reset, &scc->parent_reset);
++    device_class_set_props(dc,
sw64_cpu_properties); ++ ++ scc->parent_unrealize = dc->unrealize; ++ dc->unrealize = sw64_cpu_unrealizefn; ++ ++ dc->user_creatable = true; ++ ++ cc->class_by_name = sw64_cpu_class_by_name; ++#ifndef CONFIG_USER_ONLY ++ dc->vmsd = &vmstate_sw64_cpu; ++ cc->sysemu_ops = &sw64_sysemu_ops; ++#endif ++ cc->has_work = sw64_cpu_has_work; ++ cc->set_pc = sw64_cpu_set_pc; ++ cc->disas_set_info = sw64_cpu_disas_set_info; ++ cc->dump_state = sw64_cpu_dump_state; ++ ++ cc->gdb_read_register = sw64_cpu_gdb_read_register; ++ cc->gdb_write_register = sw64_cpu_gdb_write_register; ++ cc->gdb_num_core_regs = 67; ++ cc->gdb_core_xml_file = "sw64-core.xml"; ++ ++ cc->tcg_ops = &sw64_tcg_ops; ++} ++ ++static const SW64CPUInfo sw64_cpus[] = ++{ ++ { ++ .name = "core3", ++ .initfn = core3_init, ++ }, ++ { ++ .name = "core4", ++ .initfn = core4_init, ++ }, ++ { ++ .name = NULL ++ }, ++}; ++ ++static void cpu_register(const SW64CPUInfo *info) ++{ ++ TypeInfo type_info = { ++ .parent = TYPE_SW64_CPU, ++ .instance_size = sizeof(SW64CPU), ++ .instance_init = info->initfn, ++ .class_size = sizeof(SW64CPUClass), ++ .class_init = info->class_init, ++ }; ++ ++ type_info.name = g_strdup_printf("%s-" TYPE_SW64_CPU, info->name); ++ type_register(&type_info); ++ g_free((void*)type_info.name); ++} ++ ++static const TypeInfo sw64_cpu_type_info = { ++ .name = TYPE_SW64_CPU, ++ .parent = TYPE_CPU, ++ .instance_size = sizeof(SW64CPU), ++ .instance_init = sw64_cpu_initfn, ++ .abstract = true, ++ .class_size = sizeof(SW64CPUClass), ++ .class_init = sw64_cpu_class_init, ++}; ++ ++static void sw64_cpu_register_types(void) ++{ ++ const SW64CPUInfo *info = sw64_cpus; ++ ++ type_register_static(&sw64_cpu_type_info); ++ ++ while (info->name) { ++ cpu_register(info); ++ info++; ++ } ++} ++ ++type_init(sw64_cpu_register_types) +diff --git a/target/sw64/cpu.h b/target/sw64/cpu.h +new file mode 100644 +index 0000000000..854dd218ce +--- /dev/null ++++ b/target/sw64/cpu.h +@@ -0,0 +1,449 @@ ++/* ++ * SW64 emulation cpu definitions for qemu. ++ * ++ * Copyright (c) 2018 Lin Hainan ++ * ++ * This library is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2 of the License, or (at your option) any later version. ++ * ++ * This library is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ */ ++#ifndef SW64_CPU_H ++#define SW64_CPU_H ++ ++#include "cpu-qom.h" ++#include "fpu/softfloat.h" ++#include "profile.h" ++ ++/* QEMU addressing/paging config */ ++#define TARGET_PAGE_BITS 13 ++#define TARGET_LONG_BITS 64 ++#define TARGET_LEVEL_BITS 10 ++//#define ALIGNED_ONLY ++ ++#include "exec/cpu-defs.h" ++ ++/* FIXME: LOCKFIX */ ++#define SW64_FIXLOCK 1 ++ ++/* swcore processors have a weak memory model */ ++#define TCG_GUEST_DEFAULT_MO (0) ++ ++#define SOFTMMU 1 ++ ++#ifndef CONFIG_USER_ONLY ++#define MMU_MODE0_SUFFIX _phys ++#define MMU_MODE3_SUFFIX _user ++#define MMU_MODE2_SUFFIX _kernel ++#endif ++#define MMU_PHYS_IDX 0 ++#define MMU_KERNEL_IDX 2 ++#define MMU_USER_IDX 3 ++ ++/* FIXME:Bits 4 and 5 are the mmu mode. The VMS hmcode uses all 4 modes; ++ The Unix hmcode only uses bit 4. 
*/
++#define PS_USER_MODE 8u
++
++#define ENV_FLAG_HM_SHIFT 0
++#define ENV_FLAG_PS_SHIFT 8
++#define ENV_FLAG_FEN_SHIFT 24
++
++#define ENV_FLAG_HM_MODE (1u << ENV_FLAG_HM_SHIFT)
++#define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT)
++#define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT)
++
++#define MCU_CLOCK 25000000
++
++#define PTE_MASK 0xbfffffffffffffff
++
++typedef struct CPUArchState {
++    uint64_t ir[32];
++    uint64_t fr[128];
++    uint64_t pc;
++    bool is_slave;
++
++    uint64_t csr[0x100];
++    uint64_t fpcr;
++    uint64_t fpcr_exc_enable;
++    uint8_t fpcr_round_mode;
++    uint8_t fpcr_flush_to_zero;
++
++    float_status fp_status;
++
++    uint64_t hm_entry;
++
++#if !defined(CONFIG_USER_ONLY)
++    uint64_t sr[10]; /* shadow regs 1,2,4-7,20-23 */
++#endif
++
++    uint32_t flags;
++    uint64_t error_code;
++    uint64_t unique;
++    uint64_t lock_addr;
++    uint64_t lock_valid;
++    uint64_t lock_flag;
++    uint64_t lock_success;
++#ifdef SW64_FIXLOCK
++    uint64_t lock_value;
++#endif
++
++    uint64_t trap_arg0;
++    uint64_t trap_arg1;
++    uint64_t trap_arg2;
++
++    uint64_t features;
++    uint64_t insn_count[537];
++
++    /* reserve for slave */
++    uint64_t ca[4];
++    uint64_t scala_gpr[64];
++    uint64_t vec_gpr[224];
++    uint64_t fpcr_base;
++    uint64_t fpcr_ext;
++    uint64_t pendding_flag;
++    uint64_t pendding_status;
++    uint64_t synr_pendding_status;
++    uint64_t sync_pendding_status;
++    uint8_t vlenma_idxa;
++    uint8_t stable;
++} CPUSW64State;
++#define SW64_FEATURE_CORE3 0x2
++#define SW64_FEATURE_CORE4 0x4
++
++static inline void set_feature(CPUSW64State *env, int feature)
++{
++    env->features |= feature;
++}
++
++/**
++ * SW64CPU:
++ * @env: #CPUSW64State
++ *
++ * An SW64 CPU
++ */
++struct ArchCPU {
++    /*< private >*/
++    CPUState parent_obj;
++    /*< public >*/
++    CPUSW64State env;
++    CPUNegativeOffsetState neg;
++
++    uint64_t k_regs[164];
++    uint64_t k_vcb[96];
++    QEMUTimer *alarm_timer;
++    target_ulong irq;
++    uint32_t node_id;
++    uint32_t thread_id;
++    uint32_t core_id;
++    uint32_t socket_id;
++};
++
++/**
++ * SW64CPUClass:
++ * @parent_realize: The parent class' realize handler.
++ * @parent_reset: The parent class' reset handler.
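++ * @parent_unrealize: The parent class' unrealize handler.
++ *
++ * An SW64 CPU model.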
++ * */ ++struct SW64CPUClass { ++ CPUClass parent_class; ++ ++ DeviceRealize parent_realize; ++ DeviceUnrealize parent_unrealize; ++ DeviceReset parent_reset; ++}; ++ ++enum { ++ IDX_V0 = 0, ++ IDX_T0 = 1, ++ IDX_T1 = 2, ++ IDX_T2 = 3, ++ IDX_T3 = 4, ++ IDX_T4 = 5, ++ IDX_T5 = 6, ++ IDX_T6 = 7, ++ IDX_T7 = 8, ++ IDX_S0 = 9, ++ IDX_S1 = 10, ++ IDX_S2 = 11, ++ IDX_S3 = 12, ++ IDX_S4 = 13, ++ IDX_S5 = 14, ++ IDX_S6 = 15, ++ IDX_FP = IDX_S6, ++ IDX_A0 = 16, ++ IDX_A1 = 17, ++ IDX_A2 = 18, ++ IDX_A3 = 19, ++ IDX_A4 = 20, ++ IDX_A5 = 21, ++ IDX_T8 = 22, ++ IDX_T9 = 23, ++ IDX_T10 = 24, ++ IDX_T11 = 25, ++ IDX_RA = 26, ++ IDX_T12 = 27, ++ IDX_PV = IDX_T12, ++ IDX_AT = 28, ++ IDX_GP = 29, ++ IDX_SP = 30, ++ IDX_ZERO = 31, ++}; ++ ++enum { ++ MM_K_TNV = 0x0, ++ MM_K_ACV = 0x1, ++ MM_K_FOR = 0x2, ++ MM_K_FOE = 0x3, ++ MM_K_FOW = 0x4 ++}; ++ ++enum { ++ PTE_VALID = 0x0001, ++ PTE_FOR = 0x0002, /* used for page protection (fault on read) */ ++ PTE_FOW = 0x0004, /* used for page protection (fault on write) */ ++ PTE_FOE = 0x0008, ++ PTE_KS = 0x0010, ++ PTE_PSE = 0x0040, ++ PTE_GH = 0x0060, ++ PTE_HRE = 0x0100, ++ PTE_VRE = 0x0200, ++ PTE_KRE = 0x0400, ++ PTE_URE = 0x0800, ++ PTE_HWE = 0x1000, ++ PTE_VWE = 0x2000, ++ PTE_KWE = 0x4000, ++ PTE_UWE = 0x8000 ++}; ++ ++static inline int cpu_mmu_index(CPUSW64State *env, bool ifetch) ++{ ++ int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX; ++ if (env->flags & ENV_FLAG_HM_MODE) { ++ ret = MMU_PHYS_IDX; ++ } ++ return ret; ++} ++ ++static inline SW64CPU *sw64_env_get_cpu(CPUSW64State *env) ++{ ++ return container_of(env, SW64CPU, env); ++} ++ ++#define ENV_GET_CPU(e) CPU(sw64_env_get_cpu(e)) ++#define ENV_OFFSET offsetof(SW64CPU, env) ++ ++#define cpu_init(cpu_model) cpu_generic_init(TYPE_SW64_CPU, cpu_model) ++ ++#define cpu_list sw64_cpu_list ++int cpu_sw64_signal_handler(int host_signum, void *pinfo, void *puc); ++int sw64_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int reg); ++int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *buf, int reg); ++bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool probe, uintptr_t retaddr); ++uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr); ++hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); ++void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val); ++uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr); ++void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val); ++uint64_t cpu_sw64_load_fpcr(CPUSW64State *env); ++#ifndef CONFIG_USER_ONLY ++void sw64_cpu_do_interrupt(CPUState *cs); ++bool sw64_cpu_exec_interrupt(CPUState *cpu, int int_req); ++#endif ++void cpu_sw64_store_fpcr(CPUSW64State *env, uint64_t val); ++G_NORETURN void sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr, ++ MMUAccessType access_type, int mmu_idx, ++ uintptr_t retaddr); ++bool sw64_cpu_has_work(CPUState *cs); ++extern struct VMStateDescription vmstate_sw64_cpu; ++void sw64_cpu_list(void); ++ ++/* SW64-specific interrupt pending bits */ ++#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0 ++#define CPU_INTERRUPT_II0 CPU_INTERRUPT_TGT_EXT_1 ++#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2 ++#define CPU_INTERRUPT_PCIE CPU_INTERRUPT_TGT_EXT_3 ++#define CPU_INTERRUPT_WAKEUP CPU_INTERRUPT_TGT_EXT_3 ++#define CPU_INTERRUPT_IINM CPU_INTERRUPT_TGT_EXT_4 ++ ++#define cpu_signal_handler cpu_sw64_signal_handler ++#define CPU_RESOLVING_TYPE TYPE_SW64_CPU ++ ++#define SWCSR(x, y) x = y ++enum { ++ SWCSR(ITB_TAG, 0x0), ++ SWCSR(ITB_PTE, 0x1), ++ 
SWCSR(ITB_IA, 0x2), ++ SWCSR(ITB_IV, 0x3), ++ SWCSR(ITB_IVP, 0x4), ++ SWCSR(ITB_IU, 0x5), ++ SWCSR(ITB_IS, 0x6), ++ SWCSR(EXC_SUM, 0xd), ++ SWCSR(EXC_PC, 0xe), ++ SWCSR(DS_STAT, 0x48), ++ SWCSR(CID, 0xc4), ++ SWCSR(TID, 0xc7), ++ ++ SWCSR(DTB_TAG, 0x40), ++ SWCSR(DTB_PTE, 0x41), ++ SWCSR(DTB_IA, 0x42), ++ SWCSR(DTB_IV, 0x43), ++ SWCSR(DTB_IVP, 0x44), ++ SWCSR(DTB_IU, 0x45), ++ SWCSR(DTB_IS, 0x46), ++ SWCSR(II_REQ, 0x82), ++/* core3 csr */ ++ SWCSR(C3_PTBR, 0x8), ++ SWCSR(C3_PRI_BASE, 0x10), ++ SWCSR(C3_UPCR, 0x22), ++ SWCSR(C3_TIMER_CTL, 0x2a), ++ SWCSR(C3_TIMER_TH, 0x2b), ++ SWCSR(C3_INT_STAT, 0x30), ++ SWCSR(C3_INT_CLR, 0x31), ++ SWCSR(C3_IER, 0x32), ++ SWCSR(INT_PCI_INT, 0x33), ++ SWCSR(C3_DVA, 0x4e), ++ SWCSR(C3_DTB_PCR, 0x47), ++/* core4 csr */ ++ SWCSR(C4_PRI_BASE, 0xf), ++ SWCSR(C4_UPCR, 0x15), ++ SWCSR(INT_EN, 0x1a), ++ SWCSR(C4_INT_STAT, 0x1b), ++ SWCSR(C4_INT_CLR, 0x1c), ++ SWCSR(PCIE_INT, 0x21), ++ SWCSR(C4_TIMER_TH, 0x23), ++ SWCSR(C4_TIMER_CTL, 0x24), ++ SWCSR(IINM, 0x2f), ++ SWCSR(C4_DVA, 0x54), ++ SWCSR(C4_DTB_UPCR, 0x58), ++ SWCSR(C4_PTBR_SYS, 0x68), ++ SWCSR(C4_PTBR_USR, 0x69), ++ SWCSR(SOFT_CID, 0xc9), ++ SWCSR(SHTCLOCK, 0xca), ++}; ++ ++#include "exec/cpu-all.h" ++static inline void cpu_get_tb_cpu_state(CPUSW64State *env, target_ulong *pc, ++ target_ulong *cs_base, uint32_t *pflags) ++{ ++ *pc = env->pc; ++ *cs_base = 0; ++ *pflags = env->flags; ++} ++ ++void sw64_translate_init(void); ++ ++enum { ++ EXCP_NONE, ++ EXCP_HALT, ++ EXCP_II0, ++ EXCP_IINM, ++ EXCP_OPCDEC, ++ EXCP_CALL_SYS, ++ EXCP_ARITH, ++ EXCP_UNALIGN, ++#ifdef SOFTMMU ++ EXCP_MMFAULT, ++#else ++ EXCP_DTBD, ++ EXCP_DTBS_U, ++ EXCP_DTBS_K, ++ EXCP_ITB_U, ++ EXCP_ITB_K, ++#endif ++ EXCP_CLK_INTERRUPT, ++ EXCP_DEV_INTERRUPT, ++ EXCP_SLAVE, ++}; ++ ++#define CSR_SHIFT_AND_MASK(name, func, shift, bits) \ ++ name##_##func##_S = shift, \ ++ name##_##func##_V = bits, \ ++ name##_##func##_M = (1UL << bits) - 1 ++ ++#define FPCR_MASK(name) ((uint64_t)FPCR_##name##_M << FPCR_##name##_S) ++/* FPCR */ ++enum { ++ CSR_SHIFT_AND_MASK(FPCR, EXC_CTL, 0, 2), ++ CSR_SHIFT_AND_MASK(FPCR, EXC_CTL_WEN, 2, 1), ++ CSR_SHIFT_AND_MASK(FPCR, RSV0, 3, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INV3, 4, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO0, 5, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVF3, 6, 1), ++ CSR_SHIFT_AND_MASK(FPCR, UNF3, 7, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INE3, 8, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO1, 9, 1), ++ CSR_SHIFT_AND_MASK(FPCR, RSV1, 10, 10), ++ CSR_SHIFT_AND_MASK(FPCR, INV2, 20, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO2, 21, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVF2, 22, 1), ++ CSR_SHIFT_AND_MASK(FPCR, UNF2, 23, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INE2, 24, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO3, 25, 1), ++ CSR_SHIFT_AND_MASK(FPCR, RSV2, 26, 10), ++ CSR_SHIFT_AND_MASK(FPCR, INV1, 36, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO4, 37, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVF1, 38, 1), ++ CSR_SHIFT_AND_MASK(FPCR, UNF1, 39, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INE1, 40, 1), ++ CSR_SHIFT_AND_MASK(FPCR, ZERO5, 41, 1), ++ CSR_SHIFT_AND_MASK(FPCR, RSV3, 42, 6), ++ CSR_SHIFT_AND_MASK(FPCR, DNZ, 48, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INVD, 49, 1), ++ CSR_SHIFT_AND_MASK(FPCR, DZED, 50, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVFD, 51, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INV0, 52, 1), ++ CSR_SHIFT_AND_MASK(FPCR, DZE0, 53, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVF0, 54, 1), ++ CSR_SHIFT_AND_MASK(FPCR, UNF0, 55, 1), ++ CSR_SHIFT_AND_MASK(FPCR, INE0, 56, 1), ++ CSR_SHIFT_AND_MASK(FPCR, OVI0, 57, 1), ++ CSR_SHIFT_AND_MASK(FPCR, DYN, 58, 2), ++ CSR_SHIFT_AND_MASK(FPCR, UNDZ, 60, 1), ++ 
CSR_SHIFT_AND_MASK(FPCR, UNFD, 61, 1),
++    CSR_SHIFT_AND_MASK(FPCR, INED, 62, 1),
++    CSR_SHIFT_AND_MASK(FPCR, SUM, 63, 1),
++};
++
++/* Arithmetic exception (entArith) constants. */
++#define EXC_M_SWC 1 /* Software completion */
++#define EXC_M_INV 2 /* Invalid operation */
++#define EXC_M_DZE 4 /* Division by zero */
++#define EXC_M_OVF 8 /* Overflow */
++#define EXC_M_UNF 16 /* Underflow */
++#define EXC_M_INE 32 /* Inexact result */
++#define EXC_M_IOV 64 /* Integer Overflow */
++#define EXC_M_DNO 128 /* Denormal operation */
++
++G_NORETURN void dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
++                             int error);
++G_NORETURN void arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc,
++                           uint64_t mask);
++
++#define DEBUG_ARCH
++#ifdef DEBUG_ARCH
++#define arch_assert(x) \
++    do { \
++        g_assert(x); /*fprintf(stderr, "+6b %d\n", __LINE__); */ \
++    } while (0)
++#else
++#define arch_assert(x)
++#endif
++
++typedef struct SW64CPUInfo {
++    const char *name;
++    void (*initfn)(Object *obj);
++    void (*class_init)(ObjectClass *oc, void *data);
++} SW64CPUInfo;
++#define test_feature(env, x) (env->features & (x))
++
++/* Slave */
++#endif
+diff --git a/target/sw64/exception.c b/target/sw64/exception.c
+new file mode 100644
+index 0000000000..507472a798
+--- /dev/null
++++ b/target/sw64/exception.c
+@@ -0,0 +1,79 @@
++#include "qemu/osdep.h"
++#include "qemu/timer.h"
++
++#include "cpu.h"
++#include "exec/cpu-common.h"
++#include "fpu/softfloat.h"
++#include "exec/helper-proto.h"
++#include "hw/core/cpu.h"
++
++#ifndef CONFIG_USER_ONLY
++G_NORETURN void sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
++                                             MMUAccessType access_type,
++                                             int mmu_idx, uintptr_t retaddr)
++{
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++    uint32_t insn = 0;
++
++    if (retaddr) {
++        cpu_restore_state(cs, retaddr);
++    }
++
++    fprintf(stderr, "Error %s addr = %lx\n", __func__, addr);
++    if (test_feature(env, SW64_FEATURE_CORE3))
++        env->csr[C3_DVA] = addr;
++    else if (test_feature(env, SW64_FEATURE_CORE4))
++        env->csr[C4_DVA] = addr;
++
++    env->csr[EXC_SUM] = ((insn >> 21) & 31) << 8; /* opcode */
++    env->csr[DS_STAT] = (insn >> 26) << 4; /* dest regno */
++    cs->exception_index = EXCP_UNALIGN;
++    env->error_code = 0;
++    cpu_loop_exit(cs);
++}
++
++#endif
++
++/* This should only be called from translate, via gen_excp.
++   We expect that ENV->PC has already been updated. */
++void G_NORETURN helper_excp(CPUSW64State *env, int excp, int error)
++{
++    SW64CPU *cpu = sw64_env_get_cpu(env);
++    CPUState *cs = CPU(cpu);
++
++    cs->exception_index = excp;
++    env->error_code = error;
++    cpu_loop_exit(cs);
++}
++
++/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
++void G_NORETURN dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
++                             int error)
++{
++    SW64CPU *cpu = sw64_env_get_cpu(env);
++    CPUState *cs = CPU(cpu);
++
++    cs->exception_index = excp;
++    env->error_code = error;
++    if (retaddr) {
++        /* FIXME: Not jump to another tb, but jump to next insn emu */
++        cpu_restore_state(cs, retaddr);
++        /* Floating-point exceptions (our only users) point to the next PC.
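++           Advancing by one 4-byte instruction keeps the PC reported to
++           the guest precise for these traps.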
++         */
++        env->pc += 4;
++    }
++    cpu_loop_exit(cs);
++}
++
++void G_NORETURN arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc,
++                           uint64_t mask)
++{
++    env->csr[EXC_SUM] = exc;
++    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
++}
++
++
++void helper_trace_mem(CPUSW64State *env, uint64_t addr, uint64_t val)
++{
++    /* printf("pc = %lx: Access mem addr =%lx, val = %lx\n", env->pc, addr,val); */
++}
+diff --git a/target/sw64/float_helper.c b/target/sw64/float_helper.c
+new file mode 100644
+index 0000000000..e14b7d194d
+--- /dev/null
++++ b/target/sw64/float_helper.c
+@@ -0,0 +1,798 @@
++#include "qemu/osdep.h"
++#include "cpu.h"
++#include "exec/exec-all.h"
++#include "exec/helper-proto.h"
++#include "fpu/softfloat.h"
++#include "qemu/cpu-float.h"
++
++static inline uint32_t extractFloat16Frac(float16 a)
++{
++    return float16_val(a) & 0x3ff;
++}
++
++/*----------------------------------------------------------------------------
++| Returns the exponent bits of the half-precision floating-point value `a'.
++*----------------------------------------------------------------------------*/
++
++static inline int extractFloat16Exp(float16 a)
++{
++    return (float16_val(a) >> 10) & 0x1f;
++}
++
++/*----------------------------------------------------------------------------
++| Returns the sign bit of the half-precision floating-point value `a'.
++*----------------------------------------------------------------------------*/
++
++static inline uint8_t extractFloat16Sign(float16 a)
++{
++    return float16_val(a) >> 15;
++}
++
++#define FP_STATUS (env->fp_status)
++
++#define CONVERT_BIT(X, SRC, DST) \
++    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X)&SRC) * (DST / SRC))
++
++static uint64_t soft_to_errcode_exc(CPUSW64State *env)
++{
++    uint8_t exc = get_float_exception_flags(&FP_STATUS);
++
++    if (unlikely(exc)) {
++        set_float_exception_flags(0, &FP_STATUS);
++    }
++    return exc;
++}
++
++static inline uint64_t float32_to_s_int(uint32_t fi)
++{
++    uint32_t frac = fi & 0x7fffff;
++    uint32_t sign = (fi >> 31) & 1;
++    uint32_t exp_msb = (fi >> 30) & 1;
++    uint32_t exp_low = (fi >> 23) & 0x7f;
++    uint32_t exp;
++
++    exp = (exp_msb << 10) | exp_low;
++    if (exp_msb) {
++        if (exp_low == 0x7f) {
++            exp = 0x7ff;
++        }
++    } else {
++        if (exp_low != 0x00) {
++            exp |= 0x380;
++        }
++    }
++
++    return (((uint64_t)sign << 63) | ((uint64_t)exp << 52) |
++            ((uint64_t)frac << 29));
++}
++
++static inline uint64_t float32_to_s(float32 fa)
++{
++    CPU_FloatU a;
++    a.f = fa;
++    return float32_to_s_int(a.l);
++}
++static inline uint32_t s_to_float32_int(uint64_t a)
++{
++    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
++}
++
++static inline float32 s_to_float32(uint64_t a)
++{
++    CPU_FloatU r;
++    r.l = s_to_float32_int(a);
++    return r.f;
++}
++
++uint32_t helper_s_to_memory(uint64_t a)
++{
++    return s_to_float32(a);
++}
++
++uint64_t helper_memory_to_s(uint32_t a)
++{
++    return float32_to_s(a);
++}
++
++uint64_t helper_fcvtls(CPUSW64State *env, uint64_t a)
++{
++    float32 fr = int64_to_float32(a, &FP_STATUS);
++    env->error_code = soft_to_errcode_exc(env);
++    return float32_to_s(fr);
++}
++
++uint64_t helper_fcvtld(CPUSW64State *env, uint64_t a)
++{
++    float64 fr = int64_to_float64(a, &FP_STATUS);
++    env->error_code = soft_to_errcode_exc(env);
++    return (uint64_t)fr;
++}
++
++static uint64_t do_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode)
++{
++    uint64_t frac, ret = 0;
++    uint32_t exp, sign, exc = 0;
++    int shift;
++
++    sign = (a >> 63);
++    exp = (uint32_t)(a >> 52) & 0x7ff;
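++    /* IEEE double layout: sign in bit 63, 11 biased exponent bits
++     * (bias 1023) in 62..52, and a 52-bit fraction in 51..0.
++     */
++    frac = a &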
0xfffffffffffffull;
++
++    if (exp == 0) {
++        if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) {
++            goto do_underflow;
++        }
++    } else if (exp == 0x7ff) {
++        exc = float_flag_invalid;
++    } else {
++        /* Restore implicit bit. */
++        frac |= 0x10000000000000ull;
++
++        shift = exp - 1023 - 52;
++        if (shift >= 0) {
++            /* In this case the number is so large that we must shift
++               the fraction left. There is no rounding to do. */
++            if (shift < 64) {
++                ret = frac << shift;
++            }
++            /* Check for overflow. Note the special case of -0x1p63. */
++            if (shift >= 11 && a != 0xC3E0000000000000ull) {
++                exc = float_flag_inexact;
++            }
++        } else {
++            uint64_t round;
++
++            /* In this case the number is smaller than the fraction as
++               represented by the 52 bit number. Here we must think
++               about rounding the result. Handle this by shifting the
++               fractional part of the number into the high bits of ROUND.
++               This will let us efficiently handle round-to-nearest. */
++            shift = -shift;
++            if (shift < 63) {
++                ret = frac >> shift;
++                round = frac << (64 - shift);
++            } else {
++                /* The exponent is so small we shift out everything.
++                   Leave a sticky bit for proper rounding below. */
++            do_underflow:
++                round = 1;
++            }
++
++            if (round) {
++                exc = float_flag_inexact;
++                switch (roundmode) {
++                case float_round_nearest_even:
++                    if (round == (1ull << 63)) {
++                        /* Fraction is exactly 0.5; round to even. */
++                        ret += (ret & 1);
++                    } else if (round > (1ull << 63)) {
++                        ret += 1;
++                    }
++                    break;
++                case float_round_to_zero:
++                    break;
++                case float_round_up:
++                    ret += 1 - sign;
++                    break;
++                case float_round_down:
++                    ret += sign;
++                    break;
++                }
++            }
++        }
++        if (sign) {
++            ret = -ret;
++        }
++    }
++    env->error_code = exc;
++
++    return ret;
++}
++
++uint64_t helper_fris(CPUSW64State *env, uint64_t a, uint64_t roundmode)
++{
++    uint64_t ir;
++    float32 fr;
++
++    if (roundmode == 5)
++        roundmode = env->fpcr_round_mode;
++    ir = do_fcvtdl(env, a, roundmode);
++    fr = int64_to_float32(ir, &FP_STATUS);
++    return float32_to_s(fr);
++}
++
++uint64_t helper_frid(CPUSW64State *env, uint64_t a, uint64_t roundmode)
++{
++    if (roundmode == 5)
++        roundmode = env->fpcr_round_mode;
++    return int64_to_float64(do_fcvtdl(env, a, roundmode), &FP_STATUS);
++}
++
++uint64_t helper_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode)
++{
++    return do_fcvtdl(env, a, roundmode);
++}
++
++uint64_t helper_fcvtdl_dyn(CPUSW64State *env, uint64_t a)
++{
++    uint64_t roundmode = (uint64_t)(env->fpcr_round_mode);
++    return do_fcvtdl(env, a, roundmode);
++}
++
++uint64_t helper_fcvtsd(CPUSW64State *env, uint64_t a)
++{
++    float32 fa;
++    float64 fr;
++
++    fa = s_to_float32(a);
++    fr = float32_to_float64(fa, &FP_STATUS);
++
++    return fr;
++}
++
++uint64_t helper_fcvtds(CPUSW64State *env, uint64_t a)
++{
++    float32 fa;
++
++    fa = float64_to_float32((float64)a, &FP_STATUS);
++
++    return float32_to_s(fa);
++}
++
++uint64_t helper_fcvtwl(CPUSW64State *env, uint64_t a)
++{
++    int32_t ret;
++    ret = (a >> 29) & 0x3fffffff;
++    ret |= ((a >> 62) & 0x3) << 30;
++    return (uint64_t)(int64_t)ret;
++}
++
++uint64_t helper_fcvtlw(CPUSW64State *env, uint64_t a)
++{
++    uint64_t ret;
++    ret = (a & 0x3fffffff) << 29;
++    ret |= ((a >> 30) & 0x3) << 62;
++    return ret;
++}
++
++uint64_t helper_fadds(CPUSW64State *env, uint64_t a, uint64_t b)
++{
++    float32 fa, fb, fr;
++
++    fa = s_to_float32(a);
++    fb = s_to_float32(b);
++    fr = float32_add(fa, fb, &FP_STATUS);
++
++    env->error_code = soft_to_errcode_exc(env);
++    return float32_to_s(fr);
++}
++
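++/*
++ * "S" (single-precision) operands live in the 64-bit FPRs as the
++ * equal-valued float64 bit pattern: float32_to_s() re-biases the 8-bit
++ * exponent to 11 bits (127 -> 1023) and shifts the fraction up by 29
++ * bits, and s_to_float32() undoes that mapping, so helpers such as
++ * helper_fadds() convert at the boundaries and compute in float32.
++ */
++/* Input handling without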
software completion. Trap for all ++ non-finite numbers. */ ++uint64_t helper_faddd(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb, fr; ++ ++ fa = (float64)a; ++ fb = (float64)b; ++ fr = float64_add(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return (uint64_t)fr; ++} ++ ++uint64_t helper_fsubs(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float32 fa, fb, fr; ++ ++ fa = s_to_float32(a); ++ fb = s_to_float32(b); ++ fr = float32_sub(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_fsubd(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb, fr; ++ ++ fa = (float64)a; ++ fb = (float64)b; ++ fr = float64_sub(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return (uint64_t)fr; ++} ++ ++uint64_t helper_fmuls(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float32 fa, fb, fr; ++ ++ fa = s_to_float32(a); ++ fb = s_to_float32(b); ++ fr = float32_mul(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_fmuld(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb, fr; ++ ++ fa = (float64)a; ++ fb = (float64)b; ++ fr = float64_mul(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return (uint64_t)fr; ++} ++ ++uint64_t helper_fdivs(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float32 fa, fb, fr; ++ ++ fa = s_to_float32(a); ++ fb = s_to_float32(b); ++ fr = float32_div(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_fdivd(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb, fr; ++ ++ fa = (float64)a; ++ fb = (float64)b; ++ fr = float64_div(fa, fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return (uint64_t)fr; ++} ++ ++uint64_t helper_frecs(CPUSW64State *env, uint64_t a) ++{ ++ float32 fa, fb, fr; ++ ++ fa = s_to_float32(a); ++ fb = int64_to_float32(1, &FP_STATUS); ++ fr = float32_div(fb, fa, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_frecd(CPUSW64State *env, uint64_t a) ++{ ++ float64 fa, fb, fr; ++ ++ fa = (float64)a; ++ fb = int64_to_float64(1, &FP_STATUS); ++ fr = float64_div(fb, fa, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return (uint64_t)fr; ++} ++ ++uint64_t helper_fsqrts(CPUSW64State *env, uint64_t b) ++{ ++ float32 fb, fr; ++ fb = s_to_float32(b); ++ fr = float32_sqrt(fb, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_fsqrt(CPUSW64State *env, uint64_t b) ++{ ++ float64 fr; ++ ++ fr = float64_sqrt(b, &FP_STATUS); ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return (uint64_t)fr; ++} ++ ++ ++uint64_t helper_fmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) ++{ ++ float32 fa, fb, fc, fr; ++ fa = s_to_float32(a); ++ fb = s_to_float32(b); ++ fc = s_to_float32(c); ++ ++ fr = float32_muladd(fa, fb, fc, 0, &FP_STATUS); ++ ++ return float32_to_s(fr); ++} ++ ++uint64_t helper_fmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) ++{ ++ float64 fr; ++ ++ fr = float64_muladd(a, b, c, 0, &FP_STATUS); ++ ++ return fr; ++} ++ ++ ++uint64_t helper_fmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) ++{ ++ float32 fa, fb, fc, fr; ++ fa = s_to_float32(a); ++ fb = s_to_float32(b); ++ fc = s_to_float32(c); ++ ++ fr = float32_muladd(fa, fb, fc, 
float_muladd_negate_c, &FP_STATUS);
++
++    return float32_to_s(fr);
++}
++
++uint64_t helper_fmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
++{
++    float64 fr;
++
++    fr = float64_muladd(a, b, c, float_muladd_negate_c, &FP_STATUS);
++
++    return fr;
++}
++
++
++uint64_t helper_fnmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
++{
++    float32 fa, fb, fc, fr;
++    fa = s_to_float32(a);
++    fb = s_to_float32(b);
++    fc = s_to_float32(c);
++    int flag = float_muladd_negate_product;
++
++    fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS);
++
++    return float32_to_s(fr);
++}
++
++uint64_t helper_fnmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
++{
++    float64 fr;
++    int flag = float_muladd_negate_product;
++
++    fr = float64_muladd(a, b, c, flag, &FP_STATUS);
++
++    return fr;
++}
++
++uint64_t helper_fnmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
++{
++    float32 fa, fb, fc, fr;
++    fa = s_to_float32(a);
++    fb = s_to_float32(b);
++    fc = s_to_float32(c);
++    int flag = float_muladd_negate_product | float_muladd_negate_c;
++
++    fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS);
++
++    return float32_to_s(fr);
++}
++
++uint64_t helper_fnmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c)
++{
++    float64 fr;
++    int flag = float_muladd_negate_product | float_muladd_negate_c;
++
++    fr = float64_muladd(a, b, c, flag, &FP_STATUS);
++
++    return fr;
++}
++uint64_t helper_load_fpcr(CPUSW64State *env)
++{
++    return cpu_sw64_load_fpcr(env);
++}
++
++static void update_fpcr_status_mask(CPUSW64State *env)
++{
++    uint64_t t = 0;
++
++    /* Don't mask the inv excp:
++     * EXC_CTL1 = 1
++     * EXC_CTL1 = 0, input denormal, DNZ=0
++     * EXC_CTL1 = 0, no input denormal or DNZ=1, INVD = 0
++     */
++    if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) {
++        if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) {
++            t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV);
++        } else {
++            t |= EXC_M_INE;
++        }
++    } else {
++        /* INV and DNO mask */
++        if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO;
++        if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV;
++        if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF;
++        if (env->fpcr & FPCR_MASK(UNFD)) {
++            t |= EXC_M_UNF;
++        }
++        if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE;
++        if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE;
++    }
++
++    env->fpcr_exc_enable = t;
++}
++
++void helper_store_fpcr(CPUSW64State *env, uint64_t val)
++{
++    uint64_t fpcr = val;
++    uint8_t ret;
++
++    switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) {
++    case 0x0:
++        ret = float_round_to_zero;
++        break;
++    case 0x1:
++        ret = float_round_down;
++        break;
++    case 0x2:
++        ret = float_round_nearest_even;
++        break;
++    case 0x3:
++        ret = float_round_up;
++        break;
++    default:
++        ret = float_round_nearest_even;
++        break;
++    }
++
++    env->fpcr_round_mode = ret;
++
++    env->fp_status.float_rounding_mode = ret;
++
++    env->fpcr_flush_to_zero =
++        (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ));
++    env->fp_status.flush_to_zero = env->fpcr_flush_to_zero;
++
++    /* FIXME: Now the DNZ flag does not work in CORE3.
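++       The low two EXC_CTL bits are deliberately preserved across stores
++       below; they are updated only by helper_setfpcrx(), and only while
++       EXC_CTL_WEN is set.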
*/ ++ val &= ~0x3UL; ++ val |= env->fpcr & 0x3UL; ++ env->fpcr = val; ++ update_fpcr_status_mask(env); ++} ++ ++void helper_setfpcrx(CPUSW64State *env, uint64_t val) ++{ ++ if (env->fpcr & FPCR_MASK(EXC_CTL_WEN)) { ++ env->fpcr &= ~3UL; ++ env->fpcr |= val & 0x3; ++ update_fpcr_status_mask(env); ++ } ++} ++ ++#ifndef CONFIG_USER_ONLY ++static uint32_t soft_to_exc_type(uint64_t exc) ++{ ++ uint32_t ret = 0; ++ ++ if (unlikely(exc)) { ++ ret |= CONVERT_BIT(exc, float_flag_invalid, EXC_M_INV); ++ ret |= CONVERT_BIT(exc, float_flag_divbyzero, EXC_M_DZE); ++ ret |= CONVERT_BIT(exc, float_flag_overflow, EXC_M_OVF); ++ ret |= CONVERT_BIT(exc, float_flag_underflow, EXC_M_UNF); ++ ret |= CONVERT_BIT(exc, float_flag_inexact, EXC_M_INE); ++ } ++ ++ return ret; ++} ++static void fp_exc_raise1(CPUSW64State *env, uintptr_t retaddr, uint64_t exc, ++ uint32_t regno) ++{ ++ if (!likely(exc)) ++ return; ++ arith_excp(env, retaddr, exc, 1ull << regno); ++} ++ ++void helper_fp_exc_raise(CPUSW64State *env, uint32_t regno) ++{ ++ uint64_t exc = env->error_code; ++ uint32_t exc_type = soft_to_exc_type(exc); ++ ++ if (exc_type) { ++ exc_type &= ~(env->fpcr_exc_enable); ++ if (exc_type) fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, regno); ++ } ++} ++#endif ++ ++void helper_ieee_input(CPUSW64State *env, uint64_t val) ++{ ++#ifndef CONFIG_USER_ONLY ++ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff; ++ uint64_t frac = val & 0xfffffffffffffull; ++ ++ if (exp == 0) { ++ /* Denormals without /S raise an exception. */ ++ if (frac != 0) { ++ } ++ } else if (exp == 0x7ff) { ++ /* Infinity or NaN. */ ++ uint32_t exc_type = EXC_M_INV; ++ ++ if (exc_type) { ++ exc_type &= ~(env->fpcr_exc_enable); ++ if (exc_type) ++ fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, 32); ++ } ++ } ++#endif ++} ++ ++void helper_ieee_input_s(CPUSW64State *env, uint64_t val) ++{ ++ if (unlikely(2 * val - 1 < 0x1fffffffffffffull) && ++ !env->fp_status.flush_inputs_to_zero) { ++ } ++} ++ ++static inline float64 t_to_float64(uint64_t a) ++{ ++ /* Memory format is the same as float64 */ ++ CPU_DoubleU r; ++ r.ll = a; ++ return r.d; ++} ++ ++uint64_t helper_fcmpun(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb; ++ uint64_t ret = 0; ++ ++ fa = t_to_float64(a); ++ fb = t_to_float64(b); ++ ++ if (float64_unordered_quiet(fa, fb, &FP_STATUS)) { ++ ret = 0x4000000000000000ULL; ++ } ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return ret; ++} ++ ++uint64_t helper_fcmpeq(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb; ++ uint64_t ret = 0; ++ ++ fa = t_to_float64(a); ++ fb = t_to_float64(b); ++ ++ if (float64_eq_quiet(fa, fb, &FP_STATUS)) { ++ ret = 0x4000000000000000ULL; ++ } ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return ret; ++} ++ ++uint64_t helper_fcmple(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb; ++ uint64_t ret = 0; ++ ++ fa = t_to_float64(a); ++ fb = t_to_float64(b); ++ ++ if (float64_le_quiet(fa, fb, &FP_STATUS)) { ++ ret = 0x4000000000000000ULL; ++ } ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return ret; ++} ++ ++uint64_t helper_fcmplt(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb; ++ uint64_t ret = 0; ++ ++ fa = t_to_float64(a); ++ fb = t_to_float64(b); ++ ++ if (float64_lt_quiet(fa, fb, &FP_STATUS)) { ++ ret = 0x4000000000000000ULL; ++ } ++ env->error_code = soft_to_errcode_exc(env); ++ ++ return ret; ++} ++ ++uint64_t helper_fcmpge(CPUSW64State *env, uint64_t a, uint64_t b) ++{ ++ float64 fa, fb; ++ uint64_t ret = 0; ++ ++ fa = 
t_to_float64(a);
++    fb = t_to_float64(b);
++
++    if (float64_le_quiet(fb, fa, &FP_STATUS)) {
++        ret = 0x4000000000000000ULL;
++    }
++    env->error_code = soft_to_errcode_exc(env);
++
++    return ret;
++}
++
++uint64_t helper_fcmpgt(CPUSW64State *env, uint64_t a, uint64_t b)
++{
++    float64 fa, fb;
++    uint64_t ret = 0;
++
++    fa = t_to_float64(a);
++    fb = t_to_float64(b);
++
++    if (float64_lt_quiet(fb, fa, &FP_STATUS)) {
++        ret = 0x4000000000000000ULL;
++    }
++    env->error_code = soft_to_errcode_exc(env);
++
++    return ret;
++}
++
++uint64_t helper_fcmpge_s(CPUSW64State *env, uint64_t a, uint64_t b)
++{
++    float64 fa, fb;
++    uint64_t ret = 0;
++
++    /* The inputs are S-format; widen them to float64 for the compare. */
++    fa = float32_to_float64(s_to_float32(a), &FP_STATUS);
++    fb = float32_to_float64(s_to_float32(b), &FP_STATUS);
++
++    if (float64_le_quiet(fb, fa, &FP_STATUS)) {
++        ret = 0x4000000000000000ULL;
++    }
++    env->error_code = soft_to_errcode_exc(env);
++
++    return ret;
++}
++
++uint64_t helper_fcmple_s(CPUSW64State *env, uint64_t a, uint64_t b)
++{
++    float64 fa, fb;
++    uint64_t ret = 0;
++
++    /* The inputs are S-format; widen them to float64 for the compare. */
++    fa = float32_to_float64(s_to_float32(a), &FP_STATUS);
++    fb = float32_to_float64(s_to_float32(b), &FP_STATUS);
++
++    if (float64_le_quiet(fa, fb, &FP_STATUS)) {
++        ret = 0x4000000000000000ULL;
++    }
++    env->error_code = soft_to_errcode_exc(env);
++
++    return ret;
++}
++
++void helper_vfcvtsh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
++                    uint64_t rd)
++{
++    uint64_t temp = 0;
++    int i;
++    for (i = 0; i < 4; i++) {
++        temp |= (uint64_t)float32_to_float16(s_to_float32(env->fr[ra + i * 32]),
++                                             1, &FP_STATUS)
++                << (i * 16);
++    }
++    for (i = 0; i < 4; i++) {
++        if (i == (vc & 0x3)) {
++            env->fr[rd + i * 32] = temp;
++        } else {
++            env->fr[rd + i * 32] = env->fr[rb + i * 32];
++        }
++    }
++}
++
++void helper_vfcvths(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc,
++                    uint64_t rd)
++{
++    uint64_t temp;
++    int i;
++
++    temp = env->fr[ra + 32 * (vc & 0x3)];
++    for (i = 0; i < 4; i++) {
++        env->fr[rd + i * 32] = float32_to_s(
++            float16_to_float32((temp >> (i * 16)) & 0xffffUL, 1, &FP_STATUS));
++    }
++}
+diff --git a/target/sw64/gdbstub.c b/target/sw64/gdbstub.c
+new file mode 100644
+index 0000000000..87d37fa377
+--- /dev/null
++++ b/target/sw64/gdbstub.c
+@@ -0,0 +1,55 @@
++/*
++ * SW64 gdb server stub
++ *
++ * Copyright (c) 2022 Lu Feifei
++ *
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
++ */
++#include "qemu/osdep.h"
++#include "cpu.h"
++#include "gdbstub/helpers.h"
++
++int sw64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
++{
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++
++    if (n < 31) {
++        return gdb_get_regl(mem_buf, env->ir[n]);
++    } else if (n == 31) {
++        return gdb_get_regl(mem_buf, 0);
++    } else if (n == 64) {
++        return gdb_get_regl(mem_buf, env->pc);
++    }
++    return 0;
++}
++
++int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
++{
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++
++    if (n == 31) {
++        /* discard writes to r31 */
++        return sizeof(target_ulong);
++    } else if (n < 31) {
++        env->ir[n] = ldtul_p(mem_buf);
++        return sizeof(target_ulong);
++    } else if (n == 64) {
++        env->pc = ldtul_p(mem_buf);
++        return sizeof(target_ulong);
++    }
++
++    return 0;
++}
+diff --git a/target/sw64/helper.c b/target/sw64/helper.c
+new file mode 100644
+index 0000000000..35baa7e5d3
+--- /dev/null
++++ b/target/sw64/helper.c
+@@ -0,0 +1,520 @@
++/*
++ * This library is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU Lesser General Public
++ * License as published by the Free Software Foundation; either
++ * version 2 of the License, or (at your option) any later version.
++ *
++ * This library is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * Lesser General Public License for more details.
++ *
++ * You should have received a copy of the GNU Lesser General Public
++ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
++ */
++
++#include "qemu/osdep.h"
++#include "qemu/timer.h"
++
++#include "cpu.h"
++#include "exec/exec-all.h"
++#include "exec/tb-flush.h"
++#include "fpu/softfloat.h"
++#include "exec/helper-proto.h"
++#include "hw/core/cpu.h"
++#include "exec/memattrs.h"
++
++#ifndef CONFIG_USER_ONLY
++static target_ulong ldq_phys_clear(CPUState *cs, target_ulong phys)
++{
++    return ldq_phys(cs->as, phys & ~(3UL));
++}
++
++static int get_sw64_physical_address(CPUSW64State *env, target_ulong addr,
++                                     int prot_need, int mmu_idx,
++                                     target_ulong *pphys, int *pprot,
++                                     target_ulong *page_size)
++{
++    CPUState *cs = CPU(sw64_env_get_cpu(env));
++    target_ulong phys = 0;
++    int prot = 0;
++    int ret = MM_K_ACV;
++    target_ulong page_offset = 0;
++    target_ulong L1pte, L2pte, L3pte, pte;
++    target_ulong pt = 0, index = 0, pte_pfn_s = 0;
++
++    if (((addr >> 28) & 0xffffffff8) == 0xffffffff8) {
++        phys = (~(0xffffffff80000000)) & addr;
++        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
++        ret = -1;
++        goto exit;
++    } else if (((addr >> 32) & 0xfffff000) == 0xfffff000) {
++        goto do_pgmiss;
++    } else if (((addr >> 52) & 0xfff) == 0xfff) {
++        phys = (~(0xfff0000000000000)) & addr;
++        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
++        ret = -1;
++        goto exit;
++    }
++do_pgmiss:
++    if (test_feature(env, SW64_FEATURE_CORE3)) {
++        pte_pfn_s = 28;
++        pt = env->csr[C3_PTBR];
++    } else if (test_feature(env, SW64_FEATURE_CORE4)) {
++        pte_pfn_s = 24;
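++        /*
++         * On core4, VA bit 52 selects the system (1) or user (0)
++         * page-table root.  The walk below then resolves four
++         * TARGET_LEVEL_BITS-wide levels, so with 8KiB pages it covers
++         * 13 + 4 * 10 = 53 significant VA bits.
++         */
++        pt = (((addr>>52)&0x1) == 1) ?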
env->csr[C4_PTBR_SYS] : env->csr[C4_PTBR_USR]; ++ } ++ index = (addr >> (TARGET_PAGE_BITS + 3 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); ++ L1pte = ldq_phys_clear(cs, pt + index * 8) & PTE_MASK; ++ if ((L1pte & PTE_VALID) == 0) { ++ ret = MM_K_TNV; ++ goto exit; ++ } ++ if (((L1pte >> 1) & 1) && prot_need == 0) { ++ ret = MM_K_FOR; ++ goto exit; ++ } ++ if (((L1pte >> 2) & 1) && prot_need == 1) { ++ ret = MM_K_FOW; ++ goto exit; ++ } ++ pt = L1pte >> pte_pfn_s << TARGET_PAGE_BITS; ++ ++ index = (addr >> (TARGET_PAGE_BITS + 2 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); ++ L2pte = ldq_phys_clear(cs, pt + index * 8) & PTE_MASK; ++ ++ if ((L2pte & PTE_VALID) == 0) { ++ ret = MM_K_TNV; ++ goto exit; ++ } ++ if (((L2pte >> 1) & 1) && prot_need == 0) { ++ ret = MM_K_FOR; ++ goto exit; ++ } ++ if (((L2pte >> 2) & 1) && prot_need == 1) { ++ ret = MM_K_FOW; ++ goto exit; ++ } ++ ++ pt = L2pte >> pte_pfn_s << TARGET_PAGE_BITS; ++ ++ index = (addr >> (TARGET_PAGE_BITS + 1 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); ++ L3pte = ldq_phys_clear(cs, pt + index * 8) & PTE_MASK; ++ ++ if ((L3pte & PTE_VALID) == 0) { ++ ret = MM_K_TNV; ++ goto exit; ++ } ++ if (((L3pte >> 1) & 1) && prot_need == 0) { ++ ret = MM_K_FOR; ++ goto exit; ++ } ++ if (((L3pte >> 2) & 1) && prot_need == 1) { ++ ret = MM_K_FOW; ++ goto exit; ++ } ++ ++ pt = L3pte >> pte_pfn_s << TARGET_PAGE_BITS; ++ ++ index = (addr >> TARGET_PAGE_BITS) & ((1 << TARGET_LEVEL_BITS)-1); ++ ++ target_ulong page_granularity = (L3pte >> 5) & 0x3; ++ if (page_granularity != 0) { ++ pte = L3pte; ++ *page_size = 1 << 23; ++ page_offset = addr & 0x7fe000; ++ } else { ++ *page_size = TARGET_PAGE_SIZE; ++ page_offset = 0; ++ pte = ldq_phys_clear(cs, pt + index * 8) & PTE_MASK; ++ if ((pte & PTE_VALID) == 0) { ++ ret = MM_K_TNV; ++ goto exit; ++ } ++ } ++#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4 ++#error page bits out of date ++#endif ++ ++ /* Check access violations. */ ++ if ((pte & PTE_FOR) == 0) { ++ prot |= PAGE_READ | PAGE_EXEC; ++ } ++ if ((pte & PTE_FOW) == 0) { ++ prot |= PAGE_WRITE; ++ } ++ ++ /* Check fault-on-operation violations. */ ++ prot &= ~(pte >> 1); ++ ++ phys = (pte >> pte_pfn_s << TARGET_PAGE_BITS) + page_offset; ++ ++ if (unlikely((prot & prot_need) == 0)) { ++ ret = (prot_need & PAGE_EXEC ++ ? MM_K_FOE ++ : prot_need & PAGE_WRITE ++ ? MM_K_FOW ++ : prot_need & PAGE_READ ? 
MM_K_FOR : -1); ++ goto exit; ++ } ++ ++ ret = -1; ++exit: ++ *pphys = phys; ++ *pprot = prot; ++ return ret; ++} ++ ++bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ++ MMUAccessType access_type, int mmu_idx, ++ bool probe, uintptr_t retaddr) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ target_ulong phys; ++ int prot, fail, DVA = 0; ++ target_ulong page_size = TARGET_PAGE_SIZE; ++ ++ if (test_feature(env, SW64_FEATURE_CORE3)) { ++ DVA = C3_DVA; ++ } else if (test_feature(env, SW64_FEATURE_CORE4)){ ++ DVA = C4_DVA; ++ } ++ if (mmu_idx == MMU_PHYS_IDX) { ++ phys = address; ++ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; ++ fail = 0; ++ if ((address >> 52) & 1) goto do_pgmiss; ++ goto done; ++ } ++ ++do_pgmiss: ++ fail = get_sw64_physical_address(env, address, 1 << access_type, mmu_idx, ++ &phys, &prot, &page_size); ++ if (unlikely(fail >= 0)) { ++ if (probe) { ++ return false; ++ } ++ cs->exception_index = EXCP_MMFAULT; ++ if (access_type == 2) { ++ env->csr[DS_STAT] = fail; ++ env->csr[DVA] = address & ~(3UL); ++ } else { ++ env->csr[DS_STAT] = fail | (((unsigned long)access_type + 1) << 3); ++ env->csr[DVA] = address; ++ } ++ env->error_code = access_type; ++ cpu_loop_exit_restore(cs, retaddr); ++ } ++done: ++ tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, prot, ++ mmu_idx, page_size); ++ return true; ++} ++ ++hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ target_ulong phys; ++ target_ulong page_size = TARGET_PAGE_SIZE; ++ int prot, fail; ++ int mmu_index = cpu_mmu_index(env, 0); ++ if (mmu_index == MMU_PHYS_IDX) { ++ phys = addr; ++ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; ++ fail = -1; ++ if ((addr >> 52) & 1) goto do_pgmiss; ++ goto done; ++ } ++do_pgmiss: ++ fail = get_sw64_physical_address(&cpu->env, addr, 1, mmu_index, ++ &phys, &prot, &page_size); ++done: ++ return (fail >= 0 ? 
-1 : phys);
++}
++
++#define a0(func) (((func & 0xFF) >> 6) & 0x1)
++#define a1(func) ((((func & 0xFF) >> 6) & 0x2) >> 1)
++
++#define t(func) ((a0(func) ^ a1(func)) & 0x1)
++#define b0(func) (t(func) | a0(func))
++#define b1(func) ((~t(func) & 1) | a1(func))
++
++#define START_SYS_CALL_ADDR(func) \
++    (b1(func) << 14) | (b0(func) << 13) | ((func & 0x3F) << 7)
++
++void sw64_cpu_do_interrupt(CPUState *cs)
++{
++    int i = cs->exception_index;
++
++    cs->exception_index = -1;
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++    switch (i) {
++    case EXCP_OPCDEC:
++        cpu_abort(cs, "ILLEGAL INSN");
++        break;
++    case EXCP_CALL_SYS:
++        i = START_SYS_CALL_ADDR(env->error_code);
++        if (i <= 0x3F) {
++            i += 0x4000;
++        } else if (i >= 0x40 && i <= 0x7F) {
++            i += 0x2000;
++        } else if (i >= 0x80 && i <= 0x8F) {
++            i += 0x6000;
++        }
++        break;
++    case EXCP_ARITH:
++        env->error_code = -1;
++        env->csr[EXC_PC] = env->pc - 4;
++        env->csr[EXC_SUM] = 1;
++        i = 0xB80;
++        break;
++    case EXCP_UNALIGN:
++        i = 0xB00;
++        env->csr[EXC_PC] = env->pc - 4;
++        break;
++    case EXCP_CLK_INTERRUPT:
++    case EXCP_DEV_INTERRUPT:
++    case EXCP_IINM:
++        if (test_feature(env, SW64_FEATURE_CORE3))
++            i = 0xE80; /* core3 */
++        else if (test_feature(env, SW64_FEATURE_CORE4))
++            i = 0xE00; /* core4 */
++        break;
++    case EXCP_MMFAULT:
++        i = 0x980;
++        env->csr[EXC_PC] = env->pc;
++        break;
++    case EXCP_II0:
++        env->csr[EXC_PC] = env->pc;
++        i = 0xE00;
++        break;
++    default:
++        break;
++    }
++    env->pc = env->hm_entry + i;
++    env->flags = ENV_FLAG_HM_MODE;
++}
++
++bool sw64_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
++{
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++    int idx = -1, INT_STAT = 0, IER = 0, PCI_INT = 0;
++    /* We never take interrupts while in hmcode (HM) mode.
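++       hmcode runs with interrupts implicitly disabled; they are
++       re-examined once ENV_FLAG_HM_MODE is cleared on the way back to
++       kernel or user context (see helper_cpustate_update).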
*/ ++ if (env->flags & ENV_FLAG_HM_MODE) ++ return false; ++ ++ if (test_feature(env, SW64_FEATURE_CORE3)) { ++ IER = C3_IER; ++ INT_STAT = C3_INT_STAT; ++ PCI_INT = INT_PCI_INT; ++ } else if (test_feature(env, SW64_FEATURE_CORE4)) { ++ IER = INT_EN; ++ INT_STAT = C4_INT_STAT; ++ PCI_INT = PCIE_INT; ++ } ++ ++ if (interrupt_request & CPU_INTERRUPT_IINM) { ++ idx = EXCP_IINM; ++ env->csr[IINM] = 1UL << 1; ++ cs->interrupt_request &= ~CPU_INTERRUPT_IINM; ++ goto done; ++ } ++ ++ if (interrupt_request & CPU_INTERRUPT_II0) { ++ idx = EXCP_II0; ++ env->csr[INT_STAT] |= 1UL << 6; ++ if ((env->csr[IER] & env->csr[INT_STAT]) == 0) ++ return false; ++ cs->interrupt_request &= ~CPU_INTERRUPT_II0; ++ goto done; ++ } ++ ++ if (interrupt_request & CPU_INTERRUPT_TIMER) { ++ idx = EXCP_CLK_INTERRUPT; ++ env->csr[INT_STAT] |= 1UL << 4; ++ if ((env->csr[IER] & env->csr[INT_STAT]) == 0) ++ return false; ++ cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; ++ goto done; ++ } ++ ++ if (interrupt_request & CPU_INTERRUPT_HARD) { ++ idx = EXCP_DEV_INTERRUPT; ++ env->csr[INT_STAT] |= 1UL << 12; ++ if ((env->csr[IER] & env->csr[INT_STAT]) == 0) ++ return false; ++ cs->interrupt_request &= ~CPU_INTERRUPT_HARD; ++ goto done; ++ } ++ ++ if (interrupt_request & CPU_INTERRUPT_PCIE) { ++ idx = EXCP_DEV_INTERRUPT; ++ env->csr[INT_STAT] |= 1UL << 1; ++ env->csr[PCI_INT] = 0x10; ++ if ((env->csr[IER] & env->csr[INT_STAT]) == 0) ++ return false; ++ cs->interrupt_request &= ~CPU_INTERRUPT_PCIE; ++ goto done; ++ } ++ ++done: ++ if (idx >= 0) { ++ cs->exception_index = idx; ++ env->error_code = 0; ++ env->csr[EXC_PC] = env->pc; ++ sw64_cpu_do_interrupt(cs); ++ return true; ++ } ++ ++ return false; ++} ++#endif ++ ++static void update_fpcr_status_mask(CPUSW64State* env) { ++ uint64_t t = 0; ++ ++ /* Don't mask the inv excp: ++ * EXC_CTL1 = 1 ++ * EXC_CTL1 = 0, input denormal, DNZ=0 ++ * EXC_CTL1 = 0, no input denormal or DNZ=1, INVD = 0 ++ */ ++ if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) { ++ if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) { ++ t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV); ++ } else { ++ t |= EXC_M_INE; ++ } ++ } else { ++ /* INV and DNO mask */ ++ if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO; ++ if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV; ++ if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF; ++ if (env->fpcr & FPCR_MASK(UNFD)) { ++ t |= EXC_M_UNF; ++ } ++ if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE; ++ if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE; ++ } ++ ++ env->fpcr_exc_enable = t; ++} ++ ++void cpu_sw64_store_fpcr(CPUSW64State* env, uint64_t val) { ++ uint64_t fpcr = val; ++ uint8_t ret; ++ ++ switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) { ++ case 0x0: ++ ret = float_round_to_zero; ++ break; ++ case 0x1: ++ ret = float_round_down; ++ break; ++ case 0x2: ++ ret = float_round_nearest_even; ++ break; ++ case 0x3: ++ ret = float_round_up; ++ break; ++ default: ++ ret = float_round_nearest_even; ++ break; ++ } ++ ++ env->fpcr_round_mode = ret; ++ env->fp_status.float_rounding_mode = ret; ++ ++ env->fpcr_flush_to_zero = ++ (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ)); ++ env->fp_status.flush_to_zero = env->fpcr_flush_to_zero; ++ ++ val &= ~0x3UL; ++ val |= env->fpcr & 0x3UL; ++ env->fpcr = val; ++ update_fpcr_status_mask(env); ++} ++ ++uint64_t helper_read_csr(CPUSW64State *env, uint64_t index) ++{ ++ if ((index == C3_PRI_BASE) || (index == C4_PRI_BASE)) ++ env->csr[index] = 0x10000; ++#ifndef CONFIG_USER_ONLY ++ if (index == SHTCLOCK) ++ env->csr[index] = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 40; 
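++    /* One SHTCLOCK tick every 40 ns, i.e. a 25 MHz count, matching
++     * MCU_CLOCK (25000000) in cpu.h.
++     */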
++#endif ++ return env->csr[index]; ++} ++ ++uint64_t helper_rtc(void) ++{ ++#ifndef CONFIG_USER_ONLY ++ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * CPUFREQ_SCALE; ++#else ++ return 0; ++#endif ++} ++ ++void helper_write_csr(CPUSW64State *env, uint64_t index, uint64_t va) ++{ ++ env->csr[index] = va; ++#ifndef CONFIG_USER_ONLY ++ CPUState *cs = &(sw64_env_get_cpu(env)->parent_obj); ++ SW64CPU *cpu = SW64_CPU(cs); ++ if ((index == DTB_IA) || (index == DTB_IV) || (index == DTB_IVP) || ++ (index == DTB_IU) || (index == DTB_IS) || (index == ITB_IA) || ++ (index == ITB_IV) || (index == ITB_IVP) || (index == ITB_IU) || ++ (index == ITB_IS) || (index == C3_PTBR) || (index == C4_PTBR_SYS) || ++ (index == C4_PTBR_USR) || (index == C3_UPCR) || (index == C3_DTB_PCR) || ++ (index == C4_UPCR) || (index == C4_DTB_UPCR)) { ++ tlb_flush(cs); ++ } ++//core3 ++ if (index == C3_INT_CLR) { ++ env->csr[C3_INT_STAT] &= ~va; ++ } ++ if ((index == C3_TIMER_CTL) && (va == 1)) { ++ timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + env->csr[C3_TIMER_TH]); ++ } ++ ++//core4 ++ if (index == C4_INT_CLR) { ++ env->csr[C4_INT_STAT] &= ~va; ++ } ++ if (index == IINM) ++ env->csr[index] &= ~va; ++ if ((index == C4_TIMER_CTL) && (va == 1)) { ++ timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + env->csr[C4_TIMER_TH]); ++ } ++ ++#endif ++} ++ ++uint64_t cpu_sw64_load_fpcr(CPUSW64State *env) ++{ ++ return (uint64_t)env->fpcr; ++} ++ ++void helper_tb_flush(CPUSW64State *env) ++{ ++ tb_flush(CPU(sw64_env_get_cpu(env))); ++} ++ ++void helper_cpustate_update(CPUSW64State *env, uint64_t pc) ++{ ++ switch (pc & 0x3) { ++ case 0x00: ++ env->flags = ENV_FLAG_HM_MODE; ++ break; ++ case 0x01: ++ env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE); ++ break; ++ case 0x02: ++ env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE); ++ break; ++ case 0x03: ++ env->flags = ENV_FLAG_PS_USER; ++ } ++} +diff --git a/target/sw64/helper.h b/target/sw64/helper.h +new file mode 100644 +index 0000000000..a72f2ca796 +--- /dev/null ++++ b/target/sw64/helper.h +@@ -0,0 +1,123 @@ ++DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64) ++DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64) ++DEF_HELPER_FLAGS_2(cmpgeb, TCG_CALL_NO_RWG_SE, i64, i64, i64) ++DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64) ++DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32) ++DEF_HELPER_FLAGS_2(fcvtls, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fcvtld, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_3(fcvtdl, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_2(fcvtdl_dyn, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_3(fris, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(frid, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_2(fcvtsd, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fcvtds, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fcvtwl, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fcvtlw, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_5(vfcvtsh, 0, void, env, i64, i64, i64, i64) ++DEF_HELPER_FLAGS_5(vfcvths, 0, void, env, i64, i64, i64, i64) ++DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, i64, env, i64, i64) 
++DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_2(frecs, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(frecd, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_2(fsqrt, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_4(fmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fnmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fnmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fnmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(fnmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) ++DEF_HELPER_FLAGS_0(rtc, TCG_CALL_NO_RWG, i64) ++DEF_HELPER_FLAGS_1(load_fpcr, 0, i64, env) ++DEF_HELPER_FLAGS_2(store_fpcr, 0, void, env, i64) ++DEF_HELPER_FLAGS_2(setfpcrx, 0, void, env, i64) ++DEF_HELPER_FLAGS_2(ieee_input, 0, void, env, i64) ++DEF_HELPER_FLAGS_2(ieee_input_s, 0, void, env, i64) ++DEF_HELPER_FLAGS_2(read_csr, TCG_CALL_NO_RWG, i64, env, i64) ++DEF_HELPER_FLAGS_3(write_csr, 0, void, env, i64, i64) ++DEF_HELPER_FLAGS_2(cpustate_update, 0, void, env, i64) ++DEF_HELPER_FLAGS_3(trace_mem, 0, void, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmpun, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmple, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmplt, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmpge, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmpgt, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmpge_s, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(fcmple_s, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_4(srlow, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(sllow, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vlogzz, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vconw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vcond, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vshfw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_2(ctlzow, 0, i64, env, i64) ++DEF_HELPER_FLAGS_4(vucaddw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucaddwi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubwi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucaddh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucaddhi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubhi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucaddb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucaddbi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vucsubbi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_3(vstw, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(vsts, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_3(vstd, TCG_CALL_NO_RWG, i64, env, i64, i64) ++DEF_HELPER_FLAGS_2(v_print, 0, void, env, i64) ++DEF_HELPER_FLAGS_1(tb_flush, 0, void, env) ++DEF_HELPER_FLAGS_4(vmaxb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vminb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vmaxh, 0, void, env, i64, i64, i64) 
++DEF_HELPER_FLAGS_4(vminh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vmaxw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vminw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(sraow, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vsm4r, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vsm4key, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vsm3msw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vcmpueqb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vcmpugtb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vcmpueqbi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vcmpugtbi, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vumaxb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vuminb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vumaxh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vuminh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vumaxw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vuminw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_5(vinsb, 0, void, env, i64, i64, i64, i64) ++DEF_HELPER_FLAGS_5(vinsh, 0, void, env, i64, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vinsectlh, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vinsectlw, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vinsectlb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_5(vshfq, 0, void, env, i64, i64, i64, i64) ++DEF_HELPER_FLAGS_4(vshfqb, 0, void, env, i64, i64, i64) ++DEF_HELPER_FLAGS_5(vsm3r, 0, void, env, i64, i64, i64, i64) ++ ++#ifndef CONFIG_USER_ONLY ++DEF_HELPER_FLAGS_2(fp_exc_raise, 0, void, env, i32) ++DEF_HELPER_FLAGS_2(pri_ldw, 0, i64, env, i64) ++DEF_HELPER_FLAGS_3(pri_stw, 0, void, env, i64, i64) ++DEF_HELPER_FLAGS_2(pri_ldl, 0, i64, env, i64) ++DEF_HELPER_FLAGS_3(pri_stl, 0, void, env, i64, i64) ++#endif ++ ++DEF_HELPER_3(excp, noreturn, env, int, int) ++#ifndef CONFIG_USER_ONLY ++/* Scale factor for core3 cpu freq, ie number of ns per tick. */ ++#define CPUFREQ_SCALE 3 ++#endif ++ ++/* SLAVE FLOAT HELPER. 
*/ +diff --git a/target/sw64/int_helper.c b/target/sw64/int_helper.c +new file mode 100644 +index 0000000000..131182585a +--- /dev/null ++++ b/target/sw64/int_helper.c +@@ -0,0 +1,118 @@ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "exec/exec-all.h" ++#include "exec/helper-proto.h" ++#include "qemu/host-utils.h" ++#include "exec/memattrs.h" ++ ++uint64_t helper_zapnot(uint64_t val, uint64_t mskb) ++{ ++ uint64_t mask; ++ ++ mask = -(mskb & 0x01) & 0x00000000000000ffull; ++ mask |= -(mskb & 0x02) & 0x000000000000ff00ull; ++ mask |= -(mskb & 0x04) & 0x0000000000ff0000ull; ++ mask |= -(mskb & 0x08) & 0x00000000ff000000ull; ++ mask |= -(mskb & 0x10) & 0x000000ff00000000ull; ++ mask |= -(mskb & 0x20) & 0x0000ff0000000000ull; ++ mask |= -(mskb & 0x40) & 0x00ff000000000000ull; ++ mask |= -(mskb & 0x80) & 0xff00000000000000ull; ++ ++ return val & mask; ++} ++ ++uint64_t helper_zap(uint64_t val, uint64_t mask) ++{ ++ return helper_zapnot(val, ~mask); ++} ++ ++uint64_t helper_cmpgeb(uint64_t va, uint64_t vb) ++{ ++ int i; ++ uint64_t ret = 0; ++ uint64_t tmp; ++ for (i = 0; i < 64; i += 8) { ++ tmp = ((va >> i) & 0xff) + (~(vb >> i) & 0xff) + 1; ++ ret |= (tmp >> 8) << (i / 8); ++ } ++ return ret; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static inline MemTxAttrs cpu_get_mem_attrs(CPUSW64State *env) ++{ ++ return ((MemTxAttrs) { .secure = 1 }); ++} ++ ++static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) ++{ ++ return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); ++} ++ ++uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ int32_t ret; ++ CPUSW64State *env = &cpu->env; ++ MemTxAttrs attrs = cpu_get_mem_attrs(env); ++ AddressSpace *as = cpu_addressspace(cs, attrs); ++ ++ ret = (int32_t)address_space_ldl(as, addr, attrs, NULL); ++ ++ return (uint64_t)(int64_t)ret; ++} ++ ++void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ MemTxAttrs attrs = cpu_get_mem_attrs(env); ++ AddressSpace *as = cpu_addressspace(cs, attrs); ++ ++ address_space_stl(as, addr, (uint32_t)val, attrs, NULL); ++} ++ ++uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ MemTxAttrs attrs = cpu_get_mem_attrs(env); ++ AddressSpace *as = cpu_addressspace(cs, attrs); ++ ++ return address_space_ldq(as, addr, attrs, NULL); ++} ++ ++void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ MemTxAttrs attrs = cpu_get_mem_attrs(env); ++ AddressSpace *as = cpu_addressspace(cs, attrs); ++ ++ address_space_stq(as, addr, val, attrs, NULL); ++} ++ ++uint64_t helper_pri_ldw(CPUSW64State *env, uint64_t hwaddr) ++{ ++ CPUState *cs = CPU(sw64_env_get_cpu(env)); ++ return sw64_ldw_phys(cs, hwaddr); ++} ++ ++void helper_pri_stw(CPUSW64State *env, uint64_t val, uint64_t hwaddr) ++{ ++ CPUState *cs = CPU(sw64_env_get_cpu(env)); ++ sw64_stw_phys(cs, hwaddr, val); ++} ++ ++uint64_t helper_pri_ldl(CPUSW64State *env, uint64_t hwaddr) ++{ ++ CPUState *cs = CPU(sw64_env_get_cpu(env)); ++ return sw64_ldl_phys(cs, hwaddr); ++} ++ ++void helper_pri_stl(CPUSW64State *env, uint64_t val, uint64_t hwaddr) ++{ ++ CPUState *cs = CPU(sw64_env_get_cpu(env)); ++ sw64_stl_phys(cs, hwaddr, val); ++} ++#endif +diff --git a/target/sw64/kvm.c b/target/sw64/kvm.c +new file mode 100644 +index 0000000000..f5c9c83cd9 +--- /dev/null ++++ b/target/sw64/kvm.c +@@ -0,0 +1,399 @@ ++/* 
++ * SW64 implementation of KVM hooks
++ *
++ * Copyright (c) 2018 Lin Hainan
++ *
++ * This work is licensed under the terms of the GNU GPL, version 2 or later.
++ * See the COPYING file in the top-level directory.
++ *
++ */
++
++#include "qemu/osdep.h"
++#include <sys/ioctl.h>
++
++#include <linux/kvm.h>
++
++#include "qemu/timer.h"
++#include "qemu/error-report.h"
++#include "sysemu/sysemu.h"
++#include "sysemu/kvm.h"
++#include "kvm_sw64.h"
++#include "cpu.h"
++#include "exec/memattrs.h"
++#include "exec/address-spaces.h"
++#include "hw/boards.h"
++#include "hw/sw64/core.h"
++#include "qemu/log.h"
++
++unsigned long core3_init_pc = 0xfff0000000011100;
++unsigned long core4_init_pc = 0xfff0000000011002;
++
++const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
++    KVM_CAP_LAST_INFO
++};
++/* 50000 jumps to the bootloader, while 2f00000 jumps to the BIOS. */
++int kvm_sw64_vcpu_init(CPUState *cs)
++{
++    struct kvm_regs *regs;
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = cpu_env(cs);
++    regs = (struct kvm_regs *)cpu->k_regs;
++    if (test_feature(env, SW64_FEATURE_CORE3))
++        regs->c3_regs.pc = core3_init_pc;
++    else if (test_feature(env, SW64_FEATURE_CORE4))
++        regs->c4_regs.pc = core4_init_pc;
++    return kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);
++}
++
++static void kvm_sw64_host_cpu_class_init(ObjectClass *oc, void *data)
++{
++}
++
++static void kvm_sw64_host_cpu_initfn(Object *obj)
++{
++}
++
++
++static const TypeInfo host_sw64_cpu_type_info = {
++    .name = TYPE_SW64_HOST_CPU,
++    .parent = TYPE_SW64_CPU,
++    .instance_init = kvm_sw64_host_cpu_initfn,
++    .class_init = kvm_sw64_host_cpu_class_init,
++    .class_size = sizeof(SW64HostCPUClass),
++};
++
++int kvm_arch_init(MachineState *ms, KVMState *s)
++{
++    kvm_async_interrupts_allowed = true;
++
++    type_register_static(&host_sw64_cpu_type_info);
++
++    return 0;
++}
++
++/* 50000 jumps to the bootloader, while 2f00000 jumps to the BIOS. */
++void kvm_sw64_reset_vcpu(SW64CPU *cpu)
++{
++    struct kvm_regs *regs;
++    CPUState *cs = CPU(cpu);
++    CPUSW64State *env = cpu_env(cs);
++    int ret;
++    regs = (struct kvm_regs *)cpu->k_regs;
++    if (test_feature(env, SW64_FEATURE_CORE3))
++        regs->c3_regs.pc = core3_init_pc;
++    else if (test_feature(env, SW64_FEATURE_CORE4)) {
++        if (kvm_arch_vcpu_id(cs) == 0) {
++            regs->c4_regs.r[16] = 0xDEED2024;
++            regs->c4_regs.r[17] = dtb_start_c4;
++        }
++        regs->c4_regs.pc = core4_init_pc;
++    }
++    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);
++    if (ret < 0) {
++        fprintf(stderr, "kvm_sw64_vcpu_init failed: %s\n", strerror(-ret));
++        abort();
++    }
++
++    ret = kvm_vcpu_ioctl(cs, KVM_SW64_VCPU_INIT, NULL);
++    if (ret < 0) {
++        fprintf(stderr, "kvm_sw64_vcpu_init failed: %s\n", strerror(-ret));
++        abort();
++    }
++}
++
++unsigned long kvm_arch_vcpu_id(CPUState *cpu)
++{
++    return cpu->cpu_index;
++}
++
++#include
++int kvm_arch_init_vcpu(CPUState *cs)
++{
++    int ret;
++    ret = kvm_sw64_vcpu_init(cs);
++    if (ret) {
++        return ret;
++    }
++    return 0;
++}
++
++int kvm_arch_destroy_vcpu(CPUState *cs)
++{
++    return 0;
++}
++
++int kvm_arch_get_registers(CPUState *cs)
++{
++    int ret, i;
++    SW64CPU *cpu = SW64_CPU(cs);
++    CPUSW64State *env = &cpu->env;
++    struct kvm_regs *regs;
++    regs = (struct kvm_regs *)cpu->k_regs;
++    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, regs);
++    if (ret < 0)
++        return ret;
++
++    ret = kvm_vcpu_ioctl(cs, KVM_SW64_GET_VCB, &cpu->k_vcb);
++    if (ret < 0)
++        return ret;
++    if (test_feature(env, SW64_FEATURE_CORE3)) {
++        for (i = 0; i < 16; i++)
++            env->ir[i] = regs->c3_regs.r[i];
++
++        for (i = 19; i < 29; i++)
++            env->ir[i] = regs->c3_regs.r[i-3];
++
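The asymmetric copy loops here (and their mirror images in kvm_arch_put_registers() below) imply the layout of the CORE3 struct kvm_regs; the following correspondence is inferred from the code, not quoted from a kernel header:

    /* Inferred CORE3 mapping between kvm_regs and env:
     *   r[0..15]        <-> env->ir[0..15]
     *   r16, r17, r18   <-> env->ir[16..18]   (dedicated fields)
     *   r[16..25]       <-> env->ir[19..28]   (hence the r[i-3] indexing)
     *   gp              <-> env->ir[29]
     * env->ir[30] (the stack pointer) is held in the VCB instead:
     * k_vcb[3] when PS selects the user stack (ps >> 3 nonzero),
     * else k_vcb[2] (kernel stack). */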
++        env->ir[16] = regs->c3_regs.r16;
++        env->ir[17] = regs->c3_regs.r17;
++        env->ir[18] = regs->c3_regs.r18;
++
++        env->ir[29] = regs->c3_regs.gp;
++        env->pc = regs->c3_regs.pc;
++        if (regs->c3_regs.ps >> 3)
++            env->ir[30] = cpu->k_vcb[3]; /* usp */
++        else
++            env->ir[30] = cpu->k_vcb[2]; /* ksp */
++    } else if (test_feature(env, SW64_FEATURE_CORE4)) {
++        for (i = 0; i < 30; i++) {
++            env->ir[i] = regs->c4_regs.r[i];
++        }
++        env->ir[30] = cpu->k_vcb[44];
++        env->pc = regs->c4_regs.pc;
++    }
++    return 0;
++}
++
++int kvm_arch_put_registers(CPUState *cs, int level)
++{
++    int ret;
++    SW64CPU *cpu = SW64_CPU(cs);
++    struct kvm_regs *regs;
++    CPUSW64State *env = &cpu->env;
++
++    regs = (struct kvm_regs *)cpu->k_regs;
++    if (level == KVM_PUT_RUNTIME_STATE) {
++        int i;
++        if (test_feature(env, SW64_FEATURE_CORE3)) {
++            for (i = 0; i < 16; i++)
++                regs->c3_regs.r[i] = env->ir[i];
++
++            for (i = 19; i < 29; i++)
++                regs->c3_regs.r[i-3] = env->ir[i];
++
++            regs->c3_regs.r16 = env->ir[16];
++            regs->c3_regs.r17 = env->ir[17];
++            regs->c3_regs.r18 = env->ir[18];
++
++            regs->c3_regs.gp = env->ir[29];
++
++            if (regs->c3_regs.ps >> 3)
++                cpu->k_vcb[3] = env->ir[30]; /* usp */
++            else
++                cpu->k_vcb[2] = env->ir[30]; /* ksp */
++
++            regs->c3_regs.pc = env->pc;
++        } else if (test_feature(env, SW64_FEATURE_CORE4)) {
++            for (i = 0; i < 30; i++) {
++                regs->c4_regs.r[i] = env->ir[i];
++            }
++            cpu->k_vcb[44] = env->ir[30];
++
++            /* Used for guest kernel debugging */
++            if ((env->pc & 0x2) == 0) {
++                env->pc |= 0x2;
++            }
++            regs->c4_regs.pc = env->pc;
++        }
++    }
++
++    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);
++    if (ret < 0)
++        return ret;
++
++    cpu->k_vcb[15] = kvm_arch_vcpu_id(cs);
++    fprintf(stderr, "vcpu %lu init.\n", cpu->k_vcb[15]);
++
++    if (test_feature(env, SW64_FEATURE_CORE4)) {
++        if (level == KVM_PUT_RESET_STATE) {
++            /* Set initial value of PTBR_USR */
++            cpu->k_vcb[27] = 0x0;
++        }
++        /* Set initial value of CSR_ATC */
++        cpu->k_vcb[50] = 0x3;
++        /* Set initial value of CSR_PRI_BASE */
++        cpu->k_vcb[51] = 0x10000;
++    }
++
++    return kvm_vcpu_ioctl(cs, KVM_SW64_SET_VCB, &cpu->k_vcb);
++}
++
++static const uint32_t brk_insn = 0x00000080;
++
++int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
++{
++    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
++        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
++        return -EINVAL;
++    }
++
++    return 0;
++}
++
++int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
++{
++    static uint32_t brk;
++
++    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
++        brk != brk_insn ||
++        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
++        return -EINVAL;
++    }
++
++    return 0;
++}
++
++int kvm_arch_insert_hw_breakpoint(target_ulong addr,
++                                  target_ulong len, int type)
++{
++    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
++    return -EINVAL;
++}
++
++int kvm_arch_remove_hw_breakpoint(target_ulong addr,
++                                  target_ulong len, int type)
++{
++    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
++    return -EINVAL;
++}
++
++void kvm_arch_remove_all_hw_breakpoints(void)
++{
++    qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
++}
++
++int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
++                                int vector, PCIDevice *dev)
++{
++    return -1;
++}
++
++int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
++                             uint64_t address, uint32_t data, PCIDevice *dev)
++{
++    return 0;
++}
++
++void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
++{
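One subtlety worth flagging here: for CORE4, kvm_arch_put_registers() above tags the guest PC with bit 1 (pc |= 0x2) for guest-kernel debugging, and kvm_sw64_handle_debug() below strips the same bit so that gdb and the software-breakpoint lookup see the architectural PC. Condensed, assuming bit 1 is a pure mode tag and not part of the architectural PC:

    regs->c4_regs.pc = env->pc | 0x2;   /* vcpu entry: tag PC            */
    ...
    env->pc         &= ~0x2;            /* debug exit: untag for gdb     */
    debug_exit->epc &= ~0x2;            /* so breakpoint lookup matches  */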
++} ++ ++MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) ++{ ++ return MEMTXATTRS_UNSPECIFIED; ++} ++ ++bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) ++{ ++ SW64CPU *cpu = SW64_CPU(cs); ++ CPUSW64State *env = &cpu->env; ++ ++ /* Ensure PC is synchronised */ ++ kvm_cpu_synchronize_state(cs); ++ ++ /* For gdb correctness, remove the mode bit if it exists */ ++ if (env->pc & 0x2) { ++ env->pc &= ~0x2; ++ debug_exit->epc &= ~0x2; ++ } ++ ++ if (cs->singlestep_enabled) { ++ return true; ++ } else if (kvm_find_sw_breakpoint(cs, debug_exit->epc)) { ++ return true; ++ } else { ++ error_report("%s: unhandled debug exit (%"PRIx64", %"PRIx64")", ++ __func__, env->pc, debug_exit->epc); ++ } ++ ++ return false; ++} ++ ++int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) ++{ ++ int ret = 0; ++ ++ switch (run->exit_reason) { ++ case KVM_EXIT_DEBUG: ++ if (kvm_sw64_handle_debug(cs, &run->debug.arch)) { ++ ret = EXCP_DEBUG; ++ } /* otherwise return to guest */ ++ break; ++ default: ++ qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", ++ __func__, run->exit_reason); ++ break; ++ } ++ return ret; ++} ++ ++bool kvm_arch_stop_on_emulation_error(CPUState *cs) ++{ ++ return true; ++} ++ ++int kvm_arch_process_async_events(CPUState *cs) ++{ ++ return 0; ++} ++ ++void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) ++{ ++} ++ ++void kvm_arch_init_irq_routing(KVMState *s) ++{ ++ /* We know at this point that we're using the in-kernel ++ * irqchip, so we can use irqfds, and on x86 we know ++ * we can use msi via irqfd and GSI routing. ++ */ ++ kvm_msi_via_irqfd_allowed = true; ++ kvm_gsi_routing_allowed = true; ++} ++ ++int kvm_arch_irqchip_create(KVMState *s) ++{ ++ return 0; ++} ++ ++int kvm_arch_release_virq_post(int virq) ++{ ++ return -1; ++} ++ ++int kvm_arch_msi_data_to_gsi(uint32_t data) ++{ ++ return -1; ++} ++ ++ ++void kvm_sw64_register_slave(SW64CPU *cpu) ++{ ++ CPUState *cs = CPU(cpu); ++ ++ kvm_vcpu_ioctl(cs, KVM_SW64_USE_SLAVE, NULL); ++} ++ ++bool kvm_arch_cpu_check_are_resettable(void) ++{ ++ return true; ++} ++ ++void kvm_arch_accel_class_init(ObjectClass *oc) ++{ ++} ++ ++int kvm_arch_get_default_type(MachineState *ms) ++{ ++ return 0; ++} ++ +diff --git a/target/sw64/kvm_sw64.h b/target/sw64/kvm_sw64.h +new file mode 100644 +index 0000000000..81dd760008 +--- /dev/null ++++ b/target/sw64/kvm_sw64.h +@@ -0,0 +1,56 @@ ++/* ++ * QEMU KVM support -- SW64 specific functions. ++ * ++ * Copyright (c) 2018 Lin Hainan ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ * ++ */ ++ ++#ifndef QEMU_KVM_SW64_H ++#define QEMU_KVM_SW64_H ++ ++#include "sysemu/kvm.h" ++#include "exec/memory.h" ++#include "qemu/error-report.h" ++ ++/** ++ * kvm_sw64_vcpu_init: ++ * @cs: CPUState ++ * ++ * Initialize (or reinitialize) the VCPU by invoking the ++ * KVM_SW64_VCPU_INIT ioctl with the CPU type and feature ++ * bitmask specified in the CPUState. 
++ * ++ * Returns: 0 if success else < 0 error code ++ */ ++int kvm_sw64_vcpu_init(CPUState *cs); ++void kvm_sw64_reset_vcpu(SW64CPU *cpu); ++void kvm_sw64_register_slave(SW64CPU *cpu); ++ ++#define TYPE_SW64_HOST_CPU "host-" TYPE_SW64_CPU ++#define SW64_HOST_CPU_CLASS(klass) \ ++ OBJECT_CLASS_CHECK(SW64HostCPUClass, (klass), TYPE_SW64_HOST_CPU) ++#define SW64_HOST_CPU_GET_CLASS(obj) \ ++ OBJECT_GET_CLASS(SW64HostCPUClass, (obj), TYPE_SW64_HOST_CPU) ++ ++typedef struct SW64HostCPUClass { ++ /*< private >*/ ++ SW64CPUClass parent_class; ++ /*< public >*/ ++ ++ uint64_t features; ++ uint32_t target; ++ const char *dtb_compatible; ++} SW64HostCPUClass; ++ ++/** ++ * kvm_sw64_handle_debug: ++ * @cs: CPUState ++ * @debug_exit: debug part of the KVM exit structure ++ * ++ * Returns: TRUE if the debug exception was handled. ++ */ ++bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); ++#endif +diff --git a/target/sw64/machine.c b/target/sw64/machine.c +new file mode 100644 +index 0000000000..da22a45657 +--- /dev/null ++++ b/target/sw64/machine.c +@@ -0,0 +1,17 @@ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "migration/vmstate.h" ++#include "migration/cpu.h" ++ ++VMStateDescription vmstate_sw64_cpu = { ++ .name = "cpu", ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .fields = (VMStateField[]) { ++#ifdef CONFIG_KVM ++ VMSTATE_UINTTL_ARRAY(k_regs, SW64CPU, 164), ++ VMSTATE_UINTTL_ARRAY(k_vcb, SW64CPU, 96), ++#endif ++ VMSTATE_END_OF_LIST() ++ } ++}; +diff --git a/target/sw64/meson.build b/target/sw64/meson.build +new file mode 100644 +index 0000000000..52001da689 +--- /dev/null ++++ b/target/sw64/meson.build +@@ -0,0 +1,20 @@ ++sw64_ss = ss.source_set() ++sw64_ss.add(files( ++ 'cpu.c', ++ 'exception.c', ++ 'float_helper.c', ++ 'helper.c', ++ 'gdbstub.c', ++ 'int_helper.c', ++ 'profile.c', ++ 'simd_helper.c', ++ 'translate.c', ++)) ++ ++sw64_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) ++ ++sw64_system_ss = ss.source_set() ++sw64_system_ss.add(files('machine.c')) ++ ++target_arch += {'sw64': sw64_ss} ++target_system_arch += {'sw64': sw64_system_ss} +diff --git a/target/sw64/profile.c b/target/sw64/profile.c +new file mode 100644 +index 0000000000..ca1ad8d80e +--- /dev/null ++++ b/target/sw64/profile.c +@@ -0,0 +1,2262 @@ ++#include "translate.h" ++ ++void insn_profile(DisasContext *ctx, uint32_t insn) ++{ ++ int32_t disp16, disp26 __attribute__((unused)); ++ uint8_t opc; ++ uint16_t fn3, fn4, fn6, fn8, fn11; ++ TCGv count; ++ int index, offs; ++ ++ opc = extract32(insn, 26, 6); ++ ++ fn3 = extract32(insn, 10, 3); ++ fn6 = extract32(insn, 10, 6); ++ fn4 = extract32(insn, 12, 4); ++ fn8 = extract32(insn, 5, 8); ++ fn11 = extract32(insn, 5, 11); ++ ++ disp16 = sextract32(insn, 0, 16); ++ disp26 = sextract32(insn, 0, 26); ++ ++ index = 0; ++ switch (opc) { ++ case 0x00: ++ /* SYS_CALL */ ++ index = SYS_CALL; ++ break; ++ case 0x01: ++ /* CALL */ ++ index = CALL; ++ break; ++ case 0x02: ++ /* RET */ ++ index = RET; ++ break; ++ case 0x03: ++ /* JMP */ ++ index = JMP; ++ break; ++ case 0x04: ++ /* BR */ ++ index = BR; ++ break; ++ case 0x05: ++ /* BSR */ ++ index = BSR; ++ break; ++ case 0x06: ++ switch (disp16) { ++ case 0x0000: ++ /* MEMB */ ++ index = MEMB; ++ break; ++ case 0x0001: ++ /* IMEMB */ ++ index = IMEMB; ++ break; ++ case 0x0002: ++ /* WMEMB */ ++ index = WMEMB; ++ break; ++ case 0x0020: ++ /* RTC */ ++ index = RTC; ++ break; ++ case 0x0040: ++ /* RCID */ ++ index = RCID; ++ break; ++ case 0x0080: ++ /* HALT */ ++ index = HALT; ++ break; ++ 
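Since every arm of this switch is driven by the field extraction at the top of insn_profile(), one worked example may help; the values below are read straight off the extract32() calls above:

    /* insn = 0x18000020 decodes as RTC:
     *   opc    = extract32(insn, 26, 6)  = 0x06
     *   disp16 = sextract32(insn, 0, 16) = 0x0020  -> index = RTC
     * so the generated code bumps env->insn_count[RTC] each time the
     * instruction executes. */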
case 0x1000: ++ /* RD_F */ ++ index = RD_F; ++ break; ++ case 0x1020: ++ /* WR_F */ ++ index = WR_F; ++ break; ++ case 0x1040: ++ /* RTID */ ++ index = RTID; ++ break; ++ default: ++ if ((disp16 & 0xFF00) == 0xFC00) { ++ /* CSRWS */ ++ index = CSRWS; ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFD00) { ++ /* CSRWC */ ++ index = CSRWC; ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFE00) { ++ /* PRI_RCSR */ ++ index = PRI_RCSR; ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFF00) { ++ /* PRI_WCSR */ ++ index = PRI_WCSR; ++ break; ++ } ++ goto do_invalid; ++ } ++ break; ++ case 0x07: ++ /* PRI_RET */ ++ index = PRI_RET; ++ break; ++ case 0x08: ++ switch (fn4) { ++ case 0x0: ++ /* LLDW */ ++ index = LLDW; ++ break; ++ case 0x1: ++ /* LLDL */ ++ index = LLDL; ++ break; ++ case 0x2: ++ /* LDW_INC */ ++ index = LDW_INC; ++ break; ++ case 0x3: ++ /* LDL_INC */ ++ index = LDL_INC; ++ break; ++ case 0x4: ++ /* LDW_DEC */ ++ index = LDW_DEC; ++ break; ++ case 0x5: ++ /* LDL_DEC */ ++ index = LDL_DEC; ++ break; ++ case 0x6: ++ /* LDW_SET */ ++ index = LDW_SET; ++ break; ++ case 0x7: ++ /* LDL_SET */ ++ index = LDL_SET; ++ break; ++ case 0x8: ++ /* LSTW */ ++ index = LSTW; ++ break; ++ case 0x9: ++ /* LSTL */ ++ index = LSTL; ++ break; ++ case 0xa: ++ /* LDW_NC */ ++ index = LDW_NC; ++ break; ++ case 0xb: ++ /* LDL_NC */ ++ index = LDL_NC; ++ break; ++ case 0xc: ++ /* LDD_NC */ ++ index = LDD_NC; ++ break; ++ case 0xd: ++ /* STW_NC */ ++ index = STW_NC; ++ break; ++ case 0xe: ++ /* STL_NC */ ++ index = STL_NC; ++ break; ++ case 0xf: ++ /* STD_NC */ ++ index = STD_NC; ++ break; ++ default: ++ goto do_invalid; ++ } ++ break; ++ case 0x9: ++ /* LDWE */ ++ index = LDWE; ++ break; ++ case 0x0a: ++ /* LDSE */ ++ index = LDSE; ++ break; ++ case 0x0b: ++ /* LDDE */ ++ index = LDDE; ++ break; ++ case 0x0c: ++ /* VLDS */ ++ index = VLDS; ++ break; ++ case 0x0d: ++ /* VLDD */ ++ index = VLDD; ++ break; ++ case 0x0e: ++ /* VSTS */ ++ index = VSTS; ++ break; ++ case 0x0f: ++ /* VSTD */ ++ index = VSTD; ++ break; ++ case 0x10: ++ if (fn11 == 0x70) { ++ /* FIMOVS */ ++ index = FIMOVS; ++ } else if (fn11 == 0x78) { ++ /* FIMOVD */ ++ index = FIMOVD; ++ } else { ++ switch (fn11 & 0xff) { ++ case 0x00: ++ /* ADDW */ ++ index = ADDW; ++ break; ++ case 0x01: ++ /* SUBW */ ++ index = SUBW; ++ break; ++ case 0x02: ++ /* S4ADDW */ ++ index = S4ADDW; ++ break; ++ case 0x03: ++ /* S4SUBW */ ++ index = S4SUBW; ++ break; ++ case 0x04: ++ /* S8ADDW */ ++ index = S8ADDW; ++ break; ++ case 0x05: ++ /* S8SUBW */ ++ index = S8SUBW; ++ break; ++ ++ case 0x08: ++ /* ADDL */ ++ index = ADDL; ++ break; ++ case 0x09: ++ /* SUBL */ ++ index = SUBL; ++ break; ++ case 0x0a: ++ /* S4ADDL */ ++ index = S4ADDL; ++ break; ++ case 0x0b: ++ /* S4SUBL */ ++ index = S4SUBL; ++ break; ++ case 0x0c: ++ /* S8ADDL */ ++ index = S8ADDL; ++ break; ++ case 0x0d: ++ /* S8SUBL */ ++ index = S8SUBL; ++ break; ++ case 0x10: ++ /* MULW */ ++ index = MULW; ++ break; ++ case 0x11: ++ /* DIVW */ ++ index = DIVW; ++ break; ++ case 0x12: ++ /* UDIVW */ ++ index = UDIVW; ++ break; ++ case 0x13: ++ /* REMW */ ++ index = REMW; ++ break; ++ case 0x14: ++ /* UREMW */ ++ index = UREMW; ++ break; ++ case 0x18: ++ /* MULL */ ++ index = MULL; ++ break; ++ case 0x19: ++ /* MULH */ ++ index = MULH; ++ break; ++ case 0x1A: ++ /* DIVL */ ++ index = DIVL; ++ break; ++ case 0x1B: ++ /* UDIVL */ ++ index = UDIVL; ++ break; ++ case 0x1C: ++ /* REML */ ++ index = REML; ++ break; ++ case 0x1D: ++ /* UREML */ ++ index = UREML; ++ break; ++ case 0x1E: ++ /* ADDPI */ ++ index = ADDPI; ++ 
break; ++ case 0x1F: ++ /* ADDPIS */ ++ index = ADDPIS; ++ break; ++ case 0x28: ++ /* CMPEQ */ ++ index = CMPEQ; ++ break; ++ case 0x29: ++ /* CMPLT */ ++ index = CMPLT; ++ break; ++ case 0x2a: ++ /* CMPLE */ ++ index = CMPLE; ++ break; ++ case 0x2b: ++ /* CMPULT */ ++ index = CMPULT; ++ break; ++ case 0x2c: ++ /* CMPULE */ ++ index = CMPULE; ++ break; ++ case 0x2D: ++ /* SBT */ ++ index = SBT; ++ break; ++ case 0x2E: ++ /* CBT */ ++ index = CBT; ++ break; ++ case 0x38: ++ /* AND */ ++ index = AND; ++ break; ++ case 0x39: ++ /* BIC */ ++ index = BIC; ++ break; ++ case 0x3a: ++ /* BIS */ ++ index = BIS; ++ break; ++ case 0x3b: ++ /* ORNOT */ ++ index = ORNOT; ++ break; ++ case 0x3c: ++ /* XOR */ ++ index = XOR; ++ break; ++ case 0x3d: ++ /* EQV */ ++ index = EQV; ++ break; ++ case 0x40: ++ /* INSLB */ ++ index = INSLB; ++ break; ++ case 0x41: ++ /* INSLH */ ++ index = INSLH; ++ break; ++ case 0x42: ++ /* INSLW */ ++ index = INSLW; ++ break; ++ case 0x43: ++ /* INSLL */ ++ index = INSLL; ++ break; ++ case 0x44: ++ /* INSHB */ ++ index = INSHB; ++ break; ++ case 0x45: ++ /* INSHH */ ++ index = INSHH; ++ break; ++ case 0x46: ++ /* INSHW */ ++ index = INSHW; ++ break; ++ case 0x47: ++ /* INSHL */ ++ index = INSHL; ++ break; ++ case 0x48: ++ /* SLLL */ ++ index = SLLL; ++ break; ++ case 0x49: ++ /* SRLL */ ++ index = SRLL; ++ break; ++ case 0x4a: ++ /* SRAL */ ++ index = SRAL; ++ break; ++ case 0x4B: ++ /* ROLL */ ++ index = ROLL; ++ break; ++ case 0x4C: ++ /* SLLW */ ++ index = SLLW; ++ break; ++ case 0x4D: ++ /* SRLW */ ++ index = SRLW; ++ break; ++ case 0x4E: ++ /* SRAW */ ++ index = SRAW; ++ break; ++ case 0x4F: ++ /* ROLW */ ++ index = ROLW; ++ break; ++ case 0x50: ++ /* EXTLB */ ++ index = EXTLB; ++ break; ++ case 0x51: ++ /* EXTLH */ ++ index = EXTLH; ++ break; ++ case 0x52: ++ /* EXTLW */ ++ index = EXTLW; ++ break; ++ case 0x53: ++ /* EXTLL */ ++ index = EXTLL; ++ break; ++ case 0x54: ++ /* EXTHB */ ++ index = EXTHB; ++ break; ++ case 0x55: ++ /* EXTHH */ ++ index = EXTHH; ++ break; ++ case 0x56: ++ /* EXTHW */ ++ index = EXTHW; ++ break; ++ case 0x57: ++ /* EXTHL */ ++ index = EXTHL; ++ break; ++ case 0x58: ++ /* CTPOP */ ++ index = CTPOP; ++ break; ++ case 0x59: ++ /* CTLZ */ ++ index = CTLZ; ++ break; ++ case 0x5a: ++ /* CTTZ */ ++ index = CTTZ; ++ break; ++ case 0x5B: ++ /* REVBH */ ++ index = REVBH; ++ break; ++ case 0x5C: ++ /* REVBW */ ++ index = REVBW; ++ break; ++ case 0x5D: ++ /* REVBL */ ++ index = REVBL; ++ break; ++ case 0x5E: ++ /* CASW */ ++ index = CASW; ++ break; ++ case 0x5F: ++ /* CASL */ ++ index = CASL; ++ break; ++ case 0x60: ++ /* MASKLB */ ++ index = MASKLB; ++ break; ++ case 0x61: ++ /* MASKLH */ ++ index = MASKLH; ++ break; ++ case 0x62: ++ /* MASKLW */ ++ index = MASKLW; ++ break; ++ case 0x63: ++ /* MASKLL */ ++ index = MASKLL; ++ break; ++ case 0x64: ++ /* MASKHB */ ++ index = MASKHB; ++ break; ++ case 0x65: ++ /* MASKHH */ ++ index = MASKHH; ++ break; ++ case 0x66: ++ /* MASKHW */ ++ index = MASKHW; ++ break; ++ case 0x67: ++ /* MASKHL */ ++ index = MASKHL; ++ break; ++ case 0x68: ++ /* ZAP */ ++ index = ZAP; ++ break; ++ case 0x69: ++ /* ZAPNOT */ ++ index = ZAPNOT; ++ break; ++ case 0x6a: ++ /* SEXTB */ ++ index = SEXTB; ++ break; ++ case 0x6b: ++ /* SEXTH */ ++ index = SEXTH; ++ break; ++ case 0x6c: ++ /* CMPGEB*/ ++ break; ++ default: ++ break; ++ } ++ } ++ break; ++ case 0x11: ++ switch (fn3) { ++ case 0x0: ++ /* SELEQ */ ++ index = SELEQ; ++ break; ++ case 0x1: ++ /* SELGE */ ++ index = SELGE; ++ break; ++ case 0x2: ++ /* SELGT */ ++ index = SELGT; ++ 
break; ++ case 0x3: ++ /* SELLE */ ++ index = SELLE; ++ break; ++ case 0x4: ++ /* SELLT */ ++ index = SELLT; ++ break; ++ case 0x5: ++ /* SELNE */ ++ index = SELNE; ++ break; ++ case 0x6: ++ /* SELLBC */ ++ index = SELLBC; ++ break; ++ case 0x7: ++ /* SELLBS */ ++ index = SELLBS; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x12: ++ switch (fn8 & 0xff) { ++ case 0x00: ++ /* ADDWI */ ++ index = ADDWI; ++ break; ++ case 0x01: ++ /* SUBWI */ ++ index = SUBWI; ++ break; ++ case 0x02: ++ /* S4ADDWI */ ++ index = S4ADDWI; ++ break; ++ case 0x03: ++ /* S4SUBWI */ ++ index = S4SUBWI; ++ break; ++ case 0x04: ++ /* S8ADDWI */ ++ index = S8ADDWI; ++ break; ++ case 0x05: ++ /* S8SUBWI */ ++ index = S8SUBWI; ++ break; ++ ++ case 0x08: ++ /* ADDLI */ ++ index = ADDLI; ++ break; ++ case 0x09: ++ /* SUBLI */ ++ index = SUBLI; ++ break; ++ case 0x0a: ++ /* S4ADDLI */ ++ index = S4ADDLI; ++ break; ++ case 0x0b: ++ /* S4SUBLI */ ++ index = S4SUBLI; ++ break; ++ case 0x0c: ++ /* S8ADDLI */ ++ index = S8ADDLI; ++ break; ++ case 0x0d: ++ /* S8SUBLI */ ++ index = S8SUBLI; ++ break; ++ case 0x10: ++ /* MULWI */ ++ index = MULWI; ++ break; ++ case 0x11: ++ /* DIVWI */ ++ index = DIVWI; ++ break; ++ case 0x12: ++ /* UDIVWI */ ++ index = UDIVWI; ++ break; ++ case 0x13: ++ /* REMWI */ ++ index = REMWI; ++ break; ++ case 0x14: ++ /* UREMWI */ ++ index = UREMWI; ++ break; ++ case 0x18: ++ /* MULLI */ ++ index = MULLI; ++ break; ++ case 0x19: ++ /* MULHI */ ++ index = MULHI; ++ break; ++ case 0x1A: ++ /* DIVLI */ ++ index = DIVLI; ++ break; ++ case 0x1B: ++ /* UDIVLI */ ++ index = UDIVLI; ++ break; ++ case 0x1C: ++ /* REMLI */ ++ index = REMLI; ++ break; ++ case 0x1D: ++ /* UREMLI */ ++ index = UREMLI; ++ break; ++ case 0x1E: ++ /* ADDPII */ ++ index = ADDPII; ++ break; ++ case 0x1F: ++ /* ADDPISI */ ++ index = ADDPISI; ++ break; ++ case 0x28: ++ /* CMPEQI */ ++ index = CMPEQI; ++ break; ++ case 0x29: ++ /* CMPLTI */ ++ index = CMPLTI; ++ break; ++ case 0x2a: ++ /* CMPLEI */ ++ index = CMPLEI; ++ break; ++ case 0x2b: ++ /* CMPULTI */ ++ index = CMPULTI; ++ break; ++ case 0x2c: ++ /* CMPULEI */ ++ index = CMPULEI; ++ break; ++ case 0x2D: ++ /* SBTI */ ++ index = SBTI; ++ break; ++ case 0x2E: ++ /* CBTI */ ++ index = CBTI; ++ break; ++ case 0x38: ++ /* ANDI */ ++ index = ANDI; ++ break; ++ case 0x39: ++ /* BICI */ ++ index = BICI; ++ break; ++ case 0x3a: ++ /* BISI */ ++ index = BISI; ++ break; ++ case 0x3b: ++ /* ORNOTI */ ++ index = ORNOTI; ++ break; ++ case 0x3c: ++ /* XORI */ ++ index = XORI; ++ break; ++ case 0x3d: ++ /* EQVI */ ++ index = EQVI; ++ break; ++ case 0x40: ++ /* INSLBI */ ++ index = INSLBI; ++ break; ++ case 0x41: ++ /* INSLHI */ ++ index = INSLHI; ++ break; ++ case 0x42: ++ /* INSLWI */ ++ index = INSLWI; ++ break; ++ case 0x43: ++ /* INSLLI */ ++ index = INSLLI; ++ break; ++ case 0x44: ++ /* INSHBI */ ++ index = INSHBI; ++ break; ++ case 0x45: ++ /* INSHHI */ ++ index = INSHHI; ++ break; ++ case 0x46: ++ /* INSHWI */ ++ index = INSHWI; ++ break; ++ case 0x47: ++ /* INSHLI */ ++ index = INSHLI; ++ break; ++ case 0x48: ++ /* SLLLI */ ++ index = SLLLI; ++ break; ++ case 0x49: ++ /* SRLLI */ ++ index = SRLLI; ++ break; ++ case 0x4a: ++ /* SRALI */ ++ index = SRALI; ++ break; ++ case 0x4B: ++ /* ROLLI */ ++ index = ROLLI; ++ break; ++ case 0x4C: ++ /* SLLWI */ ++ index = SLLWI; ++ break; ++ case 0x4D: ++ /* SRLWI */ ++ index = SRLWI; ++ break; ++ case 0x4E: ++ /* SRAWI */ ++ index = SRAWI; ++ break; ++ case 0x4F: ++ /* ROLWI */ ++ index = ROLWI; ++ break; ++ case 0x50: ++ /* EXTLBI */ ++ index = 
EXTLBI; ++ break; ++ case 0x51: ++ /* EXTLHI */ ++ index = EXTLHI; ++ break; ++ case 0x52: ++ /* EXTLWI */ ++ index = EXTLWI; ++ break; ++ case 0x53: ++ /* EXTLLI */ ++ index = EXTLLI; ++ break; ++ case 0x54: ++ /* EXTHBI */ ++ index = EXTHBI; ++ break; ++ case 0x55: ++ /* EXTHHI */ ++ index = EXTHHI; ++ break; ++ case 0x56: ++ /* EXTHWI */ ++ index = EXTHWI; ++ break; ++ case 0x57: ++ /* EXTHLI */ ++ index = EXTHLI; ++ break; ++ case 0x58: ++ /* CTPOPI */ ++ index = CTPOPI; ++ break; ++ case 0x59: ++ /* CTLZI */ ++ index = CTLZI; ++ break; ++ case 0x5a: ++ /* CTTZI */ ++ index = CTTZI; ++ break; ++ case 0x5B: ++ /* REVBHI */ ++ index = REVBHI; ++ break; ++ case 0x5C: ++ /* REVBWI */ ++ index = REVBWI; ++ break; ++ case 0x5D: ++ /* REVBLI */ ++ index = REVBLI; ++ break; ++ case 0x5E: ++ /* CASWI */ ++ index = CASWI; ++ break; ++ case 0x5F: ++ /* CASLI */ ++ index = CASLI; ++ break; ++ case 0x60: ++ /* MASKLBI */ ++ index = MASKLBI; ++ break; ++ case 0x61: ++ /* MASKLHI */ ++ index = MASKLHI; ++ break; ++ case 0x62: ++ /* MASKLWI */ ++ index = MASKLWI; ++ break; ++ case 0x63: ++ /* MASKLLI */ ++ index = MASKLLI; ++ break; ++ case 0x64: ++ /* MASKHBI */ ++ index = MASKHBI; ++ break; ++ case 0x65: ++ /* MASKHHI */ ++ index = MASKHHI; ++ break; ++ case 0x66: ++ /* MASKHWI */ ++ index = MASKHWI; ++ break; ++ case 0x67: ++ /* MASKHLI */ ++ index = MASKHLI; ++ break; ++ case 0x68: ++ /* ZAPI */ ++ index = ZAPI; ++ break; ++ case 0x69: ++ /* ZAPNOTI */ ++ index = ZAPNOTI; ++ break; ++ case 0x6a: ++ /* SEXTBI */ ++ index = SEXTBI; ++ break; ++ case 0x6b: ++ /* SEXTHI */ ++ index = SEXTHI; ++ break; ++ case 0x6c: ++ /* CMPGEBI */ ++ index = CMPGEBI; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x13: ++ switch (fn3) { ++ case 0x0: ++ /* SELEQI */ ++ index = SELEQI; ++ break; ++ case 0x1: ++ /* SELGEI */ ++ index = SELGEI; ++ break; ++ case 0x2: ++ /* SELGTI */ ++ index = SELGTI; ++ break; ++ case 0x3: ++ /* SELLEI */ ++ index = SELLEI; ++ break; ++ case 0x4: ++ /* SELLTI */ ++ index = SELLTI; ++ break; ++ case 0x5: ++ /* SELNEI */ ++ index = SELNEI; ++ break; ++ case 0x6: ++ /* SELLBCI */ ++ index = SELLBCI; ++ break; ++ case 0x7: ++ /* SELLBSI */ ++ index = SELLBSI; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x14: ++ case 0x15: ++ case 0x16: ++ case 0x17: ++ /* VLOGZZ */ ++ index = VLOGZZ; ++ break; ++ case 0x18: ++ switch (fn8) { ++ case 0x00: ++ /* FADDS */ ++ index = FADDS; ++ break; ++ case 0x01: ++ /* FADDD */ ++ index = FADDD; ++ break; ++ case 0x02: ++ /* FSUBS */ ++ index = FSUBS; ++ break; ++ case 0x03: ++ /* FSUBD */ ++ index = FSUBD; ++ break; ++ case 0x4: ++ /* FMULS */ ++ index = FMULS; ++ break; ++ case 0x05: ++ /* FMULD */ ++ index = FMULD; ++ break; ++ case 0x06: ++ /* FDIVS */ ++ index = FDIVS; ++ break; ++ case 0x07: ++ /* FDIVD */ ++ index = FDIVD; ++ break; ++ case 0x08: ++ /* FSQRTS */ ++ index = FSQRTS; ++ break; ++ case 0x09: ++ /* FSQRTD */ ++ index = FSQRTD; ++ break; ++ case 0x10: ++ /* FCMPEQ */ ++ index = FCMPEQ; ++ break; ++ case 0x11: ++ /* FCMPLE */ ++ index = FCMPLE; ++ break; ++ case 0x12: ++ /* FCMPLT */ ++ index = FCMPLT; ++ break; ++ case 0x13: ++ /* FCMPUN */ ++ index = FCMPUN; ++ break; ++ case 0x20: ++ /* FCVTSD */ ++ index = FCVTSD; ++ break; ++ case 0x21: ++ /* FCVTDS */ ++ index = FCVTDS; ++ break; ++ case 0x22: ++ /* FCVTDL_G */ ++ index = FCVTDL_G; ++ break; ++ case 0x23: ++ /* FCVTDL_P */ ++ index = FCVTDL_P; ++ break; ++ case 0x24: ++ /* FCVTDL_Z */ ++ index = FCVTDL_Z; ++ break; ++ case 0x25: ++ /* FCVTDL_N */ ++ index = FCVTDL_N; 
++ break; ++ case 0x27: ++ /* FCVTDL */ ++ index = FCVTDL; ++ break; ++ case 0x28: ++ /* FCVTWL */ ++ index = FCVTWL; ++ break; ++ case 0x29: ++ /* FCVTLW */ ++ index = FCVTLW; ++ break; ++ case 0x2d: ++ /* FCVTLS */ ++ index = FCVTLS; ++ break; ++ case 0x2f: ++ /* FCVTLD */ ++ index = FCVTLD; ++ break; ++ case 0x30: ++ /* FCPYS */ ++ index = FCPYS; ++ break; ++ case 0x31: ++ /* FCPYSE */ ++ index = FCPYSE; ++ break; ++ case 0x32: ++ /* FCPYSN */ ++ index = FCPYSN; ++ break; ++ case 0x40: ++ /* IFMOVS */ ++ index = IFMOVS; ++ break; ++ case 0x41: ++ /* IFMOVD */ ++ index = IFMOVD; ++ break; ++ case 0x50: ++ /* RFPCR */ ++ index = RFPCR; ++ break; ++ case 0x51: ++ /* WFPCR */ ++ index = WFPCR; ++ break; ++ case 0x54: ++ /* SETFPEC0 */ ++ index = SETFPEC0; ++ break; ++ case 0x55: ++ /* SETFPEC1 */ ++ index = SETFPEC1; ++ break; ++ case 0x56: ++ /* SETFPEC2 */ ++ index = SETFPEC2; ++ break; ++ case 0x57: ++ /* SETFPEC3 */ ++ index = SETFPEC3; ++ break; ++ case 0x58: ++ /* FRECS */ ++ index = FRECS; ++ break; ++ case 0x59: ++ /* FRECD */ ++ index = FRECD; ++ break; ++ case 0x5A: ++ /* FRIS */ ++ index = FRIS; ++ break; ++ case 0x5B: ++ /* FRIS_G */ ++ index = FRIS_G; ++ break; ++ case 0x5C: ++ /* FRIS_P */ ++ index = FRIS_P; ++ break; ++ case 0x5D: ++ /* FRIS_Z */ ++ index = FRIS_Z; ++ break; ++ case 0x5F: ++ /* FRIS_N */ ++ index = FRIS_N; ++ break; ++ case 0x60: ++ /* FRID */ ++ index = FRID; ++ break; ++ case 0x61: ++ /* FRID_G */ ++ index = FRID_G; ++ break; ++ case 0x62: ++ /* FRID_P */ ++ index = FRID_P; ++ break; ++ case 0x63: ++ /* FRID_Z */ ++ index = FRID_Z; ++ break; ++ case 0x64: ++ /* FRID_N */ ++ index = FRID_N; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x19: ++ switch (fn6) { ++ case 0x00: ++ /* FMAS */ ++ index = FMAS; ++ break; ++ case 0x01: ++ /* FMAD */ ++ index = FMAD; ++ break; ++ case 0x02: ++ /* FMSS */ ++ index = FMSS; ++ break; ++ case 0x03: ++ /* FMSD */ ++ index = FMSD; ++ break; ++ case 0x04: ++ /* FNMAS */ ++ index = FNMAS; ++ break; ++ case 0x05: ++ /* FNMAD */ ++ index = FNMAD; ++ break; ++ case 0x06: ++ /* FNMSS */ ++ index = FNMSS; ++ break; ++ case 0x07: ++ /* FNMSD */ ++ index = FNMSD; ++ break; ++ case 0x10: ++ /* FSELEQ */ ++ index = FSELEQ; ++ break; ++ case 0x11: ++ /* FSELNE */ ++ index = FSELNE; ++ break; ++ case 0x12: ++ /* FSELLT */ ++ index = FSELLT; ++ break; ++ case 0x13: ++ /* FSELLE */ ++ index = FSELLE; ++ break; ++ case 0x14: ++ /* FSELGT */ ++ index = FSELGT; ++ break; ++ case 0x15: ++ /* FSELGE */ ++ index = FSELGE; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x1A: ++ switch (fn8) { ++ case 0x00: ++ /* VADDW */ ++ index = VADDW; ++ break; ++ case 0x20: ++ /* VADDWI */ ++ index = VADDWI; ++ break; ++ case 0x01: ++ /* VSUBW */ ++ index = VSUBW; ++ break; ++ case 0x21: ++ /* VSUBWI */ ++ index = VSUBWI; ++ break; ++ case 0x02: ++ /* VCMPGEW */ ++ index = VCMPGEW; ++ break; ++ case 0x22: ++ /* VCMPGEWI */ ++ index = VCMPGEWI; ++ break; ++ case 0x03: ++ /* VCMPEQW */ ++ index = VCMPEQW; ++ break; ++ case 0x23: ++ /* VCMPEQWI */ ++ index = VCMPEQWI; ++ break; ++ case 0x04: ++ /* VCMPLEW */ ++ index = VCMPLEW; ++ break; ++ case 0x24: ++ /* VCMPLEWI */ ++ index = VCMPLEWI; ++ break; ++ case 0x05: ++ /* VCMPLTW */ ++ index = VCMPLTW; ++ break; ++ case 0x25: ++ /* VCMPLTWI */ ++ index = VCMPLTWI; ++ break; ++ case 0x06: ++ /* VCMPULEW */ ++ index = VCMPULEW; ++ break; ++ case 0x26: ++ /* VCMPULEWI */ ++ index = VCMPULEWI; ++ break; ++ case 0x07: ++ /* VCMPULTW */ ++ index = VCMPULTW; ++ break; ++ case 0x27: ++ /* VCMPULTWI */ ++ 
index = VCMPULTWI; ++ break; ++ case 0x08: ++ /* VSLLW */ ++ index = VSLLW; ++ break; ++ case 0x28: ++ /* VSLLWI */ ++ index = VSLLWI; ++ break; ++ case 0x09: ++ /* VSRLW */ ++ index = VSRLW; ++ break; ++ case 0x29: ++ /* VSRLWI */ ++ index = VSRLWI; ++ break; ++ case 0x0A: ++ /* VSRAW */ ++ index = VSRAW; ++ break; ++ case 0x2A: ++ /* VSRAWI */ ++ index = VSRAWI; ++ break; ++ case 0x0B: ++ /* VROLW */ ++ index = VROLW; ++ break; ++ case 0x2B: ++ /* VROLWI */ ++ index = VROLWI; ++ break; ++ case 0x0C: ++ /* SLLOW */ ++ index = SLLOW; ++ break; ++ case 0x2C: ++ /* SLLOWI */ ++ index = SLLOWI; ++ break; ++ case 0x0D: ++ /* SRLOW */ ++ index = SRLOW; ++ break; ++ case 0x2D: ++ /* SRLOWI */ ++ index = SRLOWI; ++ break; ++ case 0x0E: ++ /* VADDL */ ++ index = VADDL; ++ break; ++ case 0x2E: ++ /* VADDLI */ ++ index = VADDLI; ++ break; ++ case 0x0F: ++ /* VSUBL */ ++ index = VSUBL; ++ break; ++ case 0x2F: ++ /* VSUBLI */ ++ index = VSUBLI; ++ break; ++ case 0x10: ++ /* VSLLB */ ++ index = VSLLB; ++ break; ++ case 0x30: ++ /* VSLLBI */ ++ index = VSLLBI; ++ break; ++ case 0x11: ++ /* VSRLB */ ++ index = VSRLB; ++ break; ++ case 0x31: ++ /* VSRLBI */ ++ index = VSRLBI; ++ break; ++ case 0x12: ++ /* VSRAB */ ++ index = VSRAB; ++ break; ++ case 0x32: ++ /* VSRABI */ ++ index = VSRABI; ++ break; ++ case 0x13: ++ /* VROLB */ ++ index = VROLB; ++ break; ++ case 0x33: ++ /* VROLBI */ ++ index = VROLBI; ++ break; ++ case 0x14: ++ /* VSLLH */ ++ index = VSLLH; ++ break; ++ case 0x34: ++ /* VSLLHI */ ++ index = VSLLHI; ++ break; ++ case 0x15: ++ /* VSRLH */ ++ index = VSRLH; ++ break; ++ case 0x35: ++ /* VSRLHI */ ++ index = VSRLHI; ++ break; ++ case 0x16: ++ /* VSRAH */ ++ index = VSRAH; ++ break; ++ case 0x36: ++ /* VSRAHI */ ++ index = VSRAHI; ++ break; ++ case 0x17: ++ /* VROLH */ ++ index = VROLH; ++ break; ++ case 0x37: ++ /* VROLHI */ ++ index = VROLHI; ++ break; ++ case 0x18: ++ /* CTPOPOW */ ++ index = CTPOPOW; ++ break; ++ case 0x19: ++ /* CTLZOW */ ++ index = CTLZOW; ++ break; ++ case 0x1A: ++ /* VSLLL */ ++ index = VSLLL; ++ break; ++ case 0x3A: ++ /* VSLLLI */ ++ index = VSLLLI; ++ break; ++ case 0x1B: ++ /* VSRLL */ ++ index = VSRLL; ++ break; ++ case 0x3B: ++ /* VSRLLI */ ++ index = VSRLLI; ++ break; ++ case 0x1C: ++ /* VSRAL */ ++ index = VSRAL; ++ break; ++ case 0x3C: ++ /* VSRALI */ ++ index = VSRALI; ++ break; ++ case 0x1D: ++ /* VROLL */ ++ index = VROLL; ++ break; ++ case 0x3D: ++ /* VROLLI */ ++ index = VROLLI; ++ break; ++ case 0x1E: ++ /* VMAXB */ ++ index = VMAXB; ++ break; ++ case 0x1F: ++ /* VMINB */ ++ index = VMINB; ++ break; ++ case 0x40: ++ /* VUCADDW */ ++ index = VUCADDW; ++ break; ++ case 0x60: ++ /* VUCADDWI */ ++ index = VUCADDWI; ++ break; ++ case 0x41: ++ /* VUCSUBW */ ++ index = VUCSUBW; ++ break; ++ case 0x61: ++ /* VUCSUBWI */ ++ index = VUCSUBWI; ++ break; ++ case 0x42: ++ /* VUCADDH */ ++ index = VUCADDH; ++ break; ++ case 0x62: ++ /* VUCADDHI */ ++ index = VUCADDHI; ++ break; ++ case 0x43: ++ /* VUCSUBH */ ++ index = VUCSUBH; ++ break; ++ case 0x63: ++ /* VUCSUBHI */ ++ index = VUCSUBHI; ++ break; ++ case 0x44: ++ /* VUCADDB */ ++ index = VUCADDB; ++ break; ++ case 0x64: ++ /* VUCADDBI */ ++ index = VUCADDBI; ++ break; ++ case 0x45: ++ /* VUCSUBB */ ++ index = VUCSUBB; ++ break; ++ case 0x65: ++ /* VUCSUBBI */ ++ index = VUCSUBBI; ++ break; ++ case 0x46: ++ /* SRAOW */ ++ index = SRAOW; ++ break; ++ case 0x66: ++ /* SRAOWI */ ++ index = SRAOWI; ++ break; ++ case 0x47: ++ /* VSUMW */ ++ index = VSUMW; ++ break; ++ case 0x48: ++ /* VSUML */ ++ index = VSUML; ++ 
break; ++ case 0x49: ++ /* VSM4R */ ++ index = VSM4R; ++ break; ++ case 0x4A: ++ /* VBINVW */ ++ index = VBINVW; ++ break; ++ case 0x4B: ++ /* VCMPUEQB */ ++ index = VCMPUEQB; ++ break; ++ case 0x6B: ++ /* VCMPUEQBI*/ ++ break; ++ case 0x4C: ++ /* VCMPUGTB */ ++ index = VCMPUGTB; ++ break; ++ case 0x6C: ++ /* VCMPUGTBI */ ++ index = VCMPUGTBI; ++ break; ++ case 0x4D: ++ /* VSM3MSW */ ++ index = VSM3MSW; ++ break; ++ case 0x50: ++ /* VMAXH */ ++ index = VMAXH; ++ break; ++ case 0x51: ++ /* VMINH */ ++ index = VMINH; ++ break; ++ case 0x52: ++ /* VMAXW */ ++ index = VMAXW; ++ break; ++ case 0x53: ++ /* VMINW */ ++ index = VMINW; ++ break; ++ case 0x54: ++ /* VMAXL */ ++ index = VMAXL; ++ break; ++ case 0x55: ++ /* VMINL */ ++ index = VMINL; ++ break; ++ case 0x56: ++ /* VUMAXB */ ++ index = VUMAXB; ++ break; ++ case 0x57: ++ /* VUMINB */ ++ index = VUMINB; ++ break; ++ case 0x58: ++ /* VUMAXH */ ++ index = VUMAXH; ++ break; ++ case 0x59: ++ /* VUMINH */ ++ index = VUMINH; ++ break; ++ case 0x5A: ++ /* VUMAXW */ ++ index = VUMAXW; ++ break; ++ case 0x5B: ++ /* VUMINW */ ++ index = VUMINW; ++ break; ++ case 0x5C: ++ /* VUMAXL */ ++ index = VUMAXL; ++ break; ++ case 0x5D: ++ /* VUMINL */ ++ index = VUMINL; ++ break; ++ case 0x68: ++ /* VSM4KEY */ ++ index = VSM4KEY; ++ break; ++ case 0x80: ++ /* VADDS */ ++ index = VADDS; ++ break; ++ case 0x81: ++ /* VADDD */ ++ index = VADDD; ++ break; ++ case 0x82: ++ /* VSUBS */ ++ index = VSUBS; ++ break; ++ case 0x83: ++ /* VSUBD */ ++ index = VSUBD; ++ break; ++ case 0x84: ++ /* VMULS */ ++ index = VMULS; ++ break; ++ case 0x85: ++ /* VMULD */ ++ index = VMULD; ++ break; ++ case 0x86: ++ /* VDIVS */ ++ index = VDIVS; ++ break; ++ case 0x87: ++ /* VDIVD */ ++ index = VDIVD; ++ break; ++ case 0x88: ++ /* VSQRTS */ ++ index = VSQRTS; ++ break; ++ case 0x89: ++ /* VSQRTD */ ++ index = VSQRTD; ++ break; ++ case 0x8C: ++ /* VFCMPEQ */ ++ index = VFCMPEQ; ++ break; ++ case 0x8D: ++ /* VFCMPLE */ ++ index = VFCMPLE; ++ break; ++ case 0x8E: ++ /* VFCMPLT */ ++ index = VFCMPLT; ++ break; ++ case 0x8F: ++ /* VFCMPUN */ ++ index = VFCMPUN; ++ break; ++ case 0x90: ++ /* VCPYS */ ++ index = VCPYS; ++ break; ++ case 0x91: ++ /* VCPYSE */ ++ index = VCPYSE; ++ break; ++ case 0x92: ++ /* VCPYSN */ ++ index = VCPYSN; ++ break; ++ case 0x93: ++ /* VSUMS */ ++ index = VSUMS; ++ break; ++ case 0x94: ++ /* VSUMD */ ++ index = VSUMD; ++ break; ++ case 0x95: ++ /* VFCVTSD */ ++ index = VFCVTSD; ++ break; ++ case 0x96: ++ /* VFCVTDS */ ++ index = VFCVTDS; ++ break; ++ case 0x99: ++ /* VFCVTLS */ ++ index = VFCVTLS; ++ break; ++ case 0x9A: ++ /* VFCVTLD */ ++ index = VFCVTLD; ++ break; ++ case 0x9B: ++ /* VFCVTDL */ ++ index = VFCVTDL; ++ break; ++ case 0x9C: ++ /* VFCVTDL_G */ ++ index = VFCVTDL_G; ++ break; ++ case 0x9D: ++ /* VFCVTDL_P */ ++ index = VFCVTDL_P; ++ break; ++ case 0x9E: ++ /* VFCVTDL_Z */ ++ index = VFCVTDL_Z; ++ break; ++ case 0x9F: ++ /* VFCVTDL_N */ ++ index = VFCVTDL_N; ++ break; ++ case 0xA0: ++ /* VFRIS */ ++ index = VFRIS; ++ break; ++ case 0xA1: ++ /* VFRIS_G */ ++ index = VFRIS_G; ++ break; ++ case 0xA2: ++ /* VFRIS_P */ ++ index = VFRIS_P; ++ break; ++ case 0xA3: ++ /* VFRIS_Z */ ++ index = VFRIS_Z; ++ break; ++ case 0xA4: ++ /* VFRIS_N */ ++ index = VFRIS_N; ++ break; ++ case 0xA5: ++ /* VFRID */ ++ index = VFRID; ++ break; ++ case 0xA6: ++ /* VFRID_G */ ++ index = VFRID_G; ++ break; ++ case 0xA7: ++ /* VFRID_P */ ++ index = VFRID_P; ++ break; ++ case 0xA8: ++ /* VFRID_Z */ ++ index = VFRID_Z; ++ break; ++ case 0xA9: ++ /* VFRID_N */ ++ index = 
VFRID_N; ++ break; ++ case 0xAA: ++ /* VFRECS */ ++ index = VFRECS; ++ break; ++ case 0xAB: ++ /* VFRECD */ ++ index = VFRECD; ++ break; ++ case 0xAC: ++ /* VMAXS */ ++ index = VMAXS; ++ break; ++ case 0xAD: ++ /* VMINS */ ++ index = VMINS; ++ break; ++ case 0xAE: ++ /* VMAXD */ ++ index = VMAXD; ++ break; ++ case 0xAF: ++ /* VMIND */ ++ index = VMIND; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x1B: ++ switch (fn6) { ++ case 0x00: ++ /* VMAS */ ++ index = VMAS; ++ break; ++ case 0x01: ++ /* VMAD */ ++ index = VMAD; ++ break; ++ case 0x02: ++ /* VMSS */ ++ index = VMSS; ++ break; ++ case 0x03: ++ /* VMSD */ ++ index = VMSD; ++ break; ++ case 0x04: ++ /* VNMAS */ ++ index = VNMAS; ++ break; ++ case 0x05: ++ /* VNMAD */ ++ index = VNMAD; ++ break; ++ case 0x06: ++ /* VNMSS */ ++ index = VNMSS; ++ break; ++ case 0x07: ++ /* VNMSD */ ++ index = VNMSD; ++ break; ++ case 0x10: ++ /* VFSELEQ */ ++ index = VFSELEQ; ++ break; ++ case 0x12: ++ /* VFSELLT */ ++ index = VFSELLT; ++ break; ++ case 0x13: ++ /* VFSELLE */ ++ index = VFSELLE; ++ break; ++ case 0x18: ++ /* VSELEQW */ ++ index = VSELEQW; ++ break; ++ case 0x38: ++ /* VSELEQWI */ ++ index = VSELEQWI; ++ break; ++ case 0x19: ++ /* VSELLBCW */ ++ index = VSELLBCW; ++ break; ++ case 0x39: ++ /* VSELLBCWI */ ++ index = VSELLBCWI; ++ break; ++ case 0x1A: ++ /* VSELLTW */ ++ index = VSELLTW; ++ break; ++ case 0x3A: ++ /* VSELLTWI */ ++ index = VSELLTWI; ++ break; ++ case 0x1B: ++ /* VSELLEW */ ++ index = VSELLEW; ++ break; ++ case 0x3B: ++ /* VSELLEWI */ ++ index = VSELLEWI; ++ break; ++ case 0x20: ++ /* VINSW */ ++ index = VINSW; ++ break; ++ case 0x21: ++ /* VINSF */ ++ index = VINSF; ++ break; ++ case 0x22: ++ /* VEXTW */ ++ index = VEXTW; ++ break; ++ case 0x23: ++ /* VEXTF */ ++ index = VEXTF; ++ break; ++ case 0x24: ++ /* VCPYW */ ++ index = VCPYW; ++ break; ++ case 0x25: ++ /* VCPYF */ ++ index = VCPYF; ++ break; ++ case 0x26: ++ /* VCONW */ ++ index = VCONW; ++ break; ++ case 0x27: ++ /* VSHFW */ ++ index = VSHFW; ++ break; ++ case 0x28: ++ /* VCONS */ ++ index = VCONS; ++ break; ++ case 0x29: ++ /* VCOND */ ++ index = VCOND; ++ break; ++ case 0x2A: ++ /* VINSB */ ++ index = VINSB; ++ break; ++ case 0x2B: ++ /* VINSH */ ++ index = VINSH; ++ break; ++ case 0x2C: ++ /* VINSECTLH */ ++ index = VINSECTLH; ++ break; ++ case 0x2D: ++ /* VINSECTLW */ ++ index = VINSECTLW; ++ break; ++ case 0x2E: ++ /* VINSECTLL */ ++ index = VINSECTLL; ++ break; ++ case 0x2F: ++ /* VINSECTLB */ ++ index = VINSECTLB; ++ break; ++ case 0x30: ++ /* VSHFQ */ ++ index = VSHFQ; ++ break; ++ case 0x31: ++ /* VSHFQB */ ++ index = VSHFQB; ++ break; ++ case 0x32: ++ /* VCPYB */ ++ index = VCPYB; ++ break; ++ case 0x33: ++ /* VCPYH */ ++ index = VCPYH; ++ break; ++ case 0x34: ++ /* VSM3R */ ++ index = VSM3R; ++ break; ++ case 0x35: ++ /* VFCVTSH */ ++ index = VFCVTSH; ++ break; ++ case 0x36: ++ /* VFCVTHS */ ++ index = VFCVTHS; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x1C: ++ switch (fn4) { ++ case 0x0: ++ /* VLDW_U */ ++ index = VLDW_U; ++ break; ++ case 0x1: ++ /* VSTW_U */ ++ index = VSTW_U; ++ break; ++ case 0x2: ++ /* VLDS_U */ ++ index = VLDS_U; ++ break; ++ case 0x3: ++ /* VSTS_U */ ++ index = VSTS_U; ++ break; ++ case 0x4: ++ /* VLDD_U */ ++ index = VLDD_U; ++ break; ++ case 0x5: ++ /* VSTD_U */ ++ index = VSTD_U; ++ break; ++ case 0x8: ++ /* VSTW_UL */ ++ index = VSTW_UL; ++ break; ++ case 0x9: ++ /* VSTW_UH */ ++ index = VSTW_UH; ++ break; ++ case 0xa: ++ /* VSTS_UL */ ++ index = VSTS_UL; ++ break; ++ case 0xb: ++ /* VSTS_UH */ ++ index 
= VSTS_UH; ++ break; ++ case 0xc: ++ /* VSTD_UL */ ++ index = VSTD_UL; ++ break; ++ case 0xd: ++ /* VSTD_UH */ ++ index = VSTD_UH; ++ break; ++ case 0xe: ++ /* VLDD_NC */ ++ index = VLDD_NC; ++ break; ++ case 0xf: ++ /* VSTD_NC */ ++ index = VSTD_NC; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x1D: ++ /* LBR */ ++ index = LBR; ++ break; ++ case 0x1E: ++ switch (fn4) { ++ case 0x0: ++ /* LDBU_A */ ++ index = LDBU_A; ++ break; ++ case 0x1: ++ /* LDHU_A */ ++ index = LDHU_A; ++ break; ++ case 0x2: ++ /* LDW_A */ ++ index = LDW_A; ++ break; ++ case 0x3: ++ /* LDL_A */ ++ index = LDL_A; ++ break; ++ case 0x4: ++ /* FLDS_A */ ++ index = FLDS_A; ++ break; ++ case 0x5: ++ /* FLDD_A */ ++ index = FLDD_A; ++ break; ++ case 0x6: ++ /* STBU_A */ ++ index = STBU_A; ++ break; ++ case 0x7: ++ /* STHU_A */ ++ index = STHU_A; ++ break; ++ case 0x8: ++ /* STW_A */ ++ index = STW_A; ++ break; ++ case 0x9: ++ /* STL_A */ ++ index = STL_A; ++ break; ++ case 0xA: ++ /* FSTS_A */ ++ index = FSTS_A; ++ break; ++ case 0xB: ++ /* FSTD_A */ ++ index = FSTD_A; ++ break; ++ case 0xE: ++ /* DPFHR */ ++ index = DPFHR; ++ break; ++ case 0xF: ++ /* DPFHW */ ++ index = DPFHW; ++ break; ++ default: ++ break; ++ } ++ break; ++ case 0x20: ++ /* LDBU */ ++ index = LDBU; ++ break; ++ case 0x21: ++ /* LDHU */ ++ index = LDHU; ++ break; ++ case 0x22: ++ /* LDW */ ++ index = LDW; ++ break; ++ case 0x23: ++ /* LDL */ ++ index = LDL; ++ break; ++ case 0x24: ++ /* LDL_U */ ++ index = LDL_U; ++ break; ++ case 0x25: ++ if ((insn >> 12) & 1) { ++ /* PRI_LDL */ ++ index = PRI_LDL; ++ } else { ++ /* PRI_LDW */ ++ index = PRI_LDW; ++ } ++ break; ++ case 0x26: ++ /* FLDS */ ++ index = FLDS; ++ break; ++ case 0x27: ++ /* FLDD */ ++ index = FLDD; ++ break; ++ case 0x28: ++ /* STB */ ++ index = STB; ++ break; ++ case 0x29: ++ /* STH */ ++ index = STH; ++ break; ++ case 0x2a: ++ /* STW */ ++ index = STW; ++ break; ++ case 0x2b: ++ /* STL */ ++ index = STL; ++ break; ++ case 0x2c: ++ /* STL_U */ ++ index = STL_U; ++ break; ++ case 0x2d: ++ if ((insn >> 12) & 1) { ++ /* PRI_STL */ ++ index = PRI_STL; ++ } else { ++ /* PRI_STW */ ++ index = PRI_STW; ++ } ++ break; ++ case 0x2e: ++ /* FSTS */ ++ index = FSTS; ++ break; ++ case 0x2f: ++ /* FSTD */ ++ index = FSTD; ++ break; ++ case 0x30: ++ /* BEQ */ ++ index = BEQ; ++ break; ++ case 0x31: ++ /* BNE */ ++ index = BNE; ++ break; ++ case 0x32: ++ /* BLT */ ++ index = BLT; ++ break; ++ case 0x33: ++ /* BLE */ ++ index = BLE; ++ break; ++ case 0x34: ++ /* BGT */ ++ index = BGT; ++ break; ++ case 0x35: ++ /* BGE */ ++ index = BGE; ++ break; ++ case 0x36: ++ /* BLBC */ ++ index = BLBC; ++ break; ++ case 0x37: ++ /* BLBS */ ++ index = BLBS; ++ break; ++ case 0x38: ++ /* FBEQ */ ++ index = FBEQ; ++ break; ++ case 0x39: ++ /* FBNE */ ++ index = FBNE; ++ break; ++ case 0x3a: ++ /* FBLT */ ++ index = FBLT; ++ break; ++ case 0x3b: ++ /* FBLE */ ++ index = FBLE; ++ break; ++ case 0x3c: ++ /* FBGT */ ++ index = FBGT; ++ break; ++ case 0x3d: ++ /* FBGE */ ++ index = FBGE; ++ break; ++ case 0x3f: ++ /* LDIH */ ++ index = LDIH; ++ break; ++ case 0x3e: ++ /* LDI */ ++ index = LDI; ++ break; ++ default: ++do_invalid: ++ break; ++ } ++ count = tcg_temp_new(); ++ offs = offsetof(CPUSW64State, insn_count[index]); ++ tcg_gen_ld_i64(count, tcg_env, offs); ++ tcg_gen_addi_i64(count, count, 1); ++ tcg_gen_st_i64(count, tcg_env, offs); ++} +diff --git a/target/sw64/profile.h b/target/sw64/profile.h +new file mode 100644 +index 0000000000..5aca541ea7 +--- /dev/null ++++ b/target/sw64/profile.h +@@ -0,0 +1,541 @@ 
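Before the constants themselves, a note on how they are consumed: the tcg_gen_ld_i64()/tcg_gen_addi_i64()/tcg_gen_st_i64() triple at the end of insn_profile() above emits the increment into the translation block itself, so the counters record dynamic execution counts, not static translation counts. The runtime effect per executed instruction is simply:

    env->insn_count[index] += 1;   /* index = one of the defines below */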
++#ifndef PROFILE_H ++#define PROFILE_H ++#define SYS_CALL 0 ++#define CALL 1 ++#define RET 2 ++#define JMP 3 ++#define BR 4 ++#define BSR 5 ++#define MEMB 6 ++#define IMEMB 7 ++#define WMEMB 8 ++#define RTC 9 ++#define RCID 10 ++#define HALT 11 ++#define RD_F 12 ++#define WR_F 13 ++#define RTID 14 ++#define CSRWS 15 ++#define CSRWC 16 ++#define PRI_RCSR 17 ++#define PRI_WCSR 18 ++#define PRI_RET 19 ++#define LLDW 20 ++#define LLDL 21 ++#define LDW_INC 22 ++#define LDL_INC 23 ++#define LDW_DEC 24 ++#define LDL_DEC 25 ++#define LDW_SET 26 ++#define LDL_SET 27 ++#define LSTW 28 ++#define LSTL 29 ++#define LDW_NC 30 ++#define LDL_NC 31 ++#define LDD_NC 32 ++#define STW_NC 33 ++#define STL_NC 34 ++#define STD_NC 35 ++#define LDWE 36 ++#define LDSE 37 ++#define LDDE 38 ++#define VLDS 39 ++#define VLDD 40 ++#define VSTS 41 ++#define VSTD 42 ++#define FIMOVS 43 ++#define FIMOVD 44 ++#define ADDW 45 ++#define SUBW 46 ++#define S4ADDW 47 ++#define S4SUBW 48 ++#define S8ADDW 49 ++#define S8SUBW 50 ++#define ADDL 51 ++#define SUBL 52 ++#define S4ADDL 53 ++#define S4SUBL 54 ++#define S8ADDL 55 ++#define S8SUBL 56 ++#define MULW 57 ++#define DIVW 58 ++#define UDIVW 59 ++#define REMW 60 ++#define UREMW 61 ++#define MULL 62 ++#define MULH 63 ++#define DIVL 64 ++#define UDIVL 65 ++#define REML 66 ++#define UREML 67 ++#define ADDPI 68 ++#define ADDPIS 69 ++#define CMPEQ 70 ++#define CMPLT 71 ++#define CMPLE 72 ++#define CMPULT 73 ++#define CMPULE 74 ++#define SBT 75 ++#define CBT 76 ++#define AND 77 ++#define BIC 78 ++#define BIS 79 ++#define ORNOT 80 ++#define XOR 81 ++#define EQV 82 ++#define INSLB 83 ++#define INSLH 84 ++#define INSLW 85 ++#define INSLL 86 ++#define INSHB 87 ++#define INSHH 88 ++#define INSHW 89 ++#define INSHL 90 ++#define SLLL 91 ++#define SRLL 92 ++#define SRAL 93 ++#define ROLL 94 ++#define SLLW 95 ++#define SRLW 96 ++#define SRAW 97 ++#define ROLW 98 ++#define EXTLB 99 ++#define EXTLH 100 ++#define EXTLW 101 ++#define EXTLL 102 ++#define EXTHB 103 ++#define EXTHH 104 ++#define EXTHW 105 ++#define EXTHL 106 ++#define CTPOP 107 ++#define CTLZ 108 ++#define CTTZ 109 ++#define REVBH 110 ++#define REVBW 111 ++#define REVBL 112 ++#define CASW 113 ++#define CASL 114 ++#define MASKLB 115 ++#define MASKLH 116 ++#define MASKLW 117 ++#define MASKLL 118 ++#define MASKHB 119 ++#define MASKHH 120 ++#define MASKHW 121 ++#define MASKHL 122 ++#define ZAP 123 ++#define ZAPNOT 124 ++#define SEXTB 125 ++#define SEXTH 126 ++#define SELEQ 127 ++#define SELGE 128 ++#define SELGT 129 ++#define SELLE 130 ++#define SELLT 131 ++#define SELNE 132 ++#define SELLBC 133 ++#define SELLBS 134 ++#define ADDWI 135 ++#define SUBWI 136 ++#define S4ADDWI 137 ++#define S4SUBWI 138 ++#define S8ADDWI 139 ++#define S8SUBWI 140 ++#define ADDLI 141 ++#define SUBLI 142 ++#define S4ADDLI 143 ++#define S4SUBLI 144 ++#define S8ADDLI 145 ++#define S8SUBLI 146 ++#define MULWI 147 ++#define DIVWI 148 ++#define UDIVWI 149 ++#define REMWI 150 ++#define UREMWI 151 ++#define MULLI 152 ++#define MULHI 153 ++#define DIVLI 154 ++#define UDIVLI 155 ++#define REMLI 156 ++#define UREMLI 157 ++#define ADDPII 158 ++#define ADDPISI 159 ++#define CMPEQI 160 ++#define CMPLTI 161 ++#define CMPLEI 162 ++#define CMPULTI 163 ++#define CMPULEI 164 ++#define SBTI 165 ++#define CBTI 166 ++#define ANDI 167 ++#define BICI 168 ++#define BISI 169 ++#define ORNOTI 170 ++#define XORI 171 ++#define EQVI 172 ++#define INSLBI 173 ++#define INSLHI 174 ++#define INSLWI 175 ++#define INSLLI 176 ++#define INSHBI 177 ++#define INSHHI 178 ++#define INSHWI 179 
++#define INSHLI 180 ++#define SLLLI 181 ++#define SRLLI 182 ++#define SRALI 183 ++#define ROLLI 184 ++#define SLLWI 185 ++#define SRLWI 186 ++#define SRAWI 187 ++#define ROLWI 188 ++#define EXTLBI 189 ++#define EXTLHI 190 ++#define EXTLWI 191 ++#define EXTLLI 192 ++#define EXTHBI 193 ++#define EXTHHI 194 ++#define EXTHWI 195 ++#define EXTHLI 196 ++#define CTPOPI 197 ++#define CTLZI 198 ++#define CTTZI 199 ++#define REVBHI 200 ++#define REVBWI 201 ++#define REVBLI 202 ++#define CASWI 203 ++#define CASLI 204 ++#define MASKLBI 205 ++#define MASKLHI 206 ++#define MASKLWI 207 ++#define MASKLLI 208 ++#define MASKHBI 209 ++#define MASKHHI 210 ++#define MASKHWI 211 ++#define MASKHLI 212 ++#define ZAPI 213 ++#define ZAPNOTI 214 ++#define SEXTBI 215 ++#define SEXTHI 216 ++#define CMPGEBI 217 ++#define SELEQI 218 ++#define SELGEI 219 ++#define SELGTI 220 ++#define SELLEI 221 ++#define SELLTI 222 ++#define SELNEI 223 ++#define SELLBCI 224 ++#define SELLBSI 225 ++#define VLOGZZ 226 ++#define FADDS 227 ++#define FADDD 228 ++#define FSUBS 229 ++#define FSUBD 230 ++#define FMULS 231 ++#define FMULD 232 ++#define FDIVS 233 ++#define FDIVD 234 ++#define FSQRTS 235 ++#define FSQRTD 236 ++#define FCMPEQ 237 ++#define FCMPLE 238 ++#define FCMPLT 239 ++#define FCMPUN 240 ++#define FCVTSD 241 ++#define FCVTDS 242 ++#define FCVTDL_G 243 ++#define FCVTDL_P 244 ++#define FCVTDL_Z 245 ++#define FCVTDL_N 246 ++#define FCVTDL 247 ++#define FCVTWL 248 ++#define FCVTLW 249 ++#define FCVTLS 250 ++#define FCVTLD 251 ++#define FCPYS 252 ++#define FCPYSE 253 ++#define FCPYSN 254 ++#define IFMOVS 255 ++#define IFMOVD 256 ++#define RFPCR 257 ++#define WFPCR 258 ++#define SETFPEC0 259 ++#define SETFPEC1 260 ++#define SETFPEC2 261 ++#define SETFPEC3 262 ++#define FRECS 263 ++#define FRECD 264 ++#define FRIS 265 ++#define FRIS_G 266 ++#define FRIS_P 267 ++#define FRIS_Z 268 ++#define FRIS_N 269 ++#define FRID 270 ++#define FRID_G 271 ++#define FRID_P 272 ++#define FRID_Z 273 ++#define FRID_N 274 ++#define FMAS 275 ++#define FMAD 276 ++#define FMSS 277 ++#define FMSD 278 ++#define FNMAS 279 ++#define FNMAD 280 ++#define FNMSS 281 ++#define FNMSD 282 ++#define FSELEQ 283 ++#define FSELNE 284 ++#define FSELLT 285 ++#define FSELLE 286 ++#define FSELGT 287 ++#define FSELGE 288 ++#define VADDW 289 ++#define VADDWI 290 ++#define VSUBW 291 ++#define VSUBWI 292 ++#define VCMPGEW 293 ++#define VCMPGEWI 294 ++#define VCMPEQW 295 ++#define VCMPEQWI 296 ++#define VCMPLEW 297 ++#define VCMPLEWI 298 ++#define VCMPLTW 299 ++#define VCMPLTWI 300 ++#define VCMPULEW 301 ++#define VCMPULEWI 302 ++#define VCMPULTW 303 ++#define VCMPULTWI 304 ++#define VSLLW 305 ++#define VSLLWI 306 ++#define VSRLW 307 ++#define VSRLWI 308 ++#define VSRAW 309 ++#define VSRAWI 310 ++#define VROLW 311 ++#define VROLWI 312 ++#define SLLOW 313 ++#define SLLOWI 314 ++#define SRLOW 315 ++#define SRLOWI 316 ++#define VADDL 317 ++#define VADDLI 318 ++#define VSUBL 319 ++#define VSUBLI 320 ++#define VSLLB 321 ++#define VSLLBI 322 ++#define VSRLB 323 ++#define VSRLBI 324 ++#define VSRAB 325 ++#define VSRABI 326 ++#define VROLB 327 ++#define VROLBI 328 ++#define VSLLH 329 ++#define VSLLHI 330 ++#define VSRLH 331 ++#define VSRLHI 332 ++#define VSRAH 333 ++#define VSRAHI 334 ++#define VROLH 335 ++#define VROLHI 336 ++#define CTPOPOW 337 ++#define CTLZOW 338 ++#define VSLLL 339 ++#define VSLLLI 340 ++#define VSRLL 341 ++#define VSRLLI 342 ++#define VSRAL 343 ++#define VSRALI 344 ++#define VROLL 345 ++#define VROLLI 346 ++#define VMAXB 347 ++#define VMINB 348 ++#define VUCADDW 349 
++#define VUCADDWI 350 ++#define VUCSUBW 351 ++#define VUCSUBWI 352 ++#define VUCADDH 353 ++#define VUCADDHI 354 ++#define VUCSUBH 355 ++#define VUCSUBHI 356 ++#define VUCADDB 357 ++#define VUCADDBI 358 ++#define VUCSUBB 359 ++#define VUCSUBBI 360 ++#define SRAOW 361 ++#define SRAOWI 362 ++#define VSUMW 363 ++#define VSUML 364 ++#define VSM4R 365 ++#define VBINVW 366 ++#define VCMPUEQB 367 ++#define VCMPUGTB 368 ++#define VCMPUGTBI 369 ++#define VSM3MSW 370 ++#define VMAXH 371 ++#define VMINH 372 ++#define VMAXW 373 ++#define VMINW 374 ++#define VMAXL 375 ++#define VMINL 376 ++#define VUMAXB 377 ++#define VUMINB 378 ++#define VUMAXH 379 ++#define VUMINH 380 ++#define VUMAXW 381 ++#define VUMINW 382 ++#define VUMAXL 383 ++#define VUMINL 384 ++#define VSM4KEY 385 ++#define VADDS 386 ++#define VADDD 387 ++#define VSUBS 388 ++#define VSUBD 389 ++#define VMULS 390 ++#define VMULD 391 ++#define VDIVS 392 ++#define VDIVD 393 ++#define VSQRTS 394 ++#define VSQRTD 395 ++#define VFCMPEQ 396 ++#define VFCMPLE 397 ++#define VFCMPLT 398 ++#define VFCMPUN 399 ++#define VCPYS 400 ++#define VCPYSE 401 ++#define VCPYSN 402 ++#define VSUMS 403 ++#define VSUMD 404 ++#define VFCVTSD 405 ++#define VFCVTDS 406 ++#define VFCVTLS 407 ++#define VFCVTLD 408 ++#define VFCVTDL 409 ++#define VFCVTDL_G 410 ++#define VFCVTDL_P 411 ++#define VFCVTDL_Z 412 ++#define VFCVTDL_N 413 ++#define VFRIS 414 ++#define VFRIS_G 415 ++#define VFRIS_P 416 ++#define VFRIS_Z 417 ++#define VFRIS_N 418 ++#define VFRID 419 ++#define VFRID_G 420 ++#define VFRID_P 421 ++#define VFRID_Z 422 ++#define VFRID_N 423 ++#define VFRECS 424 ++#define VFRECD 425 ++#define VMAXS 426 ++#define VMINS 427 ++#define VMAXD 428 ++#define VMIND 429 ++#define VMAS 430 ++#define VMAD 431 ++#define VMSS 432 ++#define VMSD 433 ++#define VNMAS 434 ++#define VNMAD 435 ++#define VNMSS 436 ++#define VNMSD 437 ++#define VFSELEQ 438 ++#define VFSELLT 439 ++#define VFSELLE 440 ++#define VSELEQW 441 ++#define VSELEQWI 442 ++#define VSELLBCW 443 ++#define VSELLBCWI 444 ++#define VSELLTW 445 ++#define VSELLTWI 446 ++#define VSELLEW 447 ++#define VSELLEWI 448 ++#define VINSW 449 ++#define VINSF 450 ++#define VEXTW 451 ++#define VEXTF 452 ++#define VCPYW 453 ++#define VCPYF 454 ++#define VCONW 455 ++#define VSHFW 456 ++#define VCONS 457 ++#define VCOND 458 ++#define VINSB 459 ++#define VINSH 460 ++#define VINSECTLH 461 ++#define VINSECTLW 462 ++#define VINSECTLL 463 ++#define VINSECTLB 464 ++#define VSHFQ 465 ++#define VSHFQB 466 ++#define VCPYB 467 ++#define VCPYH 468 ++#define VSM3R 469 ++#define VFCVTSH 470 ++#define VFCVTHS 471 ++#define VLDW_U 472 ++#define VSTW_U 473 ++#define VLDS_U 474 ++#define VSTS_U 475 ++#define VLDD_U 476 ++#define VSTD_U 477 ++#define VSTW_UL 478 ++#define VSTW_UH 479 ++#define VSTS_UL 480 ++#define VSTS_UH 481 ++#define VSTD_UL 482 ++#define VSTD_UH 483 ++#define VLDD_NC 484 ++#define VSTD_NC 485 ++#define LBR 486 ++#define LDBU_A 487 ++#define LDHU_A 488 ++#define LDW_A 489 ++#define LDL_A 490 ++#define FLDS_A 491 ++#define FLDD_A 492 ++#define STBU_A 493 ++#define STHU_A 494 ++#define STW_A 495 ++#define STL_A 496 ++#define FSTS_A 497 ++#define FSTD_A 498 ++#define DPFHR 499 ++#define DPFHW 500 ++#define LDBU 501 ++#define LDHU 502 ++#define LDW 503 ++#define LDL 504 ++#define LDL_U 505 ++#define PRI_LDL 506 ++#define PRI_LDW 507 ++#define FLDS 508 ++#define FLDD 509 ++#define STB 510 ++#define STH 511 ++#define STW 512 ++#define STL 513 ++#define STL_U 514 ++#define PRI_STL 515 ++#define PRI_STW 516 ++#define FSTS 517 ++#define FSTD 518 
++#define BEQ 519 ++#define BNE 520 ++#define BLT 521 ++#define BLE 522 ++#define BGT 523 ++#define BGE 524 ++#define BLBC 525 ++#define BLBS 526 ++#define FBEQ 527 ++#define FBNE 528 ++#define FBLT 529 ++#define FBLE 530 ++#define FBGT 531 ++#define FBGE 532 ++#define LDIH 533 ++#define LDI 534 ++ ++extern const char *insn_opc[535]; ++ ++#endif +diff --git a/target/sw64/simd_helper.c b/target/sw64/simd_helper.c +new file mode 100644 +index 0000000000..40e80b6c55 +--- /dev/null ++++ b/target/sw64/simd_helper.c +@@ -0,0 +1,986 @@ ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "exec/exec-all.h" ++#include "exec/helper-proto.h" ++ ++static inline uint8_t *get_element_b(CPUSW64State *env, uint64_t ra, ++ int index) ++{ ++ return (uint8_t*)&env->fr[ra + (index / 8) * 32] + (index % 8); ++} ++ ++static inline uint16_t *get_element_h(CPUSW64State *env, uint64_t ra, ++ int index) ++{ ++ return (uint16_t*)&env->fr[ra + (index / 4) * 32] + (index % 4); ++} ++ ++static inline uint32_t *get_element_w(CPUSW64State *env, uint64_t ra, ++ int index) ++{ ++ return (uint32_t*)&env->fr[ra + (index / 2) * 32] + (index % 2); ++} ++ ++static inline uint64_t *get_element_l(CPUSW64State *env, uint64_t ra, ++ int index) ++{ ++ return &env->fr[ra + index * 32]; ++} ++ ++void helper_srlow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) ++{ ++ int i; ++ int adden; ++ int dest, src; ++ adden = shift >> 6; ++ shift &= 0x3f; ++ ++ for (i = 0; (i + adden) < 4; i++) { ++ dest = i * 32 + rc; ++ src = (i + adden) * 32 + ra; ++ env->fr[dest] = env->fr[src] >> shift; ++ if (((i + adden) < 3) && (shift != 0)) ++ env->fr[dest] |= (env->fr[src + 32] << (64 - shift)); ++ } ++ ++ for (; i < 4; i++) { ++ env->fr[rc + i * 32] = 0; ++ } ++} ++ ++void helper_sllow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) ++{ ++ int i; ++ int adden; ++ int dest, src; ++ adden = shift >> 6; ++ shift &= 0x3f; ++ ++ for (i = 3; (i - adden) >= 0; i--) { ++ dest = i * 32 + rc; ++ src = (i - adden) * 32 + ra; ++ env->fr[dest] = env->fr[src] << shift; ++ if (((i - adden) > 0) && (shift != 0)) ++ env->fr[dest] |= (env->fr[src - 32] >> (64 - shift)); ++ } ++ for (; i >= 0; i--) { ++ env->fr[rc + i * 32] = 0; ++ } ++} ++ ++static uint64_t do_logzz(uint64_t va, uint64_t vb, uint64_t vc, uint64_t zz) ++{ ++ int i; ++ uint64_t ret = 0; ++ int index; ++ ++ for (i = 0; i < 64; i++) { ++ index = (((va >> i) & 1) << 2) | (((vb >> i) & 1) << 1) | ((vc >> i) & 1); ++ ret |= ((zz >> index) & 1) << i; ++ } ++ ++ return ret; ++} ++ ++void helper_vlogzz(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t zz) ++{ ++ int i; ++ int ra, rb, rc; ++ ra = args >> 16; ++ rb = (args >> 8) & 0xff; ++ rc = args & 0xff; ++ for (i = 0; i < 4; i++) { ++ env->fr[rd + i * 32] = do_logzz(env->fr[ra + i * 32], env->fr[rb + i * 32], ++ env->fr[rc + i * 32], zz); ++ } ++} ++ ++void helper_v_print(CPUSW64State *env, uint64_t v) ++{ ++ printf("PC[%lx]: fr[%lx]:\n", GETPC(), v); ++} ++ ++void helper_vconw(CPUSW64State *env, uint64_t args, uint64_t rd, ++ uint64_t byte4_len) ++{ ++ int ra, rb; ++ int count; ++ int i; ++ uint32_t *ptr_dst, *ptr_src; ++ uint32_t tmp[8]; ++ ++ ra = (args >> 8) & 0xff; ++ rb = args & 0xff; ++ count = 8 - byte4_len; ++ ++ for (i = 0; i < 8; i++) { ++ ptr_dst = get_element_w(env, rd, i); ++ if (i < count) { ++ ptr_src = get_element_w(env, ra, i + byte4_len); ++ } else { ++ ptr_src = get_element_w(env, rb, i - count); ++ } ++ tmp[i] = *ptr_src; ++ } ++ for (i = 0; i < 8; i++) { ++ ptr_dst = get_element_w(env, rd, i); ++ 
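/* commit from tmp[]: rd may alias ra or rb, so every source element is read before any destination write */ ++ 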
*ptr_dst = tmp[i]; ++ } ++} ++ ++void helper_vcond(CPUSW64State *env, uint64_t args, uint64_t rd, ++ uint64_t byte8_len) ++{ ++ int ra, rb; ++ int count; ++ int i; ++ uint64_t *ptr_dst, *ptr_src; ++ uint64_t tmp[8]; ++ ++ ra = (args >> 8) & 0xff; ++ rb = args & 0xff; ++ count = 4 - byte8_len; ++ ++ for (i = 0; i < 4; i++) { ++ if (i < count) { ++ ptr_src = get_element_l(env, ra, i + byte8_len); ++ } else { ++ ptr_src = get_element_l(env, rb, i - count); ++ } ++ tmp[i] = *ptr_src; ++ } ++ for (i = 0; i < 4; i++) { ++ ptr_dst = get_element_l(env, rd, i + byte8_len); ++ *ptr_dst = tmp[i]; ++ } ++} ++ ++void helper_vshfw(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t vc) ++{ ++ int ra, rb; ++ int i; ++ uint32_t *ptr_dst, *ptr_src; ++ uint32_t tmp[8]; ++ int flag, idx; ++ ++ ra = (args >> 8) & 0xff; ++ rb = args & 0xff; ++ ++ for (i = 0; i < 8; i++) { ++ flag = (vc >> (i * 4)) & 0x8; ++ idx = (vc >> (i * 4)) & 0x7; ++ if (flag == 0) { ++ ptr_src = get_element_w(env, ra, idx); ++ } else { ++ ptr_src = get_element_w(env, rb, idx); ++ } ++ tmp[i] = *ptr_src; ++ } ++ for (i = 0; i < 8; i++) { ++ ptr_dst = get_element_w(env, rd, i); ++ *ptr_dst = tmp[i]; ++ } ++} ++ ++uint64_t helper_ctlzow(CPUSW64State *env, uint64_t ra) ++{ ++ int i, j; ++ uint64_t val; ++ uint64_t ctlz = 0; ++ ++ for (j = 3; j >= 0; j--) { ++ val = env->fr[ra + 32 * j]; ++ for (i = 63; i >= 0; i--) { ++ if ((val >> i) & 1) ++ return ctlz << 29; ++ else ++ ctlz++; ++ } ++ } ++ return ctlz << 29; ++} ++ ++void helper_vucaddw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ int a, b, c; ++ int ret; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ a = (int)(env->fr[ra + i * 32] & 0xffffffff); ++ b = (int)(env->fr[rb + i * 32] & 0xffffffff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ ret = c; ++ ++ a = (int)(env->fr[ra + i * 32] >> 32); ++ b = (int)(env->fr[rb + i * 32] >> 32); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | ++ (uint64_t)(uint32_t)ret; ++ } ++} ++ ++void helper_vucaddwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ int a, b, c; ++ int ret; ++ int i; ++ ++ b = (int)vb; ++ for (i = 0; i < 4; i++) { ++ a = (int)(env->fr[ra + i * 32] & 0xffffffff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ ret = c; ++ ++ a = (int)(env->fr[ra + i * 32] >> 32); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | ++ (uint64_t)(uint32_t)ret; ++ } ++} ++ ++void helper_vucsubw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ int a, b, c; ++ int ret; ++ int i; ++ ++ for (i = 0; i < 4; i++) { ++ a = (int)(env->fr[ra + i * 32] & 0xffffffff); ++ b = (int)(env->fr[rb + i * 32] & 0xffffffff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ ret = c; ++ ++ a = (int)(env->fr[ra + i * 32] >> 32); ++ b = (int)(env->fr[rb + i * 32] >> 32); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | ++ (uint64_t)(uint32_t)ret; ++ } ++} ++ ++void helper_vucsubwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ 
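/* saturating signed word subtract, immediate operand: for c = a - b, overflow occurred iff a and b differ in sign and c flips the sign of a; clamp to INT32_MIN when a < 0, else INT32_MAX (mirrors helper_vucsubw above) */ ++ 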
int a, b, c; ++ int ret; ++ int i; ++ ++ b = (int)vb; ++ for (i = 0; i < 4; i++) { ++ a = (int)(env->fr[ra + i * 32] & 0xffffffff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ ret = c; ++ ++ a = (int)(env->fr[ra + i * 32] >> 32); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80000000; ++ else ++ c = 0x7fffffff; ++ } ++ env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | ++ (uint64_t)(uint32_t)ret; ++ } ++} ++ ++void helper_vucaddh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ short a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 4; j++) { ++ a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); ++ b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x8000; ++ else ++ c = 0x7fff; ++ } ++ ret |= ((uint64_t)(uint16_t)c) << (j * 16); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucaddhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ short a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ b = (short)vb; ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 4; j++) { ++ a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x8000; ++ else ++ c = 0x7fff; ++ } ++ ret |= ((uint64_t)(uint16_t)c) << (j * 16); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucsubh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ short a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 4; j++) { ++ a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); ++ b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x8000; ++ else ++ c = 0x7fff; ++ } ++ ret |= ((uint64_t)(uint16_t)c) << (j * 16); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucsubhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ short a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ b = (short)vb; ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 4; j++) { ++ a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x8000; ++ else ++ c = 0x7fff; ++ } ++ ret |= ((uint64_t)(uint16_t)c) << (j * 16); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucaddb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ int8_t a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 8; j++) { ++ a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); ++ b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80; ++ else ++ c = 0x7f; ++ } ++ ret |= ((uint64_t)(uint8_t)c) << (j * 8); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucaddbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ int8_t a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ b = (int8_t)(vb & 0xff); ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 8; j++) { ++ a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); ++ c = a + b; ++ if ((c ^ a) < 0 && (c ^ b) < 0) { ++ if (a < 0) ++ c = 0x80; ++ else ++ c = 0x7f; ++ } ++ ret |= ((uint64_t)(uint8_t)c) << (j * 8); ++ } ++ env->fr[rc + i * 32] = ret; 
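++        /* each 64-bit lane packs eight independently saturated bytes */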
++ } ++} ++ ++void helper_vucsubb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ int8_t a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 8; j++) { ++ a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); ++ b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80; ++ else ++ c = 0x7f; ++ } ++ ret |= ((uint64_t)(uint8_t)c) << (j * 8); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++void helper_vucsubbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ int8_t a, b, c; ++ uint64_t ret; ++ int i, j; ++ ++ b = (int8_t)(vb & 0xff); ++ for (i = 0; i < 4; i++) { ++ ret = 0; ++ for (j = 0; j < 8; j++) { ++ a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xffff); ++ c = a - b; ++ if ((b ^ a) < 0 && (c ^ a) < 0) { ++ if (a < 0) ++ c = 0x80; ++ else ++ c = 0x7f; ++ } ++ ret |= ((uint64_t)(uint8_t)c) << (j * 8); ++ } ++ env->fr[rc + i * 32] = ret; ++ } ++} ++ ++uint64_t helper_vstw(CPUSW64State *env, uint64_t t0, uint64_t t1) ++{ ++ uint64_t idx, shift; ++ ++ idx = t0 + (t1 / 2) * 32; ++ shift = (t1 % 2) * 32; ++ ++ return (env->fr[idx] >> shift) & 0xffffffffUL; ++} ++ ++uint64_t helper_vsts(CPUSW64State *env, uint64_t t0, uint64_t t1) ++{ ++ uint64_t idx, val; ++ ++ idx = t0 + t1 * 32; ++ val = env->fr[idx]; ++ ++ return ((val >> 32) & 0xc0000000) | ((val >> 29) & 0x3fffffff); ++} ++ ++uint64_t helper_vstd(CPUSW64State *env, uint64_t t0, uint64_t t1) ++{ ++ uint64_t idx; ++ ++ idx = t0 + t1 * 32; ++ return env->fr[idx]; ++} ++ ++#define HELPER_VMAX(name, _suffix, type, loop) \ ++ void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \ ++ uint64_t rb, uint64_t rc) \ ++ { \ ++ int i; \ ++ type *ptr_dst, *ptr_src_a, *ptr_src_b; \ ++ \ ++ for (i = 0; i < loop; i++) { \ ++ ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \ ++ ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \ ++ ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \ ++ \ ++ if (*ptr_src_a >= *ptr_src_b) { \ ++ *ptr_dst = *ptr_src_a; \ ++ } else { \ ++ *ptr_dst = *ptr_src_b; \ ++ } \ ++ } \ ++ } ++ ++#define HELPER_VMIN(name, _suffix, type, loop) \ ++ void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \ ++ uint64_t rb, uint64_t rc) \ ++ { \ ++ int i; \ ++ type *ptr_dst, *ptr_src_a, *ptr_src_b; \ ++ \ ++ for (i = 0; i < loop; i++) { \ ++ ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \ ++ ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \ ++ ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \ ++ \ ++ if (*ptr_src_a <= *ptr_src_b) { \ ++ *ptr_dst = *ptr_src_a; \ ++ } else { \ ++ *ptr_dst = *ptr_src_b; \ ++ } \ ++ } \ ++ } ++ ++HELPER_VMAX(vmax, b, int8_t, 32) ++HELPER_VMIN(vmin, b, int8_t, 32) ++HELPER_VMAX(vmax, h, int16_t, 16) ++HELPER_VMIN(vmin, h, int16_t, 16) ++HELPER_VMAX(vmax, w, int32_t, 8) ++HELPER_VMIN(vmin, w, int32_t, 8) ++HELPER_VMAX(vumax, b, uint8_t, 32) ++HELPER_VMIN(vumin, b, uint8_t, 32) ++HELPER_VMAX(vumax, h, uint16_t, 16) ++HELPER_VMIN(vumin, h, uint16_t, 16) ++HELPER_VMAX(vumax, w, uint32_t, 8) ++HELPER_VMIN(vumin, w, uint32_t, 8) ++ ++void helper_sraow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) ++{ ++ int i; ++ int adden; ++ int dest, src; ++ uint64_t sign; ++ adden = shift >> 6; ++ shift &= 0x3f; ++ sign = (uint64_t)((int64_t)env->fr[ra + 96] >> 63); ++ ++ for (i = 0; (i + adden) < 4; i++) { ++ dest = i * 32 + rc; ++ src = (i + adden) * 32 + 
ra; ++ env->fr[dest] = env->fr[src] >> shift; ++ if (shift != 0) { ++ if (((i + adden) < 3)) ++ env->fr[dest] |= (env->fr[src + 32] << (64 - shift)); ++ else ++ env->fr[dest] |= (sign << (64 - shift)); ++ } ++ } ++ ++ for (; i < 4; i++) { ++ env->fr[rc + i * 32] = sign; ++ } ++} ++ ++static uint16_t sm4_sbox[16][16] = { ++ { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05 }, ++ { 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99 }, ++ { 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62 }, ++ { 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6 }, ++ { 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8 }, ++ { 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35 }, ++ { 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87 }, ++ { 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e }, ++ { 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1 }, ++ { 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3 }, ++ { 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f }, ++ { 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51 }, ++ { 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8 }, ++ { 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0 }, ++ { 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84 }, ++ { 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 } ++}; ++ ++static uint32_t SBOX(uint32_t val) ++{ ++ int ret = 0; ++ int i; ++ int idx_x, idx_y; ++ for (i = 0; i < 4; i++) { ++ idx_x = (val >> (i * 8)) & 0xff; ++ idx_y = idx_x & 0xf; ++ idx_x = idx_x >> 4; ++ ++ ret |= (sm4_sbox[idx_x][idx_y] << (i * 8)); ++ } ++ return ret; ++} ++ ++static uint32_t rotl(uint32_t val, int shift) ++{ ++ uint64_t ret = (uint64_t)val; ++ ret = (ret << (shift & 0x1f)); ++ return (uint32_t)((ret & 0xffffffff) | (ret >> 32)); ++} ++ ++void helper_vsm4r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ uint32_t W[12], rk[8]; ++ uint32_t temp1, temp2; ++ int i, j; ++ ++ for (i = 0; i < 8; i++) { ++ rk[i] = *get_element_w(env, rb, i); ++ } ++ for (i = 0; i < 2; i++) { ++ for (j = 0; j < 4; j++) { ++ W[j] = *get_element_w(env, ra, i * 4 + j); ++ } ++ for (j = 0; j < 8; j++) { ++ temp1 = W[j + 1] ^ W[j + 2] ^ W[j + 3] ^ rk[j]; ++ temp2 = SBOX(temp1); ++ W[j + 4] = W[j] ^ temp2 ^ rotl(temp2, 2) ^ rotl(temp2, 10) ^ rotl(temp2, 18) ^ rotl(temp2, 24); ++ } ++ ++ for (j = 0; j < 4; j++) { ++ *get_element_w(env, rc, i * 4 + j) = W[8 + j]; ++ } ++ } ++} ++ ++void helper_vcmpueqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ uint8_t *ptr_a, *ptr_b, *ptr_c; ++ int i; ++ ++ for (i = 0; i < 32; i++) { ++ ptr_a = get_element_b(env, ra, i); ++ ptr_b = get_element_b(env, rb, i); ++ ptr_c = get_element_b(env, rc, i); ++ ++ *ptr_c = (*ptr_a == *ptr_b) ? 
1 : 0; ++ } ++} ++ ++void helper_vcmpugtb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ uint8_t *ptr_a, *ptr_b, *ptr_c; ++ int i; ++ ++ for (i = 0; i < 32; i++) { ++ ptr_a = get_element_b(env, ra, i); ++ ptr_b = get_element_b(env, rb, i); ++ ptr_c = get_element_b(env, rc, i); ++ ++ *ptr_c = (*ptr_a > *ptr_b) ? 1 : 0; ++ } ++} ++ ++void helper_vcmpueqbi(CPUSW64State *env, uint64_t ra, uint64_t vb, ++ uint64_t rc) ++{ ++ uint8_t *ptr_a, *ptr_c; ++ int i; ++ ++ for (i = 0; i < 32; i++) { ++ ptr_a = get_element_b(env, ra, i); ++ ptr_c = get_element_b(env, rc, i); ++ ++ *ptr_c = (*ptr_a == vb) ? 1 : 0; ++ } ++} ++ ++void helper_vcmpugtbi(CPUSW64State *env, uint64_t ra, uint64_t vb, ++ uint64_t rc) ++{ ++ uint8_t *ptr_a, *ptr_c; ++ int i; ++ ++ for (i = 0; i < 32; i++) { ++ ptr_a = get_element_b(env, ra, i); ++ ptr_c = get_element_b(env, rc, i); ++ ++ *ptr_c = (*ptr_a > vb) ? 1 : 0; ++ } ++} ++ ++void helper_vsm3msw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) ++{ ++ uint32_t W[24]; ++ uint32_t temp; ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ W[i + 0] = *get_element_w(env, ra, i); ++ W[i + 8] = *get_element_w(env, rb, i); ++ } ++ for (i = 16; i < 24; i++) { ++ temp = W[i - 16] ^ W[i - 9] ^ rotl(W[i - 3], 15); ++ temp = temp ^ rotl(temp, 15) ^ rotl(temp, 23) ^ rotl(W[i - 13], 7) ^ W[i - 6]; ++ W[i] = temp; ++ } ++ for (i = 0; i < 8; i++) { ++ *get_element_w(env, rc, i) = W[16 + i]; ++ } ++} ++ ++static uint32_t selck[4][8] = { ++ {0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9}, ++ {0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9}, ++ {0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299}, ++ {0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279} ++}; ++ ++void helper_vsm4key(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) ++{ ++ uint32_t K[12], *CK; ++ int i; ++ uint32_t temp1, temp2; ++ ++ for (i = 4; i < 8; i++) { ++ K[i - 4] = *get_element_w(env, ra, i); ++ } ++ CK = selck[vb]; ++ ++ for (i = 0; i < 8; i++) { ++ temp1 = K[i + 1] ^ K[i + 2] ^ K[i + 3] ^ CK[i]; ++ temp2 = SBOX(temp1); ++ K[i + 4] = K[i] ^ temp2 ^ rotl(temp2, 13) ^ rotl(temp2, 23); ++ } ++ for (i = 0; i < 8; i++) { ++ *get_element_w(env, rc, i) = K[i + 4]; ++ } ++} ++ ++void helper_vinsb(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc, ++ uint64_t rd) ++{ ++ int i; ++ ++ for (i = 0; i < 128; i += 32) { ++ env->fr[rd + i] = env->fr[rb + i]; ++ } ++ ++ *get_element_b(env, rd, vc) = (uint8_t)(va & 0xff); ++} ++ ++void helper_vinsh(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc, ++ uint64_t rd) ++{ ++ int i; ++ ++ if (vc >= 16) ++ return; ++ ++ for (i = 0; i < 128; i += 32) { ++ env->fr[rd + i] = env->fr[rb + i]; ++ } ++ ++ *get_element_h(env, rd, vc) = (uint16_t)(va & 0xffff); ++} ++ ++void helper_vinsectlh(CPUSW64State *env, uint64_t ra, uint64_t rb, ++ uint64_t rd) ++{ ++ int i; ++ uint32_t temp[8]; ++ for (i = 0; i < 8; i++) { ++ temp[i] = *get_element_h(env, ra, i) | ((uint32_t)*get_element_h(env, rb, i) << 16); ++ } ++ for (i = 0; i < 8; i++) { ++ *get_element_w(env, rd, i) = temp[i]; ++ } ++} ++void helper_vinsectlw(CPUSW64State *env, uint64_t ra, uint64_t rb, ++ uint64_t rd) ++{ ++ int i; ++ uint64_t temp[4]; ++ for (i = 0; i < 4; i++) { ++ temp[i] = *get_element_w(env, ra, i) | ((uint64_t)*get_element_w(env, rb, i) << 32); ++ } ++ for (i = 0; i < 4; i++) { ++ 
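/* write back only after all reads: rd may alias ra or rb */ ++ 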
*get_element_l(env, rd, i) = temp[i]; ++ } ++} ++ ++void helper_vinsectlb(CPUSW64State *env, uint64_t ra, uint64_t rb, ++ uint64_t rd) ++{ ++ int i; ++ uint16_t temp[16]; ++ for (i = 0; i < 16; i++) { ++ temp[i] = *get_element_b(env, ra, i) | ((uint16_t)*get_element_b(env, rb, i) << 8); ++ } ++ for (i = 0; i < 16; i++) { ++ *get_element_h(env, rd, i) = temp[i]; ++ } ++} ++ ++void helper_vshfq(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, ++ uint64_t rd) ++{ ++ int i; ++ int idx; ++ uint64_t temp[4]; ++ for (i = 0; i < 2; i++) { ++ idx = ((vc >> (i * 2)) & 1) * 64; ++ if ((vc >> (i * 2 + 1)) & 1) { ++ temp[i * 2] = env->fr[rb + idx]; ++ temp[i * 2 + 1] = env->fr[rb + idx + 32]; ++ } else { ++ temp[i * 2] = env->fr[ra + idx]; ++ temp[i * 2 + 1] = env->fr[ra + idx + 32]; ++ } ++ } ++ for (i = 0; i < 4; i++) { ++ env->fr[rd + i * 32] = temp[i]; ++ } ++} ++ ++void helper_vshfqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rd) ++{ ++ int i; ++ int idx; ++ int vb; ++ uint8_t temp[32]; ++ ++ for (i = 0; i < 16; i++) { ++ vb = *get_element_b(env, rb, i); ++ if (vb >> 7) { ++ temp[i] = 0; ++ } else { ++ idx = vb & 0xf; ++ temp[i] = *get_element_b(env, ra, idx); ++ } ++ vb = *get_element_b(env, rb, i + 16); ++ if (vb >> 7) { ++ temp[i + 16] = 0; ++ } else { ++ idx = vb & 0xf; ++ temp[i + 16] = *get_element_b(env, ra, idx + 16); ++ } ++ } ++ for (i = 0; i < 4; i++) { ++ env->fr[rd + i * 32] = *((uint64_t*)temp + i); ++ } ++} ++ ++void helper_vsm3r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, ++ uint64_t rd) ++{ ++ uint32_t W[8]; ++ uint32_t A, B, C, D, E, F, G, H, T; ++ int i; ++ uint32_t SS1, SS2, TT1, TT2, P0; ++ ++ if (vc >= 16) ++ return; ++ for (i = 0; i < 8; i++) { ++ W[i] = *get_element_w(env, ra, i); ++ } ++ A = *get_element_w(env, rb, 0); ++ B = *get_element_w(env, rb, 1); ++ C = *get_element_w(env, rb, 2); ++ D = *get_element_w(env, rb, 3); ++ E = *get_element_w(env, rb, 4); ++ F = *get_element_w(env, rb, 5); ++ G = *get_element_w(env, rb, 6); ++ H = *get_element_w(env, rb, 7); ++ ++ if (vc < 4) { ++ T = 0x79cc4519; ++ for (i = 0; i < 4; i++) { ++ SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7); ++ SS2 = SS1 ^ rotl(A, 12); ++ TT1 = (A ^ B ^ C) + D + SS2 + (W[i] ^ W[i + 4]); ++ TT2 = (E ^ F ^ G) + H + SS1 + W[i]; ++ ++ P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17); ++ ++ H = G; ++ G = rotl(F, 19); ++ F = E; ++ E = P0; ++ D = C; ++ C = rotl(B, 9); ++ B = A; ++ A = TT1; ++ } ++ } else { ++ T = 0x7a879d8a; ++ for (i = 0; i < 4; i++) { ++ SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7); ++ SS2 = SS1 ^ rotl(A, 12); ++ TT1 = ((A & B) | (A & C) | (B & C)) + D + SS2 + (W[i] ^ W[i + 4]); ++ TT2 = ((E & F) | ((~E) & G)) + H + SS1 + W[i]; ++ ++ P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17); ++ ++ H = G; ++ G = rotl(F, 19); ++ F = E; ++ E = P0; ++ D = C; ++ C = rotl(B, 9); ++ B = A; ++ A = TT1; ++ } ++ } ++ *get_element_w(env, rd, 0) = A; ++ *get_element_w(env, rd, 1) = B; ++ *get_element_w(env, rd, 2) = C; ++ *get_element_w(env, rd, 3) = D; ++ *get_element_w(env, rd, 4) = E; ++ *get_element_w(env, rd, 5) = F; ++ *get_element_w(env, rd, 6) = G; ++ *get_element_w(env, rd, 7) = H; ++} +diff --git a/target/sw64/translate.c b/target/sw64/translate.c +new file mode 100644 +index 0000000000..b75684672f +--- /dev/null ++++ b/target/sw64/translate.c +@@ -0,0 +1,4878 @@ ++#include "translate.h" ++#include "tcg/tcg.h" ++#define DEVELOP_SW64 1 ++#ifdef DEVELOP_SW64 ++ ++#define HELPER_H "helper.h" ++#include "exec/helper-info.c.inc" ++#undef HELPER_H ++ ++#define ILLEGAL(x) \ ++ do { 
\ ++ printf("Illegal SW64 0x%x at line %d!\n", x, __LINE__); \ ++ exit(-1); \ ++ } while (0) ++#endif ++ ++TCGv cpu_pc; ++TCGv cpu_std_ir[31]; ++TCGv cpu_fr[128]; ++TCGv cpu_lock_addr; ++TCGv cpu_lock_flag; ++TCGv cpu_lock_success; ++#ifdef SW64_FIXLOCK ++TCGv cpu_lock_value; ++#endif ++ ++#ifndef CONFIG_USER_ONLY ++TCGv cpu_hm_ir[31]; ++#endif ++ ++void sw64_translate_init(void) ++{ ++#define DEF_VAR(V) \ ++ { &cpu_##V, #V, offsetof(CPUSW64State, V) } ++ ++ typedef struct { ++ TCGv* var; ++ const char* name; ++ int ofs; ++ } GlobalVar; ++ ++ static const GlobalVar vars[] = { ++ DEF_VAR(pc), DEF_VAR(lock_addr), ++ DEF_VAR(lock_flag), DEF_VAR(lock_success), ++#ifdef SW64_FIXLOCK ++ DEF_VAR(lock_value), ++#endif ++ }; ++ cpu_pc = tcg_global_mem_new_i64(tcg_env, ++ offsetof(CPUSW64State, pc), "PC"); ++ ++#undef DEF_VAR ++ ++ /* Use the symbolic register names that match the disassembler. */ ++ static const char ireg_names[31][4] = { ++ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", ++ "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5", ++ "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"}; ++ ++ static const char freg_names[128][4] = { ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", ++ "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", ++ "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", ++ "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", ++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", ++ "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", ++ "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", ++ "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", ++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", ++ "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", ++ "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", ++ "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", ++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; ++ ++#ifndef CONFIG_USER_ONLY ++ static const char shadow_names[10][8] = { ++ "hm_p1", "hm_p2", "hm_p4", "hm_p5", "hm_p6", ++ "hm_p7", "hm_p20", "hm_p21", "hm_p22", "hm_p23"}; ++ static const int shadow_index[10] = {1, 2, 4, 5, 6, 7, 20, 21, 22, 23}; ++#endif ++ ++ int i; ++ ++ for (i = 0; i < 31; i++) { ++ cpu_std_ir[i] = tcg_global_mem_new_i64( ++ tcg_env, offsetof(CPUSW64State, ir[i]), ireg_names[i]); ++ } ++ ++ for (i = 0; i < 128; i++) { ++ cpu_fr[i] = tcg_global_mem_new_i64( ++ tcg_env, offsetof(CPUSW64State, fr[i]), freg_names[i]); ++ } ++ for (i = 0; i < ARRAY_SIZE(vars); ++i) { ++ const GlobalVar* v = &vars[i]; ++ *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name); ++ } ++#ifndef CONFIG_USER_ONLY ++ memcpy(cpu_hm_ir, cpu_std_ir, sizeof(cpu_hm_ir)); ++ for (i = 0; i < 10; i++) { ++ int r = shadow_index[i]; ++ cpu_hm_ir[r] = tcg_global_mem_new_i64( ++ tcg_env, offsetof(CPUSW64State, sr[i]), shadow_names[i]); ++ } ++#endif ++} ++ ++static bool in_superpage(DisasContext* ctx, int64_t addr) ++{ ++ return false; ++} ++ ++bool use_exit_tb(DisasContext* ctx) ++{ ++ return ((tb_cflags(ctx->base.tb) & 1) || ++ ctx->base.singlestep_enabled || 1); ++} ++ ++bool use_goto_tb(DisasContext* ctx, uint64_t dest) ++{ ++ /* Suppress goto_tb in the case of single-steping and IO. */ ++ if (unlikely(use_exit_tb(ctx))) { ++ return false; ++ } ++ /* If the destination is in the superpage, the page perms can't change. 
*/ ++ if (in_superpage(ctx, dest)) { ++ return true; ++ } ++/* Check for the dest on the same page as the start of the TB. */ ++#ifndef CONFIG_USER_ONLY ++ return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0; ++#else ++ return true; ++#endif ++} ++ ++void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) ++{ ++ uint64_t mzero = 1ull << 63; ++ ++ switch (cond) { ++ case TCG_COND_LE: ++ case TCG_COND_GT: ++ /* For <= or >, the -0.0 value directly compares the way we want. */ ++ tcg_gen_mov_i64(dest, src); ++ break; ++ ++ case TCG_COND_EQ: ++ case TCG_COND_NE: ++ /* For == or !=, we can simply mask off the sign bit and compare. */ ++ tcg_gen_andi_i64(dest, src, mzero - 1); ++ break; ++ ++ case TCG_COND_GE: ++ case TCG_COND_LT: ++ /* For >= or <, map -0.0 to +0.0 via comparison and mask. */ ++ tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero); ++ tcg_gen_neg_i64(dest, dest); ++ tcg_gen_and_i64(dest, dest, src); ++ break; ++ ++ default: ++ abort(); ++ } ++} ++ ++static TCGv load_zero(DisasContext *ctx) ++{ ++ if (!ctx->zero) { ++ ctx->zero = tcg_constant_i64(0); ++ } ++ return ctx->zero; ++} ++ ++static void free_context_temps(DisasContext *ctx) ++{ ++ if (ctx->zero) { ++ ctx->zero = NULL; ++ } ++} ++ ++static TCGv load_gir(DisasContext *ctx, unsigned reg) ++{ ++ if (likely(reg < 31)) { ++ return ctx->ir[reg]; ++ } else { ++ return load_zero(ctx); ++ } ++} ++ ++static void gen_excp_1(int exception, int error_code) ++{ ++ TCGv_i32 tmp1, tmp2; ++ ++ tmp1 = tcg_constant_i32(exception); ++ tmp2 = tcg_constant_i32(error_code); ++ gen_helper_excp(tcg_env, tmp1, tmp2); ++} ++ ++static DisasJumpType gen_excp(DisasContext* ctx, int exception, ++ int error_code) ++{ ++ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); ++ gen_excp_1(exception, error_code); ++ return DISAS_NORETURN; ++} ++ ++static int i_count = 1; ++ ++static inline DisasJumpType gen_invalid(DisasContext *ctx) ++{ ++ if (i_count == 0) { ++ i_count++; ++ return DISAS_NEXT; ++ } ++ fprintf(stderr, "here %lx\n", ctx->base.pc_next); ++ return gen_excp(ctx, EXCP_OPCDEC, 0); ++} ++ ++static uint64_t zapnot_mask(uint8_t byte_mask) ++{ ++ uint64_t mask = 0; ++ int i; ++ ++ for (i = 0; i < 8; ++i) { ++ if ((byte_mask >> i) & 1) { ++ mask |= 0xffull << (i * 8); ++ } ++ } ++ return mask; ++} ++ ++static void gen_ins_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv tmp = tcg_temp_new(); ++ TCGv shift = tcg_temp_new(); ++ ++ tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask)); ++ ++ tcg_gen_andi_i64(shift, vb, 7); ++ tcg_gen_shli_i64(shift, shift, 3); ++ tcg_gen_shl_i64(vc, tmp, shift); ++ ++} ++ ++static void gen_ins_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv tmp = tcg_temp_new(); ++ TCGv shift = tcg_temp_new(); ++ ++ tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask)); ++ ++ tcg_gen_shli_i64(shift, vb, 3); ++ tcg_gen_not_i64(shift, shift); ++ tcg_gen_andi_i64(shift, shift, 0x3f); ++ ++ tcg_gen_shr_i64(vc, tmp, shift); ++ tcg_gen_shri_i64(vc, vc, 1); ++} ++ ++static void gen_ext_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv tmp = tcg_temp_new(); ++ TCGv shift = tcg_temp_new(); ++ ++ tcg_gen_andi_i64(shift, vb, 7); ++ tcg_gen_shli_i64(shift, shift, 3); ++ tcg_gen_shr_i64(tmp, va, shift); ++ ++ tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask)); ++ ++} ++ ++static void gen_ext_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv tmp = tcg_temp_new(); ++ TCGv shift = tcg_temp_new(); ++ ++ tcg_gen_andi_i64(shift, vb, 7); 
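++    /* the byte offset (vb & 7) becomes a left shift of 64 - 8 * (vb & 7) bits below */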
++ tcg_gen_shli_i64(shift, shift, 3); ++ tcg_gen_movi_i64(tmp, 64); ++ tcg_gen_sub_i64(shift, tmp, shift); ++ tcg_gen_shl_i64(tmp, va, shift); ++ ++ tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask)); ++ ++} ++ ++static void gen_mask_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv shift = tcg_temp_new(); ++ TCGv mask = tcg_temp_new(); ++ ++ tcg_gen_andi_i64(shift, vb, 7); ++ tcg_gen_shli_i64(shift, shift, 3); ++ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); ++ tcg_gen_shl_i64(mask, mask, shift); ++ ++ tcg_gen_andc_i64(vc, va, mask); ++ ++} ++ ++static void gen_mask_h(DisasContext *ctx, TCGv vc, TCGv va, TCGv vb, ++ uint8_t byte_mask) ++{ ++ TCGv shift = tcg_temp_new(); ++ TCGv mask = tcg_temp_new(); ++ ++ /* The instruction description is as above, where the byte_mask ++ is shifted left, and then we extract bits <15:8>. This can be ++ emulated with a right-shift on the expanded byte mask. This ++ requires extra care because for an input <2:0> == 0 we need a ++ shift of 64 bits in order to generate a zero. This is done by ++ splitting the shift into two parts, the variable shift - 1 ++ followed by a constant 1 shift. The code we expand below is ++ equivalent to ~(B * 8) & 63. */ ++ ++ tcg_gen_shli_i64(shift, vb, 3); ++ tcg_gen_not_i64(shift, shift); ++ tcg_gen_andi_i64(shift, shift, 0x3f); ++ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); ++ tcg_gen_shr_i64(mask, mask, shift); ++ tcg_gen_shri_i64(mask, mask, 1); ++ ++ tcg_gen_andc_i64(vc, va, mask); ++ ++} ++ ++static void gen_lshift_mask(TCGv v0, TCGv v1, int size) ++{ ++ TCGv mask = tcg_constant_i64((1UL << size) - 1); ++ int i; ++ ++ tcg_gen_shl_i64(mask, mask, v1); ++ tcg_gen_andi_i64(mask, mask, (uint64_t)((1UL << size) - 1)); ++ tcg_gen_mov_i64(v0, mask); ++ ++ for (i = size; i < 64; i = i * 2) { ++ tcg_gen_shli_i64(mask, v0, i); ++ tcg_gen_or_i64(v0, mask, v0); ++ } ++} ++ ++static void gen_lshifti_mask(TCGv v0, int v1, int size) ++{ ++ int i; ++ uint64_t mask; ++ mask = (1UL << size) - 1; ++ mask = (mask << v1) & (uint64_t)((1UL << size) - 1); ++ for (i = size; i < 64; i = i * 2) { ++ mask |= (mask << i); ++ } ++ tcg_gen_movi_i64(v0, mask); ++} ++ ++static void gen_rshift_mask(TCGv v0, TCGv v1, int size) ++{ ++ TCGv mask = tcg_constant_i64((1UL << size) - 1); ++ int i; ++ ++ tcg_gen_shr_i64(mask, mask, v1); ++ tcg_gen_mov_i64(v0, mask); ++ ++ for (i = size; i < 64; i = i * 2) { ++ tcg_gen_shli_i64(mask, v0, i); ++ tcg_gen_or_i64(v0, mask, v0); ++ } ++} ++ ++static void gen_rshifti_mask(TCGv v0, int v1, int size) ++{ ++ int i; ++ uint64_t mask; ++ mask = (1UL << size) - 1; ++ mask = (mask >> v1) & (uint64_t)((1UL << size) - 1); ++ for (i = size; i < 64; i = i * 2) { ++ mask |= (mask << i); ++ } ++ tcg_gen_movi_i64(v0, mask); ++} ++ ++static void gen_rsign_mask(TCGv v0, TCGv v1,int size) ++{ ++ TCGv mask, tmp; ++ uint64_t sign_mask = 0; ++ mask = tcg_temp_new(); ++ tmp = tcg_temp_new(); ++ int i; ++ ++ for (i = 0; i < 64 / size; i++) { ++ sign_mask |= (1UL << (size - 1)) << (i * size); ++ } ++ ++ tcg_gen_andi_i64(mask, v1, sign_mask); ++ tcg_gen_mov_i64(tmp, mask); ++ for (i = 1; i < size; i = i * 2) { ++ tcg_gen_shri_i64(mask, tmp, i); ++ tcg_gen_or_i64(tmp, mask, tmp); ++ } ++ tcg_gen_andc_i64(v0, tmp, v0); ++} ++ ++static inline void gen_load_mem( ++ DisasContext *ctx, void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags), ++ int ra, int rb, int32_t disp16, bool fp, uint64_t mask) ++{ ++ TCGv tmp, addr, va; ++ ++ /* LDQ_U with ra $31 is UNOP. 
Other various loads are forms of ++ prefetches, which we can treat as nops. No worries about ++ missed exceptions here. */ ++ if (unlikely(ra == 31)) { ++ return; ++ } ++ ++ tmp = tcg_temp_new(); ++ addr = load_gir(ctx, rb); ++ ++ if (disp16) { ++ tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); ++ addr = tmp; ++ } else { ++ tcg_gen_mov_i64(tmp, addr); ++ addr = tmp; ++ } ++ if (mask) { ++ tcg_gen_andi_i64(tmp, addr, mask); ++ } ++ ++ va = (fp ? cpu_fr[ra] : load_gir(ctx, ra)); ++ tcg_gen_qemu_load(va, addr, ctx->mem_idx); ++ gen_helper_trace_mem(tcg_env, addr, va); ++ ++} ++ ++static inline void gen_store_mem( ++ DisasContext *ctx, void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags), ++ int ra, int rb, int32_t disp16, bool fp, uint64_t mask) ++{ ++ TCGv tmp, addr, va; ++ ++ tmp = tcg_temp_new(); ++ addr = load_gir(ctx, rb); ++ if (disp16) { ++ tcg_gen_addi_i64(tmp, addr, disp16); ++ addr = tmp; ++ } else { ++ tcg_gen_mov_i64(tmp, addr); ++ addr = tmp; ++ } ++ if (mask) { ++ tcg_gen_andi_i64(tmp, addr, mask); ++ } ++ va = (fp ? cpu_fr[ra] : load_gir(ctx, ra)); ++ ++ tcg_gen_qemu_store(va, addr, ctx->mem_idx); ++ gen_helper_trace_mem(tcg_env, addr, va); ++} ++ ++static void cal_with_iregs_2(DisasContext *ctx, TCGv vc, TCGv va, TCGv vb, ++ int32_t disp13, uint16_t fn) ++{ ++ TCGv tmp, t1, tmp1; ++ TCGv_i32 tmpa, tmpb, tmpc; ++ ++ switch (fn & 0xff) { ++ case 0x00: ++ /* ADDW */ ++ tcg_gen_add_i64(vc, va, vb); ++ tcg_gen_ext32s_i64(vc, vc); ++ break; ++ case 0x01: ++ /* SUBW */ ++ tcg_gen_sub_i64(vc, va, vb); ++ tcg_gen_ext32s_i64(vc, vc); ++ break; ++ case 0x02: ++ /* S4ADDW */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 2); ++ tcg_gen_add_i64(tmp, tmp, vb); ++ tcg_gen_ext32s_i64(vc, tmp); ++ break; ++ case 0x03: ++ /* S4SUBW */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 2); ++ tcg_gen_sub_i64(tmp, tmp, vb); ++ tcg_gen_ext32s_i64(vc, tmp); ++ break; ++ case 0x04: ++ /* S8ADDW */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 3); ++ tcg_gen_add_i64(tmp, tmp, vb); ++ tcg_gen_ext32s_i64(vc, tmp); ++ break; ++ case 0x05: ++ /* S8SUBW */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 3); ++ tcg_gen_sub_i64(tmp, tmp, vb); ++ tcg_gen_ext32s_i64(vc, tmp); ++ break; ++ ++ case 0x08: ++ /* ADDL */ ++ tcg_gen_add_i64(vc, va, vb); ++ break; ++ case 0x09: ++ /* SUBL */ ++ tcg_gen_sub_i64(vc, va, vb); ++ break; ++ case 0x0a: ++ /* S4ADDL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 2); ++ tcg_gen_add_i64(vc, tmp, vb); ++ break; ++ case 0x0b: ++ /* S4SUBL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 2); ++ tcg_gen_sub_i64(vc, tmp, vb); ++ break; ++ case 0x0c: ++ /* S8ADDL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 3); ++ tcg_gen_add_i64(vc, tmp, vb); ++ break; ++ case 0x0d: ++ /* S8SUBL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_shli_i64(tmp, va, 3); ++ tcg_gen_sub_i64(vc, tmp, vb); ++ break; ++ case 0x10: ++ /* MULW */ ++ tcg_gen_mul_i64(vc, va, vb); ++ tcg_gen_ext32s_i64(vc, vc); ++ break; ++ case 0x11: ++ /* TODO: DIVW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(tmpa, va); ++ tcg_gen_extrl_i64_i32(tmpb, vb); ++ tcg_gen_div_i32(tmpc, tmpa, tmpb); ++ tcg_gen_ext_i32_i64(vc, tmpc); ++ break; ++ case 0x12: ++ /* UDIVW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(tmpa, va); ++ 
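/* UDIVW continues: divide the truncated 32-bit low words, then zero-extend the quotient back to 64 bits */ ++ 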
tcg_gen_extrl_i64_i32(tmpb, vb); ++ tcg_gen_divu_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(vc, tmpc); ++ break; ++ case 0x13: ++ /* REMW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(tmpa, va); ++ tcg_gen_extrl_i64_i32(tmpb, vb); ++ tcg_gen_rem_i32(tmpc, tmpa, tmpb); ++ tcg_gen_ext_i32_i64(vc, tmpc); ++ break; ++ case 0x14: ++ /* UREMW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(tmpa, va); ++ tcg_gen_extrl_i64_i32(tmpb, vb); ++ tcg_gen_remu_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(vc, tmpc); ++ break; ++ case 0x18: ++ /* MULL */ ++ tcg_gen_mul_i64(vc, va, vb); ++ break; ++ case 0x19: ++ /* MULH */ ++ tmp = tcg_temp_new(); ++ tcg_gen_mulu2_i64(tmp, vc, va, vb); ++ break; ++ case 0x1A: ++ /* DIVL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_div_i64(vc, va, vb); ++ break; ++ case 0x1B: ++ /* UDIVL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_divu_i64(vc, va, vb); ++ break; ++ case 0x1C: ++ /* REML */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_rem_i64(vc, va, vb); ++ break; ++ case 0x1D: ++ /* UREML */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_remu_i64(vc, va, vb); ++ break; ++ case 0x1E: ++ /* TODO:ADDPI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_constant_i64(disp13); ++ tcg_gen_shli_i64(tmp, tmp, 2); ++ tcg_gen_addi_i64(vc, tmp, ctx->base.pc_next & ~3UL); ++ break; ++ case 0x1F: ++ /* ADDPIS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_constant_i64(disp13); ++ tcg_gen_shli_i64(tmp, tmp, 16); ++ tcg_gen_addi_i64(vc, tmp, ctx->base.pc_next & 0xffffffffffff0000); ++ break; ++ case 0x28: ++ /* CMPEQ */ ++ tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); ++ break; ++ case 0x29: ++ /* CMPLT */ ++ tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); ++ break; ++ case 0x2a: ++ /* CMPLE */ ++ tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); ++ break; ++ case 0x2b: ++ /* CMPULT */ ++ tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); ++ break; ++ case 0x2c: ++ /* CMPULE */ ++ tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); ++ break; ++ case 0x2D: ++ /* SBT */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new_i64(); ++ t1 = tcg_constant_i64(1); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_shl_i64(tmp, t1, tmp); ++ tcg_gen_or_i64(vc, va, tmp); ++ break; ++ case 0x2E: ++ /* CBT */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new_i64(); ++ t1 = tcg_constant_i64(1); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_shl_i64(tmp, t1, tmp); ++ tcg_gen_andc_i64(vc, va, tmp); ++ break; ++ case 0x38: ++ /* AND */ ++ tcg_gen_and_i64(vc, va, vb); ++ break; ++ case 0x39: ++ /* BIC */ ++ tcg_gen_andc_i64(vc, va, vb); ++ break; ++ case 0x3a: ++ /* BIS */ ++ tcg_gen_or_i64(vc, va, vb); ++ break; ++ case 0x3b: ++ /* ORNOT */ ++ tcg_gen_orc_i64(vc, va, vb); ++ break; ++ case 0x3c: ++ /* XOR */ ++ tcg_gen_xor_i64(vc, va, vb); ++ break; ++ case 0x3d: ++ /* EQV */ ++ tcg_gen_eqv_i64(vc, va, vb); ++ break; ++ case 0x40: ++ /* INSLB */ ++ gen_ins_l(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x41: ++ /* INSLH */ ++ gen_ins_l(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x42: ++ /* INSLW */ ++ gen_ins_l(ctx, vc, va, vb, 0xf); ++ break; ++ 
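/* INSLx/INSHx share gen_ins_l/gen_ins_h; only the byte mask (0x1, 0x3, 0xf, 0xff) widens with the operand size */ ++ 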
case 0x43: ++ /* INSLL */ ++ gen_ins_l(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x44: ++ /* INSHB */ ++ gen_ins_h(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x45: ++ /* INSHH */ ++ gen_ins_h(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x46: ++ /* INSHW */ ++ gen_ins_h(ctx, vc, va, vb, 0xf); ++ break; ++ case 0x47: ++ /* INSHL */ ++ gen_ins_h(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x48: ++ /* SLL/SLLL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_shl_i64(vc, va, tmp); ++ break; ++ case 0x49: ++ /* SRL/SRLL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_shr_i64(vc, va, tmp); ++ break; ++ case 0x4a: ++ /* SRA/SRAL */ ++ tmp = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_sar_i64(vc, va, tmp); ++ break; ++ case 0x4B: ++ /* ROLL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(tmp, vb, 0x3f); ++ tcg_gen_rotl_i64(vc, va, tmp); ++ break; ++ case 0x4C: ++ /* SLLW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new(); ++ tmp1 = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x1f); ++ tcg_gen_ext32u_i64(tmp1, va); ++ tcg_gen_shl_i64(vc, tmp1, tmp); ++ tcg_gen_ext32u_i64(vc, vc); ++ break; ++ case 0x4D: ++ /* SRLW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x1f); ++ tcg_gen_ext32u_i64(va, va); ++ tcg_gen_shr_i64(vc, va, tmp); ++ tcg_gen_ext32u_i64(vc, vc); ++ break; ++ case 0x4E: ++ /* SRAW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new(); ++ tcg_gen_andi_i64(tmp, vb, 0x1f); ++ tcg_gen_ext32s_i64(va, va); ++ tcg_gen_sar_i64(vc, va, tmp); ++ tcg_gen_ext32u_i64(vc, vc); ++ break; ++ case 0x4F: ++ /* ROLW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tcg_gen_extrl_i64_i32(tmpa, va); ++ tcg_gen_extrl_i64_i32(tmpb, vb); ++ tcg_gen_rotl_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(vc, tmpc); ++ break; ++ case 0x50: ++ /* EXTLB */ ++ gen_ext_l(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x51: ++ /* EXTLH */ ++ gen_ext_l(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x52: ++ /* EXTLW */ ++ gen_ext_l(ctx, vc, va, vb, 0xf); ++ break; ++ case 0x53: ++ /* EXTLL */ ++ gen_ext_l(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x54: ++ /* EXTHB */ ++ gen_ext_h(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x55: ++ /* EXTHH */ ++ gen_ext_h(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x56: ++ /* EXTHW */ ++ gen_ext_h(ctx, vc, va, vb, 0xf); ++ break; ++ case 0x57: ++ /* EXTHL */ ++ gen_ext_h(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x58: ++ /* CTPOP */ ++ tcg_gen_ctpop_i64(vc, vb); ++ break; ++ case 0x59: ++ /* CTLZ */ ++ tcg_gen_clzi_i64(vc, vb, 64); ++ break; ++ case 0x5a: ++ /* CTTZ */ ++ tcg_gen_ctzi_i64(vc, vb, 64); ++ break; ++ case 0x5B: ++ /* REVBH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(tmp, vb, 0xffffUL); ++ tcg_gen_bswap16_i64(vc, tmp, TCG_BSWAP_IZ); ++ break; ++ case 0x5C: ++ /* REVBW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(tmp, vb, 0xffffffffUL); ++ tcg_gen_bswap32_i64(vc, tmp, TCG_BSWAP_IZ); ++ break; ++ case 0x5D: ++ /* REVBL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_bswap64_i64(vc, vb); ++ break; ++ case 0x5E: ++ /* CASW */ ++ arch_assert(test_feature(ctx->env, 
SW64_FEATURE_CORE4)); ++ tcg_gen_atomic_cmpxchg_i64(vc, vb, va, vc, ctx->mem_idx, MO_TEUL); ++ break; ++ case 0x5F: ++ /* CASL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_atomic_cmpxchg_i64(vc, vb, va, vc, ctx->mem_idx, MO_TEUQ); ++ break; ++ case 0x60: ++ /* MASKLB */ ++ gen_mask_l(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x61: ++ /* MASKLH */ ++ gen_mask_l(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x62: ++ /* MASKLW */ ++ gen_mask_l(ctx, vc, va, vb, 0xf); ++ break; ++ case 0x63: ++ /* MASKLL */ ++ gen_mask_l(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x64: ++ /* MASKHB */ ++ gen_mask_h(ctx, vc, va, vb, 0x1); ++ break; ++ case 0x65: ++ /* MASKHH */ ++ gen_mask_h(ctx, vc, va, vb, 0x3); ++ break; ++ case 0x66: ++ /* MASKHW */ ++ gen_mask_h(ctx, vc, va, vb, 0xf); ++ break; ++ case 0x67: ++ /* MASKHL */ ++ gen_mask_h(ctx, vc, va, vb, 0xff); ++ break; ++ case 0x68: ++ /* ZAP */ ++ gen_helper_zap(vc, va, vb); ++ break; ++ case 0x69: ++ /* ZAPNOT */ ++ gen_helper_zapnot(vc, va, vb); ++ break; ++ case 0x6a: ++ /* SEXTB */ ++ tcg_gen_ext8s_i64(vc, vb); ++ break; ++ case 0x6b: ++ /* SEXTH */ ++ tcg_gen_ext16s_i64(vc, vb); ++ break; ++ case 0x6c: ++ /* CMPGEB*/ ++ gen_helper_cmpgeb(vc, va, vb); ++ break; ++ default: ++ ILLEGAL(fn); ++ } ++} ++ ++static void cal_with_imm_2(DisasContext *ctx, TCGv vc, TCGv va, int64_t disp, ++ uint8_t fn) ++{ ++ TCGv_i64 t0 = tcg_constant_i64(disp); ++ cal_with_iregs_2(ctx, vc, va, t0, 0, fn); ++} ++ ++static void cal_with_iregs_3(DisasContext *ctx, TCGv vd, TCGv va, TCGv vb, ++ TCGv vc, uint8_t fn) ++{ ++ TCGv_i64 t0 = tcg_constant_i64(0); ++ TCGv_i64 tmp; ++ switch (fn) { ++ case 0x0: ++ /* SELEQ */ ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, va, t0, vb, vc); ++ break; ++ case 0x1: ++ /* SELGE */ ++ tcg_gen_movcond_i64(TCG_COND_GE, vd, va, t0, vb, vc); ++ break; ++ case 0x2: ++ /* SELGT */ ++ tcg_gen_movcond_i64(TCG_COND_GT, vd, va, t0, vb, vc); ++ break; ++ case 0x3: ++ /* SELLE */ ++ tcg_gen_movcond_i64(TCG_COND_LE, vd, va, t0, vb, vc); ++ break; ++ case 0x4: ++ /* SELLT */ ++ tcg_gen_movcond_i64(TCG_COND_LT, vd, va, t0, vb, vc); ++ break; ++ case 0x5: ++ /* SELNE */ ++ tcg_gen_movcond_i64(TCG_COND_NE, vd, va, t0, vb, vc); ++ break; ++ case 0x6: ++ /* SELLBC */ ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(tmp, va, 1); ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp, t0, vb, vc); ++ break; ++ case 0x7: ++ /* SELLBS */ ++ tmp = tcg_temp_new_i64(); ++ tcg_gen_andi_i64(tmp, va, 1); ++ tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp, t0, vb, vc); ++ break; ++ default: ++ ILLEGAL(fn); ++ break; ++ } ++} ++ ++static void cal_with_imm_3(DisasContext *ctx, TCGv vd, TCGv va, int32_t disp, ++ TCGv vc, uint8_t fn) ++{ ++ TCGv_i64 vb = tcg_constant_i64(disp); ++ cal_with_iregs_3(ctx, vd, va, vb, vc, fn); ++} ++ ++static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) ++{ ++ uint64_t dest = ctx->base.pc_next + ((int64_t)disp << 2); ++ if (ra != 31) { ++ tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~0x3UL)); ++ } ++ if (disp == 0) { ++ return 0; ++ } else if (use_goto_tb(ctx, dest)) { ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(cpu_pc, dest); ++ tcg_gen_exit_tb(ctx->base.tb, 0); ++ return DISAS_NORETURN; ++ } else { ++ tcg_gen_movi_i64(cpu_pc, dest); ++ return DISAS_PC_UPDATED; ++ } ++} ++ ++static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, ++ TCGv cmp, int disp) ++{ ++ uint64_t dest = ctx->base.pc_next + (disp << 2); ++ TCGLabel* lab_true = gen_new_label(); ++ ++ if (use_goto_tb(ctx, dest)) { ++ 
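/* chain both edges: goto_tb slot 0 links the not-taken fall-through, slot 1 (after lab_true) links the taken target */ ++ 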
tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); ++ ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); ++ tcg_gen_exit_tb(ctx->base.tb, 0); ++ ++ gen_set_label(lab_true); ++ tcg_gen_goto_tb(1); ++ tcg_gen_movi_i64(cpu_pc, dest); ++ tcg_gen_exit_tb(ctx->base.tb, 1); ++ ++ return DISAS_NORETURN; ++ } else { ++ TCGv_i64 t = tcg_constant_i64(0); ++ TCGv_i64 d = tcg_constant_i64(dest); ++ TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next); ++ ++ tcg_gen_movcond_i64(cond, cpu_pc, cmp, t, d, p); ++ ++ return DISAS_PC_UPDATED; ++ } ++} ++ ++static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, uint32_t ra, ++ int32_t disp, uint64_t mask) ++{ ++ TCGv tmp = tcg_temp_new(); ++ DisasJumpType ret; ++ ++ tcg_gen_andi_i64(tmp, load_gir(ctx, ra), mask); ++ ret = gen_bcond_internal(ctx, cond, tmp, disp); ++ return ret; ++} ++ ++static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, ++ int32_t disp) ++{ ++ TCGv cmp_tmp = tcg_temp_new(); ++ DisasJumpType ret; ++ ++ gen_fold_mzero(cond, cmp_tmp, cpu_fr[ra]); ++ ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); ++ return ret; ++} ++ ++#ifndef CONFIG_USER_ONLY ++static void gen_qemu_pri_ldw(TCGv t0, TCGv t1, int memidx) ++{ ++ gen_helper_pri_ldw(t0, tcg_env, t1); ++} ++ ++static void gen_qemu_pri_stw(TCGv t0, TCGv t1, int memidx) ++{ ++ gen_helper_pri_stw(tcg_env, t0, t1); ++} ++ ++static void gen_qemu_pri_ldl(TCGv t0, TCGv t1, int memidx) ++{ ++ gen_helper_pri_ldl(t0, tcg_env, t1); ++} ++ ++static void gen_qemu_pri_stl(TCGv t0, TCGv t1, int memidx) ++{ ++ gen_helper_pri_stl(tcg_env, t0, t1); ++} ++#endif ++ ++static inline void gen_load_mem_simd( ++ DisasContext *ctx, void (*tcg_gen_qemu_load)(int t0, TCGv t1, int flags), ++ int ra, int rb, int32_t disp16, uint64_t mask) ++{ ++ TCGv tmp, addr; ++ ++ /* LDQ_U with ra $31 is UNOP. Other various loads are forms of ++ prefetches, which we can treat as nops. No worries about ++ missed exceptions here. 
*/ ++ if (unlikely(ra == 31)) ++ return; ++ ++ tmp = tcg_temp_new(); ++ addr = load_gir(ctx, rb); ++ ++ if (disp16) { ++ tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); ++ addr = tmp; ++ } else { ++ tcg_gen_mov_i64(tmp, addr); ++ addr = tmp; ++ } ++ ++ if (mask) { ++ tcg_gen_andi_i64(addr, addr, mask); ++ } ++ ++ tcg_gen_qemu_load(ra, addr, ctx->mem_idx); ++} ++ ++static inline void gen_store_mem_simd( ++ DisasContext *ctx, void (*tcg_gen_qemu_store)(int t0, TCGv t1, int flags), ++ int ra, int rb, int32_t disp16, uint64_t mask) ++{ ++ TCGv tmp, addr; ++ ++ tmp = tcg_temp_new(); ++ addr = load_gir(ctx, rb); ++ if (disp16) { ++ tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); ++ addr = tmp; ++ } else { ++ tcg_gen_mov_i64(tmp, addr); ++ addr = tmp; ++ } ++ if (mask) { ++ tcg_gen_andi_i64(addr, addr, mask); ++ } ++ tcg_gen_qemu_store(ra, addr, ctx->mem_idx); ++} ++ ++static void gen_qemu_ldwe(int t0, TCGv t1, int memidx) ++{ ++ TCGv tmp = tcg_temp_new(); ++ ++ tcg_gen_qemu_ld_i64(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL); ++ tcg_gen_shli_i64(cpu_fr[t0], tmp, 32); ++ tcg_gen_or_i64(cpu_fr[t0], cpu_fr[t0], tmp); ++ tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); ++ ++} ++ ++static void gen_qemu_vlds(int t0, TCGv t1, int memidx) ++{ ++ int i; ++ TCGv_i32 tmp32 = tcg_temp_new_i32(); ++ ++ tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL); ++ gen_helper_memory_to_s(cpu_fr[t0], tmp32); ++ tcg_gen_addi_i64(t1, t1, 4); ++ ++ for (i = 1; i < 4; i++) { ++ tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_LEUL); ++ gen_helper_memory_to_s(cpu_fr[t0 + i * 32], tmp32); ++ tcg_gen_addi_i64(t1, t1, 4); ++ } ++ ++} ++ ++static void gen_qemu_ldse(int t0, TCGv t1, int memidx) ++{ ++ TCGv_i32 tmp32 = tcg_temp_new_i32(); ++ ++ tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL); ++ gen_helper_memory_to_s(cpu_fr[t0], tmp32); ++ tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); ++ ++} ++ ++static void gen_qemu_ldde(int t0, TCGv t1, int memidx) ++{ ++ tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEUQ); ++ tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); ++ tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); ++} ++ ++static void gen_qemu_vldd(int t0, TCGv t1, int memidx) ++{ ++ tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 32], t1, memidx, MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_ld_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEUQ); ++} ++ ++static void gen_qemu_vsts(int t0, TCGv t1, int memidx) ++{ ++ int i; ++ TCGv_i32 tmp = tcg_temp_new_i32(); ++ ++ gen_helper_s_to_memory(tmp, cpu_fr[t0]); ++ tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL); ++ tcg_gen_addi_i64(t1, t1, 4); ++ for (i = 1; i < 4; i++) { ++ gen_helper_s_to_memory(tmp, cpu_fr[t0 + 32 * i]); ++ tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_LEUL); ++ tcg_gen_addi_i64(t1, t1, 4); ++ } ++} ++ ++static void gen_qemu_vstd(int t0, TCGv t1, int memidx) ++{ ++ tcg_gen_qemu_st_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_st_i64(cpu_fr[t0 + 32], t1, memidx, MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_st_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEUQ); ++ 
tcg_gen_addi_i64(t1, t1, 8); ++ tcg_gen_qemu_st_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEUQ); ++} ++ ++static inline void gen_qemu_fsts(TCGv t0, TCGv t1, int flags) ++{ ++ TCGv_i32 tmp = tcg_temp_new_i32(); ++ gen_helper_s_to_memory(tmp, t0); ++ tcg_gen_qemu_st_i32(tmp, t1, flags, MO_LEUL); ++} ++ ++static inline void gen_qemu_flds(TCGv t0, TCGv t1, int flags) ++{ ++ TCGv_i32 tmp = tcg_temp_new_i32(); ++ tcg_gen_qemu_ld_i32(tmp, t1, flags, MO_LEUL); ++ gen_helper_memory_to_s(t0, tmp); ++} ++ ++static TCGv gen_ieee_input(DisasContext *ctx, int reg, int is_cmp) ++{ ++ TCGv val; ++ ++ if (unlikely(reg == 31)) { ++ val = load_zero(ctx); ++ } else { ++ val = cpu_fr[reg]; ++#ifndef CONFIG_USER_ONLY ++ /* In system mode, raise exceptions for denormals like real ++ hardware. In user mode, proceed as if the OS completion ++ handler is handling the denormal as per spec. */ ++ gen_helper_ieee_input(tcg_env, val); ++#endif ++ } ++ return val; ++} ++ ++static void gen_fp_exc_raise(int rc) ++{ ++#ifndef CONFIG_USER_ONLY ++ TCGv_i32 reg = tcg_constant_i32(rc + 32); ++ gen_helper_fp_exc_raise(tcg_env, reg); ++#endif ++} ++ ++static void gen_ieee_arith2(DisasContext *ctx, ++ void (*helper)(TCGv, TCGv_ptr, TCGv), int ra, ++ int rc) ++{ ++ TCGv va, vc; ++ ++ va = gen_ieee_input(ctx, ra, 0); ++ vc = cpu_fr[rc]; ++ helper(vc, tcg_env, va); ++ ++ gen_fp_exc_raise(rc); ++} ++ ++static void gen_ieee_arith3(DisasContext *ctx, ++ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, ++ int rb, int rc) ++{ ++ TCGv va, vb, vc; ++ ++ va = gen_ieee_input(ctx, ra, 0); ++ vb = gen_ieee_input(ctx, rb, 0); ++ vc = cpu_fr[rc]; ++ helper(vc, tcg_env, va, vb); ++ ++ gen_fp_exc_raise(rc); ++} ++ ++#define IEEE_ARITH2(name) \ ++ static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rc) { \ ++ gen_ieee_arith2(ctx, gen_helper_##name, ra, rc); \ ++ } ++ ++#define IEEE_ARITH3(name) \ ++ static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rb, \ ++ int rc) { \ ++ gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc); \ ++ } ++IEEE_ARITH3(fadds) ++IEEE_ARITH3(faddd) ++IEEE_ARITH3(fsubs) ++IEEE_ARITH3(fsubd) ++IEEE_ARITH3(fmuls) ++IEEE_ARITH3(fmuld) ++IEEE_ARITH3(fdivs) ++IEEE_ARITH3(fdivd) ++IEEE_ARITH2(frecs) ++IEEE_ARITH2(frecd) ++ ++static void gen_ieee_compare(DisasContext *ctx, ++ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, ++ int rb, int rc) ++{ ++ TCGv va, vb, vc; ++ ++ va = gen_ieee_input(ctx, ra, 1); ++ vb = gen_ieee_input(ctx, rb, 1); ++ vc = cpu_fr[rc]; ++ helper(vc, tcg_env, va, vb); ++ ++ gen_fp_exc_raise(rc); ++} ++ ++#define IEEE_CMP2(name) \ ++ static inline void glue(gen_, name)(DisasContext *ctx, int ra, int rb, \ ++ int rc) { \ ++ gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc); \ ++ } ++ ++IEEE_CMP2(fcmpun) ++IEEE_CMP2(fcmpeq) ++IEEE_CMP2(fcmplt) ++IEEE_CMP2(fcmple) ++ ++static void gen_fcvtdl(int rb, int rc, uint64_t round_mode) ++{ ++ TCGv tmp64; ++ tmp64 = tcg_temp_new_i64(); ++ tcg_gen_movi_i64(tmp64, round_mode); ++ gen_helper_fcvtdl(cpu_fr[rc], tcg_env, cpu_fr[rb], tmp64); ++ gen_fp_exc_raise(rc); ++} ++ ++static void gen_transformat(int rb, int rc, ++ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), ++ uint64_t round_mode) ++{ ++ TCGv tmp64; ++ tmp64 = tcg_temp_new_i64(); ++ tcg_gen_movi_i64(tmp64, round_mode); ++ helper(cpu_fr[rc], tcg_env, cpu_fr[rb], tmp64); ++ gen_fp_exc_raise(rc); ++} ++ ++static void cal_with_fregs_2(DisasContext *ctx, uint8_t rc, uint8_t ra, ++ uint8_t rb, uint8_t fn) ++{ ++ TCGv tmp64; ++ TCGv_i32 tmp32; ++ switch (fn) { ++ case 0x00: ++ /* FADDS */ ++ 
gen_fadds(ctx, ra, rb, rc); ++ break; ++ case 0x01: ++ /* FADDD */ ++ gen_faddd(ctx, ra, rb, rc); ++ break; ++ case 0x02: ++ /* FSUBS */ ++ gen_fsubs(ctx, ra, rb, rc); ++ break; ++ case 0x03: ++ /* FSUBD */ ++ gen_fsubd(ctx, ra, rb, rc); ++ break; ++ case 0x4: ++ /* FMULS */ ++ gen_fmuls(ctx, ra, rb, rc); ++ break; ++ case 0x05: ++ /* FMULD */ ++ gen_fmuld(ctx, ra, rb, rc); ++ break; ++ case 0x06: ++ /* FDIVS */ ++ gen_fdivs(ctx, ra, rb, rc); ++ break; ++ case 0x07: ++ /* FDIVD */ ++ gen_fdivd(ctx, ra, rb, rc); ++ break; ++ case 0x08: ++ /* FSQRTS */ ++ gen_helper_fsqrts(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x09: ++ /* FSQRTD */ ++ gen_helper_fsqrt(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x10: ++ /* FCMPEQ */ ++ gen_fcmpeq(ctx, ra, rb, rc); ++ break; ++ case 0x11: ++ /* FCMPLE */ ++ gen_fcmple(ctx, ra, rb, rc); ++ break; ++ case 0x12: ++ /* FCMPLT */ ++ gen_fcmplt(ctx, ra, rb, rc); ++ break; ++ case 0x13: ++ /* FCMPUN */ ++ gen_fcmpun(ctx, ra, rb, rc); ++ break; ++ case 0x20: ++ /* FCVTSD */ ++ gen_helper_fcvtsd(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x21: ++ /* FCVTDS */ ++ gen_helper_fcvtds(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x22: ++ /* FCVTDL_G */ ++ gen_fcvtdl(rb, rc, 0); ++ break; ++ case 0x23: ++ /* FCVTDL_P */ ++ gen_fcvtdl(rb, rc, 2); ++ break; ++ case 0x24: ++ /* FCVTDL_Z */ ++ gen_fcvtdl(rb, rc, 3); ++ break; ++ case 0x25: ++ /* FCVTDL_N */ ++ gen_fcvtdl(rb, rc, 1); ++ break; ++ case 0x27: ++ /* FCVTDL */ ++ gen_helper_fcvtdl_dyn(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x28: ++ /* FCVTWL */ ++ gen_helper_fcvtwl(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ tcg_gen_ext32s_i64(cpu_fr[rc], cpu_fr[rc]); ++ break; ++ case 0x29: ++ /* FCVTLW */ ++ gen_helper_fcvtlw(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x2d: ++ /* FCVTLS */ ++ gen_helper_fcvtls(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x2f: ++ /* FCVTLD */ ++ gen_helper_fcvtld(cpu_fr[rc], tcg_env, cpu_fr[rb]); ++ break; ++ case 0x30: ++ /* FCPYS */ ++ tmp64 = tcg_temp_new(); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63); ++ tcg_gen_shli_i64(tmp64, tmp64, 63); ++ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ break; ++ case 0x31: ++ /* FCPYSE */ ++ tmp64 = tcg_temp_new(); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 52); ++ tcg_gen_shli_i64(tmp64, tmp64, 52); ++ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x000fffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ break; ++ case 0x32: ++ /* FCPYSN */ ++ tmp64 = tcg_temp_new(); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63); ++ tcg_gen_not_i64(tmp64, tmp64); ++ tcg_gen_shli_i64(tmp64, tmp64, 63); ++ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ break; ++ case 0x40: ++ /* IFMOVS */ ++ tmp64 = tcg_temp_new(); ++ tmp32 = tcg_temp_new_i32(); ++ tcg_gen_movi_i64(tmp64, ra); ++ tcg_gen_extrl_i64_i32(tmp32, load_gir(ctx, ra)); ++ gen_helper_memory_to_s(tmp64, tmp32); ++ tcg_gen_mov_i64(cpu_fr[rc], tmp64); ++ tcg_gen_movi_i64(tmp64, rc); ++ break; ++ case 0x41: ++ /* IFMOVD */ ++ tcg_gen_mov_i64(cpu_fr[rc], load_gir(ctx, ra)); ++ break; ++ case 0x50: ++ /* RFPCR */ ++ gen_helper_load_fpcr(cpu_fr[ra], tcg_env); ++ break; ++ case 0x51: ++ /* WFPCR */ ++ gen_helper_store_fpcr(tcg_env, cpu_fr[ra]); ++ break; ++ case 0x54: ++ /* SETFPEC0 */ ++ tmp64 = tcg_constant_i64(0); ++ gen_helper_setfpcrx(tcg_env, tmp64); ++ break; ++ case 0x55: ++ /* SETFPEC1 */ ++ tmp64 = 
tcg_constant_i64(1); ++ gen_helper_setfpcrx(tcg_env, tmp64); ++ break; ++ case 0x56: ++ /* SETFPEC2 */ ++ tmp64 = tcg_constant_i64(2); ++ gen_helper_setfpcrx(tcg_env, tmp64); ++ break; ++ case 0x57: ++ /* SETFPEC3 */ ++ tmp64 = tcg_constant_i64(3); ++ gen_helper_setfpcrx(tcg_env, tmp64); ++ break; ++ case 0x58: ++ /* TODO:FRECS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_frecs(ctx, ra, rc); ++ break; ++ case 0x59: ++ /* TODO:FRECD */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_frecd(ctx, ra, rc); ++ break; ++ case 0x5A: ++ /* TODO:FRIS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_fris, ++ 5); ++ break; ++ case 0x5B: ++ /* TODO:FRIS_G */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_fris, 0); ++ break; ++ case 0x5C: ++ /* TODO:FRIS_P */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_fris, 2); ++ break; ++ case 0x5D: ++ /* TODO:FRIS_Z */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_fris, 3); ++ break; ++ case 0x5F: ++ /* TODO:FRIS_N */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_fris, 1); ++ break; ++ case 0x60: ++ /* TODO:FRID */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_frid, ++ 5); ++ break; ++ case 0x61: ++ /* TODO:FRID_G */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_frid, 0); ++ break; ++ case 0x62: ++ /* TODO:FRID_P */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_frid, 2); ++ break; ++ case 0x63: ++ /* TODO:FRID_Z */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_frid, 3); ++ break; ++ case 0x64: ++ /* TODO:FRID_N */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ gen_transformat(rb, rc, gen_helper_frid, 1); ++ break; ++ default: ++ fprintf(stderr, "Illegal insn func[%x]\n", fn); ++ gen_invalid(ctx); ++ break; ++ } ++} ++ ++static void cal_with_fregs_4(DisasContext *ctx, uint8_t rd, uint8_t ra, ++ uint8_t rb, uint8_t rc, uint8_t fn) ++{ ++ TCGv zero = tcg_constant_i64(0); ++ TCGv va, vb, vc, vd, tmp64; ++ ++ va = cpu_fr[ra]; ++ vb = cpu_fr[rb]; ++ vc = cpu_fr[rc]; ++ vd = cpu_fr[rd]; ++ ++ switch (fn) { ++ case 0x00: ++ /* FMAS */ ++ gen_helper_fmas(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x01: ++ /* FMAD */ ++ gen_helper_fmad(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x02: ++ /* FMSS */ ++ gen_helper_fmss(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x03: ++ /* FMSD */ ++ gen_helper_fmsd(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x04: ++ /* FNMAS */ ++ gen_helper_fnmas(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x05: ++ /* FNMAD */ ++ gen_helper_fnmad(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x06: ++ /* FNMSS */ ++ gen_helper_fnmss(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x07: ++ /* FNMSD */ ++ gen_helper_fnmsd(vd, tcg_env, va, vb, vc); ++ break; ++ case 0x10: ++ /* FSELEQ */ ++ tmp64 = tcg_temp_new(); ++ gen_helper_fcmpeq(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); ++ break; ++ case 0x11: ++ /* FSELNE */ ++ tmp64 = tcg_temp_new(); ++ gen_helper_fcmpeq(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vb, vc); ++ break; ++ case 0x12: ++ /* FSELLT */ ++ tmp64 = 
tcg_temp_new(); ++ gen_helper_fcmplt(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); ++ break; ++ case 0x13: ++ /* FSELLE */ ++ tmp64 = tcg_temp_new(); ++ gen_helper_fcmple(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); ++ break; ++ case 0x14: ++ /* FSELGT */ ++ tmp64 = tcg_temp_new(); ++ gen_helper_fcmpgt(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc); ++ break; ++ case 0x15: ++ /* FSELGE */ ++ tmp64 = tcg_temp_new(); ++ gen_helper_fcmpge(tmp64, tcg_env, va, zero); ++ tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc); ++ break; ++ default: ++ fprintf(stderr, "Illegal insn func[%x]\n", fn); ++ gen_invalid(ctx); ++ break; ++ } ++} ++static inline void gen_qemu_lldw(TCGv t0, TCGv t1, int flags) ++{ ++ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); ++ tcg_gen_mov_i64(cpu_lock_addr, t1); ++#ifdef SW64_FIXLOCK ++ tcg_gen_ext32u_i64(cpu_lock_value, t0); ++#endif ++} ++ ++static inline void gen_qemu_lldl(TCGv t0, TCGv t1, int flags) ++{ ++ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEUQ); ++ tcg_gen_mov_i64(cpu_lock_addr, t1); ++#ifdef SW64_FIXLOCK ++ tcg_gen_mov_i64(cpu_lock_value, t0); ++#endif ++} ++ ++static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, ++ int32_t disp16, int mem_idx, ++ MemOp op) ++{ ++ TCGLabel *lab_fail, *lab_done; ++ TCGv addr; ++ ++ addr = tcg_temp_new_i64(); ++ tcg_gen_addi_i64(addr, load_gir(ctx, rb), disp16); ++ free_context_temps(ctx); ++ ++ lab_fail = gen_new_label(); ++ lab_done = gen_new_label(); ++ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); ++ if (test_feature(ctx->env, SW64_FEATURE_CORE3)) ++ tcg_gen_brcondi_i64(TCG_COND_NE, cpu_lock_flag, 0x1, lab_fail); ++#ifdef SW64_FIXLOCK ++ TCGv val = tcg_temp_new_i64(); ++ tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, ++ load_gir(ctx, ra), mem_idx, op); ++ tcg_gen_setcond_i64(TCG_COND_EQ, cpu_lock_success, val, cpu_lock_value); ++#else ++ tcg_gen_qemu_st_i64(load_gir(ctx, ra), addr, mem_idx, op); ++#endif ++ ++ tcg_gen_br(lab_done); ++ ++ gen_set_label(lab_fail); ++ tcg_gen_movi_i64(cpu_lock_success, 0); ++ gen_set_label(lab_done); ++ ++ if (test_feature(ctx->env, SW64_FEATURE_CORE4)) ++ { ++ tcg_gen_mov_i64(load_gir(ctx, ra), cpu_lock_success); ++ tcg_gen_movi_i64(cpu_lock_success, 0); ++ } ++ tcg_gen_movi_i64(cpu_lock_flag, 0); ++ tcg_gen_movi_i64(cpu_lock_addr, -1); ++ return DISAS_NEXT; ++} ++ ++static void write_csr(int idx, TCGv va, CPUSW64State *env) ++{ ++ TCGv_i64 tmp = tcg_constant_i64(idx); ++ gen_helper_write_csr(tcg_env, tmp, va); ++} ++ ++static DisasJumpType gen_sys_call(DisasContext *ctx, int syscode) ++{ ++ if (syscode >= 0x80 && syscode <= 0xbf) { ++ switch (syscode) { ++ case 0x86: ++ /* IMB */ ++ /* No-op inside QEMU */ ++ break; ++#ifdef CONFIG_USER_ONLY ++ case 0x9E: ++ /* RDUNIQUE */ ++ tcg_gen_ld_i64(ctx->ir[IDX_V0], tcg_env, ++ offsetof(CPUSW64State, unique)); ++ break; ++ case 0x9F: ++ /* WRUNIQUE */ ++ tcg_gen_st_i64(ctx->ir[IDX_A0], tcg_env, ++ offsetof(CPUSW64State, unique)); ++ write_csr(0xc7, ctx->ir[IDX_A0], ctx->env); ++ break; ++#endif ++ default: ++ goto do_sys_call; ++ } ++ return DISAS_NEXT; ++ } ++do_sys_call: ++#ifdef CONFIG_USER_ONLY ++ return gen_excp(ctx, EXCP_CALL_SYS, syscode); ++#else ++ tcg_gen_movi_i64(cpu_hm_ir[23], ctx->base.pc_next); ++ return gen_excp(ctx, EXCP_CALL_SYS, syscode); ++#endif ++} ++ ++static void read_csr(int idx, TCGv va) ++{ ++ TCGv_i64 tmp = tcg_constant_i64(idx); ++ 
gen_helper_read_csr(va, tcg_env, tmp); ++} ++ ++static inline void ldx_set(DisasContext *ctx, int ra, int rb, int32_t disp12, ++ bool btype) ++{ ++ TCGv tmp, addr, va, t1; ++ ++ /* With ra == $31 the result is discarded, so treat the ++ whole access as a no-op. */ ++ if (unlikely(ra == 31)) { ++ return; ++ } ++ ++ tmp = tcg_temp_new(); ++ t1 = tcg_constant_i64(1); ++ addr = load_gir(ctx, rb); ++ ++ tcg_gen_addi_i64(tmp, addr, disp12); ++ addr = tmp; ++ ++ va = load_gir(ctx, ra); ++ if (btype == 0) { ++ tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TESL); ++ } else { ++ tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TEUQ); ++ } ++ ++} ++ ++static inline void ldx_xxx(DisasContext *ctx, int ra, int rb, int32_t disp12, ++ bool btype, int64_t val) ++{ ++ TCGv tmp, addr, va, t; ++ ++ /* With ra == $31 the result is discarded, so treat the ++ whole access as a no-op. */ ++ if (unlikely(ra == 31)) { ++ return; ++ } ++ ++ tmp = tcg_temp_new(); ++ t = tcg_constant_i64(val); ++ addr = load_gir(ctx, rb); ++ ++ tcg_gen_addi_i64(tmp, addr, disp12); ++ addr = tmp; ++ ++ va = load_gir(ctx, ra); ++ if (btype == 0) { ++ tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TESL); ++ } else { ++ tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TEUQ); ++ } ++ ++} ++ ++static void tcg_gen_srlow_i64(int ra, int rc, int rb) ++{ ++ TCGv va, vb, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ vb = cpu_fr[rb]; ++ tcg_gen_shri_i64(shift, vb, 29); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_srlow(tcg_env, va, vc, shift); ++ ++} ++ ++static void tcg_gen_srlowi_i64(int ra, int rc, int disp8) ++{ ++ TCGv va, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ tcg_gen_movi_i64(shift, disp8); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_srlow(tcg_env, va, vc, shift); ++ ++} ++ ++static void tcg_gen_sllow_i64(int ra, int rc, int rb) ++{ ++ TCGv va, vb, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ vb = cpu_fr[rb]; ++ tcg_gen_shri_i64(shift, vb, 29); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_sllow(tcg_env, va, vc, shift); ++ ++} ++ ++static void tcg_gen_sllowi_i64(int ra, int rc, int disp8) ++{ ++ TCGv va, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ tcg_gen_movi_i64(shift, disp8); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_sllow(tcg_env, va, vc, shift); ++ ++} ++ ++static void tcg_gen_sraow_i64(int ra, int rc, int rb) ++{ ++ TCGv va, vb, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ vb = cpu_fr[rb]; ++ tcg_gen_shri_i64(shift, vb, 29); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_sraow(tcg_env, va, vc, shift); ++ ++} ++ ++static void tcg_gen_sraowi_i64(int ra, int rc, int disp8) ++{ ++ TCGv va, vc; ++ TCGv shift; ++ ++ va = tcg_constant_i64(ra); ++ vc = tcg_constant_i64(rc); ++ shift = tcg_temp_new(); ++ tcg_gen_movi_i64(shift, disp8); ++ tcg_gen_andi_i64(shift, shift, 0xff); ++ ++ gen_helper_sraow(tcg_env, va, vc, shift); ++ ++} ++ ++static void gen_qemu_vstw_uh(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte4_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[8]; ++ TCGv ti; ++ int i;
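++ /* VSTW_UH: read back the eight 32-bit slots of the enclosing 32-byte block, merge in the register lanes selected by the in-block offset, and store the slots back. */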
++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ tmp[4] = tcg_temp_new(); ++ tmp[5] = tcg_temp_new(); ++ tmp[6] = tcg_temp_new(); ++ tmp[7] = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte4_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte4_len, t1, 2); ++ tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL); ++ tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_andi_i64(addr_start, t1, ~0x1fUL); ++ tcg_gen_mov_i64(addr_end, t1); ++ for (i = 7; i >= 0; i--) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_subi_i64(t1, t1, 4); ++ tcg_gen_movi_i64(ti, i); ++ if (i % 2) ++ tcg_gen_shli_i64(tmp[i], tmp[i], 32); ++ } ++ tcg_gen_subfi_i64(byte4_len, 8, byte4_len); ++ ++ for (i = 0; i < 8; i++) { ++ tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len, cpu_fr[t0 + (i / 2)*32], tmp[i]); ++ if (i % 2) ++ tcg_gen_shri_i64(tmp[i], tmp[i], 32); ++ else ++ tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL); ++ } ++ ++ tcg_gen_subi_i64(addr_end, addr_end, 32); ++ for (i = 0; i < 8; i++) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_addi_i64(addr_end, addr_end, 4); ++ } ++ ++} ++ ++static void gen_qemu_vstw_ul(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte4_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[8]; ++ TCGv ti; ++ int i; ++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ tmp[4] = tcg_temp_new(); ++ tmp[5] = tcg_temp_new(); ++ tmp[6] = tcg_temp_new(); ++ tmp[7] = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte4_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte4_len, t1, 2); ++ tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL); ++ tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_addi_i64(addr_end, addr_start, 24); ++ for (i = 0; i < 8; i++) { ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_addi_i64(t1, t1, 4); ++ if (i % 2) ++ tcg_gen_shli_i64(tmp[i], tmp[i], 32); ++ } ++ tcg_gen_subfi_i64(byte4_len, 8, byte4_len); ++ ++ for (i = 0; i < 8; i++) { ++ tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, cpu_fr[t0 + (i/2)*32], tmp[i]); ++ if (i % 2) ++ tcg_gen_shri_i64(tmp[i], tmp[i], 32); ++ else ++ tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL); ++ } ++ ++ tcg_gen_addi_i64(addr_start, addr_start, 32); ++ for (i = 7; i >= 0; i--) { ++ tcg_gen_subi_i64(addr_start, addr_start, 4); ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); ++ } ++ ++} ++ ++static void gen_qemu_vsts_uh(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte4_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[4]; ++ TCGv ftmp; ++ TCGv ti; ++ int i; ++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ ftmp = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte4_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte4_len, t1, 2); ++ 
tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL); ++ tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_andi_i64(addr_start, t1, ~0xfUL); ++ tcg_gen_mov_i64(addr_end, t1); ++ for (i = 3; i >= 0; i--) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_subi_i64(t1, t1, 4); ++ } ++ tcg_gen_subfi_i64(byte4_len, 4, byte4_len); ++ ++ for (i = 0; i < 4; i++) { ++ tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62); ++ tcg_gen_shli_i64(ti, ti, 30); ++ tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29); ++ tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL); ++ tcg_gen_or_i64(ftmp, ftmp, ti); ++ tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len, ftmp, tmp[i]); ++ } ++ ++ tcg_gen_subi_i64(addr_end, addr_end, 16); ++ for (i = 0; i < 4; i++) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_addi_i64(addr_end, addr_end, 4); ++ } ++ ++} ++ ++static void gen_qemu_vsts_ul(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte4_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[4]; ++ TCGv ftmp; ++ TCGv ti; ++ int i; ++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ ftmp = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte4_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte4_len, t1, 2); ++ tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL); ++ tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */ ++ tcg_gen_addi_i64(addr_end, addr_start, 12); ++ for (i = 0; i < 4; i++) { ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); ++ tcg_gen_addi_i64(t1, t1, 4); ++ } ++ tcg_gen_subfi_i64(byte4_len, 4, byte4_len); ++ ++ for (i = 0; i < 4; i++) { ++ tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62); ++ tcg_gen_shli_i64(ti, ti, 30); ++ tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29); ++ tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL); ++ tcg_gen_or_i64(ftmp, ftmp, ti); ++ tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, ftmp, tmp[i]); ++ } ++ ++ tcg_gen_addi_i64(addr_start, addr_start, 16); ++ for (i = 3; i >= 0; i--) { ++ tcg_gen_subi_i64(addr_start, addr_start, 4); ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); ++ } ++ ++} ++ ++static void gen_qemu_vstd_uh(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte8_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[4]; ++ TCGv ti; ++ int i; ++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte8_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte8_len, t1, 3); ++ tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL); ++ tcg_gen_andi_i64(t1, t1, ~0x7UL); /* t1 = addr + byte8_len * 8 */ ++ tcg_gen_andi_i64(addr_start, t1, ~0x1fUL); ++ tcg_gen_mov_i64(addr_end, t1); ++ for (i = 3; i >= 0; i--) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUQ); ++ tcg_gen_subi_i64(t1, t1, 8); ++ } ++ tcg_gen_subfi_i64(byte8_len, 4, byte8_len); ++ ++ for (i = 0; i < 4; i++) { ++ 
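/* byte8_len now holds 4 minus the in-block slot offset: slots with index >= byte8_len take the register lanes, the others keep the data just loaded. */ ++ 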
tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte8_len, cpu_fr[t0 + i*32], tmp[i]); ++ } ++ ++ tcg_gen_subi_i64(addr_end, addr_end, 32); ++ for (i = 0; i < 4; i++) { ++ tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUQ); ++ tcg_gen_addi_i64(addr_end, addr_end, 8); ++ } ++ ++} ++ ++static void gen_qemu_vstd_ul(int t0, TCGv t1, int memidx) ++{ ++ TCGv byte8_len; ++ TCGv addr_start, addr_end; ++ TCGv tmp[4]; ++ TCGv ti; ++ int i; ++ ++ tmp[0] = tcg_temp_new(); ++ tmp[1] = tcg_temp_new(); ++ tmp[2] = tcg_temp_new(); ++ tmp[3] = tcg_temp_new(); ++ ti = tcg_temp_new(); ++ addr_start = tcg_temp_new(); ++ addr_end = tcg_temp_new(); ++ byte8_len = tcg_temp_new(); ++ ++ tcg_gen_shri_i64(byte8_len, t1, 3); ++ tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL); ++ tcg_gen_andi_i64(t1, t1, ~0x7UL); /* t1 = addr + byte8_len * 8 */ ++ tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte8_len * 8 */ ++ tcg_gen_addi_i64(addr_end, addr_start, 24); ++ for (i = 0; i < 4; i++) { ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end); ++ tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUQ); ++ tcg_gen_addi_i64(t1, t1, 8); ++ } ++ tcg_gen_subfi_i64(byte8_len, 4, byte8_len); ++ ++ for (i = 0; i < 4; i++) { ++ tcg_gen_movi_i64(ti, i); ++ tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte8_len, cpu_fr[t0 + i*32], tmp[i]); ++ } ++ ++ tcg_gen_addi_i64(addr_start, addr_start, 32); ++ for (i = 3; i >= 0; i--) { ++ tcg_gen_subi_i64(addr_start, addr_start, 8); ++ tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end); ++ tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUQ); ++ } ++ ++} ++ ++static void tcg_gen_vcpys_i64(int ra, int rb, int rc) ++{ ++ int i; ++ TCGv tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63); ++ tcg_gen_shli_i64(tmp64, tmp64, 63); ++ tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++} ++ ++static void tcg_gen_vcpyse_i64(int ra, int rb, int rc) ++{ ++ int i; ++ ++ TCGv tmp64 = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 52); ++ tcg_gen_shli_i64(tmp64, tmp64, 52); ++ tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x000fffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++} ++ ++static void tcg_gen_vcpysn_i64(int ra, int rb, int rc) ++{ ++ int i; ++ TCGv tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63); ++ tcg_gen_not_i64(tmp64, tmp64); ++ tcg_gen_shli_i64(tmp64, tmp64, 63); ++ tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++} ++ ++static void tcg_gen_vlogzz_i64(DisasContext *ctx, int opc, int ra, int rb, ++ int rc, int rd, int fn6) ++{ ++ TCGv zz; ++ TCGv args, vd; ++ zz = tcg_constant_i64(((opc & 0x3) << 6) | fn6); ++ args = tcg_constant_i64((ra << 16) | (rb << 8) | rc); ++ vd = tcg_constant_i64(rd); ++ ++ gen_helper_vlogzz(tcg_env, args, vd, zz); ++ ++} ++ ++static void gen_qemu_vcmpxxw_i64(TCGCond cond, int ra, int rb, int rc) ++{ ++ TCGv va, vb, vc, tmp64; ++ int i; ++ ++ va = tcg_temp_new(); ++ vb = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ tmp64 = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ if ((cond >> 1) & 1) { ++ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); ++ 
tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]); ++ } else { ++ tcg_gen_ext32u_i64(va, cpu_fr[ra + i]); ++ tcg_gen_ext32u_i64(vb, cpu_fr[rb + i]); ++ } ++ tcg_gen_setcond_i64(cond, vc, va, vb); ++ tcg_gen_mov_i64(tmp64, vc); ++ ++ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); ++ tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32); ++ if ((cond >> 1) & 1) { ++ tcg_gen_ext32s_i64(va, va); ++ tcg_gen_ext32s_i64(vb, vb); ++ } else { ++ tcg_gen_ext32u_i64(va, va); ++ tcg_gen_ext32u_i64(vb, vb); ++ } ++ tcg_gen_setcond_i64(cond, vc, va, vb); ++ tcg_gen_shli_i64(vc, vc, 32); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++} ++ ++static void gen_qemu_vcmpxxwi_i64(TCGCond cond, int ra, int rb, int rc) ++{ ++ TCGv va, vb, vc, tmp64; ++ int i; ++ ++ va = tcg_temp_new(); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_temp_new(); ++ tmp64 = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ if ((cond >> 1) & 1) { ++ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); ++ } else { ++ tcg_gen_ext32u_i64(va, cpu_fr[ra + i]); ++ } ++ tcg_gen_setcond_i64(cond, vc, va, vb); ++ tcg_gen_mov_i64(tmp64, vc); ++ ++ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); ++ if ((cond >> 1) & 1) { ++ tcg_gen_ext32s_i64(va, va); ++ } else { ++ tcg_gen_ext32u_i64(va, va); ++ } ++ tcg_gen_setcond_i64(cond, vc, va, vb); ++ tcg_gen_shli_i64(vc, vc, 32); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++} ++ ++static void gen_qemu_vselxxw(TCGCond cond, int ra, int rb, int rc, int rd, ++ int mask) ++{ ++ int i; ++ ++ TCGv t0 = tcg_constant_i64(0); ++ TCGv tmpa = tcg_temp_new(); ++ TCGv tmpb = tcg_temp_new(); ++ TCGv tmpc = tcg_temp_new(); ++ TCGv tmpd = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]); ++ tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]); ++ tcg_gen_ext32u_i64(tmpc, cpu_fr[rc + i]); ++ if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask); ++ tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc); ++ ++ tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL); ++ tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL); ++ tcg_gen_andi_i64(tmpc, cpu_fr[rc + i], 0xffffffff00000000UL); ++ if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32); ++ tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc); ++ ++ tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd); ++ } ++ ++} ++ ++static void gen_qemu_vselxxwi(TCGCond cond, int ra, int rb, int disp8, int rd, ++ int mask) ++{ ++ int i; ++ ++ TCGv t0 = tcg_constant_i64(0); ++ TCGv tmpa = tcg_temp_new(); ++ TCGv tmpb = tcg_temp_new(); ++ TCGv tmpc_0 = tcg_temp_new(); ++ TCGv tmpc_1 = tcg_temp_new(); ++ TCGv tmpd = tcg_temp_new(); ++ ++ tcg_gen_movi_i64(tmpc_0, (uint64_t)(((uint64_t)disp8))); ++ tcg_gen_movi_i64(tmpc_1, (uint64_t)(((uint64_t)disp8 << 32))); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]); ++ tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]); ++ if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask); ++ tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc_0); ++ ++ tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL); ++ tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL); ++ if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32); ++ tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc_1); ++ ++ tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd); ++ } ++ ++} ++ ++DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn, ++ CPUState *cpu) ++{ ++ int32_t disp5, disp8, disp12, disp13, disp16, disp21, disp26 __attribute__((unused)); ++ uint8_t opc, ra, rb, rc, rd; ++ uint16_t fn3, fn4, 
fn6, fn8, fn11; ++ int32_t i; ++ TCGv va, vb, vc, vd; ++ TCGv_i32 tmp32; ++ TCGv_i64 tmp64, tmp64_0, tmp64_1, tmp64_2, tmp64_3, shift; ++ TCGv_i32 tmpa, tmpb, tmpc; ++ DisasJumpType ret; ++ DisasContext *ctx = container_of(dcbase, DisasContext, base); ++ ++ opc = extract32(insn, 26, 6); ++ ra = extract32(insn, 21, 5); ++ rb = extract32(insn, 16, 5); ++ rc = extract32(insn, 0, 5); ++ rd = extract32(insn, 5, 5); ++ ++ fn3 = extract32(insn, 10, 3); ++ fn6 = extract32(insn, 10, 6); ++ fn4 = extract32(insn, 12, 4); ++ fn8 = extract32(insn, 5, 8); ++ fn11 = extract32(insn, 5, 11); ++ ++ disp5 = extract32(insn, 5, 5); ++ disp8 = extract32(insn, 13, 8); ++ disp12 = sextract32(insn, 0, 12); ++ disp13 = sextract32(insn, 13, 13); ++ disp16 = sextract32(insn, 0, 16); ++ disp21 = sextract32(insn, 0, 21); ++ disp26 = sextract32(insn, 0, 26); ++ ++ ret = DISAS_NEXT; ++ insn_profile(ctx, insn); ++ ++ switch (opc) { ++ case 0x00: ++ /* SYS_CALL */ ++ ret = gen_sys_call(ctx, insn & 0x1ffffff); ++ break; ++ case 0x01: ++ /* CALL */ ++ case 0x02: ++ /* RET */ ++ case 0x03: ++ /* JMP */ ++ vb = load_gir(ctx, rb); ++ tcg_gen_addi_i64(cpu_pc, vb, ctx->base.pc_next & 0x3); ++ if (ra != 31) { ++ tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~3UL)); ++ } ++ ret = DISAS_PC_UPDATED; ++ break; ++ case 0x04: ++ /* BR */ ++ case 0x05: ++ /* BSR */ ++ ret = gen_bdirect(ctx, ra, disp21); ++ break; ++ case 0x06: ++ switch (disp16) { ++ case 0x0000: ++ /* MEMB */ ++ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC); ++ break; ++ case 0x0001: ++ /* IMEMB */ ++ /* Not modelled in QEMU; treat as a no-op. */ ++ break; ++ case 0x0002: ++ /* WMEMB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); ++ break; ++ case 0x0020: ++ /* RTC */ ++ if (disp16 && unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ gen_helper_rtc(va); ++ break; ++ case 0x0040: ++ /* RCID */ ++ if (disp16 && unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ read_csr(0xc9, va); ++ break; ++ case 0x0080: ++ /* HALT */ ++#ifndef CONFIG_USER_ONLY ++ { ++ tmp32 = tcg_constant_i32(1); ++ tcg_gen_st_i32( ++ tmp32, tcg_env, ++ -offsetof(SW64CPU, env) + offsetof(CPUState, halted)); ++ } ++ ret = gen_excp(ctx, EXCP_HALTED, 0); ++#endif ++ break; ++ case 0x1000: ++ /* RD_F */ ++ if (test_feature(ctx->env, SW64_FEATURE_CORE4)) break; ++ if (disp16 && unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ tcg_gen_mov_i64(va, cpu_lock_success); ++ break; ++ case 0x1020: ++ /* WR_F */ ++ if (test_feature(ctx->env, SW64_FEATURE_CORE4)) break; ++ if (disp16 && unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ tcg_gen_andi_i64(cpu_lock_flag, va, 0x1); ++ break; ++ case 0x1040: ++ /* RTID */ ++ if (unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ read_csr(0xc7, va); ++ break; ++ default: ++ if ((disp16 & 0xFF00) == 0xFC00) { ++ /* CSRWS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new_i64(); ++ read_csr(disp16 & 0xff, tmp64); ++ va = load_gir(ctx, ra); ++ tcg_gen_or_i64(tmp64, tmp64, va); ++ write_csr(disp16 & 0xff, tmp64, ctx->env); ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFD00) { ++ /* CSRWC */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new_i64(); ++ read_csr(disp16 & 0xff, tmp64); ++ va = load_gir(ctx, ra); ++ tcg_gen_andc_i64(tmp64, tmp64, va); ++ write_csr(disp16 & 0xff, tmp64, ctx->env); ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFE00) { ++ /* PRI_RCSR */ ++ if (disp16 && unlikely(ra == 31)) break; ++ va = load_gir(ctx, ra); ++ 
read_csr(disp16 & 0xff, va); ++ break; ++ } ++ if ((disp16 & 0xFF00) == 0xFF00) { ++ /* PRI_WCSR */ ++ va = load_gir(ctx, ra); ++ write_csr(disp16 & 0xff, va, ctx->env); ++ break; ++ } ++ goto do_invalid; ++ } ++ break; ++ case 0x07: ++ /* PRI_RET */ ++ va = load_gir(ctx, ra); ++ tcg_gen_mov_i64(cpu_pc, va); ++ gen_helper_cpustate_update(tcg_env, va); ++ ret = DISAS_PC_UPDATED_NOCHAIN; ++ break; ++ case 0x08: ++ switch (fn4) { ++ case 0x0: ++ /* LLDW */ ++ gen_load_mem(ctx, &gen_qemu_lldw, ra, rb, disp12, 0, 0); ++ break; ++ case 0x1: ++ /* LLDL */ ++ gen_load_mem(ctx, &gen_qemu_lldl, ra, rb, disp12, 0, 0); ++ break; ++ case 0x2: ++ /* LDW_INC */ ++ ldx_xxx(ctx, ra, rb, disp12, 0, 1); ++ break; ++ case 0x3: ++ /* LDL_INC */ ++ ldx_xxx(ctx, ra, rb, disp12, 1, 1); ++ break; ++ case 0x4: ++ /* LDW_DEC */ ++ ldx_xxx(ctx, ra, rb, disp12, 0, -1); ++ break; ++ case 0x5: ++ /* LDL_DEC */ ++ ldx_xxx(ctx, ra, rb, disp12, 1, -1); ++ break; ++ case 0x6: ++ /* LDW_SET */ ++ ldx_set(ctx, ra, rb, disp12, 0); ++ break; ++ case 0x7: ++ /* LDL_SET */ ++ ldx_set(ctx, ra, rb, disp12, 1); ++ break; ++ case 0x8: ++ /* LSTW */ ++ ret = gen_store_conditional(ctx, ra, rb, disp12, ++ ctx->mem_idx, MO_LEUL); ++ break; ++ case 0x9: ++ /* LSTL */ ++ ret = gen_store_conditional(ctx, ra, rb, disp12, ++ ctx->mem_idx, MO_LEUQ); ++ break; ++ case 0xa: ++ /* LDW_NC */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp12, 0, ++ 0); ++ break; ++ case 0xb: ++ /* LDL_NC */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 0, 0); ++ break; ++ case 0xc: ++ /* LDD_NC */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 1, 0); ++ break; ++ case 0xd: ++ /* STW_NC */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp12, 0, ++ 0); ++ break; ++ case 0xe: ++ /* STL_NC */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 0, ++ 0); ++ break; ++ case 0xf: ++ /* STD_NC */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 1, ++ 0); ++ break; ++ default: ++ goto do_invalid; ++ } ++ break; ++ case 0x09: ++ /* LDWE */ ++ gen_load_mem_simd(ctx, &gen_qemu_ldwe, ra, rb, disp16, 0); ++ break; ++ case 0x0a: ++ /* LDSE */ ++ gen_load_mem_simd(ctx, &gen_qemu_ldse, ra, rb, disp16, 0); ++ break; ++ case 0x0b: ++ /* LDDE */ ++ gen_load_mem_simd(ctx, &gen_qemu_ldde, ra, rb, disp16, 0); ++ break; ++ case 0x0c: ++ /* VLDS */ ++ gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp16, 0); ++ break; ++ case 0x0d: ++ /* VLDD */ ++ if (unlikely(ra == 31)) break; ++ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp16, 0); ++ break; ++ case 0x0e: ++ /* VSTS */ ++ gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp16, 0); ++ break; ++ case 0x0f: ++ /* VSTD */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp16, 0); ++ break; ++ case 0x10: ++ if (unlikely(rc == 31)) break; ++ if (fn11 == 0x70) { ++ /* FIMOVS */ ++ va = cpu_fr[ra]; ++ vc = load_gir(ctx, rc); ++ tmp32 = tcg_temp_new_i32(); ++ gen_helper_s_to_memory(tmp32, va); ++ tcg_gen_ext_i32_i64(vc, tmp32); ++ } else if (fn11 == 0x78) { ++ /* FIMOVD */ ++ va = cpu_fr[ra]; ++ vc = load_gir(ctx, rc); ++ tcg_gen_mov_i64(vc, va); ++ } else { ++ va = load_gir(ctx, ra); ++ vb = load_gir(ctx, rb); ++ vc = load_gir(ctx, rc); ++ cal_with_iregs_2(ctx, vc, va, vb, disp13, fn11); ++ } ++ break; ++ case 0x11: ++ if (unlikely(rc == 31)) break; ++ va = load_gir(ctx, ra); ++ vb = load_gir(ctx, rb); ++ vc = load_gir(ctx, rc); ++ vd = load_gir(ctx, rd); ++ cal_with_iregs_3(ctx, vc, va, vb, vd, fn3); ++ break; ++ case 0x12: ++ if (unlikely(rc == 31)) break; ++ va = load_gir(ctx, ra); ++ 
vc = load_gir(ctx, rc); ++ cal_with_imm_2(ctx, vc, va, disp8, fn8); ++ break; ++ case 0x13: ++ if (rc == 31) /* Special deal */ ++ break; ++ va = load_gir(ctx, ra); ++ vc = load_gir(ctx, rc); ++ vd = load_gir(ctx, rd); ++ cal_with_imm_3(ctx, vc, va, disp8, vd, fn3); ++ break; ++ case 0x14: ++ case 0x15: ++ case 0x16: ++ case 0x17: ++ /* VLOGZZ */ ++ tcg_gen_vlogzz_i64(ctx, opc, ra, rb, rd, rc, fn6); ++ break; ++ case 0x18: ++ if (unlikely(rc == 31)) break; ++ cal_with_fregs_2(ctx, rc, ra, rb, fn8); ++ break; ++ case 0x19: ++ if (unlikely(rc == 31)) break; ++ cal_with_fregs_4(ctx, rc, ra, rb, rd, fn6); ++ break; ++ case 0x1A: ++ /* SIMD */ ++ if (unlikely(rc == 31)) break; ++ switch (fn8) { ++ case 0x00: ++ /* VADDW */ ++ tmp64 = tcg_temp_new(); ++ va = tcg_temp_new(); ++ vb = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); ++ tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL); ++ tcg_gen_add_i64(tmp64, va, vb); ++ tcg_gen_ext32u_i64(tmp64, tmp64); ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_andi_i64(vb, cpu_fr[rb + i], ++ 0xffffffff00000000UL); ++ tcg_gen_add_i64(vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); ++ } ++ break; ++ case 0x20: ++ /* VADDW */ ++ tmp64 = tcg_temp_new(); ++ va = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); ++ tcg_gen_addi_i64(tmp64, va, disp8); ++ tcg_gen_ext32u_i64(tmp64, tmp64); ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_addi_i64(vc, va, ((uint64_t)disp8 << 32)); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); ++ } ++ break; ++ case 0x01: ++ /* VSUBW */ ++ tmp64 = tcg_temp_new(); ++ va = tcg_temp_new(); ++ vb = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); ++ tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL); ++ tcg_gen_sub_i64(tmp64, va, vb); ++ tcg_gen_ext32u_i64(tmp64, tmp64); ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_andi_i64(vb, cpu_fr[rb + i], ++ 0xffffffff00000000UL); ++ tcg_gen_sub_i64(vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); ++ } ++ break; ++ case 0x21: ++ /* VSUBW */ ++ tmp64 = tcg_temp_new(); ++ va = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); ++ tcg_gen_subi_i64(tmp64, va, disp8); ++ tcg_gen_ext32u_i64(tmp64, tmp64); ++ tcg_gen_andi_i64(va, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_subi_i64(vc, va, ((uint64_t)disp8 << 32)); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); ++ } ++ break; ++ case 0x02: ++ /* VCMPGEW */ ++ tmp64 = tcg_constant_i64(0); ++ va = tcg_temp_new(); ++ vb = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); ++ tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]); ++ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); ++ tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32); ++ tcg_gen_ext32s_i64(va, va); ++ tcg_gen_ext32s_i64(vb, vb); ++ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ } ++ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); ++ break; ++ case 0x22: ++ /* 
VCMPGEW */ ++ tmp64 = tcg_constant_i64(0); ++ va = tcg_temp_new(); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); ++ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); ++ tcg_gen_ext32s_i64(va, va); ++ tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); ++ tcg_gen_or_i64(tmp64, tmp64, vc); ++ } ++ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); ++ break; ++ case 0x03: ++ /* VCMPEQW */ ++ gen_qemu_vcmpxxw_i64(TCG_COND_EQ, ra, rb, rc); ++ break; ++ case 0x23: ++ /* VCMPEQW */ ++ gen_qemu_vcmpxxwi_i64(TCG_COND_EQ, ra, disp8, rc); ++ break; ++ case 0x04: ++ /* VCMPLEW */ ++ gen_qemu_vcmpxxw_i64(TCG_COND_LE, ra, rb, rc); ++ break; ++ case 0x24: ++ /* VCMPLEW */ ++ gen_qemu_vcmpxxwi_i64(TCG_COND_LE, ra, disp8, rc); ++ break; ++ case 0x05: ++ /* VCMPLTW */ ++ gen_qemu_vcmpxxw_i64(TCG_COND_LT, ra, rb, rc); ++ break; ++ case 0x25: ++ /* VCMPLTW */ ++ gen_qemu_vcmpxxwi_i64(TCG_COND_LT, ra, disp8, rc); ++ break; ++ case 0x06: ++ /* VCMPULEW */ ++ gen_qemu_vcmpxxw_i64(TCG_COND_LEU, ra, rb, rc); ++ break; ++ case 0x26: ++ /* VCMPULEW */ ++ gen_qemu_vcmpxxwi_i64(TCG_COND_LEU, ra, disp8, rc); ++ break; ++ case 0x07: ++ /* VCMPULTW */ ++ gen_qemu_vcmpxxw_i64(TCG_COND_LTU, ra, rb, rc); ++ break; ++ case 0x27: ++ /* VCMPULTW */ ++ gen_qemu_vcmpxxwi_i64(TCG_COND_LTU, ra, disp8, rc); ++ break; ++ case 0x08: ++ /* VSLLW */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x1fUL); ++ ++ tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_ext32u_i64(tmp64, vc); ++ ++ tcg_gen_andi_i64(vc, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_shl_i64(vc, vc, shift); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x28: ++ /* VSLLW */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_movi_i64(shift, disp8 & 0x1fUL); ++ ++ tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_ext32u_i64(tmp64, vc); ++ ++ tcg_gen_andi_i64(vc, cpu_fr[ra + i], ++ 0xffffffff00000000UL); ++ tcg_gen_shl_i64(vc, vc, shift); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x09: ++ /* VSRLW */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x1fUL); ++ ++ tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]); ++ tcg_gen_shr_i64(tmp64, vc, shift); ++ ++ tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x29: ++ /* VSRLW */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_movi_i64(shift, disp8 & 0x1fUL); ++ ++ tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]); ++ tcg_gen_shr_i64(tmp64, vc, shift); ++ ++ tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x0A: ++ /* VSRAW */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x1fUL); ++ ++ tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]); ++ 
tcg_gen_sar_i64(tmp64, vc, shift); ++ ++ tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x2A: ++ /* VSRAWI */ ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_movi_i64(shift, disp8 & 0x1fUL); ++ ++ tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]); ++ tcg_gen_sar_i64(tmp64, vc, shift); ++ ++ tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift); ++ tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); ++ } ++ break; ++ case 0x0B: ++ /* VROLW */ ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x1fUL); ++ ++ tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]); ++ tcg_gen_extrl_i64_i32(tmpb, shift); ++ ++ tcg_gen_rotl_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(tmp64, tmpc); ++ ++ tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]); ++ tcg_gen_rotl_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(vc, tmpc); ++ tcg_gen_shli_i64(vc, vc, 32); ++ ++ tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64); ++ } ++ break; ++ case 0x2B: ++ /* VROLW */ ++ tmpa = tcg_temp_new_i32(); ++ tmpb = tcg_temp_new_i32(); ++ tmpc = tcg_temp_new_i32(); ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ vc = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_movi_i64(shift, disp8 & 0x1fUL); ++ ++ tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]); ++ tcg_gen_extrl_i64_i32(tmpb, shift); ++ ++ tcg_gen_rotl_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(tmp64, tmpc); ++ ++ tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]); ++ tcg_gen_rotl_i32(tmpc, tmpa, tmpb); ++ tcg_gen_extu_i32_i64(vc, tmpc); ++ tcg_gen_shli_i64(vc, vc, 32); ++ ++ tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64); ++ } ++ break; ++ case 0x0C: ++ /* SLLOW */ ++ tcg_gen_sllow_i64(ra, rc, rb); ++ break; ++ case 0x2C: ++ /* SLLOW */ ++ tcg_gen_sllowi_i64(ra, rc, disp8); ++ break; ++ case 0x0D: ++ /* SRLOW */ ++ tcg_gen_srlow_i64(ra, rc, rb); ++ break; ++ case 0x2D: ++ /* SRLOW */ ++ tcg_gen_srlowi_i64(ra, rc, disp8); ++ break; ++ case 0x0E: ++ /* VADDL */ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_add_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x2E: ++ /* VADDL */ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_addi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8); ++ } ++ break; ++ case 0x0F: ++ /* VSUBL */ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_sub_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x2F: ++ /* VSUBL */ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_subi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8); ++ } ++ break; ++ case 0x10: ++ /* VSLLB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x7UL); ++ gen_lshift_mask(tmp64, shift, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shl_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x30: ++ /* VSLLBI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ gen_lshifti_mask(tmp64, disp8 & 0x7UL, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shli_i64(cpu_fr[rc 
+ i], cpu_fr[ra + i], ++ disp8 & 0x7UL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x11: ++ /* VSRLB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x7UL); ++ gen_rshift_mask(tmp64, shift, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shr_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x31: ++ /* VSRLBI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ gen_rshifti_mask(tmp64, disp8 & 0x7UL, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0x7UL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x12: ++ /* VSRAB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tmp64_1 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x7UL); ++ gen_rshift_mask(tmp64, shift, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(tmp64_0, tmp64); ++ gen_rsign_mask(tmp64_0, cpu_fr[ra + i], 8); ++ tcg_gen_sar_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_0, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x32: ++ /* VSRABI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ gen_rshifti_mask(tmp64, disp8 & 0x7UL, 8); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(tmp64_0, tmp64); ++ gen_rsign_mask(tmp64_0, cpu_fr[ra + i], 8); ++ tcg_gen_sari_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8 & 0x7UL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_0, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x13: ++ /* VROLB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tmp64_1 = tcg_temp_new(); ++ tmp64_2 = tcg_temp_new(); ++ tmp64_3 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0x7UL); ++ tcg_gen_subfi_i64(tmp64_3, 8, shift); ++ gen_lshift_mask(tmp64, shift, 8); ++ tcg_gen_not_i64(tmp64_0, tmp64); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shl_i64(tmp64_1, cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(tmp64_1, tmp64_1, tmp64); ++ tcg_gen_shr_i64(tmp64_2, cpu_fr[ra + i], tmp64_3); ++ tcg_gen_and_i64(tmp64_2, tmp64_2, tmp64_0); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_1, tmp64_2); ++ } ++ break; ++ case 0x33: ++ /* VROLBI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tmp64_1 = tcg_temp_new(); ++ tmp64_2 = tcg_temp_new(); ++ tmp64_3 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ gen_lshifti_mask(tmp64, disp8 & 0x7UL, 8); ++ tcg_gen_not_i64(tmp64_0, tmp64); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shli_i64(tmp64_1, cpu_fr[ra + i], disp8 & 0x7UL); ++ tcg_gen_and_i64(tmp64_1, tmp64_1, tmp64); ++ tcg_gen_shri_i64(tmp64_2, cpu_fr[ra + i], 8 - (disp8 & 0x7UL)); ++ tcg_gen_and_i64(tmp64_2, tmp64_2, tmp64_0); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_1, tmp64_2); ++ } ++ break; ++ case 0x14: ++ /* VSLLH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); 
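++ /* All four 16-bit lanes of each 64-bit element are shifted in one 64-bit op; gen_lshift_mask clears the bits that crossed a lane boundary. */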
++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0xfUL); ++ gen_lshift_mask(tmp64, shift, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shl_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x34: ++ /* VSLLHI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ gen_lshifti_mask(tmp64, disp8 & 0xfUL, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shli_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0xfUL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x15: ++ /* VSRLH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0xfUL); ++ gen_rshift_mask(tmp64, shift, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shr_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x35: ++ /* VSRLHI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ gen_rshifti_mask(tmp64, disp8 & 0xfUL, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0xfUL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x16: ++ /* VSRAH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0xfUL); ++ gen_rshift_mask(tmp64, shift, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(tmp64_0, tmp64); ++ gen_rsign_mask(tmp64_0, cpu_fr[ra + i], 16); ++ tcg_gen_sar_i64(cpu_fr[rc + i], cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_0, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x36: ++ /* VSRAHI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ gen_rshifti_mask(tmp64, disp8 & 0xfUL, 16); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(tmp64_0, tmp64); ++ gen_rsign_mask(tmp64_0, cpu_fr[ra + i], 16); ++ tcg_gen_sari_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8 & 0xfUL); ++ tcg_gen_and_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_0, cpu_fr[rc + i]); ++ } ++ break; ++ case 0x17: ++ /* VROLH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tmp64_1 = tcg_temp_new(); ++ tmp64_2 = tcg_temp_new(); ++ tmp64_3 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ tcg_gen_shri_i64(shift, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(shift, shift, 0xfUL); ++ tcg_gen_subfi_i64(tmp64_3, 16, shift); ++ gen_lshift_mask(tmp64, shift, 16); ++ tcg_gen_not_i64(tmp64_0, tmp64); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shl_i64(tmp64_1, cpu_fr[ra + i], shift); ++ tcg_gen_and_i64(tmp64_1, tmp64_1, tmp64); ++ tcg_gen_shr_i64(tmp64_2, cpu_fr[ra + i], tmp64_3); ++ tcg_gen_and_i64(tmp64_2, tmp64_2, tmp64_0); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_1, tmp64_2); ++ } ++ break; ++ case 0x37: ++ /* VROLHI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tmp64_1 = tcg_temp_new(); 
++ tmp64_2 = tcg_temp_new(); ++ tmp64_3 = tcg_temp_new(); ++ shift = tcg_temp_new(); ++ gen_lshifti_mask(tmp64, disp8 & 0xfUL, 16); ++ tcg_gen_not_i64(tmp64_0, tmp64); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shli_i64(tmp64_1, cpu_fr[ra + i], disp8 & 0xfUL); ++ tcg_gen_and_i64(tmp64_1, tmp64_1, tmp64); ++ tcg_gen_shri_i64(tmp64_2, cpu_fr[ra + i], 16 - (disp8 & 0xfUL)); ++ tcg_gen_and_i64(tmp64_2, tmp64_2, tmp64_0); ++ tcg_gen_or_i64(cpu_fr[rc + i], tmp64_1, tmp64_2); ++ } ++ break; ++ case 0x18: ++ /* CTPOPOW */ ++ tmp64 = tcg_temp_new(); /* writable accumulator; a tcg_constant_i64() must not be used as a destination */ ++ tcg_gen_movi_i64(tmp64, 0); ++ tmp64_0 = tcg_temp_new(); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_ctpop_i64(tmp64_0, cpu_fr[ra + i]); ++ tcg_gen_add_i64(tmp64, tmp64, tmp64_0); ++ } ++ tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); ++ break; ++ case 0x19: ++ /* CTLZOW */ ++ va = tcg_constant_i64(ra); ++ gen_helper_ctlzow(cpu_fr[rc], tcg_env, va); ++ break; ++ case 0x1A: ++ /* VSLLL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(tmp64, tmp64, 0x3f); ++ ++ tcg_gen_shl_i64(cpu_fr[rc + i], cpu_fr[ra + i], tmp64); ++ } ++ break; ++ case 0x3A: ++ /* VSLLLI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shli_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0x3f); ++ } ++ break; ++ case 0x1B: ++ /* VSRLL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(tmp64, tmp64, 0x3f); ++ ++ tcg_gen_shr_i64(cpu_fr[rc + i], cpu_fr[ra + i], tmp64); ++ } ++ break; ++ case 0x3B: ++ /* VSRLLI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0x3f); ++ } ++ break; ++ case 0x1C: ++ /* VSRAL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(tmp64, tmp64, 0x3f); ++ ++ tcg_gen_sar_i64(cpu_fr[rc + i], cpu_fr[ra + i], tmp64); ++ } ++ break; ++ case 0x3C: ++ /* VSRALI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_sari_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0x3f); ++ } ++ break; ++ case 0x1D: ++ /* VROLL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_shri_i64(tmp64, cpu_fr[rb], 29); ++ tcg_gen_andi_i64(tmp64, tmp64, 0x3f); ++ ++ tcg_gen_rotl_i64(cpu_fr[rc + i], cpu_fr[ra + i], tmp64); ++ } ++ break; ++ case 0x3D: ++ /* VROLLI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_rotli_i64(cpu_fr[rc + i], cpu_fr[ra + i], ++ disp8 & 0x3f); ++ } ++ break; ++ case 0x1E: ++ /* VMAXB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vmaxb(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x1F: ++ /* VMINB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vminb(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x40: ++ /* VUCADDW */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = 
tcg_constant_i64(rc); ++ gen_helper_vucaddw(tcg_env, va, vb, vc); ++ break; ++ case 0x60: ++ /* VUCADDWI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucaddwi(tcg_env, va, vb, vc); ++ break; ++ case 0x41: ++ /* VUCSUBW */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubw(tcg_env, va, vb, vc); ++ break; ++ case 0x61: ++ /* VUCSUBWI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubwi(tcg_env, va, vb, vc); ++ break; ++ case 0x42: ++ /* VUCADDH */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucaddh(tcg_env, va, vb, vc); ++ break; ++ case 0x62: ++ /* VUCADDHI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucaddhi(tcg_env, va, vb, vc); ++ break; ++ case 0x43: ++ /* VUCSUBH */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubh(tcg_env, va, vb, vc); ++ break; ++ case 0x63: ++ /* VUCSUBHI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubhi(tcg_env, va, vb, vc); ++ break; ++ case 0x44: ++ /* VUCADDB */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucaddb(tcg_env, va, vb, vc); ++ break; ++ case 0x64: ++ /* VUCADDBI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucaddbi(tcg_env, va, vb, vc); ++ break; ++ case 0x45: ++ /* VUCSUBB */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubb(tcg_env, va, vb, vc); ++ break; ++ case 0x65: ++ /* VUCSUBBI */ ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vucsubbi(tcg_env, va, vb, vc); ++ break; ++ case 0x46: ++ /* SRAOW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_sraow_i64(ra, rc, rb); ++ break; ++ case 0x66: ++ /* SRAOWI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_sraowi_i64(ra, rc, disp8); ++ break; ++ case 0x47: ++ /* TODO: VSUMW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tcg_gen_ext32u_i64(tmp64, cpu_fr[ra]); ++ tcg_gen_shri_i64(cpu_fr[rc], cpu_fr[ra], 32); ++ tcg_gen_add_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ for (i = 32; i < 128; i += 32) { ++ tcg_gen_ext32u_i64(tmp64, cpu_fr[ra + i]); ++ tcg_gen_add_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 32); ++ tcg_gen_add_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ } ++ break; ++ case 0x48: ++ /* VSUML */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tcg_gen_mov_i64(cpu_fr[rc], cpu_fr[ra]); ++ for (i = 32; i < 128; i += 32) { ++ tcg_gen_add_i64(cpu_fr[rc], cpu_fr[ra + i], cpu_fr[rc]); ++ } ++ break; ++ case 0x49: ++ /* VSM4R */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vsm4r(tcg_env, va, vb, vc); ++ break; ++ case 0x4A: ++ /* VBINVW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_bswap64_i64(tmp64, cpu_fr[rb + i]); ++ tcg_gen_rotli_i64(tmp64, tmp64, 32); ++ tcg_gen_mov_i64(cpu_fr[rc + i], 
tmp64); ++ } ++ break; ++ case 0x4B: ++ /* VCMPUEQB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vcmpueqb(tcg_env, va, vb, vc); ++ break; ++ case 0x6B: ++ /* VCMPUEQBI*/ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vcmpueqbi(tcg_env, va, vb, vc); ++ break; ++ case 0x4C: ++ /* VCMPUGTB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vcmpugtb(tcg_env, va, vb, vc); ++ break; ++ case 0x6C: ++ /* VCMPUGTBI */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vcmpugtbi(tcg_env, va, vb, vc); ++ break; ++ case 0x4D: ++ /* VSM3MSW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vsm3msw(tcg_env, va, vb, vc); ++ break; ++ case 0x50: ++ /* VMAXH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vmaxh(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x51: ++ /* VMINH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vminh(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x52: ++ /* VMAXW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vmaxw(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x53: ++ /* VMINW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vminw(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x54: ++ /* VMAXL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ tcg_gen_movcond_i64(TCG_COND_GE, vc, va, vb, va, vb); ++ } ++ break; ++ case 0x55: ++ /* VMINL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ tcg_gen_movcond_i64(TCG_COND_LE, vc, va, vb, va, vb); ++ } ++ break; ++ case 0x56: ++ /* VUMAXB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vumaxb(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x57: ++ /* VUMINB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vuminb(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x58: ++ /* VUMAXH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vumaxh(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x59: ++ /* VUMINH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = 
tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vuminh(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x5A: ++ /* VUMAXW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vumaxw(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x5B: ++ /* VUMINW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rc); ++ ++ gen_helper_vuminw(tcg_env, va, vb, vc); ++ ++ break; ++ case 0x5C: ++ /* VUMAXL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ tcg_gen_movcond_i64(TCG_COND_GEU, vc, va, vb, va, vb); ++ } ++ break; ++ case 0x5D: ++ /* VUMINL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ tcg_gen_movcond_i64(TCG_COND_LEU, vc, va, vb, va, vb); ++ } ++ break; ++ case 0x68: ++ /* VSM4KEY */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(disp8); ++ vc = tcg_constant_i64(rc); ++ gen_helper_vsm4key(tcg_env, va, vb, vc); ++ break; ++ case 0x80: ++ /* VADDS */ ++ for (i = 0; i < 128; i += 32) ++ gen_fadds(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x81: ++ /* VADDD */ ++ for (i = 0; i < 128; i += 32) ++ gen_faddd(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x82: ++ /* VSUBS */ ++ for (i = 0; i < 128; i += 32) ++ gen_fsubs(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x83: ++ /* VSUBD */ ++ for (i = 0; i < 128; i += 32) ++ gen_fsubd(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x84: ++ /* VMULS */ ++ for (i = 0; i < 128; i += 32) ++ gen_fmuls(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x85: ++ /* VMULD */ ++ for (i = 0; i < 128; i += 32) ++ gen_fmuld(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x86: ++ /* VDIVS */ ++ for (i = 0; i < 128; i += 32) ++ gen_fdivs(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x87: ++ /* VDIVD */ ++ for (i = 0; i < 128; i += 32) ++ gen_fdivd(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x88: ++ /* VSQRTS */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fsqrts(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ break; ++ case 0x89: ++ /* VSQRTD */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fsqrt(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ break; ++ case 0x8C: ++ /* VFCMPEQ */ ++ for (i = 0; i < 128; i += 32) ++ gen_fcmpeq(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x8D: ++ /* VFCMPLE */ ++ for (i = 0; i < 128; i += 32) ++ gen_fcmple(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x8E: ++ /* VFCMPLT */ ++ for (i = 0; i < 128; i += 32) ++ gen_fcmplt(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x8F: ++ /* VFCMPUN */ ++ for (i = 0; i < 128; i += 32) ++ gen_fcmpun(ctx, ra + i, rb + i, rc + i); ++ break; ++ case 0x90: ++ /* VCPYS */ ++ tcg_gen_vcpys_i64(ra, rb, rc); ++ break; ++ case 0x91: ++ /* VCPYSE */ ++ tcg_gen_vcpyse_i64(ra, rb, rc); ++ break; ++ case 0x92: ++ /* VCPYSN */ ++ tcg_gen_vcpysn_i64(ra, rb, rc); ++ break; ++ case 0x93: ++ /* VSUMS */ ++ gen_fadds(ctx, ra, ra + 32, rc); ++ gen_fadds(ctx, rc, ra + 64, rc); ++ gen_fadds(ctx, rc, ra + 96, rc); ++ break; ++ case 0x94: ++ /* VSUMD */ ++ gen_faddd(ctx, ra, ra + 32, rc); ++ gen_faddd(ctx, rc, ra + 64, rc); ++ gen_faddd(ctx, rc, ra + 96, rc); ++ break; 
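Throughout these SIMD cases a 256-bit vector register is processed as four 64-bit slices: cpu_fr[] has 128 entries (see the extern declaration in translate.h further down), and architectural register r owns cpu_fr[r], cpu_fr[r + 32], cpu_fr[r + 64] and cpu_fr[r + 96], which is why every element loop runs for (i = 0; i < 128; i += 32). A small model of that indexing, assuming exactly the banked layout just described (vaddl_model() is illustrative only):

    #include <stdint.h>

    #define FR_BANK 32          /* stride between the four 64-bit slices */
    static uint64_t fr[128];    /* mirrors the cpu_fr[128] register file */

    /* Lane-wise 64-bit add across one full 256-bit vector register. */
    static void vaddl_model(int ra, int rb, int rc)
    {
        for (int i = 0; i < 128; i += FR_BANK) {
            fr[rc + i] = fr[ra + i] + fr[rb + i];
        }
    }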
++ case 0x95: ++ /* VFCVTSD */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcvtsd(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x96: ++ /* VFCVTDS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcvtds(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x99: ++ /* VFCVTLS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcvtls(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x9A: ++ /* VFCVTLD */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcvtld(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x9B: ++ /* VFCVTDL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcvtdl_dyn(cpu_fr[rc + i], tcg_env, ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x9C: ++ /* VFCVTDL_G */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_fcvtdl(rb + i, rc + i, 0); ++ } ++ break; ++ case 0x9D: ++ /* VFCVTDL_P */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_fcvtdl(rb + i, rc + i, 2); ++ } ++ break; ++ case 0x9E: ++ /* VFCVTDL_Z */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_fcvtdl(rb + i, rc + i, 3); ++ } ++ break; ++ case 0x9F: ++ /* VFCVTDL_N */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_fcvtdl(rb + i, rc + i, 1); ++ } ++ break; ++ case 0xA0: ++ /* VFRIS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat( ++ rb + i, rc + i, gen_helper_fris, ++ 5); ++ } ++ break; ++ case 0xA1: ++ /* VFRIS_G */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_fris, 0); ++ } ++ break; ++ case 0xA2: ++ /* VFRIS_P */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_fris, 2); ++ } ++ break; ++ case 0xA3: ++ /* VFRIS_Z */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_fris, 3); ++ } ++ break; ++ case 0xA4: ++ /* VFRIS_N */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_fris, 1); ++ } ++ break; ++ case 0xA5: ++ /* VFRID */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_frid, 5); ++ } ++ break; ++ case 0xA6: ++ /* VFRID_G */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_frid, 0); ++ } ++ break; ++ case 0xA7: ++ /* VFRID_P */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_frid, 2); ++ } ++ break; ++ case 0xA8: ++ /* VFRID_Z */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_frid, 3); ++ } ++ break; ++ case 0xA9: ++ /* VFRID_N */ 
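The FCVTDL_G/_P/_Z/_N and VFRIS/VFRID families above pass a small integer rounding selector into gen_fcvtdl()/gen_transformat(): 0 for _G, 1 for _N, 2 for _P, 3 for _Z, and 5 for the dynamic (FPCR-controlled) variants. The pairing of those numbers with IEEE rounding directions below is my guess from the suffix letters, not something the patch states; a host-side sketch:

    #include <fenv.h>
    #include <math.h>
    #include <stdint.h>

    /* double -> int64 under one explicit mode.  Index: 0 = _G (nearest
     * even?), 1 = _N (toward -inf?), 2 = _P (toward +inf?), 3 = _Z
     * (toward zero) -- hedged guesses from the mnemonics. */
    static int64_t cvtdl_model(double x, int mode)
    {
        static const int rm[4] = {
            FE_TONEAREST, FE_DOWNWARD, FE_UPWARD, FE_TOWARDZERO
        };
        int old = fegetround();
        fesetround(rm[mode]);
        int64_t r = (int64_t)llrint(x);  /* rounds per the current mode */
        fesetround(old);
        return r;
    }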
++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_transformat(rb + i, rc + i, gen_helper_frid, 1); ++ } ++ break; ++ case 0xAA: ++ /* VFRECS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_frecs(ctx, ra + i, rc + i); ++ } ++ break; ++ case 0xAB: ++ /* VFRECD */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ for (i = 0; i < 128; i += 32) { ++ gen_frecd(ctx, ra + i, rc + i); ++ } ++ break; ++ case 0xAC: ++ /* VMAXS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ gen_helper_fcmpge_s(tmp64, tcg_env, va, vb); ++ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp64, tmp64_0, ++ va, vb); ++ } ++ break; ++ case 0xAD: ++ /* VMINS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ gen_helper_fcmple_s(tmp64, tcg_env, va, vb); ++ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp64, tmp64_0, ++ va, vb); ++ } ++ break; ++ case 0xAE: ++ /* VMAXD */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ gen_helper_fcmpge(tmp64, tcg_env, va, vb); ++ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp64, tmp64_0, ++ va, vb); ++ } ++ break; ++ case 0xAF: ++ /* VMIND */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ for (i = 0; i < 128; i += 32) { ++ va = cpu_fr[ra + i]; ++ vb = cpu_fr[rb + i]; ++ vc = cpu_fr[rc + i]; ++ gen_helper_fcmple(tmp64, tcg_env, va, vb); ++ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp64, tmp64_0, ++ va, vb); ++ } ++ break; ++ default: ++ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn8); ++ ret = gen_invalid(ctx); ++ break; ++ } ++ break; ++ case 0x1B: ++ /* SIMD */ ++ if (unlikely(rc == 31)) break; ++ switch (fn6) { ++ case 0x00: ++ /* VMAS */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fmas(cpu_fr[rc + i], tcg_env, cpu_fr[ra + i], ++ cpu_fr[rb + i], cpu_fr[rd + i]); ++ break; ++ case 0x01: ++ /* VMAD */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fmad(cpu_fr[rc + i], tcg_env, cpu_fr[ra + i], ++ cpu_fr[rb + i], cpu_fr[rd + i]); ++ break; ++ case 0x02: ++ /* VMSS */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fmss(cpu_fr[rc + i], tcg_env, cpu_fr[ra + i], ++ cpu_fr[rb + i], cpu_fr[rd + i]); ++ break; ++ case 0x03: ++ /* VMSD */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fmsd(cpu_fr[rc + i], tcg_env, cpu_fr[ra + i], ++ cpu_fr[rb + i], cpu_fr[rd + i]); ++ break; ++ case 0x04: ++ /* VNMAS */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fnmas(cpu_fr[rc + i], tcg_env, ++ cpu_fr[ra + i], cpu_fr[rb + i], ++ cpu_fr[rd + i]); ++ break; ++ case 0x05: ++ /* VNMAD */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fnmad(cpu_fr[rc + i], tcg_env, ++ cpu_fr[ra + i], cpu_fr[rb + i], ++ cpu_fr[rd + i]); ++ break; ++ case 0x06: ++ /* VNMSS */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fnmss(cpu_fr[rc + i], tcg_env, ++ cpu_fr[ra + i], cpu_fr[rb + i], ++ cpu_fr[rd + i]); ++ break; ++ case 0x07: ++ /* VNMSD */ ++ for (i = 0; i < 128; i += 32) ++ gen_helper_fnmsd(cpu_fr[rc + i], 
tcg_env, ++ cpu_fr[ra + i], cpu_fr[rb + i], ++ cpu_fr[rd + i]); ++ break; ++ case 0x10: ++ /* VFSELEQ */ ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ for (i = 0; i < 128; i += 32) { ++ gen_helper_fcmpeq(tmp64, tcg_env, cpu_fr[ra + i], ++ tmp64_0); ++ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, ++ tmp64_0, cpu_fr[rd + i], ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x12: ++ /* VFSELLT */ ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ tmp64_1 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(tmp64, cpu_fr[ra + i], ++ 0x7fffffffffffffffUL); ++ tcg_gen_setcond_i64(TCG_COND_NE, tmp64, tmp64, ++ tmp64_0); ++ tcg_gen_shri_i64(tmp64_1, cpu_fr[ra +i], 63); ++ tcg_gen_and_i64(tmp64, tmp64_1, tmp64); ++ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, ++ tmp64_0, cpu_fr[rd + i], ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x13: ++ /* VFSELLE */ ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_constant_i64(0); ++ tmp64_1 = tcg_temp_new(); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_andi_i64(tmp64, cpu_fr[ra + i], ++ 0x7fffffffffffffffUL); ++ tcg_gen_setcond_i64(TCG_COND_EQ, tmp64, tmp64, ++ tmp64_0); ++ tcg_gen_shri_i64(tmp64_1, cpu_fr[ra + i], 63); ++ tcg_gen_or_i64(tmp64, tmp64_1, tmp64); ++ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, ++ tmp64_0, cpu_fr[rd + i], ++ cpu_fr[rb + i]); ++ } ++ break; ++ case 0x18: ++ /* VSELEQW */ ++ gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 0); ++ break; ++ case 0x38: ++ /* VSELEQW */ ++ gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 0); ++ break; ++ case 0x19: ++ /* VSELLBCW */ ++ gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 1); ++ break; ++ case 0x39: ++ /* VSELLBCW */ ++ gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 1); ++ break; ++ case 0x1A: ++ /* VSELLTW */ ++ gen_qemu_vselxxw(TCG_COND_LT, ra, rb, rd, rc, 0); ++ break; ++ case 0x3A: ++ /* VSELLTW */ ++ gen_qemu_vselxxwi(TCG_COND_LT, ra, rb, disp5, rc, 0); ++ break; ++ case 0x1B: ++ /* VSELLEW */ ++ gen_qemu_vselxxw(TCG_COND_LE, ra, rb, rd, rc, 0); ++ break; ++ case 0x3B: ++ /* VSELLEW */ ++ gen_qemu_vselxxwi(TCG_COND_LE, ra, rb, disp5, rc, 0); ++ break; ++ case 0x20: ++ /* VINSW */ ++ if (disp5 > 7) break; ++ tmp64 = tcg_temp_new(); ++ tmp32 = tcg_temp_new_i32(); ++ gen_helper_s_to_memory(tmp32, cpu_fr[ra]); ++ tcg_gen_extu_i32_i64(tmp64, tmp32); ++ tcg_gen_shli_i64(tmp64, tmp64, (disp5 % 2) * 32); ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]); ++ } ++ if (disp5 % 2) { ++ tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32], ++ cpu_fr[rc + (disp5 / 2) * 32], ++ 0xffffffffUL); ++ } else { ++ tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32], ++ cpu_fr[rc + (disp5 / 2) * 32], ++ 0xffffffff00000000UL); ++ } ++ tcg_gen_or_i64(cpu_fr[rc + (disp5 / 2) * 32], ++ cpu_fr[rc + (disp5 / 2) * 32], tmp64); ++ break; ++ case 0x21: ++ /* VINSF */ ++ if (disp5 > 3) break; ++ tmp64 = tcg_temp_new(); ++ tcg_gen_mov_i64(tmp64, cpu_fr[ra]); ++ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]); ++ } ++ tcg_gen_mov_i64(cpu_fr[rc + disp5 * 32], tmp64); ++ break; ++ case 0x22: ++ /* VEXTW */ ++ if (disp5 > 7) break; ++ tmp64 = tcg_temp_new(); ++ tmp32 = tcg_temp_new_i32(); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra + (disp5 / 2) * 32], ++ (disp5 % 2) * 32); ++ tcg_gen_extrl_i64_i32(tmp32, tmp64); ++ gen_helper_memory_to_s(tmp64, tmp32); ++ tcg_gen_mov_i64(cpu_fr[rc], tmp64); ++ break; ++ case 0x23: ++ /* VEXTF */ ++ if (disp5 > 3) break; ++ tcg_gen_mov_i64(cpu_fr[rc], cpu_fr[ra + 
disp5 * 32]); ++ break; ++ case 0x24: ++ /* VCPYW */ ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 29); ++ tcg_gen_andi_i64(tmp64_0, tmp64, 0x3fffffffUL); ++ tcg_gen_shri_i64(tmp64, cpu_fr[ra], 62); ++ tcg_gen_shli_i64(tmp64, tmp64, 30); ++ tcg_gen_or_i64(tmp64_0, tmp64, tmp64_0); ++ tcg_gen_mov_i64(tmp64, tmp64_0); ++ tcg_gen_shli_i64(tmp64, tmp64, 32); ++ tcg_gen_or_i64(tmp64_0, tmp64_0, tmp64); ++ tcg_gen_mov_i64(cpu_fr[rc], tmp64_0); ++ tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rc]); ++ break; ++ case 0x25: ++ /* VCPYF */ ++ for (i = 0; i < 128; i += 32) { ++ tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[ra]); ++ } ++ break; ++ case 0x26: ++ /* VCONW */ ++ tmp64 = tcg_constant_i64(ra << 8 | rb); ++ tmp64_0 = tcg_temp_new(); ++ vd = tcg_constant_i64(rc); ++ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2); ++ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x7ul); ++ gen_helper_vconw(tcg_env, tmp64, vd, tmp64_0); ++ break; ++ case 0x27: ++ /* VSHFW */ ++ tmp64 = tcg_constant_i64(ra << 8 | rb); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vshfw(tcg_env, tmp64, vd, cpu_fr[rd]); ++ break; ++ case 0x28: ++ /* VCONS */ ++ tmp64 = tcg_constant_i64(ra << 8 | rb); ++ tmp64_0 = tcg_temp_new(); ++ vd = tcg_constant_i64(rc); ++ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2); ++ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul); ++ gen_helper_vcond(tcg_env, tmp64, vd, tmp64_0); ++ break; ++ case 0x29: ++ /* FIXME: VCOND maybe it's wrong in the instruction book ++ * that there are no temp. */ ++ tmp64 = tcg_constant_i64(ra << 8 | rb); ++ tmp64_0 = tcg_temp_new(); ++ vd = tcg_constant_i64(rc); ++ tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 3); ++ tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul); ++ gen_helper_vcond(tcg_env, tmp64, vd, tmp64_0); ++ break; ++ case 0x2A: ++ /* VINSB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = cpu_fr[ra]; ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vinsb(tcg_env, va, vb, vc, vd); ++ break; ++ case 0x2B: ++ /* VINSH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = cpu_fr[ra]; ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vinsh(tcg_env, va, vb, vc, vd); ++ break; ++ case 0x2C: ++ /* VINSECTLH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vinsectlh(tcg_env, va, vb, vd); ++ break; ++ case 0x2D: ++ /* VINSECTLW */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vinsectlw(tcg_env, va, vb, vd); ++ break; ++ case 0x2E: ++ /* VINSECTLL */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tmp64_0 = tcg_temp_new(); ++ tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rb + 32]); ++ tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[ra + 32]); ++ tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rb]); ++ tcg_gen_mov_i64(cpu_fr[rc], cpu_fr[ra]); ++ break; ++ case 0x2F: ++ /* VINSECTLB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vinsectlb(tcg_env, va, vb, vd); ++ break; ++ case 0x30: ++ /* VSHFQ */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = 
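VCONW, VSHFW and VCONS above squeeze both source register numbers into a single i64 helper argument as ra << 8 | rb, so the helper can index cpu_fr itself. A sketch of that pack/unpack convention, assuming 5-bit register numbers (the 8-bit field width follows from the shift):

    #include <assert.h>
    #include <stdint.h>

    static inline uint64_t pack_regs(unsigned ra, unsigned rb)
    {
        assert(ra < 32 && rb < 32);
        return ((uint64_t)ra << 8) | rb;  /* as in tcg_constant_i64(ra << 8 | rb) */
    }

    static inline void unpack_regs(uint64_t v, unsigned *ra, unsigned *rb)
    {
        *ra = (v >> 8) & 0xff;
        *rb = v & 0xff;
    }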
tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vshfq(tcg_env, va, vb, vc, vd); ++ break; ++ case 0x31: ++ /* VSHFQB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vshfqb(tcg_env, va, vb, vd); ++ break; ++ case 0x32: ++ /* VCPYB */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[ra], 0xffUL); ++ tcg_gen_shli_i64(tmp64, cpu_fr[rc], 8); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_shli_i64(tmp64, cpu_fr[rc], 16); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_shli_i64(tmp64, cpu_fr[rc], 32); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rc]); ++ break; ++ case 0x33: ++ /* VCPYH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ tmp64 = tcg_temp_new(); ++ tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[ra], 0xffffUL); ++ tcg_gen_shli_i64(tmp64, cpu_fr[rc], 16); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_shli_i64(tmp64, cpu_fr[rc], 32); ++ tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[rc]); ++ tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rc]); ++ break; ++ case 0x34: ++ /* VSM3R */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vsm3r(tcg_env, va, vb, vc, vd); ++ break; ++ case 0x35: ++ /* VFCVTSH */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vfcvtsh(tcg_env, va, vb, vc, vd); ++ break; ++ case 0x36: ++ /* VFCVTHS */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ va = tcg_constant_i64(ra); ++ vb = tcg_constant_i64(rb); ++ vc = tcg_constant_i64(rd); ++ vd = tcg_constant_i64(rc); ++ gen_helper_vfcvths(tcg_env, va, vb, vc, vd); ++ break; ++ default: ++ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn6); ++ ret = gen_invalid(ctx); ++ break; ++ } ++ break; ++ case 0x1C: ++ switch (fn4) { ++ case 0x0: ++ /* VLDW_U */ ++ if (unlikely(ra == 31)) break; ++ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, ++ ~0x1fUL); ++ break; ++ case 0x1: ++ /* VSTW_U */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, ++ ~0x1fUL); ++ break; ++ case 0x2: ++ /* VLDS_U */ ++ if (unlikely(ra == 31)) break; ++ gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp12, ++ ~0xfUL); ++ break; ++ case 0x3: ++ /* VSTS_U */ ++ gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp12, ++ ~0xfUL); ++ break; ++ case 0x4: ++ /* VLDD_U */ ++ if (unlikely(ra == 31)) break; ++ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, ++ ~0x1fUL); ++ break; ++ case 0x5: ++ /* VSTD_U */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, ++ ~0x1fUL); ++ break; ++ case 0x8: ++ /* VSTW_UL */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstw_ul, ra, rb, disp12, ++ 0); ++ break; ++ case 0x9: ++ /* VSTW_UH */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstw_uh, ra, rb, disp12, ++ 0); ++ break; ++ case 0xa: ++ /* VSTS_UL */ ++ gen_store_mem_simd(ctx, &gen_qemu_vsts_ul, ra, rb, disp12, 
++ 0); ++ break; ++ case 0xb: ++ /* VSTS_UH */ ++ gen_store_mem_simd(ctx, &gen_qemu_vsts_uh, ra, rb, disp12, ++ 0); ++ break; ++ case 0xc: ++ /* VSTD_UL */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd_ul, ra, rb, disp12, ++ 0); ++ break; ++ case 0xd: ++ /* VSTD_UH */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd_uh, ra, rb, disp12, ++ 0); ++ break; ++ case 0xe: ++ /* VLDD_NC */ ++ gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, 0); ++ break; ++ case 0xf: ++ /* VSTD_NC */ ++ gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, 0); ++ break; ++ default: ++ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn4); ++ ret = gen_invalid(ctx); ++ break; ++ } ++ break; ++ case 0x1D: ++ /* LBR */ ++ ret = gen_bdirect(ctx, 31, disp26); ++ break; ++ case 0x1E: ++ /* LD/ST CORE4 */ ++ arch_assert(test_feature(ctx->env, SW64_FEATURE_CORE4)); ++ switch (fn4) { ++ case 0x0: ++ /* LDBU_A */ ++ vb = load_gir(ctx, rb); ++ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x1: ++ /* LDHU_A */ ++ vb = load_gir(ctx, rb); ++ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x2: ++ /* LDW_A */ ++ vb = load_gir(ctx, rb); ++ /* SEXT to ra */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x3: ++ /* LDL_A */ ++ vb = load_gir(ctx, rb); ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x4: ++ /* FLDS_A */ ++ vb = load_gir(ctx, rb); ++ gen_load_mem(ctx, &gen_qemu_flds, ra, rb, 0, 1, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x5: ++ /* FLDD_A */ ++ vb = load_gir(ctx, rb); ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, 0, 1, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x6: ++ /* STBU_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x7: ++ /* STHU_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x8: ++ /* STW_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0x9: ++ /* STL_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, 0, 0, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0xA: ++ /* FSTS_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &gen_qemu_fsts, ra, rb, 0, 1, 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0xB: ++ /* FSTD_A */ ++ vb = load_gir(ctx, rb); ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, 0, 1, ++ 0); ++ tcg_gen_addi_i64(vb, vb, disp12); ++ break; ++ case 0xE: ++ /* TODO: DPFHR */ ++ break; ++ case 0xF: ++ /* TODO: DPFHW */ ++ break; ++ default: ++ printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn4); ++ ret = gen_invalid(ctx); ++ } ++ break; ++ case 0x20: ++ /* LDBU */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); ++ break; ++ case 0x21: ++ /* LDHU */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); ++ break; ++ case 0x22: ++ /* LDW */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); ++ break; ++ case 0x23: ++ /* LDL */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); ++ break; ++ case 0x24: ++ /* LDL_U */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, ~0x7UL); ++ break; ++ case 0x25: 
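The CORE4 *_A loads and stores in the 0x1E group above are post-modify accesses: the memory operand goes through rb first, and only then is rb advanced by disp12. A scalar model of LDL_A under that reading (the names here are illustrative, not from the patch):

    #include <stdint.h>
    #include <string.h>

    /* regs[ra] = 64-bit load from regs[rb]; then regs[rb] += disp12. */
    static void ldl_a_model(uint64_t regs[32], int ra, int rb,
                            int64_t disp12, const uint8_t *mem)
    {
        uint64_t addr = regs[rb];
        memcpy(&regs[ra], mem + addr, sizeof(uint64_t)); /* access first */
        regs[rb] = addr + disp12;                        /* then post-increment */
    }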
++ /* PRI_LD */ ++#ifndef CONFIG_USER_ONLY ++ if ((insn >> 12) & 1) { ++ gen_load_mem(ctx, &gen_qemu_pri_ldl, ra, rb, disp12, 0, ~0x7UL); ++ } else { ++ gen_load_mem(ctx, &gen_qemu_pri_ldw, ra, rb, disp12, 0, ~0x3UL); ++ } ++#endif ++ break; ++ case 0x26: ++ /* FLDS */ ++ gen_load_mem(ctx, &gen_qemu_flds, ra, rb, disp16, 1, 0); ++ break; ++ case 0x27: ++ /* FLDD */ ++ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); ++ break; ++ case 0x28: ++ /* STB */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); ++ break; ++ case 0x29: ++ /* STH */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); ++ break; ++ case 0x2a: ++ /* STW */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); ++ break; ++ case 0x2b: ++ /* STL */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); ++ break; ++ case 0x2c: ++ /* STL_U */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, ~0x7UL); ++ break; ++ case 0x2d: ++ /* PRI_ST */ ++#ifndef CONFIG_USER_ONLY ++ if ((insn >> 12) & 1) { ++ gen_store_mem(ctx, &gen_qemu_pri_stl, ra, rb, disp12, 0, ~0x7UL); ++ } else { ++ gen_store_mem(ctx, &gen_qemu_pri_stw, ra, rb, disp12, 0, ~0x3UL); ++ } ++#endif ++ break; ++ case 0x2e: ++ /* FSTS */ ++ gen_store_mem(ctx, &gen_qemu_fsts, ra, rb, disp16, 1, 0); ++ break; ++ case 0x2f: ++ /* FSTD */ ++ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); ++ break; ++ case 0x30: ++ /* BEQ */ ++ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x31: ++ /* BNE */ ++ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x32: ++ /* BLT */ ++ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x33: ++ /* BLE */ ++ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x34: ++ /* BGT */ ++ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x35: ++ /* BGE */ ++ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, (uint64_t)-1); ++ break; ++ case 0x36: ++ /* BLBC */ ++ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); ++ break; ++ case 0x37: ++ /* BLBS */ ++ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); ++ break; ++ case 0x38: ++ /* FBEQ */ ++ ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); ++ break; ++ case 0x39: ++ /* FBNE */ ++ ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); ++ break; ++ case 0x3a: ++ /* FBLT */ ++ ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); ++ break; ++ case 0x3b: ++ /* FBLE */ ++ ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); ++ break; ++ case 0x3c: ++ /* FBGT */ ++ ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); ++ break; ++ case 0x3d: ++ /* FBGE */ ++ ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); ++ break; ++ case 0x3f: ++ /* LDIH */ ++ disp16 = ((uint32_t)disp16) << 16; ++ if (ra == 31) break; ++ va = load_gir(ctx, ra); ++ if (rb == 31) { ++ tcg_gen_movi_i64(va, disp16); ++ } else { ++ tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16); ++ } ++ break; ++ case 0x3e: ++ /* LDI */ ++ if (ra == 31) break; ++ va = load_gir(ctx, ra); ++ if (rb == 31) { ++ tcg_gen_movi_i64(va, disp16); ++ } else { ++ tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16); ++ } ++ break; ++ do_invalid: ++ default: ++ printf("ILLEGAL BELOW OPC[%x] insn[%08x]\n", opc, insn); ++ ret = gen_invalid(ctx); ++ } ++ return ret; ++} ++static void sw64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) ++{ ++ DisasContext* ctx = container_of(dcbase, DisasContext, base); ++ CPUSW64State* env = cpu_env(cpu); /*init by 
instance_initfn*/ ++ ++ ctx->tbflags = ctx->base.tb->flags; ++ ctx->mem_idx = cpu_mmu_index(env, false); ++#ifdef CONFIG_USER_ONLY ++ ctx->ir = cpu_std_ir; ++#else ++ ctx->ir = (ctx->tbflags & ENV_FLAG_HM_MODE ? cpu_hm_ir : cpu_std_ir); ++#endif ++ ctx->zero = NULL; ++} ++ ++static void sw64_tr_tb_start(DisasContextBase *db, CPUState *cpu) ++{ ++} ++ ++static void sw64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) ++{ ++ tcg_gen_insn_start(dcbase->pc_next); ++} ++ ++static void sw64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) ++{ ++ DisasContext *ctx = container_of(dcbase, DisasContext, base); ++ CPUSW64State *env = cpu_env(cpu); ++ uint32_t insn; ++ ++ insn = cpu_ldl_code(env, ctx->base.pc_next & (~3UL)); ++ ctx->env = env; ++ ctx->base.pc_next += 4; ++ ctx->base.is_jmp = ctx->translate_one(dcbase, insn, cpu); ++ free_context_temps(ctx); ++} ++ ++static void sw64_tr_tb_stop(DisasContextBase* dcbase, CPUState* cpu) { ++ DisasContext* ctx = container_of(dcbase, DisasContext, base); ++ ++ switch (ctx->base.is_jmp) { ++ case DISAS_NORETURN: ++ break; ++ case DISAS_TOO_MANY: ++ if (use_goto_tb(ctx, ctx->base.pc_next)) { ++ tcg_gen_goto_tb(0); ++ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); ++ tcg_gen_exit_tb(ctx->base.tb, 0); ++ } ++ /* FALLTHRU */ ++ case DISAS_PC_STALE: ++ tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); ++ /* FALLTHRU */ ++ case DISAS_PC_UPDATED: ++ if (!use_exit_tb(ctx)) { ++ tcg_gen_lookup_and_goto_ptr(); ++ break; ++ } ++ /* FALLTHRU */ ++ case DISAS_PC_UPDATED_NOCHAIN: ++ if (ctx->base.singlestep_enabled) { ++ cpu_loop_exit(cpu); ++ } else { ++ tcg_gen_exit_tb(NULL, 0); ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void sw64_tr_disas_log(const DisasContextBase* dcbase, ++ CPUState* cpu, FILE *logfile) ++{ ++ SW64CPU* sc = SW64_CPU(cpu); ++ ++ qemu_log("IN(%d): %s\n", sc->core_id, ++ lookup_symbol(dcbase->pc_first)); ++ target_disas(logfile, cpu, dcbase->pc_first & (~0x3UL), dcbase->tb->size); ++} ++ ++static void init_transops(CPUState *cpu, DisasContext *dc) ++{ ++ dc->translate_one = translate_one; ++} ++ ++static const TranslatorOps sw64_trans_ops = { ++ .init_disas_context = sw64_tr_init_disas_context, ++ .tb_start = sw64_tr_tb_start, ++ .insn_start = sw64_tr_insn_start, ++ .translate_insn = sw64_tr_translate_insn, ++ .tb_stop = sw64_tr_tb_stop, ++ .disas_log = sw64_tr_disas_log, ++}; ++ ++void gen_intermediate_code(CPUState* cpu, TranslationBlock* tb, int *max_insns, ++ target_ulong pc, void *host_pc) ++{ ++ DisasContext dc; ++ init_transops(cpu, &dc); ++ ++ translator_loop(cpu, tb, max_insns, pc, host_pc, &sw64_trans_ops, &dc.base); ++} +diff --git a/target/sw64/translate.h b/target/sw64/translate.h +new file mode 100644 +index 0000000000..183d7680b8 +--- /dev/null ++++ b/target/sw64/translate.h +@@ -0,0 +1,60 @@ ++#ifndef SW64_TRANSLATE_H ++#define SW64_TRANSLATE_H ++#include "qemu/osdep.h" ++#include "cpu.h" ++#include "sysemu/cpus.h" ++#include "disas/disas.h" ++#include "qemu/host-utils.h" ++#include "exec/exec-all.h" ++#include "exec/cpu_ldst.h" ++#include "tcg/tcg-op.h" ++#include "tcg/tcg-op-gvec.h" ++#include "exec/helper-proto.h" ++#include "exec/helper-gen.h" ++#include "exec/translator.h" ++#include "exec/log.h" ++ ++#define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 ++#define DISAS_PC_UPDATED DISAS_TARGET_1 ++#define DISAS_PC_STALE DISAS_TARGET_2 ++#define DISAS_PC_UPDATED_T DISAS_TOO_MANY ++ ++typedef struct DisasContext DisasContext; ++struct DisasContext { ++ DisasContextBase base; ++ ++ uint32_t 
tbflags; ++ ++ /* The set of registers active in the current context. */ ++ TCGv *ir; ++ ++ /* Accel: Temporaries for $31 and $f31 as source and destination. */ ++ TCGv zero; ++ int mem_idx; ++ CPUSW64State *env; ++ DisasJumpType (*translate_one)(DisasContextBase *dcbase, uint32_t insn, ++ CPUState *cpu); ++}; ++ ++extern TCGv cpu_pc; ++extern TCGv cpu_std_ir[31]; ++extern TCGv cpu_fr[128]; ++extern TCGv cpu_lock_addr; ++extern TCGv cpu_lock_flag; ++extern TCGv cpu_lock_success; ++#ifdef SW64_FIXLOCK ++extern TCGv cpu_lock_value; ++#endif ++#ifndef CONFIG_USER_ONLY ++extern TCGv cpu_hm_ir[31]; ++#endif ++ ++DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn, ++ CPUState *cpu); ++DisasJumpType th1_translate_one(DisasContextBase *dcbase, uint32_t insn, ++ CPUState *cpu); ++bool use_exit_tb(DisasContext *ctx); ++bool use_goto_tb(DisasContext *ctx, uint64_t dest); ++void insn_profile(DisasContext *ctx, uint32_t insn); ++extern void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src); ++#endif +diff --git a/tcg/sw64/tcg-target-con-set.h b/tcg/sw64/tcg-target-con-set.h +new file mode 100755 +index 0000000000..71fdfdcbef +--- /dev/null ++++ b/tcg/sw64/tcg-target-con-set.h +@@ -0,0 +1,39 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Define SW_64 target-specific constraint sets. ++ * Copyright (c) 2021 Linaro ++ */ ++ ++/* ++ * C_On_Im(...) defines a constraint set with outputs and inputs. ++ * Each operand should be a sequence of constraint letters as defined by ++ * tcg-target-con-str.h; the constraint combination is inclusive or. ++ */ ++C_O0_I1(r) ++C_O0_I2(lZ, l) ++C_O0_I2(r, rA) ++C_O0_I2(rZ, r) ++C_O0_I2(w, r) ++C_O1_I1(r, l) ++C_O1_I1(r, r) ++C_O1_I1(w, r) ++C_O1_I1(w, w) ++C_O1_I1(w, wr) ++C_O1_I2(r, 0, rZ) ++C_O1_I2(r, r, r) ++C_O1_I2(r, r, rA) ++C_O1_I2(r, r, rAL) ++C_O1_I2(r, r, ri) ++C_O1_I2(r, r, rL) ++C_O1_I2(r, rZ, rZ) ++C_O1_I2(w, 0, w) ++C_O1_I2(w, w, w) ++C_O1_I2(w, w, wN) ++C_O1_I2(w, w, wO) ++C_O1_I2(w, w, wZ) ++C_O1_I3(w, w, w, w) ++C_O1_I4(r, r, rA, rZ, rZ) ++C_O2_I4(r, r, rZ, rZ, rA, rMZ) ++C_O1_I4(r, r, rU, rZ, rZ) ++C_O0_I2(r, rU) ++C_O1_I2(r, r, rU) +diff --git a/tcg/sw64/tcg-target-con-str.h b/tcg/sw64/tcg-target-con-str.h +new file mode 100755 +index 0000000000..47edb3837b +--- /dev/null ++++ b/tcg/sw64/tcg-target-con-str.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Define sw_64 target-specific operand constraints. 
++ * Copyright (c) 2021 Linaro ++ */ ++ ++/* ++ * Define constraint letters for register sets: ++ * REGS(letter, register_mask) ++ */ ++REGS('r', ALL_GENERAL_REGS) ++REGS('l', ALL_QLDST_REGS) ++REGS('w', ALL_VECTOR_REGS) ++ ++/* ++ * Define constraint letters for constants: ++ * CONST(letter, TCG_CT_CONST_* bit set) ++ */ ++ ++CONST('Z', TCG_CT_CONST_ZERO) ++CONST('A', TCG_CT_CONST_LONG) ++CONST('M', TCG_CT_CONST_MONE) ++CONST('O', TCG_CT_CONST_ORRI) ++CONST('W', TCG_CT_CONST_WORD) ++CONST('L', TCG_CT_CONST_LONG) ++CONST('U', TCG_CT_CONST_U8) ++CONST('S', TCG_CT_CONST_S8) ++ +diff --git a/tcg/sw64/tcg-target-reg-bits.h b/tcg/sw64/tcg-target-reg-bits.h +new file mode 100644 +index 0000000000..34a6711013 +--- /dev/null ++++ b/tcg/sw64/tcg-target-reg-bits.h +@@ -0,0 +1,12 @@ ++/* SPDX-License-Identifier: MIT */ ++/* ++ * Define target-specific register size ++ * Copyright (c) 2023 Linaro ++ */ ++ ++#ifndef TCG_TARGET_REG_BITS_H ++#define TCG_TARGET_REG_BITS_H ++ ++#define TCG_TARGET_REG_BITS 64 ++ ++#endif +diff --git a/tcg/sw64/tcg-target.c.inc b/tcg/sw64/tcg-target.c.inc +new file mode 100755 +index 0000000000..d92dcf9a2b +--- /dev/null ++++ b/tcg/sw64/tcg-target.c.inc +@@ -0,0 +1,2560 @@ ++/* ++ * Initial TCG Implementation for sw_64 ++ * ++ */ ++ ++#include "../tcg-ldst.c.inc" ++#include "../tcg-pool.c.inc" ++#include "qemu/bitops.h" ++ ++/* We're going to re-use TCGType in setting of the SF bit, which controls ++ the size of the operation performed. If we know the values match, it ++ makes things much cleaner. */ ++QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); ++ ++#ifdef CONFIG_DEBUG_TCG ++static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { ++ "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", ++ "X8", "X9", "X10", "X11", "X12", "X13", "X14", "fp", ++ "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23", ++ "X24", "X25", "X26", "X27", "X28", "X29", "Xsp", "X31", ++ ++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", ++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", ++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", ++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", ++}; ++#endif /* CONFIG_DEBUG_TCG */ ++ ++static const int tcg_target_reg_alloc_order[] = { ++ /* TCG_REG_X9 qemu saved for AREG0*/ ++ TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, ++ ++ TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, ++ TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, ++ ++ TCG_REG_X22, TCG_REG_X23, /* TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, */ ++ ++ /* TCG_REG_SP=TCG_REG_X15 saved for system*/ ++ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, TCG_REG_X28, /* TCG_REG_X29, TCG_REG_X30, TCG_REG_X31 */ ++ ++ /* TCG_REG_TMP=TCG_REG_X27 reserved as temporary register */ ++ /* TCG_REG_TMP2=TCG_REG_X25 reserved as temporary register */ ++ /* TCG_REG_TMP3=TCG_REG_X24 reserved as temporary register */ ++ /* TCG_REG_RA=TCG_REG_X26 reserved as temporary */ ++ /* TCG_REG_GP=TCG_REG_X29 gp saved for system*/ ++ /* TCG_REG_SP=TCG_REG_X30 sp saved for system*/ ++ /* TCG_REG_ZERO=TCG_REG_X31 zero saved for system*/ ++ ++ TCG_REG_F2, TCG_REG_F3, TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7, TCG_REG_F8, TCG_REG_F9, /* f2-f9 saved registers */ ++ /* TCG_VEC_TMP=TCG_REG_F10, TCG_VEC_TMP2=TCG_REG_F11, are saved as temporary */ ++ TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15, /* f10-f15 temporary registers */ ++ ++ TCG_REG_F22, TCG_REG_F23, TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27, 
TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, /* f22-f30 temporary registers */ ++ /* TCG_REG_F31, zero saved for system */ ++ ++ TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19, TCG_REG_F20, TCG_REG_F21, /* input args */ ++ ++ TCG_REG_F0, TCG_REG_F1, /*output args */ ++}; ++ ++static const int tcg_target_call_iarg_regs[6] = { ++ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, ++}; ++static const int tcg_target_call_oarg_regs[1] = { ++ TCG_REG_X0, ++}; ++ ++static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) ++{ ++ tcg_debug_assert(kind == TCG_CALL_RET_NORMAL); ++ tcg_debug_assert(slot >= 0 && slot <= 1); ++ return TCG_REG_X0 + slot; ++} ++ ++#define TCG_REG_TMP TCG_REG_X27 ++#define TCG_REG_TMP1 TCG_REG_X26 ++#define TCG_REG_TMP2 TCG_REG_X25 ++#define TCG_REG_TMP3 TCG_REG_X24 ++#define TCG_FLOAT_TMP TCG_REG_F10 ++#define TCG_FLOAT_TMP2 TCG_REG_F11 ++ ++#define REG0(I) (const_args[I] ? TCG_REG_ZERO : (TCGReg)args[I]) ++#define tcg_out_insn_jump tcg_out_insn_ldst ++#define tcg_out_insn_bitReg tcg_out_insn_simpleReg ++#define zeroExt 0 ++#define sigExt 1 ++#define noPara 0//represent this parament of function isnot needed. ++ ++#define TCG_REG_GUEST_BASE TCG_REG_X14 ++ ++static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target) ++{ ++ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); ++ ptrdiff_t offset = target - src_rx -1; ++ ++ if (offset == sextract64(offset, 0, 21)) { ++ /* read instruction, mask away previous PC_REL21 parameter contents, ++ set the proper offset, then write back the instruction. */ ++ *src_rw = deposit32(*src_rw, 0, 21, offset); ++ return true; ++ } ++ return false; ++} ++ ++static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) ++{ ++ tcg_debug_assert(addend == 0); ++ switch (type) { ++ case R_SW_64_BRADDR: ++ return reloc_pc21(code_ptr, (const tcg_insn_unit *)value); ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++/* ++* contact with "tcg-target-con-str.h" ++*/ ++#define TCG_CT_CONST_ZERO 0x100 ++#define TCG_CT_CONST_LONG 0x200 ++#define TCG_CT_CONST_MONE 0x400 ++#define TCG_CT_CONST_ORRI 0x800 ++#define TCG_CT_CONST_WORD 0X1000 ++#define TCG_CT_CONST_U8 0x2000 ++#define TCG_CT_CONST_S8 0X4000 ++ ++#define ALL_GENERAL_REGS 0xffffffffu ++#define ALL_VECTOR_REGS 0xffffffff00000000ull ++ ++#ifdef CONFIG_SOFTMMU ++#define ALL_QLDST_REGS \ ++ (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \ ++ (1 << TCG_REG_X2) | (1 << TCG_REG_X3))) ++#else ++#define ALL_QLDST_REGS ALL_GENERAL_REGS ++#endif ++ ++/* sw test if a constant matches the constraint */ ++static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece) ++{ ++ if (ct & TCG_CT_CONST) { ++ return 1; ++ } ++ if (type == TCG_TYPE_I32) { ++ val = (int32_t)val; ++ } ++ if ((ct & TCG_CT_CONST_U8) && 0 <= val && val <= 255) { ++ return 1; ++ } ++ if ((ct & TCG_CT_CONST_LONG)) { ++ return 1; ++ } ++ if ((ct & TCG_CT_CONST_MONE)) { ++ return 1; ++ } ++ if ((ct & TCG_CT_CONST_ORRI)) { ++ return 1; ++ } ++ if ((ct & TCG_CT_CONST_WORD)) { ++ return 1; ++ } ++ if ((ct & TCG_CT_CONST_ZERO) && val == 0) { ++ return 1; ++ } ++ return 0; ++} ++ ++#define OPC_OP(x) (((uint32_t)(x) & 0x3f) << 26) ++#define OPC_FUNC(x) (((x) & 0xff) << 5) ++#define OPC_FUNC_COMPLEX(x) (((x) & 0xff) << 10) ++typedef enum { ++ OPC_NOP =0X43ff075f, ++ OPC_SYS_CALL =OPC_OP(0x00), ++ OPC_CALL =OPC_OP(0x01), ++ OPC_RET =OPC_OP(0x02), ++ OPC_JMP =OPC_OP(0x03), ++ OPC_BR =OPC_OP(0x04), ++ OPC_BSR =OPC_OP(0x05), ++ 
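reloc_pc21() above accepts a branch target only if the word displacement survives a 21-bit sign-extension round trip, then deposits it into the low 21 bits of the instruction. A self-contained version of the same range check and field insert (patch_disp21() is an illustrative name):

    #include <stdbool.h>
    #include <stdint.h>

    static bool patch_disp21(uint32_t *insn, int64_t disp)
    {
        /* same acceptance test as 'offset == sextract64(offset, 0, 21)' */
        if (disp < -(1 << 20) || disp >= (1 << 20)) {
            return false;    /* out of range: caller must use a long branch */
        }
        *insn = (*insn & ~0x1fffffu) | ((uint32_t)disp & 0x1fffff);
        return true;
    }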
OPC_PRI_RET =OPC_OP(0x07), ++ OPC_LDWE =OPC_OP(0x09), ++ OPC_LDSE =OPC_OP(0x0A), ++ OPC_LDDE =OPC_OP(0x0B), ++ OPC_VLDS =OPC_OP(0x0C), ++ OPC_VLDD =OPC_OP(0x0D), ++ OPC_VSTS =OPC_OP(0x0E), ++ OPC_VSTD =OPC_OP(0x0F), ++ ++ OPC_LDBU =OPC_OP(0x20), ++ OPC_LDHU =OPC_OP(0x21), ++ OPC_LDW =OPC_OP(0x22), ++ OPC_LDL =OPC_OP(0x23), ++ OPC_LDL_U =OPC_OP(0x24), ++ OPC_FLDS =OPC_OP(0X26), ++ OPC_PRI_LD =OPC_OP(0x25), ++ OPC_FLDD =OPC_OP(0X27), ++ OPC_STB =OPC_OP(0X28), ++ OPC_STH =OPC_OP(0x29), ++ OPC_STW =OPC_OP(0x2a), ++ OPC_STL =OPC_OP(0x2B), ++ OPC_STL_U =OPC_OP(0x2C), ++ OPC_PRI_ST =OPC_OP(0x2D), ++ OPC_FSTS =OPC_OP(0x2E), ++ OPC_FSTD =OPC_OP(0x2F), ++ ++ OPC_BEQ =OPC_OP(0x30), ++ OPC_BNE =OPC_OP(0x31), ++ OPC_BLT =OPC_OP(0x32), ++ OPC_BLE =OPC_OP(0x33), ++ OPC_BGT =OPC_OP(0x34), ++ OPC_BGE =OPC_OP(0x35), ++ OPC_BLBC =OPC_OP(0x36), ++ OPC_BLBS =OPC_OP(0x37), ++ ++ OPC_FBEQ =OPC_OP(0x38), ++ OPC_FBNE =OPC_OP(0x39), ++ OPC_FBLT =OPC_OP(0x3A), ++ OPC_FBLE =OPC_OP(0x3B), ++ OPC_FBGT =OPC_OP(0x3C), ++ OPC_FBGE =OPC_OP(0x3D), ++ OPC_LDI =OPC_OP(0x3E), ++ OPC_LDIH =OPC_OP(0x3F), ++ ++ OPC_ADDW =(OPC_OP(0x10) | OPC_FUNC(0x0)), ++ OPC_ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x0)), ++ OPC_SUBW =(OPC_OP(0x10) | OPC_FUNC(0x1)), ++ OPC_SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x1)), ++ OPC_S4ADDW =(OPC_OP(0x10) | OPC_FUNC(0x02)), ++ OPC_S4ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x02)), ++ OPC_S4SUBW =(OPC_OP(0x10) | OPC_FUNC(0x03)), ++ OPC_S4SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x03)), ++ ++ OPC_S8ADDW =(OPC_OP(0x10) | OPC_FUNC(0x04)), ++ OPC_S8ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x04)), ++ OPC_S8SUBW =(OPC_OP(0x10) | OPC_FUNC(0x05)), ++ OPC_S8SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x05)), ++ ++ OPC_ADDL =(OPC_OP(0x10) | OPC_FUNC(0x8)), ++ OPC_ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0x8)), ++ OPC_SUBL =(OPC_OP(0x10) | OPC_FUNC(0x9)), ++ OPC_SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0x9)), ++ ++ OPC_S4ADDL =(OPC_OP(0x10) | OPC_FUNC(0xA)), ++ OPC_S4ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xA)), ++ OPC_S4SUBL =(OPC_OP(0x10) | OPC_FUNC(0xB)), ++ OPC_S4SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xB)), ++ ++ OPC_S8ADDL =(OPC_OP(0x10) | OPC_FUNC(0xC)), ++ OPC_S8ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xC)), ++ OPC_S8SUBL =(OPC_OP(0x10) | OPC_FUNC(0xD)), ++ OPC_S8SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xD)), ++ ++ OPC_MULW =(OPC_OP(0x10) | OPC_FUNC(0x10)), ++ OPC_MULW_I =(OPC_OP(0x12) | OPC_FUNC(0x10)), ++ OPC_MULL =(OPC_OP(0x10) | OPC_FUNC(0x18)), ++ OPC_MULL_I =(OPC_OP(0x12) | OPC_FUNC(0x18)), ++ ++ OPC_UMULH =(OPC_OP(0x10) | OPC_FUNC(0x19)), ++ OPC_UMULH_I =(OPC_OP(0x12) | OPC_FUNC(0x19)), ++ ++ OPC_CTPOP =(OPC_OP(0x10) | OPC_FUNC(0x58)), ++ OPC_CTLZ =(OPC_OP(0x10) | OPC_FUNC(0x59)), ++ OPC_CTTZ =(OPC_OP(0x10) | OPC_FUNC(0x5A)), ++ ++ OPC_ZAP =(OPC_OP(0x10) | OPC_FUNC(0x68)), ++ OPC_ZAP_I =(OPC_OP(0x12) | OPC_FUNC(0x68)), ++ OPC_ZAPNOT =(OPC_OP(0x10) | OPC_FUNC(0x69)), ++ OPC_ZAPNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x69)), ++ ++ OPC_SEXTB =(OPC_OP(0x10) | OPC_FUNC(0x6A)), ++ OPC_SEXTB_I =(OPC_OP(0x12) | OPC_FUNC(0x6A)), ++ OPC_SEXTH =(OPC_OP(0x10) | OPC_FUNC(0x6B)), ++ OPC_SEXTH_I =(OPC_OP(0x12) | OPC_FUNC(0x6B)), ++ ++ OPC_CMPEQ =(OPC_OP(0x10) | OPC_FUNC(0x28)), ++ OPC_CMPEQ_I =(OPC_OP(0x12) | OPC_FUNC(0x28)), ++ ++ OPC_CMPLT =(OPC_OP(0x10) | OPC_FUNC(0x29)), ++ OPC_CMPLT_I =(OPC_OP(0x12) | OPC_FUNC(0x29)), ++ OPC_CMPLE =(OPC_OP(0x10) | OPC_FUNC(0x2A)), ++ OPC_CMPLE_I =(OPC_OP(0x12) | OPC_FUNC(0x2A)), ++ ++ OPC_CMPULT =(OPC_OP(0x10) | OPC_FUNC(0x2B)), ++ OPC_CMPULT_I =(OPC_OP(0x12) | OPC_FUNC(0x2B)), ++ OPC_CMPULE =(OPC_OP(0x10) | OPC_FUNC(0x2C)), ++ OPC_CMPULE_I =(OPC_OP(0x12) | OPC_FUNC(0x2C)), 
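Every ALU entry in this table is composed from OPC_OP (bits 31..26) and OPC_FUNC (an 8-bit field at bits 12..5); the register numbers are filled in later by tcg_out_insn_simpleReg(). Putting the two together, a sketch of how a complete register-form word is assembled under this encoding:

    #include <stdint.h>

    #define OP(x)   (((uint32_t)(x) & 0x3f) << 26)
    #define FUNC(x) (((uint32_t)(x) & 0xff) << 5)

    /* ra -> bits 25..21, rb -> bits 20..16, rc (destination) -> bits 4..0,
     * mirroring tcg_out_insn_simpleReg() further down. */
    static uint32_t encode_alu_reg(uint32_t op, uint32_t func,
                                   unsigned ra, unsigned rb, unsigned rc)
    {
        return OP(op) | FUNC(func)
             | ((ra & 0x1f) << 21) | ((rb & 0x1f) << 16) | (rc & 0x1f);
    }
    /* e.g. encode_alu_reg(0x10, 0x8, 1, 2, 3) yields the ADDL word for
     * registers $1, $2 -> $3 under the table above. */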
++ ++ OPC_AND =(OPC_OP(0x10) | OPC_FUNC(0x38)), ++ OPC_BIC =(OPC_OP(0x10) | OPC_FUNC(0x39)), ++ OPC_BIS =(OPC_OP(0x10) | OPC_FUNC(0x3A)), ++ OPC_ORNOT =(OPC_OP(0x10) | OPC_FUNC(0x3B)), ++ OPC_XOR =(OPC_OP(0x10) | OPC_FUNC(0x3C)), ++ OPC_EQV =(OPC_OP(0x10) | OPC_FUNC(0x3D)), ++ ++ OPC_AND_I =(OPC_OP(0x12) | OPC_FUNC(0x38)), ++ OPC_BIC_I =(OPC_OP(0x12) | OPC_FUNC(0x39)), ++ OPC_BIS_I =(OPC_OP(0x12) | OPC_FUNC(0x3A)), ++ OPC_ORNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x3B)), ++ OPC_XOR_I =(OPC_OP(0x12) | OPC_FUNC(0x3C)), ++ OPC_EQV_I =(OPC_OP(0x12) | OPC_FUNC(0x3D)), ++ ++ OPC_SLL =(OPC_OP(0x10) | OPC_FUNC(0x48)), ++ OPC_SRL =(OPC_OP(0x10) | OPC_FUNC(0x49)), ++ OPC_SRA =(OPC_OP(0x10) | OPC_FUNC(0x4A)), ++ OPC_SLL_I =(OPC_OP(0x12) | OPC_FUNC(0x48)), ++ OPC_SRL_I =(OPC_OP(0x12) | OPC_FUNC(0x49)), ++ OPC_SRA_I =(OPC_OP(0x12) | OPC_FUNC(0x4A)), ++ ++ OPC_SELEQ =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x00)), ++ OPC_SELGE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x01)), ++ OPC_SELGT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x02)), ++ OPC_SELLE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x03)), ++ OPC_SELLT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x04)), ++ OPC_SELNE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x05)), ++ OPC_SELLBC =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x06)), ++ OPC_SELLBS =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x07)), ++ OPC_SELEQ_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x00)), ++ OPC_SELGE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x01)), ++ OPC_SELGT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x02)), ++ OPC_SELLE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x03)), ++ OPC_SELLT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x04)), ++ OPC_SELNE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x05)), ++ OPC_SELLBC_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x06)), ++ OPC_SELLBS_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x07)), ++ ++ OPC_INS0B =(OPC_OP(0x10) | OPC_FUNC(0x40)), ++ OPC_INS1B =(OPC_OP(0x10) | OPC_FUNC(0x41)), ++ OPC_INS2B =(OPC_OP(0x10) | OPC_FUNC(0x42)), ++ OPC_INS3B =(OPC_OP(0x10) | OPC_FUNC(0x43)), ++ OPC_INS4B =(OPC_OP(0x10) | OPC_FUNC(0x44)), ++ OPC_INS5B =(OPC_OP(0x10) | OPC_FUNC(0x45)), ++ OPC_INS6B =(OPC_OP(0x10) | OPC_FUNC(0x46)), ++ OPC_INS7B =(OPC_OP(0x10) | OPC_FUNC(0x47)), ++ OPC_INS0B_I =(OPC_OP(0x12) | OPC_FUNC(0x40)), ++ OPC_INS1B_I =(OPC_OP(0x12) | OPC_FUNC(0x41)), ++ OPC_INS2B_I =(OPC_OP(0x12) | OPC_FUNC(0x42)), ++ OPC_INS3B_I =(OPC_OP(0x12) | OPC_FUNC(0x43)), ++ OPC_INS4B_I =(OPC_OP(0x12) | OPC_FUNC(0x44)), ++ OPC_INS5B_I =(OPC_OP(0x12) | OPC_FUNC(0x45)), ++ OPC_INS6B_I =(OPC_OP(0x12) | OPC_FUNC(0x46)), ++ OPC_INS7B_I =(OPC_OP(0x12) | OPC_FUNC(0x47)), ++ ++ OPC_EXTLB =(OPC_OP(0x10) | OPC_FUNC(0x50)), ++ OPC_EXTLH =(OPC_OP(0x10) | OPC_FUNC(0x51)), ++ OPC_EXTLW =(OPC_OP(0x10) | OPC_FUNC(0x52)), ++ OPC_EXTLL =(OPC_OP(0x10) | OPC_FUNC(0x53)), ++ OPC_EXTHB =(OPC_OP(0x10) | OPC_FUNC(0x54)), ++ OPC_EXTHH =(OPC_OP(0x10) | OPC_FUNC(0x55)), ++ OPC_EXTHW =(OPC_OP(0x10) | OPC_FUNC(0x56)), ++ OPC_EXTHL =(OPC_OP(0x10) | OPC_FUNC(0x57)), ++ OPC_EXTLB_I =(OPC_OP(0x12) | OPC_FUNC(0x50)), ++ OPC_EXTLH_I =(OPC_OP(0x12) | OPC_FUNC(0x51)), ++ OPC_EXTLW_I =(OPC_OP(0x12) | OPC_FUNC(0x52)), ++ OPC_EXTLL_I =(OPC_OP(0x12) | OPC_FUNC(0x53)), ++ OPC_EXTHB_I =(OPC_OP(0x12) | OPC_FUNC(0x54)), ++ OPC_EXTHH_I =(OPC_OP(0x12) | OPC_FUNC(0x55)), ++ OPC_EXTHW_I =(OPC_OP(0x12) | OPC_FUNC(0x56)), ++ OPC_EXTHL_I =(OPC_OP(0x12) | OPC_FUNC(0x57)), ++ ++ OPC_MASKLB =(OPC_OP(0x10) | OPC_FUNC(0x60)), ++ OPC_MASKLH =(OPC_OP(0x10) | OPC_FUNC(0x61)), ++ OPC_MASKLW =(OPC_OP(0x10) | OPC_FUNC(0x62)), ++ OPC_MASKLL =(OPC_OP(0x10) | OPC_FUNC(0x63)), ++ OPC_MASKHB =(OPC_OP(0x10) | OPC_FUNC(0x64)), ++ OPC_MASKHH 
=(OPC_OP(0x10) | OPC_FUNC(0x65)), ++ OPC_MASKHW =(OPC_OP(0x10) | OPC_FUNC(0x66)), ++ OPC_MASKHL =(OPC_OP(0x10) | OPC_FUNC(0x67)), ++ OPC_MASKLB_I =(OPC_OP(0x12) | OPC_FUNC(0x60)), ++ OPC_MASKLH_I =(OPC_OP(0x12) | OPC_FUNC(0x61)), ++ OPC_MASKLW_I =(OPC_OP(0x12) | OPC_FUNC(0x62)), ++ OPC_MASKLL_I =(OPC_OP(0x12) | OPC_FUNC(0x63)), ++ OPC_MASKHB_I =(OPC_OP(0x12) | OPC_FUNC(0x64)), ++ OPC_MASKHH_I =(OPC_OP(0x12) | OPC_FUNC(0x65)), ++ OPC_MASKHW_I =(OPC_OP(0x12) | OPC_FUNC(0x66)), ++ OPC_MASKHL_I =(OPC_OP(0x12) | OPC_FUNC(0x67)), ++ ++ OPC_CNPGEB =(OPC_OP(0x10) | OPC_FUNC(0x6C)), ++ OPC_CNPGEB_I =(OPC_OP(0x12) | OPC_FUNC(0x6C)), ++ ++ OPC_MEMB =(OPC_OP(0x06) | OPC_FUNC(0x0)), ++ OPC_RTC =(OPC_OP(0x06) | OPC_FUNC(0x20)), ++ ++ /*float insn*/ ++ OPC_RFPCR = (OPC_OP(0x18) | OPC_FUNC(0x50)), ++ OPC_WFPCR = (OPC_OP(0x18) | OPC_FUNC(0x51)), ++ OPC_SETFPEC0 = (OPC_OP(0x18) | OPC_FUNC(0x54)), ++ OPC_SETFPEC1 = (OPC_OP(0x18) | OPC_FUNC(0x55)), ++ OPC_SETFPEC2 = (OPC_OP(0x18) | OPC_FUNC(0x56)), ++ OPC_SETFPEC3 = (OPC_OP(0x18) | OPC_FUNC(0x57)), ++ ++ ++ OPC_IFMOVS = (OPC_OP(0x18) | OPC_FUNC(0x40)), ++ OPC_IFMOVD = (OPC_OP(0x18) | OPC_FUNC(0x41)), ++ OPC_FIMOVS = (OPC_OP(0x10) | OPC_FUNC(0x70)), ++ OPC_FIMOVD = (OPC_OP(0x10) | OPC_FUNC(0x78)), ++ ++ /*translate S--D*/ ++ /*translate S/D--Long*/ ++ OPC_FCVTSD = (OPC_OP(0x18) | OPC_FUNC(0x20)), ++ OPC_FCVTDS = (OPC_OP(0x18) | OPC_FUNC(0x21)), ++ OPC_FCVTDL_G = (OPC_OP(0x18) | OPC_FUNC(0x22)), ++ OPC_FCVTDL_P = (OPC_OP(0x18) | OPC_FUNC(0x23)), ++ OPC_FCVTDL_Z = (OPC_OP(0x18) | OPC_FUNC(0x24)), ++ OPC_FCVTDL_N = (OPC_OP(0x18) | OPC_FUNC(0x25)), ++ OPC_FCVTDL = (OPC_OP(0x18) | OPC_FUNC(0x27)), ++ OPC_FCVTLS = (OPC_OP(0x18) | OPC_FUNC(0x2D)), ++ OPC_FCVTLD = (OPC_OP(0x18) | OPC_FUNC(0x2F)), ++ ++ ++ OPC_FADDS = (OPC_OP(0x18) | OPC_FUNC(0x00)), ++ OPC_FADDD = (OPC_OP(0x18) | OPC_FUNC(0x01)), ++ OPC_FSUBS = (OPC_OP(0x18) | OPC_FUNC(0x02)), ++ OPC_FSUBD = (OPC_OP(0x18) | OPC_FUNC(0x03)), ++ OPC_FMULS = (OPC_OP(0x18) | OPC_FUNC(0x04)), ++ OPC_FMULD = (OPC_OP(0x18) | OPC_FUNC(0x05)), ++ OPC_FDIVS = (OPC_OP(0x18) | OPC_FUNC(0x06)), ++ OPC_FDIVD = (OPC_OP(0x18) | OPC_FUNC(0x07)), ++ OPC_FSQRTS = (OPC_OP(0x18) | OPC_FUNC(0x08)), ++ OPC_FSQRTD = (OPC_OP(0x18) | OPC_FUNC(0x09)), ++}SW_64Insn; ++ ++static inline uint32_t tcg_in32(TCGContext *s) ++{ ++ uint32_t v = *(uint32_t *)s->code_ptr; ++ return v; ++} ++ ++/* ++ * SW instruction format of br(alias jump) ++ * insn = opcode[31,26]:Rd[25,21]:disp[20,0], ++ */ ++static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64) ++{ ++ tcg_debug_assert(imm64 <= 0xfffff && imm64 >= -0x100000); ++ tcg_out32(s, insn | (rd & 0x1f) << 21 | (imm64 & 0x1fffff)); ++} ++ ++/* ++ * SW instruction format of (load and store) ++ * insn = opcode[31,26]:rd[25,21]:rn[20,16]:disp[15,0] ++ */ ++static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16) ++{ ++ tcg_debug_assert(imm16 <= 0x7fff && imm16 >= -0x8000); ++ tcg_out32(s, insn | (rd & 0x1f) << 21 | (rn & 0x1f) << 16 | (imm16 & 0xffff)); ++} ++ ++/* ++ * SW instruction format of simple operator for Register ++ * insn = opcode[31,26]:rn(ra)[25,21]:rn(rb)[20,16]:Zeors[15,13]:function[12,5]:rd(rc)[4,0] ++ */ ++static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn,TCGReg rd, TCGReg rn, TCGReg rm) ++{ ++ tcg_out32(s, insn | (rn & 0x1f) << 21 | (rm & 0x1f) << 16 | (rd & 0x1f)); ++} ++ ++/* ++ * SW instruction format of simple operator for imm ++ * insn = opcode[31,26]:rn(ra)[25,21]:disp[20,13]:function[12,5]:rd(rc)[4,0] 
++ */ ++static void tcg_out_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64) ++{ ++ if (imm64 <= 0x7f && imm64 >= -0x80) { ++ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP3, imm64); ++ tcg_out_insn_simpleReg(s, insn_Reg, rd, rn, TCG_REG_TMP3); ++ } ++} ++ ++static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64) ++{ ++ tcg_debug_assert(imm64 <= 255); ++ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); ++} ++ ++/* ++ * sw bit operation: and bis etc ++ */ ++static void tcg_out_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64) ++{ ++ if (imm64 <= 255) { ++ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64); ++ tcg_out_insn_bitReg(s, insn_Reg, rd, rn, TCG_REG_TMP); ++ } ++} ++ ++/* ++ * SW instruction format of complex operator ++ * insn = opcode[31,26]:rd[25,21]:rn[20,16],function[15,10]:rm[9,5]:rx[4,0] ++ */ ++static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm) ++{ ++ tcg_out32(s, insn | (cond & 0x1f) << 21 | (rn & 0x1f) << 16 | (rm & 0x1f) << 5 | (rd & 0x1f)); ++} ++ ++static void tcg_out_insn_complexImm(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, intptr_t imm8, TCGReg rm) ++{ ++ tcg_out32(s, insn | (cond & 0x1f) << 21 | (imm8 & 0xff) << 13 | (rm & 0x1f) << 5 | (rd & 0x1f)); ++} ++ ++static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) ++{ ++ if (ext == TCG_TYPE_I64) { ++ tcg_out_insn_simpleReg(s, OPC_BIS, rd, rn, TCG_REG_ZERO); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); ++ } ++} ++ ++static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long orig) ++{ ++ tcg_target_long l0=0, l1=0, l2=0, l3=0, extra=0; ++ tcg_target_long val = orig; ++ TCGReg rs = TCG_REG_ZERO; ++ ++ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { ++ val = (int32_t)val;//val64bit ++ } ++ ++ if (orig == (int16_t)orig) { ++ tcg_out_insn_ldst(s, OPC_LDI, rd, TCG_REG_ZERO, (int16_t)orig); ++ return; ++ } ++ ++ if (orig == (uint8_t)orig) { ++ tcg_out_insn_simpleImm(s, OPC_BIS_I, rd, TCG_REG_ZERO, (uint8_t)orig); ++ return; ++ } ++ ++ if (type == TCG_TYPE_I32) { ++ val = (int32_t)val; ++ } ++ ++ l0 = (int16_t)val; ++ val = (val - l0) >> 16; ++ l1 = (int16_t)val; ++ ++ if (orig >> 31 == -1 || orig >> 31 == 0) { ++ if (l1 < 0 && orig >= 0) { ++ extra = 0x4000; ++ l1 = (int16_t)(val - 0x4000); ++ } ++ } else { ++ val = (val - l1) >> 16; ++ l2 = (int16_t)val; ++ val = (val - l2) >> 16; ++ l3 = (int16_t)val; ++ ++ if (l3) { ++ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l3); ++ rs = rd; ++ } ++ if (l2) { ++ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l2); ++ rs = rd; ++ } ++ if (l3 || l2) ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, rd, rd, 32); ++ } ++ ++ if (l1) { ++ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l1); ++ rs = rd; ++ } ++ ++ if (extra) { ++ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, extra); ++ rs = rd; ++ } ++ ++ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l0); ++ if (type == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) ++{ ++ if (ret == arg) { ++ return true; ++ } ++ switch (type) { ++ case TCG_TYPE_I32: ++ case 
TCG_TYPE_I64: ++ if (ret < 32 && arg < 32) { ++ tcg_out_movr(s, type, ret, arg); ++ break; ++ } else if (ret < 32) { ++ tcg_debug_assert(0); ++ break; ++ } else if (arg < 32) { ++ tcg_debug_assert(0); ++ break; ++ } ++ /* FALLTHRU */ ++ case TCG_TYPE_V64: ++ case TCG_TYPE_V128: ++ tcg_debug_assert(0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ return true; ++} ++ ++static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, ++ TCGReg rd, TCGReg rn) ++{ ++ /* ++ * Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 ++ * int bits = (8 << s_bits) - 1; ++ * tcg_out_sbfm(s, ext, rd, rn, 0, bits); ++ */ ++ switch (s_bits) { ++ case MO_8: ++ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rn); ++ break; ++ case MO_16: ++ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rn); ++ break; ++ case MO_32: ++ tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_ZERO); ++ break; ++ default: ++ tcg_debug_assert(0); ++ break; ++ } ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_sxt(s, type, MO_8, rd, rn); ++} ++ ++static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_sxt(s, type, MO_16, rd, rn); ++} ++ ++static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_sxt(s, TCG_TYPE_I64, MO_32, rd, rn); ++} ++ ++static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_ext32s(s, rd, rn); ++} ++ ++static inline void tcg_out_uxt(TCGContext *s, MemOp s_bits, ++ TCGReg rd, TCGReg rn) ++{ ++ /* ++ * Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 ++ * int bits = (8 << s_bits) - 1; ++ * tcg_out_sbfm(s, ext, rd, rn, 0, bits); ++ */ ++ switch (s_bits) { ++ case MO_8: ++ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rn); ++ break; ++ case MO_16: ++ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rn); ++ break; ++ case MO_32: ++ tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_ZERO); ++ break; ++ default: ++ tcg_debug_assert(0); ++ break; ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++} ++ ++static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_uxt(s, MO_8, rd, rn); ++} ++ ++static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_uxt(s, MO_16, rd, rn); ++} ++ ++static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_movr(s, TCG_TYPE_I32, rd, rn); ++} ++ ++static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_ext32u(s, rd, rn); ++} ++ ++static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_mov(s, TCG_TYPE_I32, rd, rn); ++} ++ ++/* ++ * counting heading/tailing zero numbers ++ */ ++static void tcg_out_ctz64(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b) ++{ ++ if (const_b && b == 64) { ++ if (opc == OPC_CTLZ) { ++ tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, rn); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, rn); ++ } ++ } else { ++ if (opc == OPC_CTLZ) { ++ tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, rn); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, rn); ++ } ++ if (const_b) { ++ if (b == -1) { ++ tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO); ++ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd); ++ } else if (b == 0) { ++ 
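/* ++ * A hedged note on the operand order, inferred from the complexReg ++ * encoder above: SELNE writes the count in TCG_REG_TMP2 to rd while ++ * the condition register (the original input rn) is non-zero, and the ++ * third operand otherwise; with $31 as that operand a zero input ++ * yields 0, matching TCG's convention that clz/ctz(x, b) returns b ++ * when x == 0. ++ */ ++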
tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, TCG_REG_ZERO); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, rd, b); ++ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd); ++ } ++ } else { ++ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, b); ++ } ++ } ++} ++ ++/* ++ * counting heading/tailing zero numbers ++ */ ++static void tcg_out_ctz32(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b) ++{ ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, rn, 0xf); ++ ++ if (const_b && b == 32) { ++ if (opc == OPC_CTLZ) { ++ tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, TCG_REG_TMP); ++ tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rd, 32); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, TCG_REG_TMP); ++ tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, rd, 32, rd); ++ } ++ } else { ++ if (opc == OPC_CTLZ) { ++ tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP); ++ tcg_out_insn_simpleImm(s, OPC_SUBW_I, TCG_REG_TMP2, TCG_REG_TMP2, 32); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP); ++ tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, TCG_REG_TMP2, 32, TCG_REG_TMP2); ++ } ++ if (const_b) { ++ if (b == -1) { ++ tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO); ++ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd); ++ } else if (b == 0) { ++ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, TCG_REG_ZERO); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I32, rd, b); ++ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd); ++ } ++ } else { ++ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, b); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++ } ++} ++ ++/* ++ * memory protect for order of (ld and st) ++ */ ++static void tcg_out_mb(TCGContext *s) ++{ ++ tcg_out32(s, OPC_MEMB); ++} ++ ++static inline void tcg_out_bswap16(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 1); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, 
OPC_EXTLB_I, TCG_REG_TMP, rn, 6); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); ++ } ++} ++ ++static void tcg_out_bswap32(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 3); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); ++ } ++} ++ ++static void tcg_out_bswap64(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) ++{ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 7); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); ++ ++ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, 
TCG_REG_TMP2, TCG_REG_TMP); ++} ++ ++static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len) ++{ ++ //get 000..111..0000 ++ tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb); ++ /* get rn[lsb, lsb+len-1]-->rd[lsb, lsb+len-1] */ ++ tcg_out_insn_bitReg(s, OPC_AND, rd, rn, TCG_REG_TMP); ++ ++ /* rd[lsb, lsb+len-1] --> rd[0, len-1] */ ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, rd, rd, lsb); ++} ++ ++static void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, int lsb, int len) ++{ ++ tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb); ++ ++ /* TCG_REG_TMP2 = rn[msb,lsb] */ ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, 64-len); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, TCG_REG_TMP2, 64-len-lsb); ++ ++ /* clear rd[msb,lsb] */ ++ tcg_out_insn_bitReg(s, OPC_BIC, rd, rd, TCG_REG_TMP); ++ /* rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0] */ ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, rd, TCG_REG_TMP2); ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm) ++{ ++ tcg_out_insn_simpleReg(s, OPC_UMULH, TCG_REG_TMP, rn, rm); ++ ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, 63); ++ tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rm); ++ tcg_out_insn_simpleReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP2); ++ ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, 63); ++ tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rn); ++ tcg_out_insn_simpleReg(s, OPC_SUBL, rd, TCG_REG_TMP, TCG_REG_TMP2); ++} ++ ++static void tcg_out_sar(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGArg a2, bool c2) ++{ ++ unsigned int bits = ext ? 64 : 32; ++ unsigned int max = bits - 1; ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleReg(s, OPC_ADDW, TCG_REG_TMP, rn, TCG_REG_ZERO); ++ ++ if (c2) { ++ tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, TCG_REG_TMP, a2 & max); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SRA, rd, TCG_REG_TMP, a2); ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } else { ++ if (c2) { ++ tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, rn, a2 & max); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SRA, rd, rn, a2); ++ } ++ } ++} ++ ++static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2) ++{ ++ return false; ++} ++ ++static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs, ++ tcg_target_long imm) ++{ ++ /* This function is only used for passing structs by reference. 
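++ * This backend passes every call argument by value, so the function ++ * should never be invoked; hence the assert below.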
*/ ++ g_assert_not_reached(); ++} ++ ++/* ++ * memory <=> Reg in (B H W L) bytes ++ */ ++static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign) ++{ ++ if (offset != sextract64(offset, 0, 15)) { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP2, offset); ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP2, TCG_REG_TMP2, rn); ++ tcg_out_insn_ldst(s, insn, rd, TCG_REG_TMP2, 0); ++ } else { ++ tcg_out_insn_ldst(s, insn, rd, rn, offset); ++ } ++ ++ switch (insn) { ++ case OPC_LDBU: ++ if (sign) ++ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rd); ++ break; ++ case OPC_LDHU: ++ if (sign) ++ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rd); ++ break; ++ case OPC_LDW: ++ if (!sign) ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ break; ++ default: ++ break; ++ } ++} ++ ++static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs) ++{ ++ switch (type) { ++ case TCG_TYPE_I32: ++ tcg_out_ldst(s, OPC_LDW, rd, rn, ofs, zeroExt); ++ break; ++ case TCG_TYPE_I64: ++ tcg_out_ldst(s, OPC_LDL, rd, rn, ofs, sigExt); ++ break; ++ case TCG_TYPE_V64: ++ case TCG_TYPE_V128: ++ tcg_debug_assert(0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void tcg_out_st(TCGContext *s, TCGType type, TCGReg rd,TCGReg rn, intptr_t ofs) ++{ ++ switch (type) { ++ case TCG_TYPE_I32: ++ tcg_out_ldst(s, OPC_STW, rd, rn, ofs, noPara); ++ break; ++ case TCG_TYPE_I64: ++ tcg_out_ldst(s, OPC_STL, rd, rn, ofs, noPara); ++ break; ++ case TCG_TYPE_V64: ++ case TCG_TYPE_V128: ++ tcg_debug_assert(0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void tcg_out_cond_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, TCGArg a, tcg_target_long b, bool const_b) ++{ ++ if (const_b && (b < 0 || b > 0xff)) { ++ tcg_out_movi(s, ext, TCG_REG_TMP2, b); ++ b = TCG_REG_TMP2; ++ const_b = 0; ++ } ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleReg(s, OPC_ADDW, a, a, TCG_REG_ZERO); ++ if (!const_b) { ++ tcg_out_insn_simpleReg(s, OPC_ADDW, b, b, TCG_REG_ZERO); ++ } else { ++ b = (int32_t)b; ++ } ++ } ++ ++ if (const_b) { ++ switch (cond) { ++ case TCG_COND_EQ: ++ case TCG_COND_NE: ++ tcg_out_insn_simpleImm(s, OPC_CMPEQ_I, ret, a, b); ++ break; ++ case TCG_COND_LT: ++ case TCG_COND_GE: ++ tcg_out_insn_simpleImm(s, OPC_CMPLT_I, ret, a, b); ++ break; ++ case TCG_COND_LE: ++ case TCG_COND_GT: ++ tcg_out_insn_simpleImm(s, OPC_CMPLE_I, ret, a, b); ++ break; ++ case TCG_COND_LTU: ++ case TCG_COND_GEU: ++ tcg_out_insn_simpleImm(s, OPC_CMPULT_I, ret, a, b); ++ break; ++ case TCG_COND_LEU: ++ case TCG_COND_GTU: ++ tcg_out_insn_simpleImm(s, OPC_CMPULE_I, ret, a, b); ++ break; ++ default: ++ tcg_debug_assert(0); ++ break; ++ } ++ } else { ++ switch (cond) { ++ case TCG_COND_EQ: ++ case TCG_COND_NE: ++ tcg_out_insn_simpleReg(s, OPC_CMPEQ, ret, a, b); ++ break; ++ case TCG_COND_LT: ++ case TCG_COND_GE: ++ tcg_out_insn_simpleReg(s, OPC_CMPLT, ret, a, b); ++ break; ++ case TCG_COND_LE: ++ case TCG_COND_GT: ++ tcg_out_insn_simpleReg(s, OPC_CMPLE, ret, a, b); ++ break; ++ case TCG_COND_LTU: ++ case TCG_COND_GEU: ++ tcg_out_insn_simpleReg(s, OPC_CMPULT, ret, a, b); ++ break; ++ case TCG_COND_LEU: ++ case TCG_COND_GTU: ++ tcg_out_insn_simpleReg(s, OPC_CMPULE, ret, a, b); ++ break; ++ default: ++ tcg_debug_assert(0); ++ break; ++ } ++ } ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a, a, 0xf); ++ if (!const_b) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, b, b, 0xf); ++ } ++ } ++ ++ 
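/* ++ * The ISA provides only eq/lt/le/ult/ule compares; for ne/ge/gt/geu/gtu ++ * the cases above emitted the complementary test, so the switch below ++ * flips the 0/1 result with XOR 1 to realize the requested condition. ++ */ ++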
switch (cond) { ++ case TCG_COND_NE: ++ case TCG_COND_GE: ++ case TCG_COND_GT: ++ case TCG_COND_GEU: ++ case TCG_COND_GTU: ++ tcg_out_insn_simpleImm(s, OPC_XOR_I, ret, ret, 0x1); ++ break; ++ case TCG_COND_ALWAYS: ++ case TCG_COND_NEVER: ++ tcg_debug_assert(0); ++ break; ++ default: ++ break; ++ } ++} ++ ++/* ++ * Step 1: tcg_out_cond_cmp() emits the same compare insn for "eq" and "ne" ++ * and leaves the 0/1 comparison result in TCG_REG_TMP. ++ * Step 2: branch on that result; when no compare was needed, the final ++ * if/else below distinguishes eq from ne by emitting BEQ vs BNE. ++ */ ++static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond cond, TCGArg a, tcg_target_long b, bool b_const, TCGLabel *l) ++{ ++ intptr_t offset; ++ bool need_cmp; ++ ++ if (b_const && b == 0 && (cond == TCG_COND_EQ || cond == TCG_COND_NE)) { ++ need_cmp = false; ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, a, 0xf); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP, a, TCG_REG_ZERO); ++ } ++ } else { ++ need_cmp = true; ++ tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a, b, b_const); ++ } ++ ++ if (!l->has_value) { ++ tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0); ++ offset = 0; /* emit "br $31, 0"; the relocation above patches the real target later */ ++ } else { ++ offset = tcg_pcrel_diff(s, l->u.value_ptr); ++ offset = offset - 4; ++ offset = offset >> 2; ++ tcg_debug_assert(offset == sextract64(offset, 0, 21)); ++ } ++ ++ if (need_cmp) { ++ tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, offset); ++ } else if (cond == TCG_COND_EQ) { ++ tcg_out_insn_br(s, OPC_BEQ, TCG_REG_TMP, offset); ++ } else { ++ tcg_out_insn_br(s, OPC_BNE, TCG_REG_TMP, offset); ++ } ++} ++ ++static void tcg_out_setcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, ++ TCGReg a, tcg_target_long b, bool const_b) ++{ ++ switch (cond) { ++ case TCG_COND_EQ: ++ case TCG_COND_LT: ++ case TCG_COND_LE: ++ case TCG_COND_LTU: ++ case TCG_COND_LEU: ++ case TCG_COND_NE: ++ case TCG_COND_GE: ++ case TCG_COND_GT: ++ case TCG_COND_GEU: ++ case TCG_COND_GTU: ++ tcg_out_cond_cmp(s, ext, cond, ret, a, b, const_b); ++ break; ++ default: ++ g_assert_not_reached(); ++ break; ++ } ++} ++ ++static void tcg_out_movcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, ++ TCGReg a1, tcg_target_long a2, bool const_b, TCGReg v1, TCGReg v2) ++{ ++ tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a1, a2, const_b); ++ tcg_out_insn_complexReg(s, OPC_SELLBS, TCG_REG_TMP, ret, v1, v2); ++} ++ ++static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs) ++{ ++ if (type <= TCG_TYPE_I64 && val == 0) { ++ tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); ++ return true; ++ } ++ return false; ++} ++ ++static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t imm64) ++{ ++ if (ext == TCG_TYPE_I64) { ++ if (imm64 >= 0) { ++ if (0 <= imm64 && imm64 <= 255) { ++ /* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */ ++ tcg_out_insn_simpleImm(s, OPC_ADDL_I, rd, rn, imm64); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64); ++ tcg_out_insn_simpleReg(s, OPC_ADDL, rd, rn, TCG_REG_TMP); ++ } ++ } else { ++ if (0 < -imm64 && -imm64 <= 255) { ++ /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */ ++ tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rn, -imm64); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, -imm64); ++ tcg_out_insn_simpleReg(s, OPC_SUBL, rd, rn, TCG_REG_TMP); ++ } ++ } ++ } else { ++ if (imm64 >= 0) { ++ if (0 <= imm64 && imm64 <= 255) { ++
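/* ++ * Why 255: the operate-format immediate field is only 8 bits wide ++ * (disp[20,13] in the format comment earlier in this file), so 0..255 ++ * encodes directly; wider constants are first materialized into a ++ * scratch register with tcg_out_movi(). ++ */ ++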
/* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */ ++ tcg_out_insn_simpleImm(s, OPC_ADDW_I, rd, rn, imm64); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, imm64); ++ tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_TMP); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++ } else { ++ if (0 < -imm64 && -imm64 <= 255) { ++ /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */ ++ tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rn, -imm64); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, -imm64); ++ tcg_out_insn_simpleReg(s, OPC_SUBW, rd, rn, TCG_REG_TMP); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++ } ++ } ++} ++ ++static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) ++{ ++ ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2; ++ tcg_debug_assert(offset == sextract64(offset, 0, 21)); ++ tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset); ++} ++ ++static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *target) ++{ ++ ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2; ++ if (offset == sextract64(offset, 0, 21)) { ++ tcg_out_insn_br(s, OPC_BSR, TCG_REG_RA, offset); ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); ++ tcg_out_insn_jump(s, OPC_CALL, TCG_REG_RA, TCG_REG_TMP, noPara); ++ } ++} ++ ++static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target, ++ const TCGHelperInfo *info) ++{ ++ tcg_out_call_int(s, target); ++} ++ ++static void modify_direct_addr(uintptr_t addr, uintptr_t jmp_rw, uintptr_t jmp_rx) ++{ ++ tcg_target_long l0=0, l1=0; ++ tcg_target_long val = addr; ++ TCGReg rs = TCG_REG_ZERO; ++ TCGReg rd = TCG_REG_TMP; ++ tcg_insn_unit i_nop=0, i1=0, i2=0; ++ uint64_t pair = 0; ++ i_nop = OPC_NOP; ++ uintptr_t jmp = jmp_rw; ++ ++ l0 = (int16_t)val; ++ val = (val - l0) >> 16; ++ l1 = (int16_t)val; ++ if (l1) { ++ i1 = OPC_LDIH | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l1 & 0xffff); ++ } else { ++ i1 = i_nop; ++ } ++ i2 = OPC_LDI | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l0 & 0xffff); ++ pair = (uint64_t)i1 << 32 | i2; ++ qatomic_set((uint64_t *)jmp, pair); ++ flush_idcache_range(jmp_rx, jmp_rw, 8); ++} ++ ++static const tcg_insn_unit *tb_ret_addr; ++ ++static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) ++{ ++ const tcg_insn_unit *target; ++ ptrdiff_t offset; ++ ++ /* Reuse the zeroing that exists for goto_ptr. */ ++ if (a0 == 0) { ++ target = tcg_code_gen_epilogue; ++ } else { ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); ++ target = tb_ret_addr; ++ } ++ ++ offset = tcg_pcrel_diff(s, target) >> 2; ++ if (offset == sextract64(offset, 0, 21)) { ++ tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset); ++ } else { ++ /* ++ * Only x16/x17 generate BTI type Jump (2), ++ * other registers generate BTI type Jump|Call (3). ++ */ ++ QEMU_BUILD_BUG_ON(TCG_REG_TMP1 != TCG_REG_X26); ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP1, (intptr_t)target); ++ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP1, noPara); ++ } ++} ++ ++static void tcg_out_goto_tb(TCGContext *s, int which) ++{ ++ /* ++ * Direct branch, or indirect address load, will be patched ++ * by tb_target_set_jmp_target. Assert indirect load offset ++ * in range early, regardless of direct branch distance. 
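++ * (A hedged note: the BR emitted below is a placeholder that the ++ * patching code rewrites into either a direct branch or, via ++ * modify_direct_addr(), an LDIH/LDI pair that loads the target.)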
++ */ ++ intptr_t i_off = tcg_pcrel_diff(s, (void *)get_jmp_target_addr(s, which)); ++ tcg_debug_assert(i_off == sextract64(i_off, 0, 21)); ++ ++ set_jmp_insn_offset(s, which); ++ tcg_out32(s, OPC_BR); ++ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, noPara); ++ set_jmp_reset_offset(s, which); ++} ++ ++void tb_target_set_jmp_target(const TranslationBlock *tb, int n, ++ uintptr_t jmp_rx, uintptr_t jmp_rw) ++{ ++ uintptr_t addr = tb->jmp_target_addr[n]; ++ tcg_insn_unit i1, i2; ++ uint64_t pair; ++ ++ ptrdiff_t offset = addr - jmp_rx - 4; ++ ++ if (offset == sextract64(offset, 0, 21)) { ++ i1 = OPC_BR | (TCG_REG_ZERO & 0x1f) << 21 | ((offset >> 2) & 0x1fffff); ++ i2 = OPC_NOP; ++ pair = (uint64_t)i2 << 32 | i1; ++ qatomic_set((uint64_t *)jmp_rw, pair); ++ flush_idcache_range(jmp_rx, jmp_rw, 8); ++ } else if (offset == sextract64(offset, 0, 32)) { ++ modify_direct_addr(addr, jmp_rw, jmp_rx); ++ } else { ++ /* Target further than 32 bits away; must not happen. */ ++ g_assert_not_reached(); ++ } ++} ++ ++static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l) ++{ ++ if (!l->has_value) { ++ tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0); ++ tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, 0); ++ } else { ++ tcg_out_goto(s, l->u.value_ptr); ++ } ++} ++ ++/* ++ * result: rd=rn(64,64-m]:rm(64-m,0] ++ * 1: rn(m,0]--->TCG_REG_TMP(64,64-m] ++ * 2: rm(64,64-m]--->rm(64-m,0] ++ * 3: rd=TCG_REG_TMP(64,64-m]:rm(64-m,0] ++ */ ++static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int m) ++{ ++ int bits = ext ? 64 : 32; ++ int max = bits - 1; ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max)); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, (m & max)); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++} ++ ++static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) ++{ ++ unsigned int bits = ext ? 64 : 32; ++ unsigned int max = bits - 1; ++ if (ext == TCG_TYPE_I64) { ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max)); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, (m & max)); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rd, bits - (m & max)); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rd, (m & max)); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm) ++{ ++ unsigned int bits = ext ? 64 : 32; ++ tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); ++ tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP); ++ ++ if (ext == TCG_TYPE_I64) { ++ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rn, TCG_REG_TMP); ++ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rn, rm); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); ++ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rd, TCG_REG_TMP); ++ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rd, rm); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) ++{ ++ unsigned int bits = ext ?
64 : 32; ++ unsigned int max = bits - 1; ++ ++ if (ext == TCG_TYPE_I64) { ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rn, bits -(m & max)); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, (m & max)); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rd, bits -(m & max)); ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rd, (m & max)); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm) ++{ ++ unsigned int bits = ext ? 64 : 32; ++ tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); ++ tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP); ++ ++ if (ext == TCG_TYPE_I64) { ++ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rn, TCG_REG_TMP); ++ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rn, rm); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); ++ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rd, TCG_REG_TMP); ++ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rd, rm); ++ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); ++ } ++} ++ ++static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) ++{ ++ ptrdiff_t offset = tcg_pcrel_diff(s, target); ++ tcg_debug_assert(offset == sextract64(offset, 0, 21)); ++ tcg_out_insn_br(s, OPC_BR, rd, 0); ++ tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rd, 4); ++ if (offset >= 0) { ++ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, rd, rd, offset); ++ } else { ++ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, rd, rd, -offset); ++ } ++} ++ ++typedef struct { ++ TCGReg base; ++ TCGReg index; ++ TCGType index_ext; ++ TCGAtomAlign aa; ++} HostAddress; ++ ++bool tcg_target_has_memory_bswap(MemOp memop) ++{ ++ return false; ++} ++ ++static const TCGLdstHelperParam ldst_helper_param = { ++ .ntmp = 1, .tmp = { TCG_REG_TMP } ++}; ++ ++static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) ++{ ++ MemOpIdx oi = lb->oi; ++ MemOp opc = get_memop(oi); ++ ++ if (!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { ++ return false; ++ } ++ ++ tcg_out_ld_helper_args(s, lb, &ldst_helper_param); ++ tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]); ++ tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param); ++ tcg_out_goto(s, lb->raddr); ++ return true; ++} ++ ++static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) ++{ ++ MemOpIdx oi = lb->oi; ++ MemOp opc = get_memop(oi); ++ ++ if (!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { ++ return false; ++ } ++ ++ tcg_out_st_helper_args(s, lb, &ldst_helper_param); ++ tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE]); ++ ++ tcg_out_goto(s, lb->raddr); ++ return true; ++} ++ ++ ++/* We expect to use a 7-bit scaled negative offset from ENV. */ ++#define MIN_TLB_MASK_TABLE_OFS -512 ++ ++/* These offsets are built into the LDP below. */ ++QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); ++QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); ++ ++/* ++ * For system-mode, perform the TLB load and compare. ++ * For user-mode, perform any required alignment tests. 
++ * In both cases, return a TCGLabelQemuLdst structure if the slow path ++ * is required and fill in @h with the host address for the fast path. ++*/ ++static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ++ TCGReg addr_reg, MemOpIdx oi, ++ bool is_ld) ++{ ++ TCGType addr_type = s->addr_type; ++ TCGLabelQemuLdst *ldst = NULL; ++ MemOp opc = get_memop(oi); ++ MemOp a_bits = get_alignment_bits(opc); ++ MemOp s_bits = opc & MO_SIZE; ++ unsigned a_mask; ++ ++ h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false); ++ a_mask = (1 << h->aa.align) - 1; ++ ++ if (tcg_use_softmmu) { ++ unsigned s_mask = (1u << s_bits) - 1; ++ unsigned mem_index = get_mmuidx(oi); ++ TCGReg addr_adj; ++ TCGType mask_type; ++ uint64_t compare_mask; ++ ++ ldst = new_ldst_label(s); ++ ldst->is_ld = is_ld; ++ ldst->oi = oi; ++ ldst->addrlo_reg = addr_reg; ++ ++ mask_type = (s->page_bits + s->tlb_dyn_max_bits > 32 ++ ? TCG_TYPE_I64 : TCG_TYPE_I32); ++ ++ /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ ++ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); ++ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); ++ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_TMP1, TCG_AREG0, tlb_mask_table_ofs(s, mem_index)); ++ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_TMP2, TCG_AREG0, tlb_mask_table_ofs(s, mem_index)+8); ++ ++ /* Extract the TLB index from the address into X0. */ ++ if (mask_type == TCG_TYPE_I64) { ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, addr_reg, s->page_bits - CPU_TLB_ENTRY_BITS); ++ tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP); ++ } else { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, addr_reg, 0xf); ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, s->page_bits - CPU_TLB_ENTRY_BITS); ++ tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP1, TCG_REG_TMP1, 0xf); ++ } ++ /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */ ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); ++ ++ /* Load the tlb comparator into X0, and the fast path addend into X1. */ ++ tcg_out_ld(s, addr_type, TCG_REG_TMP1, TCG_REG_TMP2, is_ld ++ ? offsetof(CPUTLBEntry, addr_read) ++ : offsetof(CPUTLBEntry, addr_write)); ++ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, ++ offsetof(CPUTLBEntry, addend)); ++ ++ /* ++ * For aligned accesses, we check the first byte and include ++ * the alignment bits within the address. For unaligned access, ++ * we check that we don't cross pages using the address of the ++ * last byte of the access. ++ */ ++ if (a_bits >= s_bits) { ++ addr_adj = addr_reg; ++ } else { ++ if (s_mask >= a_mask) { ++ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_TMP3, addr_reg, s_mask - a_mask); ++ } else { ++ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_TMP3, addr_reg, a_mask - s_mask); ++ } ++ ++ addr_adj = TCG_REG_TMP3; ++ } ++ compare_mask = (uint64_t)s->page_mask | a_mask; ++ ++ /* Store the page mask part of the address into X3. */ ++ tcg_out_bit(s, OPC_AND_I, OPC_AND, TCG_REG_TMP3, addr_adj, compare_mask); ++ ++ /* Perform the address comparison. */ ++ tcg_out_cond_cmp(s, 1, TCG_COND_NE, TCG_REG_TMP, TCG_REG_TMP1, TCG_REG_TMP3, 0); ++ ++ /* If not equal, we jump to the slow path. 
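++ * (The BGT below tests TCG_REG_TMP, which tcg_out_cond_cmp() above set ++ * to the 0/1 result of the TCG_COND_NE compare.)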
*/ ++ ldst->label_ptr[0] = s->code_ptr; ++ tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, 0); ++ ++ h->base = TCG_REG_TMP2; ++ h->index = addr_reg; ++ h->index_ext = addr_type; ++ } else { ++ if (a_mask) { ++ ldst = new_ldst_label(s); ++ ++ ldst->is_ld = is_ld; ++ ldst->oi = oi; ++ ldst->addrlo_reg = addr_reg; ++ ++ /* Test addr against the alignment mask; the result must land in ++ TCG_REG_TMP because the branch below tests that register. */ ++ tcg_out_bit(s, OPC_AND_I, OPC_AND, TCG_REG_TMP, addr_reg, a_mask); ++ ++ /* branch to the slow path if any low bits are set */ ++ ldst->label_ptr[0] = s->code_ptr; ++ tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, 0); ++ } ++ ++ if (guest_base || addr_type == TCG_TYPE_I32) { ++ h->base = TCG_REG_GUEST_BASE; ++ h->index = addr_reg; ++ h->index_ext = addr_type; ++ } else { ++ h->base = addr_reg; ++ h->index = TCG_REG_ZERO; ++ h->index_ext = TCG_TYPE_I64; ++ } ++ } ++ ++ return ldst; ++} ++ ++static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, ++ TCGReg data_r, HostAddress h) ++{ ++ if (h.index_ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, h.index, 0xf); ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, h.base, TCG_REG_TMP); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, h.base, h.index); ++ } ++ ++ const MemOp bswap = memop & MO_BSWAP; ++ ++ switch (memop & MO_SSIZE) { ++ case MO_UB: ++ tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, zeroExt); ++ break; ++ case MO_SB: ++ tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, sigExt); ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf); ++ } ++ break; ++ case MO_UW: ++ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt); ++ if (bswap) { ++ tcg_out_bswap16(s, ext, data_r, data_r); ++ } ++ break; ++ case MO_SW: ++ if (bswap) { ++ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt); ++ tcg_out_bswap16(s, ext, data_r, data_r); ++ tcg_out_insn_simpleReg(s, OPC_SEXTH, data_r, TCG_REG_ZERO, data_r); ++ } else { ++ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, sigExt); ++ } ++ ++ if (ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf); ++ } ++ break; ++ case MO_UL: ++ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt); ++ if (bswap) { ++ tcg_out_bswap32(s, ext, data_r, data_r); ++ } ++ break; ++ case MO_SL: ++ if (bswap) { ++ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt); ++ tcg_out_bswap32(s, ext, data_r, data_r); ++ tcg_out_insn_simpleReg(s, OPC_ADDW, data_r, data_r, TCG_REG_ZERO); ++ } else { ++ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, sigExt); ++ } ++ break; ++ case MO_UQ: ++ tcg_out_ldst(s, OPC_LDL, data_r, TCG_REG_TMP, 0, zeroExt); ++ if (bswap) { ++ tcg_out_bswap64(s, ext, data_r, data_r); ++ } ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, ++ MemOpIdx oi, TCGType data_type) ++{ ++ TCGLabelQemuLdst *ldst; ++ HostAddress h; ++ ++ ldst = prepare_host_addr(s, &h, addr_reg, oi, true); ++ tcg_out_qemu_ld_direct(s, get_memop(oi), data_type, data_reg, h); ++ ++ if (ldst) { ++ ldst->type = data_type; ++ ldst->datalo_reg = data_reg; ++ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); ++ } ++} ++ ++static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, ++ TCGReg data_r, HostAddress h) ++{ ++ if (h.index_ext == TCG_TYPE_I32) { ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, h.index, 0xf); ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, h.base, TCG_REG_TMP); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, h.base, h.index); ++ } ++ ++ const MemOp
bswap = memop & MO_BSWAP; ++ ++ switch (memop & MO_SIZE) { ++ case MO_8: ++ tcg_out_ldst(s, OPC_STB, data_r, TCG_REG_TMP, 0, 0); ++ break; ++ case MO_16: ++ if (bswap && data_r != TCG_REG_ZERO) { ++ tcg_out_bswap16(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r); ++ data_r = TCG_REG_TMP3; ++ } ++ tcg_out_ldst(s, OPC_STH, data_r, TCG_REG_TMP, 0, 0); ++ break; ++ case MO_32: ++ if (bswap && data_r != TCG_REG_ZERO) { ++ tcg_out_bswap32(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r); ++ data_r = TCG_REG_TMP3; ++ } ++ tcg_out_ldst(s, OPC_STW, data_r, TCG_REG_TMP, 0, 0); ++ break; ++ case MO_64: ++ if (bswap && data_r != TCG_REG_ZERO) { ++ tcg_out_bswap64(s, TCG_TYPE_I64, TCG_REG_TMP3, data_r); ++ data_r = TCG_REG_TMP3; ++ } ++ tcg_out_ldst(s, OPC_STL, data_r, TCG_REG_TMP, 0, 0); ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, ++ MemOpIdx oi, TCGType data_type) ++{ ++ TCGLabelQemuLdst *ldst; ++ HostAddress h; ++ ++ ldst = prepare_host_addr(s, &h, addr_reg, oi, false); ++ tcg_out_qemu_st_direct(s, get_memop(oi), data_reg, h); ++ ++ if(ldst) { ++ ldst->type = data_type; ++ ldst->datalo_reg = data_reg; ++ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); ++ } ++} ++ ++static void tcg_out_op(TCGContext *s, TCGOpcode opc, ++ const TCGArg args[TCG_MAX_OP_ARGS], ++ const int const_args[TCG_MAX_OP_ARGS]) ++{ ++ /* 99% of the time, we can signal the use of extension registers ++ * by looking to see if the opcode handles 64-bit data. */ ++ TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; ++ ++ /* Hoist the loads of the most common arguments. */ ++ TCGArg a0 = args[0]; ++ TCGArg a1 = args[1]; ++ TCGArg a2 = args[2]; ++ int c2 = const_args[2]; ++ ++ /* Some operands are defined with "rZ" constraint, a register or ++ * the zero register. These need not actually test args[I] == 0. 
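++ * (REG0(I) is presumably the usual helper defined earlier in this file, ++ * mapping a constant-zero argument onto $31; it is #undef-ed at the end ++ * of this function.)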
*/ ++ ++ switch (opc) { ++ case INDEX_op_goto_ptr: ++ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, a0, noPara); ++ break; ++ case INDEX_op_br: ++ tcg_out_goto_label(s, arg_label(a0)); ++ break; ++ case INDEX_op_ld8u_i32: ++ case INDEX_op_ld8u_i64: ++ tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 0); ++ break; ++ case INDEX_op_ld8s_i32: ++ tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_ld8s_i64: ++ tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1); ++ break; ++ case INDEX_op_ld16u_i32: ++ case INDEX_op_ld16u_i64: ++ tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 0); ++ break; ++ case INDEX_op_ld16s_i32: ++ tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_ld16s_i64: ++ tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1); ++ break; ++ case INDEX_op_ld_i32: ++ tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0); ++ break; ++ case INDEX_op_ld32u_i64: ++ tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0); ++ break; ++ case INDEX_op_ld32s_i64: ++ tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 1); ++ break; ++ case INDEX_op_ld_i64: ++ tcg_out_ldst(s, OPC_LDL, a0, a1, a2, 1); ++ break; ++ case INDEX_op_st8_i32: ++ case INDEX_op_st8_i64: ++ tcg_out_ldst(s, OPC_STB, REG0(0), a1, a2, 0); ++ break; ++ case INDEX_op_st16_i32: ++ case INDEX_op_st16_i64: ++ tcg_out_ldst(s, OPC_STH, REG0(0), a1, a2, 0); ++ break; ++ case INDEX_op_st_i32: ++ case INDEX_op_st32_i64: ++ tcg_out_ldst(s, OPC_STW, REG0(0), a1, a2, 0); ++ break; ++ case INDEX_op_st_i64: ++ tcg_out_ldst(s, OPC_STL, REG0(0), a1, a2, 0); ++ break; ++ case INDEX_op_add_i32: ++ a2 = (int32_t)a2; ++ if (c2) { ++ tcg_out_addsubi(s, ext, a0, a1, a2); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_ADDW, a0, a1, a2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ } ++ break; ++ case INDEX_op_add_i64: ++ if (c2) { ++ tcg_out_addsubi(s, ext, a0, a1, a2); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_sub_i32: ++ a2 = (int32_t)a2; ++ if (c2) { ++ tcg_out_addsubi(s, ext, a0, a1, -a2); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_SUBW, a0, a1, a2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ } ++ break; ++ case INDEX_op_sub_i64: ++ if (c2) { ++ tcg_out_addsubi(s, ext, a0, a1, -a2); ++ } else { ++ tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_neg_i32: ++ tcg_out_insn_bitReg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_neg_i64: ++ tcg_out_insn_bitReg(s, OPC_SUBL, a0, TCG_REG_ZERO, a1); ++ break; ++ case INDEX_op_and_i32: ++ if (c2) { ++ a2 = (int32_t)a2; ++ tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2); ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_and_i64: ++ if (c2) { ++ tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_andc_i32: ++ case INDEX_op_andc_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_or_i32: ++ if (c2) { ++ a2 = (int32_t)a2; ++ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2); ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_or_i64: ++ if (c2) { ++ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2); ++ } ++ break; ++ case 
INDEX_op_orc_i32: ++ if (c2) { ++ a2 = (int32_t)a2; ++ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2); ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_orc_i64: ++ if (c2) { ++ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_xor_i32: ++ if (c2) { ++ a2 = (int32_t)a2; ++ tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2); ++ } ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_xor_i64: ++ if (c2) { ++ tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_eqv_i32: ++ case INDEX_op_eqv_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_not_i32: ++ tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_not_i64: ++ tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1); ++ break; ++ case INDEX_op_mul_i32: ++ tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ break; ++ case INDEX_op_mul_i64: ++ tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2); ++ break; ++ case INDEX_op_div_i32: ++ case INDEX_op_div_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_divu_i32: ++ case INDEX_op_divu_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_rem_i32: ++ case INDEX_op_rem_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_remu_i32: ++ case INDEX_op_remu_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_shl_i32: /* sw logical left*/ ++ if (c2) { ++ unsigned int bits = ext ? 64 : 32; ++ unsigned int max = bits - 1; ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2); ++ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); ++ } ++ break; ++ case INDEX_op_shl_i64: ++ if (c2) { ++ unsigned int bits = ext ? 64 : 32; ++ unsigned int max = bits - 1; ++ tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_shr_i32: /* sw logical right */ ++ a2 = (int32_t)a2; ++ if (c2) { ++ int bits = ext ? 64 : 32; ++ int max = bits - 1; ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_shr_i64: ++ if (c2) { ++ int bits = ext ? 
64 : 32; ++ int max = bits - 1; ++ tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max); ++ } else { ++ tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_sar_i32: ++ a2 = (int32_t)a2; ++ tcg_out_sar(s, ext, a0, a1, a2, c2); ++ break; ++ case INDEX_op_sar_i64: /* sw arithmetic right*/ ++ tcg_out_sar(s, ext, a0, a1, a2, c2); ++ break; ++ case INDEX_op_rotr_i32: /* loop shift */ ++ case INDEX_op_rotr_i64: ++ if (c2) {/* loop right shift a2*/ ++ tcg_out_rotr_Imm(s, ext, a0, a1, a2); ++ } else { ++ tcg_out_rotr_Reg(s, ext, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_rotl_i32: /* loop shift */ ++ case INDEX_op_rotl_i64: /* sw */ ++ if (c2) {/* loop left shift a2*/ ++ tcg_out_rotl_Imm(s, ext, a0, a1, a2); ++ } else { ++ tcg_out_rotl_Reg(s, ext, a0, a1, a2); ++ } ++ break; ++ case INDEX_op_clz_i32: ++ tcg_out_ctz32(s, OPC_CTLZ, a0, a1, a2, c2); ++ break; ++ case INDEX_op_clz_i64: /* counting leading zero numbers */ ++ tcg_out_ctz64(s, OPC_CTLZ, a0, a1, a2, c2); ++ break; ++ case INDEX_op_ctz_i32: ++ tcg_out_ctz32(s, OPC_CTTZ, a0, a1, a2, c2); ++ break; ++ case INDEX_op_ctz_i64: /* counting tailing zero numbers */ ++ tcg_out_ctz64(s, OPC_CTTZ, a0, a1, a2, c2); ++ break; ++ case INDEX_op_brcond_i32: ++ case INDEX_op_brcond_i64: ++ tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3])); ++ break; ++ case INDEX_op_setcond_i32: ++ a2 = (int32_t)a2; ++ tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2); ++ break; ++ case INDEX_op_setcond_i64: ++ tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2); ++ break; ++ case INDEX_op_movcond_i32: ++ a2 = (int32_t)a2; ++ tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4)); ++ break; ++ case INDEX_op_movcond_i64: ++ tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4)); ++ break; ++ case INDEX_op_qemu_ld_a32_i32: ++ case INDEX_op_qemu_ld_a64_i32: ++ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32); ++ break; ++ case INDEX_op_qemu_ld_a32_i64: ++ case INDEX_op_qemu_ld_a64_i64: ++ tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64); ++ break; ++ case INDEX_op_qemu_st_a32_i32: ++ case INDEX_op_qemu_st_a64_i32: ++ tcg_out_qemu_st(s, REG0(0), a1, a2, TCG_TYPE_I32); ++ break; ++ case INDEX_op_qemu_st_a32_i64: ++ case INDEX_op_qemu_st_a64_i64: ++ tcg_out_qemu_st(s, REG0(0), a1, a2, TCG_TYPE_I64); ++ break; ++ case INDEX_op_bswap64_i64: ++ tcg_out_bswap64(s, ext, a0, a1); ++ break; ++ case INDEX_op_bswap32_i32: ++ case INDEX_op_bswap32_i64: ++ tcg_out_bswap32(s, ext, a0, a1); ++ break; ++ case INDEX_op_bswap16_i32: ++ case INDEX_op_bswap16_i64: ++ tcg_out_bswap16(s, ext, a0, a1); ++ break; ++ case INDEX_op_deposit_i32: ++ case INDEX_op_deposit_i64: ++ tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); ++ break; ++ case INDEX_op_extract_i32: ++ case INDEX_op_extract_i64: ++ tcg_out_extract(s, a0, a1, a2, args[3]); ++ break; ++ case INDEX_op_sextract_i32: ++ case INDEX_op_sextract_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_extract2_i32: /* extract REG0(2) right args[3] bit to REG0(1) left ,save to a0*/ ++ case INDEX_op_extract2_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_add2_i32: ++ case INDEX_op_add2_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_sub2_i32: ++ case INDEX_op_sub2_i64: ++ tcg_debug_assert(0); ++ break; ++ case INDEX_op_muluh_i64: ++ tcg_out_insn_simpleReg(s, OPC_UMULH, a0, a1, a2); ++ break; ++ case INDEX_op_mulsh_i64: ++ tcg_out_mulsh64(s, a0, a1, a2); ++ break; ++ case INDEX_op_mb: ++ tcg_out_mb(s); ++ break; ++ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. 
*/ ++ break; ++ case INDEX_op_mov_i64: ++ break; ++ case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */ ++ break; ++ case INDEX_op_call: /* Always emitted via tcg_out_call. */ ++ case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */ ++ case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */ ++ case INDEX_op_ext8s_i64: ++ case INDEX_op_ext8u_i32: ++ case INDEX_op_ext8u_i64: ++ case INDEX_op_ext16s_i64: ++ case INDEX_op_ext16s_i32: ++ case INDEX_op_ext16u_i64: ++ case INDEX_op_ext16u_i32: ++ case INDEX_op_ext32s_i64: ++ case INDEX_op_ext32u_i64: ++ case INDEX_op_ext_i32_i64: ++ case INDEX_op_extu_i32_i64: ++ case INDEX_op_extrl_i64_i32: ++ default: ++ g_assert_not_reached(); ++ } ++#undef REG0 ++} ++ ++static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) ++{ ++ switch (op) { ++ case INDEX_op_goto_ptr: ++ return C_O0_I1(r); ++ case INDEX_op_ld8u_i32: ++ case INDEX_op_ld8s_i32: ++ case INDEX_op_ld16u_i32: ++ case INDEX_op_ld16s_i32: ++ case INDEX_op_ld_i32: ++ case INDEX_op_ld8u_i64: ++ case INDEX_op_ld8s_i64: ++ case INDEX_op_ld16u_i64: ++ case INDEX_op_ld16s_i64: ++ case INDEX_op_ld32u_i64: ++ case INDEX_op_ld32s_i64: ++ case INDEX_op_ld_i64: ++ case INDEX_op_neg_i32: ++ case INDEX_op_neg_i64: ++ case INDEX_op_not_i32: ++ case INDEX_op_not_i64: ++ case INDEX_op_bswap16_i32: ++ case INDEX_op_bswap32_i32: ++ case INDEX_op_bswap16_i64: ++ case INDEX_op_bswap32_i64: ++ case INDEX_op_bswap64_i64: ++ case INDEX_op_ext8s_i32: ++ case INDEX_op_ext16s_i32: ++ case INDEX_op_ext8u_i32: ++ case INDEX_op_ext16u_i32: ++ case INDEX_op_ext8s_i64: ++ case INDEX_op_ext16s_i64: ++ case INDEX_op_ext32s_i64: ++ case INDEX_op_ext8u_i64: ++ case INDEX_op_ext16u_i64: ++ case INDEX_op_ext32u_i64: ++ case INDEX_op_ext_i32_i64: ++ case INDEX_op_extu_i32_i64: ++ case INDEX_op_extract_i32: ++ case INDEX_op_extract_i64: ++ case INDEX_op_sextract_i32: ++ case INDEX_op_sextract_i64: ++ return C_O1_I1(r, r); ++ case INDEX_op_st8_i32: ++ case INDEX_op_st16_i32: ++ case INDEX_op_st_i32: ++ case INDEX_op_st8_i64: ++ case INDEX_op_st16_i64: ++ case INDEX_op_st32_i64: ++ case INDEX_op_st_i64: ++ return C_O0_I2(rZ, r); ++ case INDEX_op_add_i32: ++ case INDEX_op_add_i64: ++ case INDEX_op_sub_i32: ++ case INDEX_op_sub_i64: ++ return C_O1_I2(r, r, rU); ++ case INDEX_op_setcond_i32: ++ case INDEX_op_setcond_i64: ++ return C_O1_I2(r, r, rU); ++ case INDEX_op_mul_i32: ++ case INDEX_op_mul_i64: ++ case INDEX_op_div_i32: ++ case INDEX_op_div_i64: ++ case INDEX_op_divu_i32: ++ case INDEX_op_divu_i64: ++ case INDEX_op_rem_i32: ++ case INDEX_op_rem_i64: ++ case INDEX_op_remu_i32: ++ case INDEX_op_remu_i64: ++ case INDEX_op_muluh_i64: ++ case INDEX_op_mulsh_i64: ++ return C_O1_I2(r, r, r); ++ case INDEX_op_and_i32: ++ case INDEX_op_and_i64: ++ case INDEX_op_or_i32: ++ case INDEX_op_or_i64: ++ case INDEX_op_xor_i32: ++ case INDEX_op_xor_i64: ++ case INDEX_op_andc_i32: ++ case INDEX_op_andc_i64: ++ case INDEX_op_orc_i32: ++ case INDEX_op_orc_i64: ++ case INDEX_op_eqv_i32: ++ case INDEX_op_eqv_i64: ++ return C_O1_I2(r, r, rU); ++ case INDEX_op_shl_i32: ++ case INDEX_op_shr_i32: ++ case INDEX_op_sar_i32: ++ case INDEX_op_rotl_i32: ++ case INDEX_op_rotr_i32: ++ case INDEX_op_shl_i64: ++ case INDEX_op_shr_i64: ++ case INDEX_op_sar_i64: ++ case INDEX_op_rotl_i64: ++ case INDEX_op_rotr_i64: ++ return C_O1_I2(r, r, ri); ++ case INDEX_op_clz_i32: ++ case INDEX_op_clz_i64: ++ return C_O1_I2(r, r, r); ++ case INDEX_op_ctz_i32: ++ case INDEX_op_ctz_i64: ++ return C_O1_I2(r, r, r); ++ case INDEX_op_brcond_i32: 
++ case INDEX_op_brcond_i64: ++ return C_O0_I2(r, rU); ++ case INDEX_op_movcond_i32: ++ case INDEX_op_movcond_i64: ++ return C_O1_I4(r, r, rU, rZ, rZ); ++ case INDEX_op_qemu_ld_a32_i32: ++ case INDEX_op_qemu_ld_a64_i32: ++ case INDEX_op_qemu_ld_a32_i64: ++ case INDEX_op_qemu_ld_a64_i64: ++ return C_O1_I1(r, r); ++ case INDEX_op_qemu_st_a32_i32: ++ case INDEX_op_qemu_st_a64_i32: ++ case INDEX_op_qemu_st_a32_i64: ++ case INDEX_op_qemu_st_a64_i64: ++ return C_O0_I2(rZ, r); ++ case INDEX_op_deposit_i32: ++ case INDEX_op_deposit_i64: ++ return C_O1_I2(r, 0, rZ); ++ case INDEX_op_extract2_i32: ++ case INDEX_op_extract2_i64: ++ return C_O1_I2(r, rZ, rZ); ++ case INDEX_op_add2_i32: ++ case INDEX_op_add2_i64: ++ case INDEX_op_sub2_i32: ++ case INDEX_op_sub2_i64: ++ return C_O2_I4(r, r, rZ, rZ, rA, rMZ); ++ case INDEX_op_add_vec: ++ case INDEX_op_sub_vec: ++ case INDEX_op_mul_vec: ++ case INDEX_op_xor_vec: ++ case INDEX_op_ssadd_vec: ++ case INDEX_op_sssub_vec: ++ case INDEX_op_usadd_vec: ++ case INDEX_op_ussub_vec: ++ case INDEX_op_smax_vec: ++ case INDEX_op_smin_vec: ++ case INDEX_op_umax_vec: ++ case INDEX_op_umin_vec: ++ case INDEX_op_shlv_vec: ++ case INDEX_op_shrv_vec: ++ case INDEX_op_sarv_vec: ++ return C_O1_I2(w, w, w); ++ case INDEX_op_not_vec: ++ case INDEX_op_neg_vec: ++ case INDEX_op_abs_vec: ++ case INDEX_op_shli_vec: ++ case INDEX_op_shri_vec: ++ case INDEX_op_sari_vec: ++ return C_O1_I1(w, w); ++ case INDEX_op_ld_vec: ++ case INDEX_op_dupm_vec: ++ return C_O1_I1(w, r); ++ case INDEX_op_st_vec: ++ return C_O0_I2(w, r); ++ case INDEX_op_dup_vec: ++ return C_O1_I1(w, wr); ++ case INDEX_op_or_vec: ++ case INDEX_op_andc_vec: ++ return C_O1_I2(w, w, wO); ++ case INDEX_op_and_vec: ++ case INDEX_op_orc_vec: ++ return C_O1_I2(w, w, wN); ++ case INDEX_op_cmp_vec: ++ return C_O1_I2(w, w, wZ); ++ case INDEX_op_bitsel_vec: ++ return C_O1_I3(w, w, w, w); ++ default: ++ g_assert_not_reached(); ++ } ++} ++ ++static void tcg_out_tb_start(TCGContext *s) ++{ ++ /* nothing to do */ ++} ++ ++ ++static void tcg_target_init(TCGContext *s) ++{ ++ tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; ++ tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; ++ tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; ++ tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; ++ tcg_target_call_clobber_regs = -1ull; ++ ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X9); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X10); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X11); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X12); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X13); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X14); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X15); ++ ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F2); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F3); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F4); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F5); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F6); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F7); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F8); ++ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F9); ++ ++ s->reserved_regs = 0; ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); ++ tcg_regset_set_reg(s->reserved_regs, 
TCG_REG_TMP); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP3); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_X29); ++ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP); ++ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP2); ++} ++ ++ ++#define PUSH_SIZE ((15-9+1+1) * 8) ++#define FRAME_SIZE \ ++ ((PUSH_SIZE \ ++ + TCG_STATIC_CALL_ARGS_SIZE \ ++ + CPU_TEMP_BUF_NLONGS * sizeof(long) \ ++ + TCG_TARGET_STACK_ALIGN - 1) \ ++ & ~(TCG_TARGET_STACK_ALIGN - 1)) ++ ++ ++/* We're expecting a 2 byte uleb128 encoded value. */ ++QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); ++ ++/* We're expecting to use a single ADDI insn. */ ++QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); ++ ++static void tcg_target_qemu_prologue(TCGContext *s) ++{ ++ TCGReg r; ++ ++ /* allocate space for all saved registers */ ++ /* subl $sp,PUSH_SIZE,$sp */ ++ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE); ++ ++ /* Push (FP, LR) */ ++ /* stl $fp,0($sp) */ ++ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_FP, TCG_REG_SP, 0); ++ /* stl $26,8($sp) */ ++ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_RA, TCG_REG_SP, 8); ++ ++ ++ /* Set up frame pointer for canonical unwinding. */ ++ /* TCG_REG_FP=TCG_REG_SP */ ++ tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); ++ ++ /* Store callee-preserved regs x9..x14. */ ++ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1){ ++ int ofs = (r - TCG_REG_X9 + 2) * 8; ++ tcg_out_insn_ldst(s, OPC_STL, r, TCG_REG_SP, ofs); ++ } ++ ++ /* Make stack space for TCG locals. */ ++ /* subl $sp,FRAME_SIZE-PUSH_SIZE,$sp */ ++ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); ++ ++ /* Inform TCG about how to find TCG locals with register, offset, size. */ ++ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, ++ CPU_TEMP_BUF_NLONGS * sizeof(long)); ++ ++ if (!tcg_use_softmmu) { ++ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); ++ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); ++ } ++ ++ /* TCG_AREG0=tcg_target_call_iarg_regs[0], on sw, we mov $16 to $9 */ ++ tcg_out_mov(s, TCG_TYPE_I64, TCG_AREG0, tcg_target_call_iarg_regs[0]); ++ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], noPara); ++ ++ /* ++ * Return path for goto_ptr. Set return value to 0, a-la exit_tb, ++ * and fall through to the rest of the epilogue. ++ */ ++ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); ++ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, 0); ++ ++ /* TB epilogue */ ++ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); ++ ++ /* Remove TCG locals stack space. */ ++ /* addl $sp,FRAME_SIZE-PUSH_SIZE,$sp */ ++ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); ++ ++ /* Restore registers x9..x14. */ ++ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1) { ++ int ofs = (r - TCG_REG_X9 + 2) * 8; ++ tcg_out_insn_ldst(s, OPC_LDL, r, TCG_REG_SP, ofs); ++ } ++ ++ /* Pop (FP, LR) */ ++ /* ldl $fp,0($sp) */ ++ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_FP, TCG_REG_SP, 0); ++ /* ldl $26,8($sp) */ ++ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_RA, TCG_REG_SP, 8); ++ ++ /* restore SP to previous frame. 
*/ ++ /* addl $sp,PUSH_SIZE,$sp */ ++ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE); ++ ++ tcg_out_insn_jump(s, OPC_RET, TCG_REG_ZERO, TCG_REG_RA, noPara); ++} ++ ++static void tcg_out_nop_fill(tcg_insn_unit *p, int count) ++{ ++ int i; ++ for (i = 0; i < count; ++i) { ++ p[i] = OPC_NOP; ++ } ++} ++ ++typedef struct { ++ DebugFrameHeader h; ++ uint8_t fde_def_cfa[4]; ++ uint8_t fde_reg_ofs[8 * 2]; ++} DebugFrame; ++ ++/* ++ * GDB doesn't appear to require proper setting of ELF_HOST_FLAGS, ++ * which is good because they're really quite complicated for SW64. ++ */ ++#define ELF_HOST_MACHINE EM_SW_64 ++ ++static const DebugFrame debug_frame = { ++ .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ ++ .h.cie.id = -1, ++ .h.cie.version = 1, ++ .h.cie.code_align = 1, ++ .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ ++ .h.cie.return_column = TCG_REG_RA, ++ ++ /* Total FDE size does not include the "len" member. */ ++ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), ++ ++ .fde_def_cfa = { ++ 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ ++ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ ++ (FRAME_SIZE >> 7) ++ }, ++ .fde_reg_ofs = { ++ 0x80 + 14, 1, /* DW_CFA_offset, */ ++ 0x80 + 13, 2, /* DW_CFA_offset, */ ++ 0x80 + 12, 3, /* DW_CFA_offset, */ ++ 0x80 + 11, 4, /* DW_CFA_offset, */ ++ 0x80 + 10, 5, /* DW_CFA_offset, */ ++ 0x80 + 9, 6, /* DW_CFA_offset, */ ++ 0x80 + 26, 7, /* DW_CFA_offset, ra, -24 */ ++ 0x80 + 15, 8, /* DW_CFA_offset, fp, -8 */ ++ } ++}; ++ ++void tcg_register_jit(const void *buf, size_t buf_size) ++{ ++ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); ++} +diff --git a/tcg/sw64/tcg-target.h b/tcg/sw64/tcg-target.h +new file mode 100755 +index 0000000000..20e0ba0072 +--- /dev/null ++++ b/tcg/sw64/tcg-target.h +@@ -0,0 +1,137 @@ ++/* ++ * Initial TCG Implementation for sw_64 ++ * ++ */ ++ ++#ifndef SW_64_TCG_TARGET_H ++#define SW_64_TCG_TARGET_H ++ ++#define TCG_TARGET_INSN_UNIT_SIZE 4 ++ ++typedef enum { ++ TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, ++ TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, ++ TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, ++ TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, ++ TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, ++ TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, ++ TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, ++ TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, TCG_REG_X31, ++ ++ TCG_REG_F0=32, TCG_REG_F1, TCG_REG_F2, TCG_REG_F3, ++ TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7, ++ TCG_REG_F8, TCG_REG_F9, TCG_REG_F10, TCG_REG_F11, ++ TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15, ++ TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19, ++ TCG_REG_F20, TCG_REG_F21, TCG_REG_F22, TCG_REG_F23, ++ TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27, ++ TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, TCG_REG_F31, ++ ++ /* Aliases. 
*/ ++ TCG_REG_FP = TCG_REG_X15, ++ TCG_REG_RA = TCG_REG_X26, ++ TCG_REG_GP = TCG_REG_X29, ++ TCG_REG_SP = TCG_REG_X30, ++ TCG_REG_ZERO = TCG_REG_X31, ++ TCG_AREG0 = TCG_REG_X9, ++} TCGReg; ++ ++#define TCG_TARGET_NB_REGS 64 ++#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) ++ ++/* used for function call generation */ ++#define TCG_REG_CALL_STACK TCG_REG_SP ++#define TCG_TARGET_STACK_ALIGN 16 ++#define TCG_TARGET_CALL_ALIGN_ARGS 1 /*luo*/ ++#define TCG_TARGET_CALL_STACK_OFFSET 0 /*luo*/ ++#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL ++#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL ++#define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL ++#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL ++ ++/* optional instructions */ ++#define TCG_TARGET_HAS_neg_i64 1 ++#define TCG_TARGET_HAS_direct_jump 0 ++#define TCG_TARGET_HAS_goto_ptr 1 ++#define TCG_TARGET_HAS_qemu_st8_i32 0 ++#define TCG_TARGET_HAS_not_i32 1 ++#define TCG_TARGET_HAS_neg_i32 1 ++#define TCG_TARGET_HAS_div_i32 1 ++#define TCG_TARGET_HAS_movcond_i32 1 ++#define TCG_TARGET_HAS_rem_i32 0 ++#define TCG_TARGET_HAS_rot_i32 1 ++#define TCG_TARGET_HAS_deposit_i32 1 ++#define TCG_TARGET_HAS_extract_i32 1 ++#define TCG_TARGET_HAS_sextract_i32 0 ++#define TCG_TARGET_HAS_extract2_i32 0 ++#define TCG_TARGET_HAS_negsetcond_i32 0 ++#define TCG_TARGET_HAS_add2_i32 0 ++#define TCG_TARGET_HAS_sub2_i32 0 ++#define TCG_TARGET_HAS_sub2_i32 0 ++#define TCG_TARGET_HAS_mulu2_i32 0 ++#define TCG_TARGET_HAS_muluh_i32 0 ++#define TCG_TARGET_HAS_muls2_i32 0 ++#define TCG_TARGET_HAS_not_i32 1 ++#define TCG_TARGET_HAS_mulsh_i32 0 ++#define TCG_TARGET_HAS_ext8s_i32 0 ++#define TCG_TARGET_HAS_ext16s_i32 0 ++#define TCG_TARGET_HAS_ext8u_i32 1 ++#define TCG_TARGET_HAS_ext16u_i32 1 ++#define TCG_TARGET_HAS_bswap16_i32 0 ++#define TCG_TARGET_HAS_bswap32_i32 0 ++#define TCG_TARGET_HAS_andc_i32 0 ++#define TCG_TARGET_HAS_eqv_i32 0 ++#define TCG_TARGET_HAS_nand_i32 0 ++#define TCG_TARGET_HAS_nor_i32 0 ++#define TCG_TARGET_HAS_clz_i32 0 ++#define TCG_TARGET_HAS_ctz_i32 0 ++#define TCG_TARGET_HAS_orc_i32 0 ++#define TCG_TARGET_HAS_ctpop_i32 0 ++#define TCG_TARGET_HAS_movcond_i64 1 ++#define TCG_TARGET_HAS_div_i64 1 ++#define TCG_TARGET_HAS_rem_i64 0 ++#define TCG_TARGET_HAS_div2_i64 0 ++#define TCG_TARGET_HAS_rot_i64 1 ++#define TCG_TARGET_HAS_deposit_i64 1 ++#define TCG_TARGET_HAS_extract_i64 1 ++#define TCG_TARGET_HAS_sextract_i64 0 ++#define TCG_TARGET_HAS_extract2_i64 0 ++#define TCG_TARGET_HAS_negsetcond_i64 0 ++#define TCG_TARGET_HAS_extr_i64_i32 0 ++#define TCG_TARGET_HAS_extrl_i64_i32 0 ++#define TCG_TARGET_HAS_extrh_i64_i32 0 ++#define TCG_TARGET_HAS_ext8s_i64 0 ++#define TCG_TARGET_HAS_ext16s_i64 0 ++#define TCG_TARGET_HAS_ext32s_i64 1 ++#define TCG_TARGET_HAS_ext8u_i64 1 ++#define TCG_TARGET_HAS_ext16u_i64 1 ++#define TCG_TARGET_HAS_ext32u_i64 1 ++#define TCG_TARGET_HAS_bswap16_i64 0 ++#define TCG_TARGET_HAS_bswap32_i64 0 ++#define TCG_TARGET_HAS_bswap64_i64 0 ++#define TCG_TARGET_HAS_not_i64 1 ++#define TCG_TARGET_HAS_andc_i64 0 ++#define TCG_TARGET_HAS_orc_i64 1 ++#define TCG_TARGET_HAS_eqv_i64 0 ++#define TCG_TARGET_HAS_nand_i64 0 ++#define TCG_TARGET_HAS_nor_i64 0 ++#define TCG_TARGET_HAS_clz_i64 1 ++#define TCG_TARGET_HAS_ctz_i64 1 ++#define TCG_TARGET_HAS_ctpop_i64 0 ++#define TCG_TARGET_HAS_add2_i64 0 ++#define TCG_TARGET_HAS_sub2_i64 0 ++#define TCG_TARGET_HAS_mulu2_i64 0 ++#define TCG_TARGET_HAS_muls2_i64 0 ++#define TCG_TARGET_HAS_muluh_i64 1 ++#define TCG_TARGET_HAS_mulsh_i64 1 ++ ++#define TCG_TARGET_HAS_qemu_ldst_i128 0 ++ ++#define 
TCG_TARGET_DEFAULT_MO (0) ++#define TCG_TARGET_HAS_MEMORY_BSWAP 0 ++/* optional instructions */ ++#ifdef CONFIG_SOFTMMU ++#define TCG_TARGET_NEED_LDST_LABELS ++#endif ++#define TCG_TARGET_NEED_POOL_LABELS ++#endif /* SW_64_TCG_TARGET_H */ +diff --git a/tcg/sw64/tcg-target.opc.h b/tcg/sw64/tcg-target.opc.h +new file mode 100755 +index 0000000000..bce30accd9 +--- /dev/null ++++ b/tcg/sw64/tcg-target.opc.h +@@ -0,0 +1,15 @@ ++/* ++ * Copyright (c) 2019 Linaro ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or ++ * (at your option) any later version. ++ * ++ * See the COPYING file in the top-level directory for details. ++ * ++ * Target-specific opcodes for host vector expansion. These will be ++ * emitted by tcg_expand_vec_op. For those familiar with GCC internals, ++ * consider these to be UNSPEC with names. ++ */ ++ ++DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC) ++DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC) +-- +2.43.5 + diff --git a/0286-virtio-snd-add-max-size-bounds-check-in-input-cb.patch b/0286-virtio-snd-add-max-size-bounds-check-in-input-cb.patch new file mode 100644 index 0000000..2f3fe30 --- /dev/null +++ b/0286-virtio-snd-add-max-size-bounds-check-in-input-cb.patch @@ -0,0 +1,61 @@ +From ee4c973ea162a299fbb18686d1f0a19670544854 Mon Sep 17 00:00:00 2001 +From: Manos Pitsidianakis +Date: Mon, 8 Jul 2024 10:09:49 +0300 +Subject: [PATCH] virtio-snd: add max size bounds check in input cb +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 98e77e3dd8dd6e7aa9a7dffa60f49c8c8a49d4e3 upstream + +When reading input audio in the virtio-snd input callback, +virtio_snd_pcm_in_cb(), we do not check whether the iov can actually fit +the data buffer. This is because we use the buffer->size field as a +total-so-far accumulator instead of byte-size-left like in TX buffers. + +This triggers an out of bounds write if the size of the virtio queue +element is equal to virtio_snd_pcm_status, which makes the available +space for audio data zero. This commit adds a check for reaching the +maximum buffer size before attempting any writes. + +Reported-by: Zheyu Ma +Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2427 +Signed-off-by: Manos Pitsidianakis +Message-Id: +Reviewed-by: Philippe Mathieu-Daudé +Reviewed-by: Michael S. Tsirkin +Signed-off-by: Michael S. 
Tsirkin +Signed-off-by: Xuchun Shang +--- + hw/audio/virtio-snd.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c +index 137fa77a01..15986af41e 100644 +--- a/hw/audio/virtio-snd.c ++++ b/hw/audio/virtio-snd.c +@@ -1274,7 +1274,7 @@ static void virtio_snd_pcm_in_cb(void *data, int available) + { + VirtIOSoundPCMStream *stream = data; + VirtIOSoundPCMBuffer *buffer; +- size_t size; ++ size_t size, max_size; + + WITH_QEMU_LOCK_GUARD(&stream->queue_mutex) { + while (!QSIMPLEQ_EMPTY(&stream->queue)) { +@@ -1288,7 +1288,12 @@ static void virtio_snd_pcm_in_cb(void *data, int available) + continue; + } + ++ max_size = iov_size(buffer->elem->in_sg, buffer->elem->in_num); + for (;;) { ++ if (buffer->size >= max_size) { ++ return_rx_buffer(stream, buffer); ++ break; ++ } + size = AUD_read(stream->voice.in, + buffer->data + buffer->size, + MIN(available, (stream->params.period_bytes - +-- +2.43.5 + diff --git a/0287-virtio-snd-check-for-invalid-param-shift-operands.patch b/0287-virtio-snd-check-for-invalid-param-shift-operands.patch new file mode 100644 index 0000000..b749b99 --- /dev/null +++ b/0287-virtio-snd-check-for-invalid-param-shift-operands.patch @@ -0,0 +1,56 @@ +From 040ca8080f6bc91ae676ba5c5abcc20edf863487 Mon Sep 17 00:00:00 2001 +From: Manos Pitsidianakis +Date: Thu, 11 Jul 2024 10:38:49 +0300 +Subject: [PATCH] virtio-snd: check for invalid param shift operands +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 9b6083465fb8311f2410615f8303a41f580a2a20 upstream + +When setting the parameters of a PCM stream, we compute the bit flag +with the format and rate values as shift operand to check if they are +set in supported_formats and supported_rates. + +If the guest provides a format/rate value which when shifting 1 results +in a value bigger than the number of bits in +supported_formats/supported_rates, we must report an error. + +Previously, this ended up triggering the not reached assertions later +when converting to internal QEMU values. + +Reported-by: Zheyu Ma +Resolves: https://gitlab.com/qemu-project/qemu/-/issues/2416 +Signed-off-by: Manos Pitsidianakis +Message-Id: +Reviewed-by: Philippe Mathieu-Daudé +Reviewed-by: Michael S. Tsirkin +Signed-off-by: Michael S. 
Tsirkin
+Signed-off-by: Xuchun Shang
+---
+ hw/audio/virtio-snd.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/hw/audio/virtio-snd.c b/hw/audio/virtio-snd.c
+index 15986af41e..3ac5f78714 100644
+--- a/hw/audio/virtio-snd.c
++++ b/hw/audio/virtio-snd.c
+@@ -281,11 +281,13 @@ uint32_t virtio_snd_set_pcm_params(VirtIOSound *s,
+ error_report("Number of channels is not supported.");
+ return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
+ }
+- if (!(supported_formats & BIT(params->format))) {
++ if (BIT(params->format) > sizeof(supported_formats) ||
++ !(supported_formats & BIT(params->format))) {
+ error_report("Stream format is not supported.");
+ return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
+ }
+- if (!(supported_rates & BIT(params->rate))) {
++ if (BIT(params->rate) > sizeof(supported_rates) ||
++ !(supported_rates & BIT(params->rate))) {
+ error_report("Stream rate is not supported.");
+ return cpu_to_le32(VIRTIO_SND_S_NOT_SUPP);
+ }
+--
+2.43.5
+
diff --git a/0288-add-support-for-the-virtcca-cvm-feature.patch b/0288-add-support-for-the-virtcca-cvm-feature.patch
new file mode 100644
index 0000000..de25caa
--- /dev/null
+++ b/0288-add-support-for-the-virtcca-cvm-feature.patch
@@ -0,0 +1,1045 @@
+From 5b31db2c6bcf1a2fc01f17b7510e9e8c6ac4acf0 Mon Sep 17 00:00:00 2001
+From: liupingwei
+Date: Fri, 2 Aug 2024 11:55:43 +0800
+Subject: [PATCH] Add support for the virtcca cvm feature.
+
+With this commit, we can append the new startup parameters "cma=64M
+cvm_guest=1" and "kvm_type=cvm" to use the virtcca cvm feature.
+Here is a full example of the append parameters for a cvm:
+-M virt,gic-version=3,accel=kvm,kernel_irqchip=on,kvm_type=cvm \
+-append "swiotlb=force console=tty0 console=ttyAMA0 kaslr.disabled=1
+cma=64M cvm_guest=1 rodata=off rootfstype=ext4 root=/dev/vad rw" \
+
+Additionally, the SVE and PMU are optional configurations for a cvm; here
+is an example:
+-object tmm-guest,id=tmm0,sve-vector-length=128,num-pmu-counters=1
+
+Signed-off-by: liupingwei
+---
+ accel/kvm/kvm-all.c | 36 ++++
+ hw/arm/boot.c | 49 +++++
+ hw/arm/virt.c | 62 +++++-
+ hw/virtio/virtio-bus.c | 6 +
+ include/hw/arm/boot.h | 1 +
+ include/hw/arm/virt.h | 1 +
+ include/sysemu/kvm.h | 9 +
+ linux-headers/asm-arm64/kvm.h | 62 ++++++
+ linux-headers/linux/kvm.h | 32 +++-
+ qapi/qom.json | 29 ++-
+ target/arm/kvm-tmm.c | 344 ++++++++++++++++++++++++++++++++++
+ target/arm/kvm.c | 6 +-
+ target/arm/kvm64.c | 5 +
+ target/arm/kvm_arm.h | 15 ++
+ target/arm/meson.build | 1 +
+ 15 files changed, 651 insertions(+), 7 deletions(-)
+ create mode 100644 target/arm/kvm-tmm.c
+
+diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
+index 6db60854d7..0fddb20921 100644
+--- a/accel/kvm/kvm-all.c
++++ b/accel/kvm/kvm-all.c
+@@ -52,6 +52,8 @@
+ #include "hw/boards.h"
+ #include "sysemu/stats.h"
+
++#include "sysemu/kvm.h"
++
+ /* This check must be after config-host.h is included */
+ #ifdef CONFIG_EVENTFD
+ #include <sys/eventfd.h>
+@@ -86,6 +88,9 @@ struct KVMParkedVcpu {
+ };
+
+ KVMState *kvm_state;
++
++bool virtcca_cvm_allowed = false;
++
+ bool kvm_kernel_irqchip;
+ bool kvm_split_irqchip;
+ bool kvm_async_interrupts_allowed;
+@@ -2353,6 +2358,11 @@ uint32_t kvm_dirty_ring_size(void)
+ return kvm_state->kvm_dirty_ring_size;
+ }
+
++static inline bool kvm_is_virtcca_cvm_type(int type)
++{
++ return type & VIRTCCA_CVM_TYPE;
++}
++
+ static int kvm_init(MachineState *ms)
+ {
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+@@ -2445,6 +2455,10 @@ static int kvm_init(MachineState *ms)
+ goto err;
+ }
+
++ if
(kvm_is_virtcca_cvm_type(type)) { ++ virtcca_cvm_allowed = true; ++ } ++ + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, type); + } while (ret == -EINTR); +@@ -3511,6 +3525,28 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) + return r; + } + ++int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, ++ struct kvm_numa_info *numa_info) ++{ ++ KVMState *state = kvm_state; ++ struct kvm_user_data data; ++ int ret; ++ ++ data.loader_start = loader_start; ++ data.image_end = image_end; ++ data.initrd_start = initrd_start; ++ data.dtb_end = dtb_end; ++ data.ram_size = ram_size; ++ memcpy(&data.numa_info, numa_info, sizeof(struct kvm_numa_info)); ++ ++ ret = kvm_vm_ioctl(state, KVM_LOAD_USER_DATA, &data); ++ if (ret < 0) { ++ error_report("%s: KVM_LOAD_USER_DATA failed!\n", __func__); ++ } ++ ++ return ret; ++} ++ + static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, + hwaddr start_addr, hwaddr size) + { +diff --git a/hw/arm/boot.c b/hw/arm/boot.c +index 84ea6a807a..394d44ced3 100644 +--- a/hw/arm/boot.c ++++ b/hw/arm/boot.c +@@ -26,6 +26,7 @@ + #include "qemu/config-file.h" + #include "qemu/option.h" + #include "qemu/units.h" ++#include "kvm_arm.h" + + /* Kernel boot protocol is specified in the kernel docs + * Documentation/arm/Booting and Documentation/arm64/booting.txt +@@ -1141,6 +1142,16 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, + for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { + ARM_CPU(cs)->env.boot_info = info; + } ++ ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ if (info->dtb_limit == 0) { ++ info->dtb_limit = info->dtb_start + 0x200000; ++ } ++ kvm_load_user_data(info->loader_start, image_high_addr, info->initrd_start, ++ info->dtb_limit, info->ram_size, (struct kvm_numa_info *)info->numa_info); ++ tmm_add_ram_region(info->loader_start, image_high_addr - info->loader_start, ++ info->initrd_start, info->dtb_limit - info->initrd_start, true); ++ } + } + + static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info) +@@ -1231,6 +1242,39 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) + info->initrd_filename = ms->initrd_filename; + info->dtb_filename = ms->dtb; + info->dtb_limit = 0; ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ info->ram_size = ms->ram_size; ++ info->numa_info = g_malloc(sizeof(struct kvm_numa_info)); ++ struct kvm_numa_info *numa_info = (struct kvm_numa_info *) info->numa_info; ++ if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { ++ numa_info->numa_cnt = ms->numa_state->num_nodes; ++ uint64_t mem_base = info->loader_start; ++ for (int64_t i = 0; i < ms->numa_state->num_nodes && i < MAX_NUMA_NODE; i++) { ++ uint64_t mem_len = ms->numa_state->nodes[i].node_mem; ++ numa_info->numa_nodes[i].numa_id = i; ++ numa_info->numa_nodes[i].ipa_start = mem_base; ++ numa_info->numa_nodes[i].ipa_size = mem_len; ++ memcpy(numa_info->numa_nodes[i].host_numa_nodes, ms->numa_state->nodes[i].node_memdev->host_nodes, ++ MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); ++ mem_base += mem_len; ++ } ++ } else { ++ numa_info->numa_cnt = 1; ++ numa_info->numa_nodes[0].numa_id = 0; ++ numa_info->numa_nodes[0].ipa_start = info->loader_start; ++ numa_info->numa_nodes[0].ipa_size = info->ram_size; ++ memset(numa_info->numa_nodes[0].host_numa_nodes, 0, MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); ++ } ++ ++ for (int cpu_idx = ms->smp.cpus - 1; cpu_idx >= 0; cpu_idx--) { ++ ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu_idx)); ++ 
CPUState *local_cs = CPU(armcpu); ++ uint64_t node_id = 0; ++ if (ms->possible_cpus->cpus[local_cs->cpu_index].props.has_node_id) ++ node_id = ms->possible_cpus->cpus[local_cs->cpu_index].props.node_id; ++ bitmap_set((unsigned long *)numa_info->numa_nodes[node_id].cpu_id, cpu_idx, 1); ++ } ++ } + + /* Load the kernel. */ + if (!info->kernel_filename || info->firmware_loaded) { +@@ -1239,6 +1283,11 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) + arm_setup_direct_kernel_boot(cpu, info); + } + ++ if (kvm_enabled() && virtcca_cvm_enabled()) { ++ g_free(info->numa_info); ++ info->numa_info = NULL; ++ } ++ + /* + * Disable the PSCI conduit if it is set up to target the same + * or a lower EL than the one we're going to start the guest code in. +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index be2856c018..6087207f38 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -271,8 +271,16 @@ static void create_fdt(VirtMachineState *vms) + + /* /chosen must exist for load_dtb to fill in necessary properties later */ + qemu_fdt_add_subnode(fdt, "/chosen"); ++ ++ g_autofree char *kvm_type = NULL; ++ if (object_property_find(OBJECT(current_machine), "kvm-type")) { ++ kvm_type = object_property_get_str(OBJECT(current_machine), ++ "kvm-type", &error_abort); ++ } + if (vms->dtb_randomness) { +- create_randomness(ms, "/chosen"); ++ if (!(kvm_type && !strcmp(kvm_type, "cvm"))) { ++ create_randomness(ms, "/chosen"); ++ } + } + + if (vms->secure) { +@@ -1775,6 +1783,19 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits) + vms->memmap[i] = base_memmap[i]; + } + ++ /* fix VIRT_MEM range */ ++ if (object_property_find(OBJECT(current_machine), "kvm-type")) { ++ g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), ++ "kvm-type", &error_abort); ++ ++ if (!strcmp(kvm_type, "cvm")) { ++ vms->memmap[VIRT_MEM].base = 3 * GiB; ++ vms->memmap[VIRT_MEM].size = ms->ram_size; ++ info_report("[qemu] fix VIRT_MEM range 0x%llx - 0x%llx\n", (unsigned long long)(vms->memmap[VIRT_MEM].base), ++ (unsigned long long)(vms->memmap[VIRT_MEM].base + ms->ram_size)); ++ } ++ } ++ + if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) { + error_report("unsupported number of memory slots: %"PRIu64, + ms->ram_slots); +@@ -2103,7 +2124,7 @@ static void machvirt_init(MachineState *machine) + */ + if (vms->secure && firmware_loaded) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; +- } else if (vms->virt) { ++ } else if (vms->virt || virtcca_cvm_enabled()) { + vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC; + } else { + vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; +@@ -2155,6 +2176,14 @@ static void machvirt_init(MachineState *machine) + exit(1); + } + ++ if (virtcca_cvm_enabled()) { ++ int ret = kvm_arm_tmm_init(machine->cgs, &error_fatal); ++ if (ret != 0) { ++ error_report("fail to initialize TMM"); ++ exit(1); ++ } ++ } ++ + create_fdt(vms); + + assert(possible_cpus->len == max_cpus); +@@ -2901,6 +2930,15 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, + static int virt_kvm_type(MachineState *ms, const char *type_str) + { + VirtMachineState *vms = VIRT_MACHINE(ms); ++ int virtcca_cvm_type = 0; ++ if (object_property_find(OBJECT(current_machine), "kvm-type")) { ++ g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), ++ "kvm-type", &error_abort); ++ ++ if (!strcmp(kvm_type, "cvm")) { ++ virtcca_cvm_type = VIRTCCA_CVM_TYPE; ++ } ++ } + int max_vm_pa_size, requested_pa_size; + bool fixed_ipa; + +@@ -2930,7 +2968,9 @@ static int 
virt_kvm_type(MachineState *ms, const char *type_str) + * the implicit legacy 40b IPA setting, in which case the kvm_type + * must be 0. + */ +- return fixed_ipa ? 0 : requested_pa_size; ++ return strcmp(type_str, "cvm") == 0 ? ++ ((fixed_ipa ? 0 : requested_pa_size) | virtcca_cvm_type) : ++ (fixed_ipa ? 0 : requested_pa_size); + } + + static void virt_machine_class_init(ObjectClass *oc, void *data) +@@ -3101,6 +3141,19 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) + + } + ++static char *virt_get_kvm_type(Object *obj, Error **errp G_GNUC_UNUSED) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ return g_strdup(vms->kvm_type); ++} ++ ++static void virt_set_kvm_type(Object *obj, const char *value, Error **errp G_GNUC_UNUSED) ++{ ++ VirtMachineState *vms = VIRT_MACHINE(obj); ++ g_free(vms->kvm_type); ++ vms->kvm_type = g_strdup(value); ++} ++ + static void virt_instance_init(Object *obj) + { + VirtMachineState *vms = VIRT_MACHINE(obj); +@@ -3158,6 +3211,9 @@ static void virt_instance_init(Object *obj) + + vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); + vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); ++ ++ object_property_add_str(obj, "kvm-type", virt_get_kvm_type, virt_set_kvm_type); ++ object_property_set_description(obj, "kvm-type", "CVM or Normal VM"); + } + + static const TypeInfo virt_machine_info = { +diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c +index 896feb37a1..7e750d073d 100644 +--- a/hw/virtio/virtio-bus.c ++++ b/hw/virtio/virtio-bus.c +@@ -25,6 +25,7 @@ + #include "qemu/osdep.h" + #include "qemu/error-report.h" + #include "qemu/module.h" ++#include "sysemu/kvm.h" + #include "qapi/error.h" + #include "hw/virtio/virtio-bus.h" + #include "hw/virtio/virtio.h" +@@ -81,6 +82,11 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) + vdev->dma_as = &address_space_memory; + if (has_iommu) { + vdev_has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); ++ ++ if (virtcca_cvm_enabled() && (strcmp(vdev->name, "vhost-user-fs") == 0)) { ++ vdev_has_iommu = true; ++ } ++ + /* + * Present IOMMU_PLATFORM to the driver iff iommu_plattform=on and + * device operational. If the driver does not accept IOMMU_PLATFORM +diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h +index 80c492d742..2329e1a723 100644 +--- a/include/hw/arm/boot.h ++++ b/include/hw/arm/boot.h +@@ -39,6 +39,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, + /* arm_boot.c */ + struct arm_boot_info { + uint64_t ram_size; ++ void *numa_info; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; +diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h +index f69239850e..1fced5e876 100644 +--- a/include/hw/arm/virt.h ++++ b/include/hw/arm/virt.h +@@ -173,6 +173,7 @@ struct VirtMachineState { + PCIBus *bus; + char *oem_id; + char *oem_table_id; ++ char *kvm_type; + }; + + #define VIRT_ECAM_ID(high) (high ? 
VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 4d00ade5fa..7d08aae9fa 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -19,6 +19,7 @@ + #include "exec/memattrs.h" + #include "qemu/accel.h" + #include "qom/object.h" ++#include "linux-headers/linux/kvm.h" + + #ifdef NEED_CPU_H + # ifdef CONFIG_KVM +@@ -32,6 +33,7 @@ + #ifdef CONFIG_KVM_IS_POSSIBLE + + extern bool kvm_allowed; ++extern bool virtcca_cvm_allowed; + extern bool kvm_kernel_irqchip; + extern bool kvm_split_irqchip; + extern bool kvm_async_interrupts_allowed; +@@ -45,6 +47,8 @@ extern bool kvm_msi_use_devid; + extern bool kvm_csv3_allowed; + + #define kvm_enabled() (kvm_allowed) ++#define virtcca_cvm_enabled() (virtcca_cvm_allowed) ++#define VIRTCCA_CVM_TYPE (1UL << 8) + /** + * kvm_irqchip_in_kernel: + * +@@ -153,6 +157,8 @@ extern bool kvm_csv3_allowed; + #else + + #define kvm_enabled() (0) ++#define virtcca_cvm_enabled() (0) ++#define VIRTCCA_CVM_TYPE (0) + #define kvm_irqchip_in_kernel() (false) + #define kvm_irqchip_is_split() (false) + #define kvm_async_interrupts_enabled() (false) +@@ -571,4 +577,7 @@ bool kvm_arch_cpu_check_are_resettable(void); + bool kvm_dirty_ring_enabled(void); + + uint32_t kvm_dirty_ring_size(void); ++ ++int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, ++ struct kvm_numa_info *numa_info); + #endif +diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h +index c59ea55cd8..2b040b5d60 100644 +--- a/linux-headers/asm-arm64/kvm.h ++++ b/linux-headers/asm-arm64/kvm.h +@@ -110,6 +110,7 @@ struct kvm_regs { + #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ + #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ + #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ ++#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */ + + struct kvm_vcpu_init { + __u32 target; +@@ -523,6 +524,67 @@ struct reg_mask_range { + __u32 reserved[13]; + }; + ++/* KVM_CAP_ARM_TMM on VM fd */ ++#define KVM_CAP_ARM_TMM_CONFIG_CVM 0 ++#define KVM_CAP_ARM_TMM_CREATE_RD 1 ++#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 ++#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 ++ ++#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 ++#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 ++ ++#define KVM_CAP_ARM_TMM_RPV_SIZE 64 ++ ++/* List of configuration items accepted for KVM_CAP_ARM_RME_CONFIG_REALM */ ++#define KVM_CAP_ARM_TMM_CFG_RPV 0 ++#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 ++#define KVM_CAP_ARM_TMM_CFG_SVE 2 ++#define KVM_CAP_ARM_TMM_CFG_DBG 3 ++#define KVM_CAP_ARM_TMM_CFG_PMU 4 ++ ++struct kvm_cap_arm_tmm_config_item { ++ __u32 cfg; ++ union { ++ /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ ++ struct { ++ __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ ++ struct { ++ __u32 hash_algo; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ ++ struct { ++ __u32 sve_vq; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ ++ struct { ++ __u32 num_brps; ++ __u32 num_wrps; ++ }; ++ ++ /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ ++ struct { ++ __u32 num_pmu_cntrs; ++ }; ++ /* Fix the size of the union */ ++ __u8 reserved[256]; ++ }; ++}; ++ ++#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) ++struct kvm_cap_arm_tmm_populate_region_args { ++ __u64 populate_ipa_base1; ++ __u64 populate_ipa_size1; ++ __u64 populate_ipa_base2; ++ __u64 populate_ipa_size2; ++ __u32 flags; ++ __u32 reserved[3]; ++}; ++ + #endif + + 
#endif /* __ARM_KVM_H__ */ +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index f390989e7c..c75e4cde48 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -14,6 +14,8 @@ + #include + #include + ++#include "sysemu/numa.h" ++ + #define KVM_API_VERSION 12 + + /* *** Deprecated interfaces *** */ +@@ -1203,6 +1205,8 @@ struct kvm_ppc_resize_hpt { + + #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) + ++#define KVM_CAP_ARM_TMM 300 ++ + #ifdef KVM_CAP_IRQ_ROUTING + + struct kvm_irq_routing_irqchip { +@@ -1478,6 +1482,32 @@ struct kvm_vfio_spapr_tce { + __s32 tablefd; + }; + ++#define MAX_NUMA_NODE 8 ++#define MAX_CPU_BIT_MAP 4 ++#define MAX_NODE_BIT_MAP (MAX_NODES / BITS_PER_LONG) ++ ++struct kvm_numa_node { ++ __u64 numa_id; ++ __u64 ipa_start; ++ __u64 ipa_size; ++ __u64 host_numa_nodes[MAX_NODE_BIT_MAP]; ++ __u64 cpu_id[MAX_CPU_BIT_MAP]; ++}; ++ ++struct kvm_numa_info { ++ __u64 numa_cnt; ++ struct kvm_numa_node numa_nodes[MAX_NUMA_NODE]; ++}; ++ ++struct kvm_user_data { ++ __u64 loader_start; ++ __u64 image_end; ++ __u64 initrd_start; ++ __u64 dtb_end; ++ __u64 ram_size; ++ struct kvm_numa_info numa_info; ++}; ++ + /* + * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns + * a vcpu fd. +@@ -1490,7 +1520,7 @@ struct kvm_vfio_spapr_tce { + struct kvm_userspace_memory_region) + #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) + #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) +- ++#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) + /* enable ucontrol for s390 */ + struct kvm_s390_ucas_mapping { + __u64 user_addr; +diff --git a/qapi/qom.json b/qapi/qom.json +index 89a2516b42..0853944ba8 100644 +--- a/qapi/qom.json ++++ b/qapi/qom.json +@@ -902,6 +902,29 @@ + 'data': { '*cpu-affinity': ['uint16'], + '*node-affinity': ['uint16'] } } + ++## ++# @TmmGuestMeasurementAlgo: ++# ++# Algorithm to use for cvm measurements ++# ++# Since: FIXME ++## ++{ 'enum': 'TmmGuestMeasurementAlgo', ++'data': ['default', 'sha256', 'sha512'] } ++ ++## ++# @TmmGuestProperties: ++# ++# Properties for tmm-guest objects. ++# ++# @sve-vector-length: SVE vector length (default: 0, SVE disabled) ++# ++# Since: FIXME ++## ++{ 'struct': 'TmmGuestProperties', ++ 'data': { '*sve-vector-length': 'uint32', ++ '*num-pmu-counters': 'uint32', ++ '*measurement-algo': 'TmmGuestMeasurementAlgo' } } + + ## + # @ObjectType: +@@ -965,7 +988,8 @@ + 'tls-creds-x509', + 'tls-cipher-suites', + { 'name': 'x-remote-object', 'features': [ 'unstable' ] }, +- { 'name': 'x-vfio-user-server', 'features': [ 'unstable' ] } ++ { 'name': 'x-vfio-user-server', 'features': [ 'unstable' ] }, ++ 'tmm-guest' + ] } + + ## +@@ -1032,7 +1056,8 @@ + 'tls-creds-x509': 'TlsCredsX509Properties', + 'tls-cipher-suites': 'TlsCredsProperties', + 'x-remote-object': 'RemoteObjectProperties', +- 'x-vfio-user-server': 'VfioUserServerProperties' ++ 'x-vfio-user-server': 'VfioUserServerProperties', ++ 'tmm-guest': 'TmmGuestProperties' + } } + + ## +diff --git a/target/arm/kvm-tmm.c b/target/arm/kvm-tmm.c +new file mode 100644 +index 0000000000..efe2ca0006 +--- /dev/null ++++ b/target/arm/kvm-tmm.c +@@ -0,0 +1,344 @@ ++/* ++ * QEMU add virtcca cvm feature. ++ * ++ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. 
++ * ++ */ ++ ++#include "qemu/osdep.h" ++#include "exec/confidential-guest-support.h" ++#include "hw/boards.h" ++#include "hw/core/cpu.h" ++#include "kvm_arm.h" ++#include "migration/blocker.h" ++#include "qapi/error.h" ++#include "qom/object_interfaces.h" ++#include "sysemu/kvm.h" ++#include "sysemu/runstate.h" ++#include "hw/loader.h" ++ ++#define TYPE_TMM_GUEST "tmm-guest" ++OBJECT_DECLARE_SIMPLE_TYPE(TmmGuest, TMM_GUEST) ++ ++#define TMM_PAGE_SIZE qemu_real_host_page_size() ++#define TMM_MAX_PMU_CTRS 0x20 ++#define TMM_MAX_CFG 5 ++ ++struct TmmGuest { ++ ConfidentialGuestSupport parent_obj; ++ GSList *ram_regions; ++ TmmGuestMeasurementAlgo measurement_algo; ++ uint32_t sve_vl; ++ uint32_t num_pmu_cntrs; ++}; ++ ++typedef struct { ++ hwaddr base1; ++ hwaddr len1; ++ hwaddr base2; ++ hwaddr len2; ++ bool populate; ++} TmmRamRegion; ++ ++static TmmGuest *tmm_guest; ++ ++bool kvm_arm_tmm_enabled(void) ++{ ++ return !!tmm_guest; ++} ++ ++static int tmm_configure_one(TmmGuest *guest, uint32_t cfg, Error **errp) ++{ ++ int ret = 1; ++ const char *cfg_str; ++ struct kvm_cap_arm_tmm_config_item args = { ++ .cfg = cfg, ++ }; ++ ++ switch (cfg) { ++ case KVM_CAP_ARM_TMM_CFG_RPV: ++ return 0; ++ case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: ++ switch (guest->measurement_algo) { ++ case TMM_GUEST_MEASUREMENT_ALGO_DEFAULT: ++ return 0; ++ case TMM_GUEST_MEASUREMENT_ALGO_SHA256: ++ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256; ++ break; ++ case TMM_GUEST_MEASUREMENT_ALGO_SHA512: ++ args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ cfg_str = "hash algorithm"; ++ break; ++ case KVM_CAP_ARM_TMM_CFG_SVE: ++ if (!guest->sve_vl) { ++ return 0; ++ } ++ args.sve_vq = guest->sve_vl / 128; ++ cfg_str = "SVE"; ++ break; ++ case KVM_CAP_ARM_TMM_CFG_DBG: ++ return 0; ++ case KVM_CAP_ARM_TMM_CFG_PMU: ++ if (!guest->num_pmu_cntrs) { ++ return 0; ++ } ++ args.num_pmu_cntrs = guest->num_pmu_cntrs; ++ cfg_str = "PMU"; ++ break; ++ default: ++ g_assert_not_reached(); ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_CONFIG_CVM, (intptr_t)&args); ++ if (ret) { ++ error_setg_errno(errp, -ret, "TMM: failed to configure %s", cfg_str); ++ } ++ ++ return ret; ++} ++ ++static gint tmm_compare_ram_regions(gconstpointer a, gconstpointer b) ++{ ++ const TmmRamRegion *ra = a; ++ const TmmRamRegion *rb = b; ++ ++ g_assert(ra->base1 != rb->base1); ++ return ra->base1 < rb->base1 ? 
-1 : 1; ++} ++ ++void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate) ++{ ++ TmmRamRegion *region; ++ ++ region = g_new0(TmmRamRegion, 1); ++ region->base1 = QEMU_ALIGN_DOWN(base1, TMM_PAGE_SIZE); ++ region->len1 = QEMU_ALIGN_UP(len1, TMM_PAGE_SIZE); ++ region->base2 = QEMU_ALIGN_DOWN(base2, TMM_PAGE_SIZE); ++ region->len2 = QEMU_ALIGN_UP(len2, TMM_PAGE_SIZE); ++ region->populate = populate; ++ ++ tmm_guest->ram_regions = g_slist_insert_sorted(tmm_guest->ram_regions, ++ region, tmm_compare_ram_regions); ++} ++ ++static void tmm_populate_region(gpointer data, gpointer unused) ++{ ++ int ret; ++ const TmmRamRegion *region = data; ++ struct kvm_cap_arm_tmm_populate_region_args populate_args = { ++ .populate_ipa_base1 = region->base1, ++ .populate_ipa_size1 = region->len1, ++ .populate_ipa_base2 = region->base2, ++ .populate_ipa_size2 = region->len2, ++ .flags = KVM_ARM_TMM_POPULATE_FLAGS_MEASURE, ++ }; ++ ++ if (!region->populate) { ++ return; ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_POPULATE_CVM, ++ (intptr_t)&populate_args); ++ if (ret) { ++ error_report("TMM: failed to populate cvm region (0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx"): %s", ++ region->base1, region->len1, region->base2, region->len2, strerror(-ret)); ++ exit(1); ++ } ++} ++ ++static int tmm_create_rd(Error **errp) ++{ ++ int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_CREATE_RD); ++ if (ret) { ++ error_setg_errno(errp, -ret, "TMM: failed to create tmm Descriptor"); ++ } ++ return ret; ++} ++ ++static void tmm_vm_state_change(void *opaque, bool running, RunState state) ++{ ++ int ret; ++ CPUState *cs; ++ ++ if (!running) { ++ return; ++ } ++ ++ g_slist_foreach(tmm_guest->ram_regions, tmm_populate_region, NULL); ++ g_slist_free_full(g_steal_pointer(&tmm_guest->ram_regions), g_free); ++ ++ CPU_FOREACH(cs) { ++ ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_TEC); ++ if (ret) { ++ error_report("TMM: failed to finalize vCPU: %s", strerror(-ret)); ++ exit(1); ++ } ++ } ++ ++ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, ++ KVM_CAP_ARM_TMM_ACTIVATE_CVM); ++ if (ret) { ++ error_report("TMM: failed to activate cvm: %s", strerror(-ret)); ++ exit(1); ++ } ++} ++ ++int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp) ++{ ++ int ret; ++ int cfg; ++ ++ if (!tmm_guest) { ++ return -ENODEV; ++ } ++ ++ if (!kvm_check_extension(kvm_state, KVM_CAP_ARM_TMM)) { ++ error_setg(errp, "KVM does not support TMM"); ++ return -ENODEV; ++ } ++ ++ for (cfg = 0; cfg < TMM_MAX_CFG; cfg++) { ++ ret = tmm_configure_one(tmm_guest, cfg, &error_abort); ++ if (ret) { ++ return ret; ++ } ++ } ++ ++ ret = tmm_create_rd(&error_abort); ++ if (ret) { ++ return ret; ++ } ++ ++ qemu_add_vm_change_state_handler(tmm_vm_state_change, NULL); ++ return 0; ++} ++ ++static void tmm_get_sve_vl(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ visit_type_uint32(v, name, &guest->sve_vl, errp); ++} ++ ++static void tmm_set_sve_vl(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ uint32_t value; ++ ++ if (!visit_type_uint32(v, name, &value, errp)) { ++ return; ++ } ++ ++ if (value & 0x7f || value >= ARM_MAX_VQ * 128) { ++ error_setg(errp, "invalid SVE vector length"); ++ return; ++ } ++ ++ guest->sve_vl = value; ++} ++ ++static void tmm_get_num_pmu_cntrs(Object *obj, Visitor *v, const 
char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ visit_type_uint32(v, name, &guest->num_pmu_cntrs, errp); ++} ++ ++static void tmm_set_num_pmu_cntrs(Object *obj, Visitor *v, const char *name, ++ void *opaque, Error **errp) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ uint32_t value; ++ ++ if (!visit_type_uint32(v, name, &value, errp)) { ++ return; ++ } ++ ++ if (value >= TMM_MAX_PMU_CTRS) { ++ error_setg(errp, "invalid number of PMU counters"); ++ return; ++ } ++ ++ guest->num_pmu_cntrs = value; ++} ++ ++static int tmm_get_measurement_algo(Object *obj, Error **errp G_GNUC_UNUSED) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ return guest->measurement_algo; ++} ++ ++static void tmm_set_measurement_algo(Object *obj, int algo, Error **errp G_GNUC_UNUSED) ++{ ++ TmmGuest *guest = TMM_GUEST(obj); ++ ++ guest->measurement_algo = algo; ++} ++ ++static void tmm_guest_class_init(ObjectClass *oc, void *data) ++{ ++ object_class_property_add_enum(oc, "measurement-algo", ++ "TmmGuestMeasurementAlgo", ++ &TmmGuestMeasurementAlgo_lookup, ++ tmm_get_measurement_algo, ++ tmm_set_measurement_algo); ++ object_class_property_set_description(oc, "measurement-algo", ++ "cvm measurement algorithm ('sha256', 'sha512')"); ++ /* ++ * This is not ideal. Normally SVE parameters are given to -cpu, but the ++ * cvm parameters are needed much earlier than CPU initialization. We also ++ * don't have a way to discover what is supported at the moment, the idea is ++ * that the user knows exactly what hardware it is running on because these ++ * parameters are part of the measurement and play in the attestation. ++ */ ++ object_class_property_add(oc, "sve-vector-length", "uint32", tmm_get_sve_vl, ++ tmm_set_sve_vl, NULL, NULL); ++ object_class_property_set_description(oc, "sve-vector-length", ++ "SVE vector length. 
0 disables SVE (the default)"); ++ object_class_property_add(oc, "num-pmu-counters", "uint32", ++ tmm_get_num_pmu_cntrs, tmm_set_num_pmu_cntrs, ++ NULL, NULL); ++ object_class_property_set_description(oc, "num-pmu-counters", ++ "Number of PMU counters"); ++} ++ ++static void tmm_guest_instance_init(Object *obj) ++{ ++ if (tmm_guest) { ++ error_report("a single instance of TmmGuest is supported"); ++ exit(1); ++ } ++ tmm_guest = TMM_GUEST(obj); ++} ++ ++static const TypeInfo tmm_guest_info = { ++ .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT, ++ .name = TYPE_TMM_GUEST, ++ .instance_size = sizeof(struct TmmGuest), ++ .instance_init = tmm_guest_instance_init, ++ .class_init = tmm_guest_class_init, ++ .interfaces = (InterfaceInfo[]) { ++ { TYPE_USER_CREATABLE }, ++ { } ++ } ++}; ++ ++static void tmm_register_types(void) ++{ ++ type_register_static(&tmm_guest_info); ++} ++type_init(tmm_register_types); +diff --git a/target/arm/kvm.c b/target/arm/kvm.c +index 7903e2ddde..a42ddcc855 100644 +--- a/target/arm/kvm.c ++++ b/target/arm/kvm.c +@@ -592,6 +592,10 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) + continue; + } + ++ if (virtcca_cvm_enabled() && regidx == KVM_REG_ARM_TIMER_CNT) { ++ continue; ++ } ++ + switch (regidx & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + v32 = cpu->cpreg_values[i]; +@@ -1071,7 +1075,7 @@ int kvm_arch_msi_data_to_gsi(uint32_t data) + + bool kvm_arch_cpu_check_are_resettable(void) + { +- return true; ++ return !virtcca_cvm_enabled(); + } + + static void kvm_arch_get_eager_split_size(Object *obj, Visitor *v, +diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c +index 3c175c93a7..a09347254f 100644 +--- a/target/arm/kvm64.c ++++ b/target/arm/kvm64.c +@@ -543,6 +543,11 @@ static int kvm_arm_sve_set_vls(CPUState *cs) + + assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + ++ if (virtcca_cvm_enabled()) { ++ /* Already set through tmm config */ ++ return 0; ++ } ++ + return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]); + } + +diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h +index 051a0da41c..9fd0a520fe 100644 +--- a/target/arm/kvm_arm.h ++++ b/target/arm/kvm_arm.h +@@ -377,6 +377,11 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); + + int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); + ++void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate); ++ ++int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp); ++bool kvm_arm_tmm_enabled(void); ++ + #else + + /* +@@ -451,6 +456,16 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs) + g_assert_not_reached(); + } + ++static inline int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp G_GNUC_UNUSED) ++{ ++ g_assert_not_reached(); ++} ++ ++static inline void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, ++ hwaddr len2, bool populate) ++{ ++ g_assert_not_reached(); ++} + #endif + + /** +diff --git a/target/arm/meson.build b/target/arm/meson.build +index 5d04a8e94f..ee1ec5a5ff 100644 +--- a/target/arm/meson.build ++++ b/target/arm/meson.build +@@ -10,6 +10,7 @@ arm_ss.add(zlib) + + arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) + arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c')) ++arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c', 'kvm-tmm.c'), if_false: files('kvm-stub.c')) + + arm_ss.add(when: 'TARGET_AARCH64', if_true: files( + 'cpu64.c', +-- +2.43.5 + diff --git 
a/0289-cvm-bug-fix-for-incorrect-device-name-check-for-vhos.patch b/0289-cvm-bug-fix-for-incorrect-device-name-check-for-vhos.patch
new file mode 100644
index 0000000..e4fc4cb
--- /dev/null
+++ b/0289-cvm-bug-fix-for-incorrect-device-name-check-for-vhos.patch
@@ -0,0 +1,34 @@
+From 8055de376406290a18a7f6951e268facf70e96a4 Mon Sep 17 00:00:00 2001
+From: liupingwei
+Date: Mon, 19 Aug 2024 15:38:23 +0800
+Subject: [PATCH] cvm : bug-fix for incorrect device name check for
+ vhost-user-fs
+
+The 'vhost-user-fs' device name was being parsed as 'virtio-user-fs'
+during compilation, and this caused the device to erroneously trigger
+the error branch.
+
+Fixes: 5db954cb188d3775aec053fad8a39bf4c26a2b92 ("Add support for the
+virtcca cvm feature.")
+
+Signed-off-by: liupingwei
+---
+ hw/virtio/virtio-bus.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c
+index 7e750d073d..4f16e7ef77 100644
+--- a/hw/virtio/virtio-bus.c
++++ b/hw/virtio/virtio-bus.c
+@@ -83,7 +83,7 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp)
+ if (has_iommu) {
+ vdev_has_iommu = virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+
+- if (virtcca_cvm_enabled() && (strcmp(vdev->name, "vhost-user-fs") == 0)) {
++ if (virtcca_cvm_enabled() && (strcmp(vdev->name, "virtio-user-fs") == 0)) {
+ vdev_has_iommu = true;
+ }
+
+--
+2.43.5
+
diff --git a/0290-cvm-implement-command-blacklist-for-cvm-security-enh.patch b/0290-cvm-implement-command-blacklist-for-cvm-security-enh.patch
new file mode 100644
index 0000000..133a51d
--- /dev/null
+++ b/0290-cvm-implement-command-blacklist-for-cvm-security-enh.patch
@@ -0,0 +1,118 @@
+From 94564d01d30afeedec92a9b248cefded862f062c Mon Sep 17 00:00:00 2001
+From: liupingwei
+Date: Fri, 16 Aug 2024 18:06:10 +0800
+Subject: [PATCH] cvm : Implement command blacklist for cvm security
+ enhancement
+
+Added a new feature to intercept and block specific virsh commands (virsh
+save, virsh restore, virsh dump, virsh suspend, virsh resume) that can
+impact the security of cvm.
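+
+For illustration, each blocked entry point gains a guard of the following
+shape (a minimal sketch only; qmp_example_cmd is a hypothetical name, the
+patch below applies this pattern to qmp_dump_guest_memory, hmp_loadvm,
+save_snapshot and qmp_stop):
+
+    void qmp_example_cmd(Error **errp)
+    {
+        /* Refuse the operation when running as a virtcca cvm. */
+        if (virtcca_cvm_enabled()) {
+            error_setg(errp, "The example command is temporarily"
+                       " unsupported in cvm.");
+            return;
+        }
+        /* ... normal command handling continues here ... */
+    }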
+ +Signed-off-by: liupingwei +--- + dump/dump.c | 7 +++++++ + migration/migration-hmp-cmds.c | 6 ++++++ + migration/savevm.c | 6 ++++++ + monitor/qmp-cmds.c | 6 ++++++ + 4 files changed, 25 insertions(+) + +diff --git a/dump/dump.c b/dump/dump.c +index 4819050764..787059ac2c 100644 +--- a/dump/dump.c ++++ b/dump/dump.c +@@ -20,6 +20,7 @@ + #include "sysemu/dump.h" + #include "sysemu/runstate.h" + #include "sysemu/cpus.h" ++#include "sysemu/kvm.h" + #include "qapi/error.h" + #include "qapi/qapi-commands-dump.h" + #include "qapi/qapi-events-dump.h" +@@ -2065,6 +2066,12 @@ void qmp_dump_guest_memory(bool paging, const char *protocol, + Error **errp) + { + ERRP_GUARD(); ++ ++ if (virtcca_cvm_enabled()) { ++ error_setg(errp, "The dump-guest-memory command is temporarily unsupported in cvm."); ++ return; ++ } ++ + const char *p; + int fd; + DumpState *s; +diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c +index 58da696ff7..a2675518e0 100644 +--- a/migration/migration-hmp-cmds.c ++++ b/migration/migration-hmp-cmds.c +@@ -30,6 +30,7 @@ + #include "sysemu/runstate.h" + #include "ui/qemu-spice.h" + #include "sysemu/sysemu.h" ++#include "sysemu/kvm.h" + #include "options.h" + #include "migration.h" + +@@ -416,6 +417,11 @@ void hmp_loadvm(Monitor *mon, const QDict *qdict) + const char *name = qdict_get_str(qdict, "name"); + Error *err = NULL; + ++ if (virtcca_cvm_enabled()) { ++ error_setg(&err, "The loadvm command is temporarily unsupported in cvm."); ++ return; ++ } ++ + vm_stop(RUN_STATE_RESTORE_VM); + + if (load_snapshot(name, NULL, false, NULL, &err) && saved_vm_running) { +diff --git a/migration/savevm.c b/migration/savevm.c +index eec5503a42..cf88057efa 100644 +--- a/migration/savevm.c ++++ b/migration/savevm.c +@@ -61,6 +61,7 @@ + #include "sysemu/replay.h" + #include "sysemu/runstate.h" + #include "sysemu/sysemu.h" ++#include "sysemu/kvm.h" + #include "sysemu/xen.h" + #include "migration/colo.h" + #include "qemu/bitmap.h" +@@ -3042,6 +3043,11 @@ int qemu_loadvm_approve_switchover(void) + bool save_snapshot(const char *name, bool overwrite, const char *vmstate, + bool has_devices, strList *devices, Error **errp) + { ++ if (virtcca_cvm_enabled()) { ++ error_setg(errp, "The savevm command is temporarily unsupported in cvm."); ++ return false; ++ } ++ + BlockDriverState *bs; + QEMUSnapshotInfo sn1, *sn = &sn1; + int ret = -1, ret2; +diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c +index b0f948d337..9737f23e48 100644 +--- a/monitor/qmp-cmds.c ++++ b/monitor/qmp-cmds.c +@@ -23,6 +23,7 @@ + #include "sysemu/runstate.h" + #include "sysemu/runstate-action.h" + #include "sysemu/block-backend.h" ++#include "sysemu/kvm.h" + #include "qapi/error.h" + #include "qapi/qapi-init-commands.h" + #include "qapi/qapi-commands-control.h" +@@ -49,6 +50,11 @@ void qmp_quit(Error **errp) + + void qmp_stop(Error **errp) + { ++ if (virtcca_cvm_enabled()) { ++ error_setg(errp, "The stop command is temporarily unsupported in cvm."); ++ return; ++ } ++ + /* if there is a dump in background, we should wait until the dump + * finished */ + if (qemu_system_dump_in_progress()) { +-- +2.43.5 + diff --git a/0291-cvm-bug-fix-for-undefined-reference-to-virtcca-cvm-a.patch b/0291-cvm-bug-fix-for-undefined-reference-to-virtcca-cvm-a.patch new file mode 100644 index 0000000..211d16c --- /dev/null +++ b/0291-cvm-bug-fix-for-undefined-reference-to-virtcca-cvm-a.patch @@ -0,0 +1,30 @@ +From 61c7d2cd89816b805dfb990b91636bcf793fcb9b Mon Sep 17 00:00:00 2001 +From: liupingwei +Date: Wed, 4 Sep 2024 14:29:02 
+0800
+Subject: [PATCH] cvm : bug fix for undefined reference to
+ 'virtcca_cvm_allowed' while compiling.
+
+Fixes a linking error due to an undefined reference to
+'virtcca_cvm_allowed' when KVM is not enabled.
+
+Signed-off-by: liupingwei
+---
+ accel/stubs/kvm-stub.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c
+index 45b23f61ce..b90d516755 100644
+--- a/accel/stubs/kvm-stub.c
++++ b/accel/stubs/kvm-stub.c
+@@ -26,6 +26,8 @@ bool kvm_readonly_mem_allowed;
+ bool kvm_msi_use_devid;
+ bool kvm_csv3_allowed;
+
++bool virtcca_cvm_allowed;
++
+ void kvm_flush_coalesced_mmio_buffer(void)
+ {
+ }
+--
+2.43.5
+
diff --git a/0292-qapi-qom-target-i386-csv-guest-introduce-secret-head.patch b/0292-qapi-qom-target-i386-csv-guest-introduce-secret-head.patch
new file mode 100644
index 0000000..bf24ce1
--- /dev/null
+++ b/0292-qapi-qom-target-i386-csv-guest-introduce-secret-head.patch
@@ -0,0 +1,220 @@
+From 4f1bb1eec41e67cf38c55d754d4948f384f81b5a Mon Sep 17 00:00:00 2001
+From: hanliyang
+Date: Fri, 2 Aug 2024 01:35:25 +0800
+Subject: [PATCH] qapi/qom,target/i386: csv-guest: Introduce
+ secret-header-file=str and secret-file=str options
+
+This feature only applies to Hygon CSV.
+
+Users can utilize the hag to generate the secret header file and secret file,
+and inject these data into the guest encrypted secret area automatically.
+
+Signed-off-by: hanliyang
+---
+ qapi/qom.json | 9 ++++-
+ qemu-options.hx | 8 +++-
+ target/i386/sev.c | 100 ++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 115 insertions(+), 2 deletions(-)
+
+diff --git a/qapi/qom.json b/qapi/qom.json
+index 0853944ba8..a54bb86012 100644
+--- a/qapi/qom.json
++++ b/qapi/qom.json
+@@ -868,6 +868,11 @@
+ #
+ # @user-id: the user id of the guest owner, only support on Hygon CPUs
+ #
++# @secret-header-file: the header file of the guest owner's secret, only
++# support on Hygon CPUs (since 8.2)
++# @secret-file: the file of the guest owner's secret, only support on
++# Hygon CPUs (since 8.2)
++#
+ # Since: 2.12
+ ##
+ { 'struct': 'SevGuestProperties',
+@@ -879,7 +884,9 @@
+ '*cbitpos': 'uint32',
+ 'reduced-phys-bits': 'uint32',
+ '*kernel-hashes': 'bool',
+- '*user-id': 'str' } }
++ '*user-id': 'str',
++ '*secret-header-file': 'str',
++ '*secret-file': 'str' } }
+
+ ##
+ # @ThreadContextProperties:
+diff --git a/qemu-options.hx b/qemu-options.hx
+index f64cf2b556..bffb9f31a8 100644
+--- a/qemu-options.hx
++++ b/qemu-options.hx
+@@ -5645,7 +5645,7 @@ SRST
+ -object secret,id=sec0,keyid=secmaster0,format=base64,\\
+ data=$SECRET,iv=$(user_id = g_strdup(value);
+ }
+
++static char *
++sev_guest_get_secret_header_file(Object *obj, Error **errp)
++{
++ SevGuestState *s = SEV_GUEST(obj);
++
++ return g_strdup(s->secret_header_file);
++}
++
++static void
++sev_guest_set_secret_header_file(Object *obj, const char *value, Error **errp)
++{
++ SevGuestState *s = SEV_GUEST(obj);
++
++ s->secret_header_file = g_strdup(value);
++}
++
++static char *
++sev_guest_get_secret_file(Object *obj, Error **errp)
++{
++ SevGuestState *s = SEV_GUEST(obj);
++
++ return g_strdup(s->secret_file);
++}
++
++static void
++sev_guest_set_secret_file(Object *obj, const char *value, Error **errp)
++{
++ SevGuestState *s = SEV_GUEST(obj);
++
++ s->secret_file = g_strdup(value);
++}
++
+ static char *
+ sev_guest_get_sev_device(Object *obj, Error **errp)
+ {
+@@ -448,6 +482,16 @@ sev_guest_class_init(ObjectClass *oc, void *data)
+ sev_guest_set_user_id);
+ object_class_property_set_description(oc, "user-id",
+ "user id
of the guest owner"); ++ object_class_property_add_str(oc, "secret-header-file", ++ sev_guest_get_secret_header_file, ++ sev_guest_set_secret_header_file); ++ object_class_property_set_description(oc, "secret-header-file", ++ "header file of the guest owner's secret"); ++ object_class_property_add_str(oc, "secret-file", ++ sev_guest_get_secret_file, ++ sev_guest_set_secret_file); ++ object_class_property_set_description(oc, "secret-file", ++ "file of the guest owner's secret"); + } + + static void +@@ -867,6 +911,9 @@ sev_launch_update_vmsa(SevGuestState *sev) + return ret; + } + ++static int ++csv_load_launch_secret(const char *secret_header_file, const char *secret_file); ++ + static void + sev_launch_get_measure(Notifier *notifier, void *unused) + { +@@ -917,6 +964,15 @@ sev_launch_get_measure(Notifier *notifier, void *unused) + /* encode the measurement value and emit the event */ + sev->measurement = g_base64_encode(data, measurement.len); + trace_kvm_sev_launch_measurement(sev->measurement); ++ ++ /* Hygon CSV will auto load guest owner's secret */ ++ if (is_hygon_cpu()) { ++ if (sev->secret_header_file && ++ strlen(sev->secret_header_file) && ++ sev->secret_file && ++ strlen(sev->secret_file)) ++ csv_load_launch_secret(sev->secret_header_file, sev->secret_file); ++ } + } + + static char *sev_get_launch_measurement(void) +@@ -2526,6 +2582,50 @@ int csv_load_incoming_cpu_state(QEMUFile *f) + return ret; + } + ++static int ++csv_load_launch_secret(const char *secret_header_file, const char *secret_file) ++{ ++ gsize secret_header_size, secret_size; ++ gchar *secret_header = NULL, *secret = NULL; ++ uint8_t *data; ++ struct sev_secret_area *area; ++ uint64_t gpa; ++ GError *error = NULL; ++ Error *local_err = NULL; ++ int ret = 0; ++ ++ if (!g_file_get_contents(secret_header_file, ++ &secret_header, ++ &secret_header_size, &error)) { ++ error_report("CSV: Failed to read '%s' (%s)", ++ secret_header_file, error->message); ++ g_error_free(error); ++ return -1; ++ } ++ ++ if (!g_file_get_contents(secret_file, &secret, &secret_size, &error)) { ++ error_report("CSV: Failed to read '%s' (%s)", secret_file, error->message); ++ g_error_free(error); ++ return -1; ++ } ++ ++ if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { ++ error_report("CSV: no secret area found in OVMF, gpa must be" ++ " specified."); ++ return -1; ++ } ++ area = (struct sev_secret_area *)data; ++ gpa = area->base; ++ ++ ret = sev_inject_launch_secret((char *)secret_header, ++ (char *)secret, gpa, &local_err); ++ ++ if (local_err) { ++ error_report_err(local_err); ++ } ++ return ret; ++} ++ + static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +-- +2.43.5 + diff --git a/0293-target-i386-kvm-support-to-get-and-enable-extensions.patch b/0293-target-i386-kvm-support-to-get-and-enable-extensions.patch new file mode 100644 index 0000000..39ab367 --- /dev/null +++ b/0293-target-i386-kvm-support-to-get-and-enable-extensions.patch @@ -0,0 +1,105 @@ +From 80867734df11225959012d7e84c23efa98683c27 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sat, 28 Sep 2024 14:46:28 +0800 +Subject: [PATCH] target/i386: kvm: Support to get and enable extensions for + Hygon CoCo guest + +To enable advanced Hygon CoCo features, we should detect these features +during the initialization of VMs in the KVM accelerator. It is +suggested to enable these features if they are detected, allowing the +guest VM to run with additional functionalities. 
+ +Signed-off-by: hanliyang +--- + linux-headers/linux/kvm.h | 7 +++++++ + target/i386/csv.c | 2 ++ + target/i386/csv.h | 2 ++ + target/i386/kvm/csv-stub.c | 2 ++ + target/i386/kvm/kvm.c | 17 +++++++++++++++++ + 5 files changed, 30 insertions(+) + +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index c75e4cde48..2ff308b82b 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -1202,6 +1202,13 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 + + #define KVM_CAP_SEV_ES_GHCB 500 ++#define KVM_CAP_HYGON_COCO_EXT 501 ++/* support userspace to request firmware to build CSV3 guest's memory space */ ++#define KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM (1 << 0) ++/* support request to update CSV3 guest's memory region multiple times */ ++#define KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA (1 << 1) ++/* support request to inject secret to CSV3 guest */ ++#define KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET (1 << 2) + + #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE) + +diff --git a/target/i386/csv.c b/target/i386/csv.c +index a869cc2a7e..2377cac71f 100644 +--- a/target/i386/csv.c ++++ b/target/i386/csv.c +@@ -34,6 +34,8 @@ + #include "csv.h" + + bool csv_kvm_cpu_reset_inhibit; ++uint32_t kvm_hygon_coco_ext; ++uint32_t kvm_hygon_coco_ext_inuse; + + struct ConfidentialGuestMemoryEncryptionOps csv3_memory_encryption_ops = { + .save_setup = sev_save_setup, +diff --git a/target/i386/csv.h b/target/i386/csv.h +index a32588ab9a..78f8adcfa8 100644 +--- a/target/i386/csv.h ++++ b/target/i386/csv.h +@@ -58,6 +58,8 @@ bool csv3_enabled(void); + #define CSV_OUTGOING_PAGE_WINDOW_SIZE (4094 * TARGET_PAGE_SIZE) + + extern bool csv_kvm_cpu_reset_inhibit; ++extern uint32_t kvm_hygon_coco_ext; ++extern uint32_t kvm_hygon_coco_ext_inuse; + + typedef struct CsvBatchCmdList CsvBatchCmdList; + typedef void (*CsvDestroyCmdNodeFn) (void *data); +diff --git a/target/i386/kvm/csv-stub.c b/target/i386/kvm/csv-stub.c +index 4d1376f268..8662d33206 100644 +--- a/target/i386/kvm/csv-stub.c ++++ b/target/i386/kvm/csv-stub.c +@@ -15,3 +15,5 @@ + #include "csv.h" + + bool csv_kvm_cpu_reset_inhibit; ++uint32_t kvm_hygon_coco_ext; ++uint32_t kvm_hygon_coco_ext_inuse; +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index fdceecc846..fd049789ac 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -2639,6 +2639,23 @@ int kvm_arch_init(MachineState *ms, KVMState *s) + } + } + ++ if (is_hygon_cpu()) { ++ /* check and enable Hygon coco extensions */ ++ kvm_hygon_coco_ext = (uint32_t)kvm_vm_check_extension(s, ++ KVM_CAP_HYGON_COCO_EXT); ++ if (kvm_hygon_coco_ext) { ++ ret = kvm_vm_enable_cap(s, KVM_CAP_HYGON_COCO_EXT, 0, ++ (uint64_t)kvm_hygon_coco_ext); ++ if (ret == -EINVAL) { ++ error_report("kvm: Failed to enable KVM_CAP_HYGON_COCO_EXT cap: %s", ++ strerror(-ret)); ++ kvm_hygon_coco_ext_inuse = 0; ++ } else { ++ kvm_hygon_coco_ext_inuse = (uint32_t)ret; ++ } ++ } ++ } ++ + ret = kvm_get_supported_msrs(s); + if (ret < 0) { + return ret; +-- +2.43.5 + diff --git a/0294-target-i386-csv-request-to-set-private-memory-of-csv.patch b/0294-target-i386-csv-request-to-set-private-memory-of-csv.patch new file mode 100644 index 0000000..5de7b85 --- /dev/null +++ b/0294-target-i386-csv-request-to-set-private-memory-of-csv.patch @@ -0,0 +1,147 @@ +From a53f5ef3f2a09a8bcba2da7544b723538ff5a4e7 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sat, 28 Sep 2024 17:37:17 +0800 +Subject: [PATCH] target/i386: csv: Request to set private memory 
of CSV3 guest + if the extension is enabled + +If Qemu negotiates with Linux KVM to enable the +KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM capability, then Qemu should +explicitly request the issuance of the CSV3_CMD_SET_GUEST_PRIVATE_MEMORY +command. + +Signed-off-by: hanliyang +--- + hw/i386/pc_sysfw.c | 3 +++ + include/sysemu/kvm.h | 9 +++++++++ + linux-headers/linux/kvm.h | 2 ++ + target/i386/csv-sysemu-stub.c | 5 +++++ + target/i386/csv.c | 21 +++++++++++++++++++++ + target/i386/csv.h | 2 ++ + target/i386/trace-events | 3 ++- + 7 files changed, 44 insertions(+), 1 deletion(-) + +diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c +index 2bbcbb8d35..7c6a910250 100644 +--- a/hw/i386/pc_sysfw.c ++++ b/hw/i386/pc_sysfw.c +@@ -268,6 +268,9 @@ void x86_firmware_configure(void *ptr, int size) + ram_addr_t offset = 0; + MemoryRegion *mr; + ++ if (kvm_csv3_should_set_priv_mem()) ++ csv3_set_guest_private_memory(&error_fatal); ++ + mr = memory_region_from_host(ptr, &offset); + if (!mr) { + error_report("failed to get memory region of flash"); +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 7d08aae9fa..1f7d36f4d3 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -154,6 +154,14 @@ extern bool kvm_csv3_allowed; + */ + #define kvm_csv3_enabled() (kvm_csv3_allowed) + ++/** ++ * kvm_csv3_should_set_priv_mem: ++ * Returns: true if we should explicitly request ++ * KVM_CSV3_SET_GUEST_PRIVATE_MEMORY. ++ */ ++#define kvm_csv3_should_set_priv_mem() \ ++ (kvm_hygon_coco_ext_inuse & KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM) ++ + #else + + #define kvm_enabled() (0) +@@ -171,6 +179,7 @@ extern bool kvm_csv3_allowed; + #define kvm_readonly_mem_enabled() (false) + #define kvm_msi_devid_required() (false) + #define kvm_csv3_enabled() (false) ++#define kvm_csv3_should_set_priv_mem() (false) + + #endif /* CONFIG_KVM_IS_POSSIBLE */ + +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index 2ff308b82b..f0abf968b2 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -2121,6 +2121,8 @@ enum csv3_cmd_id { + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + KVM_CSV3_HANDLE_MEMORY, + ++ KVM_CSV3_SET_GUEST_PRIVATE_MEMORY = 0xc8, ++ + KVM_CSV3_NR_MAX, + }; + +diff --git a/target/i386/csv-sysemu-stub.c b/target/i386/csv-sysemu-stub.c +index f3224a0154..ce4850f5e4 100644 +--- a/target/i386/csv-sysemu-stub.c ++++ b/target/i386/csv-sysemu-stub.c +@@ -44,3 +44,8 @@ void csv3_shared_region_relese(uint64_t gpa, uint32_t num_pages) + { + + } ++ ++int csv3_set_guest_private_memory(Error **errp) ++{ ++ g_assert_not_reached(); ++} +diff --git a/target/i386/csv.c b/target/i386/csv.c +index 2377cac71f..27cd84d912 100644 +--- a/target/i386/csv.c ++++ b/target/i386/csv.c +@@ -734,3 +734,24 @@ int csv3_load_incoming_context(QEMUFile *f) + /* receive csv3 context. 
*/ + return csv3_receive_encrypt_context(s, f); + } ++ ++int csv3_set_guest_private_memory(Error **errp) ++{ ++ int fw_error; ++ int ret = 0; ++ ++ if (!csv3_enabled()) { ++ error_setg(errp, "%s: CSV3 is not enabled", __func__); ++ return -1; ++ } ++ ++ /* if CSV3 is in update state then load the data to secure memory */ ++ if (csv3_check_state(SEV_STATE_LAUNCH_UPDATE)) { ++ trace_kvm_csv3_set_guest_private_memory(); ++ ret = csv3_ioctl(KVM_CSV3_SET_GUEST_PRIVATE_MEMORY, NULL, &fw_error); ++ if (ret) ++ error_setg(errp, "%s: CSV3 fail set private memory", __func__); ++ } ++ ++ return ret; ++} +diff --git a/target/i386/csv.h b/target/i386/csv.h +index 78f8adcfa8..d6af8b9c80 100644 +--- a/target/i386/csv.h ++++ b/target/i386/csv.h +@@ -131,4 +131,6 @@ int csv3_queue_outgoing_page(uint8_t *ptr, uint32_t sz, uint64_t addr); + int csv3_save_queued_outgoing_pages(QEMUFile *f, uint64_t *bytes_sent); + int csv3_save_outgoing_context(QEMUFile *f, uint64_t *bytes_sent); + ++int csv3_set_guest_private_memory(Error **errp); ++ + #endif +diff --git a/target/i386/trace-events b/target/i386/trace-events +index 515441c4f3..5d4a709a39 100644 +--- a/target/i386/trace-events ++++ b/target/i386/trace-events +@@ -21,8 +21,9 @@ kvm_sev_send_update_vmsa(uint32_t cpu_id, uint32_t cpu_index, void *dst, int len + kvm_sev_receive_update_vmsa(uint32_t cpu_id, uint32_t cpu_index, void *src, int len, void *hdr, int hdr_len) "cpu_id %d cpu_index %d trans %p len %d hdr %p hdr_len %d" + + # csv.c +-kvm_csv3_launch_encrypt_data(uint64_t gpa, void *addr, uint64_t len) "gpa 0x%" PRIx64 "addr %p len 0x%" PRIu64 ++kvm_csv3_launch_encrypt_data(uint64_t gpa, void *addr, uint64_t len) "gpa 0x%" PRIx64 " addr %p len 0x%" PRIx64 + kvm_csv3_send_encrypt_data(void *dst, int len) "trans %p len %d" + kvm_csv3_send_encrypt_context(void *dst, int len) "trans %p len %d" + kvm_csv3_receive_encrypt_data(void *dst, int len, void *hdr, int hdr_len) "trans %p len %d hdr %p hdr_len %d" + kvm_csv3_receive_encrypt_context(void *dst, int len, void *hdr, int hdr_len) "trans %p len %d hdr %p hdr_len %d" ++kvm_csv3_set_guest_private_memory(void) "" +-- +2.43.5 + diff --git a/0295-target-i386-csv-support-load-kernel-hashes-for-csv3-.patch b/0295-target-i386-csv-support-load-kernel-hashes-for-csv3-.patch new file mode 100644 index 0000000..e68c7c0 --- /dev/null +++ b/0295-target-i386-csv-support-load-kernel-hashes-for-csv3-.patch @@ -0,0 +1,40 @@ +From 384abba493208775b289fb267a2450126ca586c2 Mon Sep 17 00:00:00 2001 +From: hanliyang +Date: Sat, 28 Sep 2024 17:55:13 +0800 +Subject: [PATCH] target/i386: csv: Support load kernel hashes for CSV3 guest + only if the extension is enabled + +The CSV3 guest can only update kernel hashes when the +KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA capability is enabled. 
+
+Signed-off-by: hanliyang
+---
+ target/i386/sev.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/target/i386/sev.c b/target/i386/sev.c
+index 7443f5b22c..40f74967ad 100644
+--- a/target/i386/sev.c
++++ b/target/i386/sev.c
+@@ -2748,7 +2748,17 @@ bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp)
+ /* zero the excess data so the measurement can be reliably calculated */
+ memset(padded_ht->padding, 0, sizeof(padded_ht->padding));
+ 
+- if (sev_encrypt_flash((uint8_t *)padded_ht, sizeof(*padded_ht), errp) < 0) {
++ if (csv3_enabled()) {
++ if (kvm_hygon_coco_ext_inuse & KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA) {
++ if (csv3_load_data(area->base, (uint8_t *)padded_ht,
++ sizeof(*padded_ht), errp) < 0) {
++ ret = false;
++ }
++ } else {
++ error_report("%s: CSV3 load kernel hashes unsupported!", __func__);
++ ret = false;
++ }
++ } else if (sev_encrypt_flash((uint8_t *)padded_ht, sizeof(*padded_ht), errp) < 0) {
+ ret = false;
+ }
+ 
+-- 
+2.43.5
+
diff --git a/0296-target-i386-csv-support-inject-secret-for-csv3-guest.patch b/0296-target-i386-csv-support-inject-secret-for-csv3-guest.patch
new file mode 100644
index 0000000..88269fb
--- /dev/null
+++ b/0296-target-i386-csv-support-inject-secret-for-csv3-guest.patch
@@ -0,0 +1,43 @@
+From f2b258de4327eef141e3f8164e580df64d61f178 Mon Sep 17 00:00:00 2001
+From: hanliyang
+Date: Sun, 29 Sep 2024 15:03:47 +0800
+Subject: [PATCH] target/i386: csv: Support inject secret for CSV3 guest only
+ if the extension is enabled
+
+The CSV3 guest can only inject secrets when the
+KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET capability is enabled.
+
+Additionally, if the guest is a CSV3 guest, the guest_uaddr field of the
+KVM ioctl's input should be set to the value of the GPA.
+
+Signed-off-by: hanliyang
+---
+ target/i386/sev.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/target/i386/sev.c b/target/i386/sev.c
+index 40f74967ad..23122068f0 100644
+--- a/target/i386/sev.c
++++ b/target/i386/sev.c
+@@ -1416,7 +1416,17 @@ int sev_inject_launch_secret(const char *packet_hdr, const char *secret,
+ input.trans_uaddr = (uint64_t)(unsigned long)data;
+ input.trans_len = data_sz;
+ 
+- input.guest_uaddr = (uint64_t)(unsigned long)hva;
++ /* For Hygon CSV3 guest, the guest_uaddr should be the gpa */
++ if (csv3_enabled()) {
++ if (kvm_hygon_coco_ext_inuse & KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET) {
++ input.guest_uaddr = gpa;
++ } else {
++ error_setg(errp, "CSV3 inject secret unsupported!");
++ return 1;
++ }
++ } else {
++ input.guest_uaddr = (uint64_t)(unsigned long)hva;
++ }
+ input.guest_len = data_sz;
+ 
+ trace_kvm_sev_launch_secret(gpa, input.guest_uaddr,
+-- 
+2.43.5
+
diff --git a/0297-target-i386-add-more-features-enumerated-by-cpuid-7-.patch b/0297-target-i386-add-more-features-enumerated-by-cpuid-7-.patch
new file mode 100644
index 0000000..dffd1b3
--- /dev/null
+++ b/0297-target-i386-add-more-features-enumerated-by-cpuid-7-.patch
@@ -0,0 +1,63 @@
+From 383dddcf263e6db0d8b475fc80cb51655afbcffc Mon Sep 17 00:00:00 2001
+From: Chao Gao
+Date: Thu, 19 Sep 2024 13:10:11 +0800
+Subject: [PATCH] target/i386: Add more features enumerated by CPUID.7.2.EDX
+
+commit 10eaf9c0fb7060f45807becbb2742a9de9bc3632 upstream
+
+Following 5 bits in CPUID.7.2.EDX are supported by KVM. Add their
+support in QEMU. Each of them indicates certain bits of IA32_SPEC_CTRL
+are supported. 
Those bits can control CPU speculation behavior which can +be used to defend against side-channel attacks. + +bit0: intel-psfd + if 1, indicates bit 7 of the IA32_SPEC_CTRL MSR is supported. Bit 7 of + this MSR disables Fast Store Forwarding Predictor without disabling + Speculative Store Bypass + +bit1: ipred-ctrl + If 1, indicates bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported. + Bit 3 of this MSR enables IPRED_DIS control for CPL3. Bit 4 of this + MSR enables IPRED_DIS control for CPL0/1/2 + +bit2: rrsba-ctrl + If 1, indicates bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported. + Bit 5 of this MSR disables RRSBA behavior for CPL3. Bit 6 of this MSR + disables RRSBA behavior for CPL0/1/2 + +bit3: ddpd-u + If 1, indicates bit 8 of the IA32_SPEC_CTRL MSR is supported. Bit 8 of + this MSR disables Data Dependent Prefetcher. + +bit4: bhi-ctrl + if 1, indicates bit 10 of the IA32_SPEC_CTRL MSR is supported. Bit 10 + of this MSR enables BHI_DIS_S behavior. + +Intel-SIG: 10eaf9c0fb70 target/i386: Add more features enumerated by CPUID.7.2.EDX + +Signed-off-by: Chao Gao +Link: https://lore.kernel.org/r/20240919051011.118309-1-chao.gao@intel.com +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 8649f9ebf5..4afccacebc 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -1000,8 +1000,8 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + [FEAT_7_2_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { +- NULL, NULL, NULL, NULL, +- NULL, "mcdt-no", NULL, NULL, ++ "intel-psfd", "ipred-ctrl", "rrsba-ctrl", "ddpd-u", ++ "bhi-ctrl", "mcdt-no", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, +-- +2.43.5 + diff --git a/0298-target-i386-fix-feature-dependency-for-waitpkg.patch b/0298-target-i386-fix-feature-dependency-for-waitpkg.patch new file mode 100644 index 0000000..accfb46 --- /dev/null +++ b/0298-target-i386-fix-feature-dependency-for-waitpkg.patch @@ -0,0 +1,39 @@ +From ea2d8b4013622018efce6a0fb0fff74af741155d Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Wed, 8 May 2024 11:10:54 +0200 +Subject: [PATCH] target/i386: fix feature dependency for WAITPKG + +commit fe01af5d47d4cf7fdf90c54d43f784e5068c8d72 upstream. + +The VMX feature bit depends on general availability of WAITPKG, +not the other way round. 
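To see why the direction matters: QEMU's dependency table clears the dependent ("to") bits whenever a prerequisite ("from") bit is missing. Below is a simplified sketch of that rule, modelled on x86_cpu_expand_features(); the struct and function names here are illustrative, not QEMU's exact code.

    #include <stdint.h>

    typedef struct {
        uint64_t from;   /* prerequisite feature bits */
        uint64_t to;     /* bits that depend on them  */
    } FeatureDepSketch;

    static void apply_dep(uint64_t from_word, uint64_t *to_word,
                          const FeatureDepSketch *d)
    {
        /* If any prerequisite bit is absent, drop the dependent bits.
         * With the corrected order, a missing WAITPKG clears the VMX
         * user-wait/pause control, not the other way round. */
        if ((from_word & d->from) != d->from) {
            *to_word &= ~d->to;
        }
    }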
+ +Intel-SIG: commit fe01af5d47d4 target/i386: fix feature dependency for WAITPKG + +Fixes: 33cc88261c3 ("target/i386: add support for VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE", 2023-08-28) +Cc: qemu-stable@nongnu.org +Reviewed-by: Zhao Liu +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 4afccacebc..62a120fe7e 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -1550,8 +1550,8 @@ static FeatureDep feature_dependencies[] = { + .to = { FEAT_SVM, ~0ull }, + }, + { +- .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE }, +- .to = { FEAT_7_0_ECX, CPUID_7_0_ECX_WAITPKG }, ++ .from = { FEAT_7_0_ECX, CPUID_7_0_ECX_WAITPKG }, ++ .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE }, + }, + }; + +-- +2.43.5 + diff --git a/0299-target-i386-add-support-for-fred-in-cpuid-enumeratio.patch b/0299-target-i386-add-support-for-fred-in-cpuid-enumeratio.patch new file mode 100644 index 0000000..2471dfe --- /dev/null +++ b/0299-target-i386-add-support-for-fred-in-cpuid-enumeratio.patch @@ -0,0 +1,108 @@ +From d16b0e3c7c752ada24ca4e74252ab6f4a87349ad Mon Sep 17 00:00:00 2001 +From: Xin Li +Date: Wed, 8 Nov 2023 23:20:07 -0800 +Subject: [PATCH] target/i386: add support for FRED in CPUID enumeration + +commit c1acad9f72d14daf918563eb77d2b31c39fbd06a upstream. + +FRED, i.e., the Intel flexible return and event delivery architecture, +defines simple new transitions that change privilege level (ring +transitions). + +The new transitions defined by the FRED architecture are FRED event +delivery and, for returning from events, two FRED return instructions. +FRED event delivery can effect a transition from ring 3 to ring 0, but +it is used also to deliver events incident to ring 0. One FRED +instruction (ERETU) effects a return from ring 0 to ring 3, while the +other (ERETS) returns while remaining in ring 0. Collectively, FRED +event delivery and the FRED return instructions are FRED transitions. + +In addition to these transitions, the FRED architecture defines a new +instruction (LKGS) for managing the state of the GS segment register. +The LKGS instruction can be used by 64-bit operating systems that do +not use the new FRED transitions. + +WRMSRNS is an instruction that behaves exactly like WRMSR, with the +only difference being that it is not a serializing instruction by +default. Under certain conditions, WRMSRNS may replace WRMSR to improve +performance. FRED uses it to switch RSP0 in a faster manner. + +Search for the latest FRED spec in most search engines with this search +pattern: + + site:intel.com FRED (flexible return and event delivery) specification + +The CPUID feature flag CPUID.(EAX=7,ECX=1):EAX[17] enumerates FRED, and +the CPUID feature flag CPUID.(EAX=7,ECX=1):EAX[18] enumerates LKGS, and +the CPUID feature flag CPUID.(EAX=7,ECX=1):EAX[19] enumerates WRMSRNS. + +Add CPUID definitions for FRED/LKGS/WRMSRNS, and expose them to KVM guests. + +Because FRED relies on LKGS and WRMSRNS, add that to feature dependency +map. + +Intel-SIG: commit c1acad9f72d1 target/i386: add support for FRED in CPUID enumeration + +Tested-by: Shan Kang +Signed-off-by: Xin Li +Message-ID: <20231109072012.8078-2-xin3.li@intel.com> +[Fix order of dependencies, add dependencies from LM to FRED. 
- Paolo] +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 14 +++++++++++++- + target/i386/cpu.h | 6 ++++++ + 2 files changed, 19 insertions(+), 1 deletion(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 62a120fe7e..18150d471c 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -966,7 +966,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + "avx-vnni", "avx512-bf16", NULL, "cmpccxadd", + NULL, NULL, "fzrm", "fsrs", + "fsrc", NULL, NULL, NULL, +- NULL, NULL, NULL, NULL, ++ NULL, "fred", "lkgs", "wrmsrns", + NULL, "amx-fp16", NULL, "avx-ifma", + NULL, NULL, "lam", NULL, + NULL, NULL, NULL, NULL, +@@ -1553,6 +1553,18 @@ static FeatureDep feature_dependencies[] = { + .from = { FEAT_7_0_ECX, CPUID_7_0_ECX_WAITPKG }, + .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_USER_WAIT_PAUSE }, + }, ++ { ++ .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, ++ .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, ++ }, ++ { ++ .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_LKGS }, ++ .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, ++ }, ++ { ++ .from = { FEAT_7_1_EAX, CPUID_7_1_EAX_WRMSRNS }, ++ .to = { FEAT_7_1_EAX, CPUID_7_1_EAX_FRED }, ++ }, + }; + + typedef struct X86RegisterInfo32 { +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 9fc24f7e4c..13274e1404 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -940,6 +940,12 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8) + /* PREFETCHIT0/1 Instructions */ + #define CPUID_7_1_EDX_PREFETCHITI (1U << 14) ++/* Flexible return and event delivery (FRED) */ ++#define CPUID_7_1_EAX_FRED (1U << 17) ++/* Load into IA32_KERNEL_GS_BASE (LKGS) */ ++#define CPUID_7_1_EAX_LKGS (1U << 18) ++/* Non-Serializing Write to Model Specific Register (WRMSRNS) */ ++#define CPUID_7_1_EAX_WRMSRNS (1U << 19) + + /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ + #define CPUID_7_2_EDX_MCDT_NO (1U << 5) +-- +2.43.5 + diff --git a/0300-target-i386-mark-cr4-fred-not-reserved.patch b/0300-target-i386-mark-cr4-fred-not-reserved.patch new file mode 100644 index 0000000..ebc16fd --- /dev/null +++ b/0300-target-i386-mark-cr4-fred-not-reserved.patch @@ -0,0 +1,67 @@ +From 750a876d8d66cc16174834bd6309cd4cc6b69e87 Mon Sep 17 00:00:00 2001 +From: Xin Li +Date: Wed, 8 Nov 2023 23:20:08 -0800 +Subject: [PATCH] target/i386: mark CR4.FRED not reserved + +commit f88ddc40c6d8b591a357108feec52cea13796d2d upstream. + +The CR4.FRED bit, i.e., CR4[32], is no longer a reserved bit when FRED +is exposed to guests, otherwise it is still a reserved bit. 
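A hedged sketch of the guest-visible effect (hypothetical helper, not code from the patch): a MOV to CR4 with bit 32 set must raise #GP(0) unless FRED is enumerated, which is why the reserved-bits mask in the cpu.h change below has to be computed per vCPU.

    #include <stdint.h>
    #include <stdbool.h>

    #define CR4_FRED_BIT (1ULL << 32)

    /* 'fred' reflects CPUID.(EAX=7,ECX=1):EAX[17] for this vCPU. */
    static bool cr4_write_allowed(uint64_t new_cr4, uint64_t reserved, bool fred)
    {
        if (!fred) {
            reserved |= CR4_FRED_BIT;      /* bit 32 stays reserved */
        }
        return (new_cr4 & reserved) == 0;  /* false => inject #GP(0) */
    }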
+ +Intel-SIG: commit f88ddc40c6d8 target/i386: mark CR4.FRED not reserved + +Tested-by: Shan Kang +Signed-off-by: Xin Li +Reviewed-by: Zhao Liu +Message-ID: <20231109072012.8078-3-xin3.li@intel.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.h | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 13274e1404..1c13f90246 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -263,6 +263,18 @@ typedef enum X86Seg { + #define CR4_PKS_MASK (1U << 24) + #define CR4_LAM_SUP_MASK (1U << 28) + ++#ifdef TARGET_X86_64 ++#define CR4_FRED_MASK (1ULL << 32) ++#else ++#define CR4_FRED_MASK 0 ++#endif ++ ++#ifdef TARGET_X86_64 ++#define CR4_FRED_MASK (1ULL << 32) ++#else ++#define CR4_FRED_MASK 0 ++#endif ++ + #define CR4_RESERVED_MASK \ + (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ + | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ +@@ -271,7 +283,7 @@ typedef enum X86Seg { + | CR4_LA57_MASK \ + | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \ + | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK \ +- | CR4_LAM_SUP_MASK)) ++ | CR4_LAM_SUP_MASK | CR4_FRED_MASK)) + + #define DR6_BD (1 << 13) + #define DR6_BS (1 << 14) +@@ -2551,6 +2563,9 @@ static inline uint64_t cr4_reserved_bits(CPUX86State *env) + if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_LAM)) { + reserved_bits |= CR4_LAM_SUP_MASK; + } ++ if (!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED)) { ++ reserved_bits |= CR4_FRED_MASK; ++ } + return reserved_bits; + } + +-- +2.43.5 + diff --git a/0301-vmxcap-add-support-for-vmx-fred-controls.patch b/0301-vmxcap-add-support-for-vmx-fred-controls.patch new file mode 100644 index 0000000..614b6ab --- /dev/null +++ b/0301-vmxcap-add-support-for-vmx-fred-controls.patch @@ -0,0 +1,66 @@ +From a036570a60c874372df92de9158976a859a8eefc Mon Sep 17 00:00:00 2001 +From: Xin Li +Date: Wed, 8 Nov 2023 23:20:10 -0800 +Subject: [PATCH] vmxcap: add support for VMX FRED controls + +commit 2e641870170e28df28c5d9914e76ea7cab141516 upstream. + +Report secondary vm-exit controls and the VMX controls used to +save/load FRED MSRs. 
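For context on how vmxcap decodes these registers (sketched here in C rather than the script's Python; the layout follows the SDM): classic VMX control MSRs report allowed-0 settings in the low 32 bits and allowed-1 settings in the high 32 bits, while MSR_IA32_VMX_EXIT_CTLS2 (0x493) reports allowed-1 settings across all 64 bits, which is why the hunk below registers it as an Allowed1Control.

    #include <stdint.h>
    #include <stdbool.h>

    /* Classic control MSR: a control is available iff its allowed-1 bit
     * (high dword) is set. */
    static bool ctl_available(uint64_t cap_msr, unsigned bit)
    {
        return (cap_msr >> 32) & (1ULL << bit);
    }

    /* Allowed-1-only MSR such as IA32_VMX_EXIT_CTLS2: all 64 bits are
     * allowed-1 settings. */
    static bool ctl2_available(uint64_t cap_msr, unsigned bit)
    {
        return cap_msr & (1ULL << bit);
    }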
+ +Intel-SIG: commit 2e641870170e vmxcap: add support for VMX FRED controls + +Tested-by: Shan Kang +Signed-off-by: Xin Li +Message-ID: <20231109072012.8078-5-xin3.li@intel.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + scripts/kvm/vmxcap | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/scripts/kvm/vmxcap b/scripts/kvm/vmxcap +index 3fb4d5b342..44898d73c2 100755 +--- a/scripts/kvm/vmxcap ++++ b/scripts/kvm/vmxcap +@@ -24,6 +24,7 @@ MSR_IA32_VMX_TRUE_EXIT_CTLS = 0x48F + MSR_IA32_VMX_TRUE_ENTRY_CTLS = 0x490 + MSR_IA32_VMX_VMFUNC = 0x491 + MSR_IA32_VMX_PROCBASED_CTLS3 = 0x492 ++MSR_IA32_VMX_EXIT_CTLS2 = 0x493 + + class msr(object): + def __init__(self): +@@ -219,11 +220,21 @@ controls = [ + 23: 'Clear IA32_BNDCFGS', + 24: 'Conceal VM exits from PT', + 25: 'Clear IA32_RTIT_CTL', ++ 31: 'Activate secondary VM-exit controls', + }, + cap_msr = MSR_IA32_VMX_EXIT_CTLS, + true_cap_msr = MSR_IA32_VMX_TRUE_EXIT_CTLS, + ), + ++ Allowed1Control( ++ name = 'secondary VM-Exit controls', ++ bits = { ++ 0: 'Save IA32 FRED MSRs', ++ 1: 'Load IA32 FRED MSRs', ++ }, ++ cap_msr = MSR_IA32_VMX_EXIT_CTLS2, ++ ), ++ + Control( + name = 'VM-Entry controls', + bits = { +@@ -237,6 +248,7 @@ controls = [ + 16: 'Load IA32_BNDCFGS', + 17: 'Conceal VM entries from PT', + 18: 'Load IA32_RTIT_CTL', ++ 23: 'Load IA32 FRED MSRs', + }, + cap_msr = MSR_IA32_VMX_ENTRY_CTLS, + true_cap_msr = MSR_IA32_VMX_TRUE_ENTRY_CTLS, +-- +2.43.5 + diff --git a/0302-target-i386-enumerate-vmx-nested-exception-support.patch b/0302-target-i386-enumerate-vmx-nested-exception-support.patch new file mode 100644 index 0000000..04d56c3 --- /dev/null +++ b/0302-target-i386-enumerate-vmx-nested-exception-support.patch @@ -0,0 +1,62 @@ +From b67056f9b8ccc6e8fb7e38300c55295ad8ae32b5 Mon Sep 17 00:00:00 2001 +From: Xin Li +Date: Wed, 8 Nov 2023 23:20:11 -0800 +Subject: [PATCH] target/i386: enumerate VMX nested-exception support + +commit ef202d64c3020f3df03c39d3ad688732d81aaae8 upstream. + +Allow VMX nested-exception support to be exposed in KVM guests, thus +nested KVM guests can enumerate it. 
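A minimal sketch of the enumeration itself, using the macro the cpu.h hunk below adds; IA32_VMX_BASIC is MSR 0x480, and bit 58 is the nested-exception support bit named in these hunks.

    #include <stdint.h>
    #include <stdbool.h>

    #define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58)

    static bool vmx_has_nested_exception(uint64_t vmx_basic_msr)
    {
        return vmx_basic_msr & MSR_VMX_BASIC_NESTED_EXCEPTION;
    }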
+
+Intel-SIG: commit ef202d64c302 target/i386: enumerate VMX nested-exception support
+
+Tested-by: Shan Kang
+Signed-off-by: Xin Li
+Message-ID: <20231109072012.8078-6-xin3.li@intel.com>
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Jason Zeng
+---
+ scripts/kvm/vmxcap | 1 +
+ target/i386/cpu.c | 1 +
+ target/i386/cpu.h | 1 +
+ 3 files changed, 3 insertions(+)
+
+diff --git a/scripts/kvm/vmxcap b/scripts/kvm/vmxcap
+index 44898d73c2..508be19c75 100755
+--- a/scripts/kvm/vmxcap
++++ b/scripts/kvm/vmxcap
+@@ -117,6 +117,7 @@ controls = [
+ 54: 'INS/OUTS instruction information',
+ 55: 'IA32_VMX_TRUE_*_CTLS support',
+ 56: 'Skip checks on event error code',
++ 58: 'VMX nested exception support',
+ },
+ msr = MSR_IA32_VMX_BASIC,
+ ),
+diff --git a/target/i386/cpu.c b/target/i386/cpu.c
+index 18150d471c..1b9f9dda6d 100644
+--- a/target/i386/cpu.c
++++ b/target/i386/cpu.c
+@@ -1344,6 +1344,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
+ [54] = "vmx-ins-outs",
+ [55] = "vmx-true-ctls",
+ [56] = "vmx-any-errcode",
++ [58] = "vmx-nested-exception",
+ },
+ .msr = {
+ .index = MSR_IA32_VMX_BASIC,
+diff --git a/target/i386/cpu.h b/target/i386/cpu.h
+index 1c13f90246..ab16342875 100644
+--- a/target/i386/cpu.h
++++ b/target/i386/cpu.h
+@@ -1064,6 +1064,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ #define MSR_VMX_BASIC_INS_OUTS (1ULL << 54)
+ #define MSR_VMX_BASIC_TRUE_CTLS (1ULL << 55)
+ #define MSR_VMX_BASIC_ANY_ERRCODE (1ULL << 56)
++#define MSR_VMX_BASIC_NESTED_EXCEPTION (1ULL << 58)
+ 
+ #define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK 0x1Full
+ #define MSR_VMX_MISC_STORE_LMA (1ULL << 5)
+-- 
+2.43.5
+
diff --git a/0303-target-i386-add-get-set-migrate-support-for-fred-msr.patch b/0303-target-i386-add-get-set-migrate-support-for-fred-msr.patch
new file mode 100644
index 0000000..b7d3e20
--- /dev/null
+++ b/0303-target-i386-add-get-set-migrate-support-for-fred-msr.patch
@@ -0,0 +1,188 @@
+From bdb6501aa4033f6210143cf5d9663e52592e934e Mon Sep 17 00:00:00 2001
+From: Xin Li
+Date: Wed, 8 Nov 2023 23:20:12 -0800
+Subject: [PATCH] target/i386: Add get/set/migrate support for FRED MSRs
+
+commit 4ebd98eb3ade5957a842da1420bda012eeeaab9c upstream.
+
+FRED CPU states are managed in 9 new FRED MSRs, in addition to a few
+existing CPU registers and MSRs, e.g., CR4.FRED and MSR_IA32_PL0_SSP.
+
+Save/restore/migrate FRED MSRs if FRED is exposed to the guest. 
+ +Intel-SIG: commit 4ebd98eb3ade target/i386: Add get/set/migrate support for FRED MSRs + +Tested-by: Shan Kang +Signed-off-by: Xin Li +Message-ID: <20231109072012.8078-7-xin3.li@intel.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.h | 22 +++++++++++++++++++ + target/i386/kvm/kvm.c | 49 +++++++++++++++++++++++++++++++++++++++++++ + target/i386/machine.c | 28 +++++++++++++++++++++++++ + 3 files changed, 99 insertions(+) + +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index ab16342875..fd411c1ce7 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -538,6 +538,17 @@ typedef enum X86Seg { + #define MSR_IA32_XFD 0x000001c4 + #define MSR_IA32_XFD_ERR 0x000001c5 + ++/* FRED MSRs */ ++#define MSR_IA32_FRED_RSP0 0x000001cc /* Stack level 0 regular stack pointer */ ++#define MSR_IA32_FRED_RSP1 0x000001cd /* Stack level 1 regular stack pointer */ ++#define MSR_IA32_FRED_RSP2 0x000001ce /* Stack level 2 regular stack pointer */ ++#define MSR_IA32_FRED_RSP3 0x000001cf /* Stack level 3 regular stack pointer */ ++#define MSR_IA32_FRED_STKLVLS 0x000001d0 /* FRED exception stack levels */ ++#define MSR_IA32_FRED_SSP1 0x000001d1 /* Stack level 1 shadow stack pointer in ring 0 */ ++#define MSR_IA32_FRED_SSP2 0x000001d2 /* Stack level 2 shadow stack pointer in ring 0 */ ++#define MSR_IA32_FRED_SSP3 0x000001d3 /* Stack level 3 shadow stack pointer in ring 0 */ ++#define MSR_IA32_FRED_CONFIG 0x000001d4 /* FRED Entrypoint and interrupt stack level */ ++ + #define MSR_IA32_BNDCFGS 0x00000d90 + #define MSR_IA32_XSS 0x00000da0 + #define MSR_IA32_UMWAIT_CONTROL 0xe1 +@@ -1697,6 +1708,17 @@ typedef struct CPUArchState { + target_ulong cstar; + target_ulong fmask; + target_ulong kernelgsbase; ++ ++ /* FRED MSRs */ ++ uint64_t fred_rsp0; ++ uint64_t fred_rsp1; ++ uint64_t fred_rsp2; ++ uint64_t fred_rsp3; ++ uint64_t fred_stklvls; ++ uint64_t fred_ssp1; ++ uint64_t fred_ssp2; ++ uint64_t fred_ssp3; ++ uint64_t fred_config; + #endif + + uint64_t tsc_adjust; +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index fd049789ac..8a554e601f 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -3391,6 +3391,17 @@ static int kvm_put_msrs(X86CPU *cpu, int level) + kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); + kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); + kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); ++ if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, env->fred_rsp0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, env->fred_rsp1); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, env->fred_rsp2); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, env->fred_rsp3); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_STKLVLS, env->fred_stklvls); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, env->fred_ssp1); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, env->fred_ssp2); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, env->fred_ssp3); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, env->fred_config); ++ } + } + #endif + +@@ -3867,6 +3878,17 @@ static int kvm_get_msrs(X86CPU *cpu) + kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); + kvm_msr_entry_add(cpu, MSR_FMASK, 0); + kvm_msr_entry_add(cpu, MSR_LSTAR, 0); ++ if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP0, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP1, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP2, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_RSP3, 0); ++ kvm_msr_entry_add(cpu, 
MSR_IA32_FRED_STKLVLS, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP1, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP2, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_SSP3, 0); ++ kvm_msr_entry_add(cpu, MSR_IA32_FRED_CONFIG, 0); ++ } + } + #endif + kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); +@@ -4092,6 +4114,33 @@ static int kvm_get_msrs(X86CPU *cpu) + case MSR_LSTAR: + env->lstar = msrs[i].data; + break; ++ case MSR_IA32_FRED_RSP0: ++ env->fred_rsp0 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_RSP1: ++ env->fred_rsp1 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_RSP2: ++ env->fred_rsp2 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_RSP3: ++ env->fred_rsp3 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_STKLVLS: ++ env->fred_stklvls = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_SSP1: ++ env->fred_ssp1 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_SSP2: ++ env->fred_ssp2 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_SSP3: ++ env->fred_ssp3 = msrs[i].data; ++ break; ++ case MSR_IA32_FRED_CONFIG: ++ env->fred_config = msrs[i].data; ++ break; + #endif + case MSR_IA32_TSC: + env->tsc = msrs[i].data; +diff --git a/target/i386/machine.c b/target/i386/machine.c +index 9a1cb8f3b8..7cbfbc0efb 100644 +--- a/target/i386/machine.c ++++ b/target/i386/machine.c +@@ -1544,6 +1544,33 @@ static const VMStateDescription vmstate_msr_xfd = { + }; + + #ifdef TARGET_X86_64 ++static bool intel_fred_msrs_needed(void *opaque) ++{ ++ X86CPU *cpu = opaque; ++ CPUX86State *env = &cpu->env; ++ ++ return !!(env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED); ++} ++ ++static const VMStateDescription vmstate_msr_fred = { ++ .name = "cpu/fred", ++ .version_id = 1, ++ .minimum_version_id = 1, ++ .needed = intel_fred_msrs_needed, ++ .fields = (VMStateField[]) { ++ VMSTATE_UINT64(env.fred_rsp0, X86CPU), ++ VMSTATE_UINT64(env.fred_rsp1, X86CPU), ++ VMSTATE_UINT64(env.fred_rsp2, X86CPU), ++ VMSTATE_UINT64(env.fred_rsp3, X86CPU), ++ VMSTATE_UINT64(env.fred_stklvls, X86CPU), ++ VMSTATE_UINT64(env.fred_ssp1, X86CPU), ++ VMSTATE_UINT64(env.fred_ssp2, X86CPU), ++ VMSTATE_UINT64(env.fred_ssp3, X86CPU), ++ VMSTATE_UINT64(env.fred_config, X86CPU), ++ VMSTATE_END_OF_LIST() ++ } ++ }; ++ + static bool amx_xtile_needed(void *opaque) + { + X86CPU *cpu = opaque; +@@ -1768,6 +1795,7 @@ const VMStateDescription vmstate_x86_cpu = { + &vmstate_pdptrs, + &vmstate_msr_xfd, + #ifdef TARGET_X86_64 ++ &vmstate_msr_fred, + &vmstate_amx_xtile, + #endif + &vmstate_arch_lbr, +-- +2.43.5 + diff --git a/0304-target-i386-delete-duplicated-macro-definition-cr4-f.patch b/0304-target-i386-delete-duplicated-macro-definition-cr4-f.patch new file mode 100644 index 0000000..233fe6e --- /dev/null +++ b/0304-target-i386-delete-duplicated-macro-definition-cr4-f.patch @@ -0,0 +1,39 @@ +From b3f57e1952477152f7abd7123550a23521f19689 Mon Sep 17 00:00:00 2001 +From: "Xin Li (Intel)" +Date: Wed, 7 Aug 2024 01:18:10 -0700 +Subject: [PATCH] target/i386: Delete duplicated macro definition CR4_FRED_MASK + +commit a23bc6539890d8b27458cf56bc4ed0e0d3c2de3e upstream. + +Macro CR4_FRED_MASK is defined twice, delete one. 
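Worth noting why the duplication compiled silently: C permits redefining an object-like macro when the replacement list is identical (C11 6.10.3p2), so the repeated definition that the diff below removes never produced a diagnostic.

    #define CR4_FRED_MASK (1ULL << 32)
    #define CR4_FRED_MASK (1ULL << 32)   /* identical redefinition: accepted, no warning */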
+ +Intel-SIG: commit a23bc6539890 target/i386: Delete duplicated macro definition CR4_FRED_MASK + +Signed-off-by: Xin Li (Intel) +Link: https://lore.kernel.org/r/20240807081813.735158-2-xin@zytor.com +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.h | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index fd411c1ce7..693c2a8426 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -269,12 +269,6 @@ typedef enum X86Seg { + #define CR4_FRED_MASK 0 + #endif + +-#ifdef TARGET_X86_64 +-#define CR4_FRED_MASK (1ULL << 32) +-#else +-#define CR4_FRED_MASK 0 +-#endif +- + #define CR4_RESERVED_MASK \ + (~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \ + | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \ +-- +2.43.5 + diff --git a/0305-target-i386-add-vmx-control-bits-for-nested-fred-sup.patch b/0305-target-i386-add-vmx-control-bits-for-nested-fred-sup.patch new file mode 100644 index 0000000..ec1cc18 --- /dev/null +++ b/0305-target-i386-add-vmx-control-bits-for-nested-fred-sup.patch @@ -0,0 +1,48 @@ +From 02dbc4b455733345a5647d9cb274763cd960efa8 Mon Sep 17 00:00:00 2001 +From: "Xin Li (Intel)" +Date: Wed, 7 Aug 2024 01:18:11 -0700 +Subject: [PATCH] target/i386: Add VMX control bits for nested FRED support + +commit 7c6ec5bc5fea92a4ddea3f0189e3a7e7588e1d19 upstream. + +Add definitions of + 1) VM-exit activate secondary controls bit + 2) VM-entry load FRED bit +which are required to enable nested FRED. + +Intel-SIG: commit 7c6ec5bc5fea target/i386: Add VMX control bits for nested FRED support + +Reviewed-by: Zhao Liu +Signed-off-by: Xin Li (Intel) +Link: https://lore.kernel.org/r/20240807081813.735158-3-xin@zytor.com +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 1b9f9dda6d..58160726d2 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -1271,7 +1271,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + "vmx-exit-save-efer", "vmx-exit-load-efer", + "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", + NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, +- NULL, "vmx-exit-load-pkrs", NULL, NULL, ++ NULL, "vmx-exit-load-pkrs", NULL, "vmx-exit-secondary-ctls", + }, + .msr = { + .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, +@@ -1286,7 +1286,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + NULL, "vmx-entry-ia32e-mode", NULL, NULL, + NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", + "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, +- NULL, NULL, "vmx-entry-load-pkrs", NULL, ++ NULL, NULL, "vmx-entry-load-pkrs", "vmx-entry-load-fred", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, +-- +2.43.5 + diff --git a/0306-target-i386-raise-the-highest-index-value-used-for-a.patch b/0306-target-i386-raise-the-highest-index-value-used-for-a.patch new file mode 100644 index 0000000..4720fae --- /dev/null +++ b/0306-target-i386-raise-the-highest-index-value-used-for-a.patch @@ -0,0 +1,66 @@ +From 4cd87b7e42df05792147165c429e6b317ebb50c4 Mon Sep 17 00:00:00 2001 +From: Lei Wang +Date: Wed, 7 Aug 2024 01:18:12 -0700 +Subject: [PATCH] target/i386: Raise the highest index value used for any VMCS + encoding + +commit ab891454ebe82f7e359be721007652556f9f8356 upstream. 
+ +Because the index value of the VMCS field encoding of FRED injected-event +data (one of the newly added VMCS fields for FRED transitions), 0x52, is +larger than any existing index value, raise the highest index value used +for any VMCS encoding to 0x52. + +Because the index value of the VMCS field encoding of Secondary VM-exit +controls, 0x44, is larger than any existing index value, raise the highest +index value used for any VMCS encoding to 0x44. + +Intel-SIG: commit ab891454ebe8 target/i386: Raise the highest index value used for any VMCS encoding + +Co-developed-by: Xin Li +Signed-off-by: Xin Li +Signed-off-by: Lei Wang +Signed-off-by: Xin Li (Intel) +Link: https://lore.kernel.org/r/20240807081813.735158-4-xin@zytor.com +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.h | 1 + + target/i386/kvm/kvm.c | 9 ++++++++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 693c2a8426..5c8507c867 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -1165,6 +1165,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + #define VMX_VM_EXIT_PT_CONCEAL_PIP 0x01000000 + #define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000 + #define VMX_VM_EXIT_LOAD_IA32_PKRS 0x20000000 ++#define VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS 0x80000000 + + #define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 + #define VMX_VM_ENTRY_IA32E_MODE 0x00000200 +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 8a554e601f..50e13a3846 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -3254,7 +3254,14 @@ static void kvm_msr_entry_add_vmx(X86CPU *cpu, FeatureWordArray f) + kvm_msr_entry_add(cpu, MSR_IA32_VMX_CR4_FIXED0, + CR4_VMXE_MASK); + +- if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { ++ if (f[FEAT_7_1_EAX] & CPUID_7_1_EAX_FRED) { ++ /* FRED injected-event data (0x2052). */ ++ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x52); ++ } else if (f[FEAT_VMX_EXIT_CTLS] & ++ VMX_VM_EXIT_ACTIVATE_SECONDARY_CONTROLS) { ++ /* Secondary VM-exit controls (0x2044). */ ++ kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x44); ++ } else if (f[FEAT_VMX_SECONDARY_CTLS] & VMX_SECONDARY_EXEC_TSC_SCALING) { + /* TSC multiplier (0x2032). */ + kvm_msr_entry_add(cpu, MSR_IA32_VMX_VMCS_ENUM, 0x32); + } else { +-- +2.43.5 + diff --git a/0307-target-i386-pass-x86cpu-to-x86-cpu-get-supported-fea.patch b/0307-target-i386-pass-x86cpu-to-x86-cpu-get-supported-fea.patch new file mode 100644 index 0000000..b88bf44 --- /dev/null +++ b/0307-target-i386-pass-x86cpu-to-x86-cpu-get-supported-fea.patch @@ -0,0 +1,108 @@ +From b6fc3dcfa26596249bd0e0d8bb3afec92f215309 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Thu, 27 Jun 2024 01:12:42 +0200 +Subject: [PATCH] target/i386: pass X86CPU to + x86_cpu_get_supported_feature_word + +commit 8dee38483274bd0fcf3f74dea024d719b958200d upstream. + +This allows modifying the bits in "-cpu max"/"-cpu host" depending on +the guest CPU vendor (which, at least by default, is the host vendor in +the case of KVM). + +For example, machine check architecture differs between Intel and AMD, +and bits from AMD should be dropped when configuring the guest for +an Intel model. 
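A hedged sketch of the kind of vendor-aware filtering this makes possible (hypothetical code, not part of the patch: AMD_ONLY_BITS is an invented placeholder, while X86CPU, FeatureWord, FEAT_8000_0007_EDX, and IS_AMD_CPU() are existing QEMU names, so this assumes QEMU's internal target/i386 headers).

    #define AMD_ONLY_BITS 0   /* placeholder for hypothetical AMD-specific bits */

    static uint64_t filter_for_vendor(X86CPU *cpu, FeatureWord w, uint64_t bits)
    {
        /* With the X86CPU pointer available, the supported mask can now
         * depend on the configured guest vendor. */
        if (cpu && w == FEAT_8000_0007_EDX && !IS_AMD_CPU(&cpu->env)) {
            bits &= ~AMD_ONLY_BITS;
        }
        return bits;
    }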
+ +Intel-SIG: commit 8dee38483274 target/i386: pass X86CPU to x86_cpu_get_supported_feature_word + +Cc: Xiaoyao Li +Cc: John Allen +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 11 +++++------ + target/i386/cpu.h | 3 +-- + target/i386/kvm/kvm-cpu.c | 2 +- + 3 files changed, 7 insertions(+), 9 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 58160726d2..7119f6f0dc 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -5957,8 +5957,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) + + #endif /* !CONFIG_USER_ONLY */ + +-uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, +- bool migratable_only) ++uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w) + { + FeatureWordInfo *wi = &feature_word_info[w]; + uint64_t r = 0; +@@ -6000,7 +5999,7 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + r &= ~unavail; + } + #endif +- if (migratable_only) { ++ if (cpu && cpu->migratable) { + r &= x86_cpu_get_migratable_flags(w); + } + return r; +@@ -7300,7 +7299,7 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) + * by the user. + */ + env->features[w] |= +- x86_cpu_get_supported_feature_word(w, cpu->migratable) & ++ x86_cpu_get_supported_feature_word(cpu, w) & + ~env->user_features[w] & + ~feature_word_info[w].no_autoenable_flags; + } +@@ -7426,7 +7425,7 @@ static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) + + for (w = 0; w < FEATURE_WORDS; w++) { + uint64_t host_feat = +- x86_cpu_get_supported_feature_word(w, false); ++ x86_cpu_get_supported_feature_word(NULL, w); + uint64_t requested_features = env->features[w]; + uint64_t unavailable_features = requested_features & ~host_feat; + mark_unavailable_features(cpu, w, unavailable_features, prefix); +@@ -7542,7 +7541,7 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) + env->features[FEAT_PERF_CAPABILITIES] & PERF_CAP_LBR_FMT; + if (requested_lbr_fmt && kvm_enabled()) { + uint64_t host_perf_cap = +- x86_cpu_get_supported_feature_word(FEAT_PERF_CAPABILITIES, false); ++ x86_cpu_get_supported_feature_word(NULL, FEAT_PERF_CAPABILITIES); + unsigned host_lbr_fmt = host_perf_cap & PERF_CAP_LBR_FMT; + + if (!cpu->enable_pmu) { +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 5c8507c867..b13bcc95aa 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -654,8 +654,7 @@ typedef enum FeatureWord { + } FeatureWord; + + typedef uint64_t FeatureWordArray[FEATURE_WORDS]; +-uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, +- bool migratable_only); ++uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + + /* cpuid_features bits */ + #define CPUID_FP87 (1U << 0) +diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c +index f76972e47e..a3bc8d8f83 100644 +--- a/target/i386/kvm/kvm-cpu.c ++++ b/target/i386/kvm/kvm-cpu.c +@@ -137,7 +137,7 @@ static void kvm_cpu_xsave_init(void) + if (!esa->size) { + continue; + } +- if ((x86_cpu_get_supported_feature_word(esa->feature, false) & esa->bits) ++ if ((x86_cpu_get_supported_feature_word(NULL, esa->feature) & esa->bits) + != esa->bits) { + continue; + } +-- +2.43.5 + diff --git a/0308-i386-cpuid-remove-subleaf-constraint-on-cpuid-leaf-1.patch b/0308-i386-cpuid-remove-subleaf-constraint-on-cpuid-leaf-1.patch new file mode 100644 index 0000000..59cb961 --- /dev/null +++ b/0308-i386-cpuid-remove-subleaf-constraint-on-cpuid-leaf-1.patch @@ -0,0 +1,38 @@ +From 3d5a9f94edd77826f7475c192d110e166e5ea398 Mon Sep 17 00:00:00 2001 
+From: Xiaoyao Li
+Date: Wed, 24 Jan 2024 21:40:15 -0500
+Subject: [PATCH] i386/cpuid: Remove subleaf constraint on CPUID leaf 1F
+
+commit a3b5376521a0de898440e8d0942b54e628f0949f upstream.
+
+No such constraint that subleaf index needs to be less than 64.
+
+Intel-SIG: commit a3b5376521a0 i386/cpuid: Remove subleaf constraint on CPUID leaf 1F
+
+Signed-off-by: Xiaoyao Li
+Reviewed-by: Yang Weijiang
+Message-ID: <20240125024016.2521244-3-xiaoyao.li@intel.com>
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Jason Zeng
+---
+ target/i386/kvm/kvm.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
+index 50e13a3846..281a078a56 100644
+--- a/target/i386/kvm/kvm.c
++++ b/target/i386/kvm/kvm.c
+@@ -1928,10 +1928,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
+ break;
+ }
+ 
+- if (i == 0x1f && j == 64) {
+- break;
+- }
+-
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+-- 
+2.43.5
+
diff --git a/0309-target-i386-don-t-construct-a-all-zero-entry-for-cpu.patch b/0309-target-i386-don-t-construct-a-all-zero-entry-for-cpu.patch
new file mode 100644
index 0000000..79019d0
--- /dev/null
+++ b/0309-target-i386-don-t-construct-a-all-zero-entry-for-cpu.patch
@@ -0,0 +1,57 @@
+From 4d2b590c508a50e9f5d29126528e461266e4ece6 Mon Sep 17 00:00:00 2001
+From: Xiaoyao Li
+Date: Wed, 14 Aug 2024 03:54:23 -0400
+Subject: [PATCH] target/i386: Don't construct an all-zero entry for CPUID[0xD
+ 0x3f]
+
+commit 00c8a933d95add3ce4afebbe491ca0fa398a9007 upstream.
+
+Currently, QEMU always constructs an all-zero CPUID entry for
+CPUID[0xD 0x3f].
+
+It's meaningless to construct such a leaf as the end of leaf 0xD. Rework
+the logic of how subleaves of 0xD are constructed to get rid of such an
+all-zero value of subleaf 0x3f.
+
+Intel-SIG: commit 00c8a933d95a target/i386: Don't construct an all-zero entry for CPUID[0xD 0x3f]
+
+Signed-off-by: Xiaoyao Li
+Link: https://lore.kernel.org/r/20240814075431.339209-2-xiaoyao.li@intel.com
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Jason Zeng
+---
+ target/i386/kvm/kvm.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
+index 281a078a56..9d55dcd0b4 100644
+--- a/target/i386/kvm/kvm.c
++++ b/target/i386/kvm/kvm.c
+@@ -1924,10 +1924,6 @@ int kvm_arch_init_vcpu(CPUState *cs)
+ case 0xb:
+ case 0xd:
+ for (j = 0; ; j++) {
+- if (i == 0xd && j == 64) {
+- break;
+- }
+-
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+@@ -1943,7 +1939,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
+ break;
+ }
+ if (i == 0xd && c->eax == 0) {
+- continue;
++ if (j < 63) {
++ continue;
++ } else {
++ cpuid_i--;
++ break;
++ }
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+-- 
+2.43.5
+
diff --git a/0310-target-i386-enable-fdp-excptn-only-and-zero-fcs-fds.patch b/0310-target-i386-enable-fdp-excptn-only-and-zero-fcs-fds.patch
new file mode 100644
index 0000000..2c4a65f
--- /dev/null
+++ b/0310-target-i386-enable-fdp-excptn-only-and-zero-fcs-fds.patch
@@ -0,0 +1,70 @@
+From 6b828acb8e91038434d644b381e420e01ff055f2 Mon Sep 17 00:00:00 2001
+From: Xiaoyao Li
+Date: Wed, 14 Aug 2024 03:54:24 -0400
+Subject: [PATCH] target/i386: Enable fdp-excptn-only and zero-fcs-fds
+
+commit 7dddc3bb875e7141ab25931d0f30a1c319bc8457 upstream.
+
+- CPUID.(EAX=07H,ECX=0H):EBX[bit 6]: x87 FPU Data Pointer updated only
+ on x87 exceptions if 1. 
+ +- CPUID.(EAX=07H,ECX=0H):EBX[bit 13]: Deprecates FPU CS and FPU DS + values if 1. i.e., X87 FCS and FDS are always zero. + +Define names for them so that they can be exposed to guest with -cpu host. + +Also define the bit field MACROs so that named cpu models can add it as +well in the future. + +Intel-SIG: commit 7dddc3bb875e target/i386: Enable fdp-excptn-only and zero-fcs-fds + +Signed-off-by: Xiaoyao Li +Link: https://lore.kernel.org/r/20240814075431.339209-3-xiaoyao.li@intel.com +Signed-off-by: Paolo Bonzini +Signed-off-by: Jason Zeng +--- + target/i386/cpu.c | 4 ++-- + target/i386/cpu.h | 4 ++++ + 2 files changed, 6 insertions(+), 2 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 7119f6f0dc..4673bb5c74 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -906,9 +906,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "fsgsbase", "tsc-adjust", "sgx", "bmi1", +- "hle", "avx2", NULL, "smep", ++ "hle", "avx2", "fdp-excptn-only", "smep", + "bmi2", "erms", "invpcid", "rtm", +- NULL, NULL, "mpx", NULL, ++ NULL, "zero-fcs-fds", "mpx", NULL, + "avx512f", "avx512dq", "rdseed", "adx", + "smap", "avx512ifma", "pcommit", "clflushopt", + "clwb", "intel-pt", "avx512pf", "avx512er", +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index b13bcc95aa..1402ee5e9d 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -808,6 +808,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + #define CPUID_7_0_EBX_HLE (1U << 4) + /* Intel Advanced Vector Extensions 2 */ + #define CPUID_7_0_EBX_AVX2 (1U << 5) ++/* FPU data pointer updated only on x87 exceptions */ ++#define CPUID_7_0_EBX_FDP_EXCPTN_ONLY (1u << 6) + /* Supervisor-mode Execution Prevention */ + #define CPUID_7_0_EBX_SMEP (1U << 7) + /* 2nd Group of Advanced Bit Manipulation Extensions */ +@@ -818,6 +820,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + #define CPUID_7_0_EBX_INVPCID (1U << 10) + /* Restricted Transactional Memory */ + #define CPUID_7_0_EBX_RTM (1U << 11) ++/* Zero out FPU CS and FPU DS */ ++#define CPUID_7_0_EBX_ZERO_FCS_FDS (1U << 13) + /* Memory Protection Extension */ + #define CPUID_7_0_EBX_MPX (1U << 14) + /* AVX-512 Foundation */ +-- +2.43.5 + diff --git a/0311-target-i386-construct-cpuid-2-as-stateful-iff-times-.patch b/0311-target-i386-construct-cpuid-2-as-stateful-iff-times-.patch new file mode 100644 index 0000000..5c80dc0 --- /dev/null +++ b/0311-target-i386-construct-cpuid-2-as-stateful-iff-times-.patch @@ -0,0 +1,41 @@ +From 09ed3f684602f0647350cd3ca61ac6637ebfa4c6 Mon Sep 17 00:00:00 2001 +From: Xiaoyao Li +Date: Wed, 14 Aug 2024 03:54:27 -0400 +Subject: [PATCH] target/i386: Construct CPUID 2 as stateful iff times > 1 + +commit 5ab639141b6d916a6f4041d4ec46f2f1a1e4a365 upstream. + +When times == 1, the CPUID leaf 2 is not stateful. 
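For background, leaf 2 is "stateful" only because its EAX low byte declares how many consecutive executions of CPUID(2) are needed to collect all descriptor bytes; in practice that byte has been 1 on every known processor. A small probe sketch using GCC/Clang's <cpuid.h>:

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        __cpuid(2, eax, ebx, ecx, edx);

        unsigned int times = eax & 0xff;   /* required iteration count */
        printf("CPUID(2) must be executed %u time(s)\n", times);

        for (unsigned int i = 1; i < times; i++) {
            __cpuid(2, eax, ebx, ecx, edx);  /* remaining descriptor bytes */
        }
        return 0;
    }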
+
+Intel-SIG: commit 5ab639141b6d target/i386: Construct CPUID 2 as stateful iff times > 1
+
+Signed-off-by: Xiaoyao Li
+Link: https://lore.kernel.org/r/20240814075431.339209-6-xiaoyao.li@intel.com
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Jason Zeng
+---
+ target/i386/kvm/kvm.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
+index 9d55dcd0b4..2df3ff99c3 100644
+--- a/target/i386/kvm/kvm.c
++++ b/target/i386/kvm/kvm.c
+@@ -1896,10 +1896,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
+ int times;
+ 
+ c->function = i;
+- c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+- KVM_CPUID_FLAG_STATE_READ_NEXT;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax & 0xff;
++ if (times > 1) {
++ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
++ KVM_CPUID_FLAG_STATE_READ_NEXT;
++ }
+ 
+ for (j = 1; j < times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+-- 
+2.43.5
+
diff --git a/0312-target-i386-make-invtsc-migratable-when-user-sets-ts.patch b/0312-target-i386-make-invtsc-migratable-when-user-sets-ts.patch
new file mode 100644
index 0000000..2fcea8f
--- /dev/null
+++ b/0312-target-i386-make-invtsc-migratable-when-user-sets-ts.patch
@@ -0,0 +1,66 @@
+From 3ce35de1e4cd006e20041505ad6120ef252ee306 Mon Sep 17 00:00:00 2001
+From: Xiaoyao Li
+Date: Wed, 14 Aug 2024 03:54:31 -0400
+Subject: [PATCH] target/i386: Make invtsc migratable when user sets tsc-khz
+ explicitly
+
+commit 87c88db3143e91076d167a62dd7febf49afca8a2 upstream.
+
+When the user sets tsc-frequency explicitly, the invtsc feature is actually
+migratable because the tsc-frequency is supposed to be fixed during the
+migration.
+
+See commit d99569d9d856 ("kvm: Allow invtsc migration if tsc-khz
+is set explicitly") for reference.
+
+Intel-SIG: commit 87c88db3143e target/i386: Make invtsc migratable when user sets tsc-khz explicitly
+
+Signed-off-by: Xiaoyao Li
+Link: https://lore.kernel.org/r/20240814075431.339209-10-xiaoyao.li@intel.com
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Jason Zeng
+---
+ target/i386/cpu.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/target/i386/cpu.c b/target/i386/cpu.c
+index 4673bb5c74..2f017a87de 100644
+--- a/target/i386/cpu.c
++++ b/target/i386/cpu.c
+@@ -1685,9 +1685,10 @@ static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu)
+ * Returns the set of feature flags that are supported and migratable by
+ * QEMU, for a given FeatureWord. 
+ */
+-static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
++static uint64_t x86_cpu_get_migratable_flags(X86CPU *cpu, FeatureWord w)
+ {
+ FeatureWordInfo *wi = &feature_word_info[w];
++ CPUX86State *env = &cpu->env;
+ uint64_t r = 0;
+ int i;
+
+@@ -1701,6 +1702,12 @@ static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
+ r |= f;
+ }
+ }
++
++ /* when tsc-khz is set explicitly, invtsc is migratable */
++ if ((w == FEAT_8000_0007_EDX) && env->user_tsc_khz) {
++ r |= CPUID_APM_INVTSC;
++ }
++
+ return r;
+ }
+
+@@ -6000,7 +6007,7 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w)
+ }
+ #endif
+ if (cpu && cpu->migratable) {
+- r &= x86_cpu_get_migratable_flags(w);
++ r &= x86_cpu_get_migratable_flags(cpu, w);
+ }
+ return r;
+ }
+--
+2.43.5
+
diff --git a/0313-target-i386-cpu-fix-notes-for-cpu-models.patch b/0313-target-i386-cpu-fix-notes-for-cpu-models.patch
new file mode 100644
index 0000000..0376fbd
--- /dev/null
+++ b/0313-target-i386-cpu-fix-notes-for-cpu-models.patch
@@ -0,0 +1,45 @@
+From 0a42492eb380c5b5a964d531391e0d5f9b9ad794 Mon Sep 17 00:00:00 2001
+From: Han Han
+Date: Thu, 19 Dec 2024 16:51:38 +0800
+Subject: [PATCH] target/i386/cpu: Fix notes for CPU models
+
+commit 93dcc9390e5ad0696ae7e9b7b3a5b08c2d1b6de6 upstream.
+
+
+
+Intel-SIG: commit 93dcc9390e5a target/i386/cpu: Fix notes for CPU models
+
+Fixes: 644e3c5d812 ("missing vmx features for Skylake-Server and Cascadelake-Server")
+Signed-off-by: Han Han
+Reviewed-by: Chenyi Qiang
+Reviewed-by: Michael Tokarev
+Signed-off-by: Michael Tokarev
+Signed-off-by: Jason Zeng
+---
+ target/i386/cpu.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/target/i386/cpu.c b/target/i386/cpu.c
+index 2f017a87de..88a584b736 100644
+--- a/target/i386/cpu.c
++++ b/target/i386/cpu.c
+@@ -3473,6 +3473,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
+ },
+ {
+ .version = 4,
++ .note = "IBRS, EPT switching, no TSX",
+ .props = (PropValue[]) {
+ { "vmx-eptp-switching", "on" },
+ { /* end of list */ }
+@@ -3607,7 +3608,7 @@ static const X86CPUDefinition builtin_x86_defs[] = {
+ },
+ },
+ { .version = 4,
+- .note = "ARCH_CAPABILITIES, no TSX",
++ .note = "ARCH_CAPABILITIES, EPT switching, no TSX",
+ .props = (PropValue[]) {
+ { "vmx-eptp-switching", "on" },
+ { /* end of list */ }
+--
+2.43.5
+
diff --git a/0314-sw64-add-the-migration-of-rtc-and-memb-instructions.patch b/0314-sw64-add-the-migration-of-rtc-and-memb-instructions.patch
new file mode 100644
index 0000000..ca2e953
--- /dev/null
+++ b/0314-sw64-add-the-migration-of-rtc-and-memb-instructions.patch
@@ -0,0 +1,553 @@
+From 2738f0c0f26db3d4de536b11c16c45dab598e0c5 Mon Sep 17 00:00:00 2001
+From: Yu Jiayi
+Date: Tue, 18 Mar 2025 08:40:07 +0800
+Subject: [PATCH] sw64: add the migration of rtc and memb instructions
+
+This commit adds the rtc and memb instructions that were missed when
+migrating from qemu-6.2.0.
+ +Signed-off-by: Yu Jiayi +--- + disas/disas.c | 2 + + include/qemu/atomic.h | 2 + + include/qemu/timer.h | 9 + + linux-headers/asm-sw64/unistd.h.bak | 481 ---------------------------- + 4 files changed, 13 insertions(+), 481 deletions(-) + delete mode 100644 linux-headers/asm-sw64/unistd.h.bak + +diff --git a/disas/disas.c b/disas/disas.c +index 0d2d06c2ec..1fbc2d1117 100644 +--- a/disas/disas.c ++++ b/disas/disas.c +@@ -196,6 +196,8 @@ static void initialize_debug_host(CPUDebug *s) + s->info.print_insn = print_insn_hppa; + #elif defined(__loongarch__) + s->info.print_insn = print_insn_loongarch; ++#elif defined(__sw_64__) ++ s->info.print_insn = print_insn_sw64; + #endif + } + +diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h +index f1d3d1702a..b349fb28fb 100644 +--- a/include/qemu/atomic.h ++++ b/include/qemu/atomic.h +@@ -87,6 +87,8 @@ + #define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); }) + #elif defined(__alpha__) + #define smp_read_barrier_depends() asm volatile("mb":::"memory") ++#elif defined(__sw_64__) ++#define smp_read_barrier_depends() asm volatile("memb":::"memory") + #else + #define smp_read_barrier_depends() barrier() + #endif +diff --git a/include/qemu/timer.h b/include/qemu/timer.h +index 9a366e551f..10068b30d2 100644 +--- a/include/qemu/timer.h ++++ b/include/qemu/timer.h +@@ -1001,6 +1001,15 @@ static inline int64_t cpu_get_host_ticks(void) + return val; + } + ++#elif defined(__sw_64__) ++static inline int64_t cpu_get_host_ticks(void) ++{ ++ uint64_t cc; ++ ++ asm volatile("rtc %0" : "=r"(cc)); ++ return cc; ++} ++ + #else + /* The host CPU doesn't have an easily accessible cycle counter. + Just return a monotonically increasing value. This will be +diff --git a/linux-headers/asm-sw64/unistd.h.bak b/linux-headers/asm-sw64/unistd.h.bak +deleted file mode 100644 +index 30ff44f4a6..0000000000 +--- a/linux-headers/asm-sw64/unistd.h.bak ++++ /dev/null +@@ -1,481 +0,0 @@ +-#ifndef _SW_64_UNISTD_H +-#define _SW_64_UNISTD_H +- +-#define __NR_osf_syscall 0 /* not implemented */ +-#define __NR_exit 1 +-#define __NR_fork 2 +-#define __NR_read 3 +-#define __NR_write 4 +-#define __NR_osf_old_open 5 /* not implemented */ +-#define __NR_close 6 +-#define __NR_osf_wait4 7 +-#define __NR_osf_old_creat 8 /* not implemented */ +-#define __NR_link 9 +-#define __NR_unlink 10 +-#define __NR_osf_execve 11 /* not implemented */ +-#define __NR_chdir 12 +-#define __NR_fchdir 13 +-#define __NR_mknod 14 +-#define __NR_chmod 15 +-#define __NR_chown 16 +-#define __NR_brk 17 +-#define __NR_osf_getfsstat 18 /* not implemented */ +-#define __NR_lseek 19 +-#define __NR_getxpid 20 +-#define __NR_osf_mount 21 +-#define __NR_umount 22 +-#define __NR_setuid 23 +-#define __NR_getxuid 24 +-#define __NR_exec_with_loader 25 /* not implemented */ +-#define __NR_ptrace 26 +-#define __NR_osf_nrecvmsg 27 /* not implemented */ +-#define __NR_osf_nsendmsg 28 /* not implemented */ +-#define __NR_osf_nrecvfrom 29 /* not implemented */ +-#define __NR_osf_naccept 30 /* not implemented */ +-#define __NR_osf_ngetpeername 31 /* not implemented */ +-#define __NR_osf_ngetsockname 32 /* not implemented */ +-#define __NR_access 33 +-#define __NR_osf_chflags 34 /* not implemented */ +-#define __NR_osf_fchflags 35 /* not implemented */ +-#define __NR_sync 36 +-#define __NR_kill 37 +-#define __NR_osf_old_stat 38 /* not implemented */ +-#define __NR_setpgid 39 +-#define __NR_osf_old_lstat 40 /* not implemented */ +-#define __NR_dup 41 +-#define __NR_pipe 42 +-#define 
__NR_osf_set_program_attributes 43 +-#define __NR_osf_profil 44 /* not implemented */ +-#define __NR_open 45 +-#define __NR_osf_old_sigaction 46 /* not implemented */ +-#define __NR_getxgid 47 +-#define __NR_osf_sigprocmask 48 +-#define __NR_osf_getlogin 49 /* not implemented */ +-#define __NR_osf_setlogin 50 /* not implemented */ +-#define __NR_acct 51 +-#define __NR_sigpending 52 +- +-#define __NR_ioctl 54 +-#define __NR_osf_reboot 55 /* not implemented */ +-#define __NR_osf_revoke 56 /* not implemented */ +-#define __NR_symlink 57 +-#define __NR_readlink 58 +-#define __NR_execve 59 +-#define __NR_umask 60 +-#define __NR_chroot 61 +-#define __NR_osf_old_fstat 62 /* not implemented */ +-#define __NR_getpgrp 63 +-#define __NR_getpagesize 64 +-#define __NR_osf_mremap 65 /* not implemented */ +-#define __NR_vfork 66 +-#define __NR_stat 67 +-#define __NR_lstat 68 +-#define __NR_osf_sbrk 69 /* not implemented */ +-#define __NR_osf_sstk 70 /* not implemented */ +-#define __NR_mmap 71 /* OSF/1 mmap is superset of Linux */ +-#define __NR_osf_old_vadvise 72 /* not implemented */ +-#define __NR_munmap 73 +-#define __NR_mprotect 74 +-#define __NR_madvise 75 +-#define __NR_vhangup 76 +-#define __NR_osf_kmodcall 77 /* not implemented */ +-#define __NR_osf_mincore 78 /* not implemented */ +-#define __NR_getgroups 79 +-#define __NR_setgroups 80 +-#define __NR_osf_old_getpgrp 81 /* not implemented */ +-#define __NR_setpgrp 82 /* BSD alias for setpgid */ +-#define __NR_osf_setitimer 83 +-#define __NR_osf_old_wait 84 /* not implemented */ +-#define __NR_osf_table 85 /* not implemented */ +-#define __NR_osf_getitimer 86 +-#define __NR_gethostname 87 +-#define __NR_sethostname 88 +-#define __NR_getdtablesize 89 +-#define __NR_dup2 90 +-#define __NR_fstat 91 +-#define __NR_fcntl 92 +-#define __NR_osf_select 93 +-#define __NR_poll 94 +-#define __NR_fsync 95 +-#define __NR_setpriority 96 +-#define __NR_socket 97 +-#define __NR_connect 98 +-#define __NR_accept 99 +-#define __NR_getpriority 100 +-#define __NR_send 101 +-#define __NR_recv 102 +-#define __NR_sigreturn 103 +-#define __NR_bind 104 +-#define __NR_setsockopt 105 +-#define __NR_listen 106 +-#define __NR_osf_plock 107 /* not implemented */ +-#define __NR_osf_old_sigvec 108 /* not implemented */ +-#define __NR_osf_old_sigblock 109 /* not implemented */ +-#define __NR_osf_old_sigsetmask 110 /* not implemented */ +-#define __NR_sigsuspend 111 +-#define __NR_osf_sigstack 112 +-#define __NR_recvmsg 113 +-#define __NR_sendmsg 114 +-#define __NR_osf_old_vtrace 115 /* not implemented */ +-#define __NR_osf_gettimeofday 116 +-#define __NR_osf_getrusage 117 +-#define __NR_getsockopt 118 +- +-#define __NR_readv 120 +-#define __NR_writev 121 +-#define __NR_osf_settimeofday 122 +-#define __NR_fchown 123 +-#define __NR_fchmod 124 +-#define __NR_recvfrom 125 +-#define __NR_setreuid 126 +-#define __NR_setregid 127 +-#define __NR_rename 128 +-#define __NR_truncate 129 +-#define __NR_ftruncate 130 +-#define __NR_flock 131 +-#define __NR_setgid 132 +-#define __NR_sendto 133 +-#define __NR_shutdown 134 +-#define __NR_socketpair 135 +-#define __NR_mkdir 136 +-#define __NR_rmdir 137 +-#define __NR_osf_utimes 138 +-#define __NR_osf_old_sigreturn 139 /* not implemented */ +-#define __NR_osf_adjtime 140 /* not implemented */ +-#define __NR_getpeername 141 +-#define __NR_osf_gethostid 142 /* not implemented */ +-#define __NR_osf_sethostid 143 /* not implemented */ +-#define __NR_getrlimit 144 +-#define __NR_setrlimit 145 +-#define __NR_osf_old_killpg 146 /* not implemented */ 
+-#define __NR_setsid 147 +-#define __NR_quotactl 148 +-#define __NR_osf_oldquota 149 /* not implemented */ +-#define __NR_getsockname 150 +- +-#define __NR_osf_pid_block 153 /* not implemented */ +-#define __NR_osf_pid_unblock 154 /* not implemented */ +- +-#define __NR_sigaction 156 +-#define __NR_osf_sigwaitprim 157 /* not implemented */ +-#define __NR_osf_nfssvc 158 /* not implemented */ +-#define __NR_osf_getdirentries 159 +-#define __NR_osf_statfs 160 +-#define __NR_osf_fstatfs 161 +- +-#define __NR_osf_asynch_daemon 163 /* not implemented */ +-#define __NR_osf_getfh 164 /* not implemented */ +-#define __NR_osf_getdomainname 165 +-#define __NR_setdomainname 166 +- +-#define __NR_osf_exportfs 169 /* not implemented */ +- +-#define __NR_userfaultfd 171 +- +-#define __NR_osf_alt_plock 181 /* not implemented */ +- +-#define __NR_osf_getmnt 184 /* not implemented */ +- +-#define __NR_osf_alt_sigpending 187 /* not implemented */ +-#define __NR_osf_alt_setsid 188 /* not implemented */ +- +-#define __NR_osf_swapon 199 +-#define __NR_msgctl 200 +-#define __NR_msgget 201 +-#define __NR_msgrcv 202 +-#define __NR_msgsnd 203 +-#define __NR_semctl 204 +-#define __NR_semget 205 +-#define __NR_semop 206 +-#define __NR_osf_utsname 207 +-#define __NR_lchown 208 +-#define __NR_osf_shmat 209 +-#define __NR_shmctl 210 +-#define __NR_shmdt 211 +-#define __NR_shmget 212 +-#define __NR_osf_mvalid 213 /* not implemented */ +-#define __NR_osf_getaddressconf 214 /* not implemented */ +-#define __NR_osf_msleep 215 /* not implemented */ +-#define __NR_osf_mwakeup 216 /* not implemented */ +-#define __NR_msync 217 +-#define __NR_osf_signal 218 /* not implemented */ +-#define __NR_osf_utc_gettime 219 /* not implemented */ +-#define __NR_osf_utc_adjtime 220 /* not implemented */ +- +-#define __NR_osf_security 222 /* not implemented */ +-#define __NR_osf_kloadcall 223 /* not implemented */ +- +-#define __NR_osf_stat 224 +-#define __NR_osf_lstat 225 +-#define __NR_osf_fstat 226 +-#define __NR_osf_statfs64 227 +-#define __NR_osf_fstatfs64 228 +- +-#define __NR_getpgid 233 +-#define __NR_getsid 234 +-#define __NR_sigaltstack 235 +-#define __NR_osf_waitid 236 /* not implemented */ +-#define __NR_osf_priocntlset 237 /* not implemented */ +-#define __NR_osf_sigsendset 238 /* not implemented */ +-#define __NR_osf_set_speculative 239 /* not implemented */ +-#define __NR_osf_msfs_syscall 240 /* not implemented */ +-#define __NR_osf_sysinfo 241 +-#define __NR_osf_uadmin 242 /* not implemented */ +-#define __NR_osf_fuser 243 /* not implemented */ +-#define __NR_osf_proplist_syscall 244 +-#define __NR_osf_ntp_adjtime 245 /* not implemented */ +-#define __NR_osf_ntp_gettime 246 /* not implemented */ +-#define __NR_osf_pathconf 247 /* not implemented */ +-#define __NR_osf_fpathconf 248 /* not implemented */ +- +-#define __NR_osf_uswitch 250 /* not implemented */ +-#define __NR_osf_usleep_thread 251 +-#define __NR_osf_audcntl 252 /* not implemented */ +-#define __NR_osf_audgen 253 /* not implemented */ +-#define __NR_sysfs 254 +-#define __NR_osf_subsys_info 255 /* not implemented */ +-#define __NR_osf_getsysinfo 256 +-#define __NR_osf_setsysinfo 257 +-#define __NR_osf_afs_syscall 258 /* not implemented */ +-#define __NR_osf_swapctl 259 /* not implemented */ +-#define __NR_osf_memcntl 260 /* not implemented */ +-#define __NR_osf_fdatasync 261 /* not implemented */ +- +-/* +- * Ignore legacy syscalls that we don't use. 
+- */ +-#define __IGNORE_alarm +-#define __IGNORE_creat +-#define __IGNORE_getegid +-#define __IGNORE_geteuid +-#define __IGNORE_getgid +-#define __IGNORE_getpid +-#define __IGNORE_getppid +-#define __IGNORE_getuid +-#define __IGNORE_pause +-#define __IGNORE_time +-#define __IGNORE_utime +-#define __IGNORE_umount2 +- +-/* +- * Linux-specific system calls begin at 300 +- */ +-#define __NR_bdflush 300 +-#define __NR_sethae 301 +-#define __NR_mount 302 +-#define __NR_old_adjtimex 303 +-#define __NR_swapoff 304 +-#define __NR_getdents 305 +-#define __NR_create_module 306 +-#define __NR_init_module 307 +-#define __NR_delete_module 308 +-#define __NR_get_kernel_syms 309 +-#define __NR_syslog 310 +-#define __NR_reboot 311 +-#define __NR_clone 312 +-#define __NR_uselib 313 +-#define __NR_mlock 314 +-#define __NR_munlock 315 +-#define __NR_mlockall 316 +-#define __NR_munlockall 317 +-#define __NR_sysinfo 318 +-#define __NR__sysctl 319 +-/* 320 was sys_idle. */ +-#define __NR_oldumount 321 +-#define __NR_swapon 322 +-#define __NR_times 323 +-#define __NR_personality 324 +-#define __NR_setfsuid 325 +-#define __NR_setfsgid 326 +-#define __NR_ustat 327 +-#define __NR_statfs 328 +-#define __NR_fstatfs 329 +-#define __NR_sched_setparam 330 +-#define __NR_sched_getparam 331 +-#define __NR_sched_setscheduler 332 +-#define __NR_sched_getscheduler 333 +-#define __NR_sched_yield 334 +-#define __NR_sched_get_priority_max 335 +-#define __NR_sched_get_priority_min 336 +-#define __NR_sched_rr_get_interval 337 +-#define __NR_afs_syscall 338 +-#define __NR_uname 339 +-#define __NR_nanosleep 340 +-#define __NR_mremap 341 +-#define __NR_nfsservctl 342 +-#define __NR_setresuid 343 +-#define __NR_getresuid 344 +-#define __NR_pciconfig_read 345 +-#define __NR_pciconfig_write 346 +-#define __NR_query_module 347 +-#define __NR_prctl 348 +-#define __NR_pread64 349 +-#define __NR_pwrite64 350 +-#define __NR_rt_sigreturn 351 +-#define __NR_rt_sigaction 352 +-#define __NR_rt_sigprocmask 353 +-#define __NR_rt_sigpending 354 +-#define __NR_rt_sigtimedwait 355 +-#define __NR_rt_sigqueueinfo 356 +-#define __NR_rt_sigsuspend 357 +-#define __NR_select 358 +-#define __NR_gettimeofday 359 +-#define __NR_settimeofday 360 +-#define __NR_getitimer 361 +-#define __NR_setitimer 362 +-#define __NR_utimes 363 +-#define __NR_getrusage 364 +-#define __NR_wait4 365 +-#define __NR_adjtimex 366 +-#define __NR_getcwd 367 +-#define __NR_capget 368 +-#define __NR_capset 369 +-#define __NR_sendfile 370 +-#define __NR_setresgid 371 +-#define __NR_getresgid 372 +-#define __NR_dipc 373 +-#define __NR_pivot_root 374 +-#define __NR_mincore 375 +-#define __NR_pciconfig_iobase 376 +-#define __NR_getdents64 377 +-#define __NR_gettid 378 +-#define __NR_readahead 379 +-/* 380 is unused */ +-#define __NR_tkill 381 +-#define __NR_setxattr 382 +-#define __NR_lsetxattr 383 +-#define __NR_fsetxattr 384 +-#define __NR_getxattr 385 +-#define __NR_lgetxattr 386 +-#define __NR_fgetxattr 387 +-#define __NR_listxattr 388 +-#define __NR_llistxattr 389 +-#define __NR_flistxattr 390 +-#define __NR_removexattr 391 +-#define __NR_lremovexattr 392 +-#define __NR_fremovexattr 393 +-#define __NR_futex 394 +-#define __NR_sched_setaffinity 395 +-#define __NR_sched_getaffinity 396 +-#define __NR_tuxcall 397 +-#define __NR_io_setup 398 +-#define __NR_io_destroy 399 +-#define __NR_io_getevents 400 +-#define __NR_io_submit 401 +-#define __NR_io_cancel 402 +-#define __NR_exit_group 405 +-#define __NR_lookup_dcookie 406 +-#define __NR_epoll_create 407 +-#define __NR_epoll_ctl 408 
+-#define __NR_epoll_wait 409 +-/* Feb 2007: These three sys_epoll defines shouldn't be here but culling +- * them would break userspace apps ... we'll kill them off in 2010 :) */ +-#define __NR_sys_epoll_create __NR_epoll_create +-#define __NR_sys_epoll_ctl __NR_epoll_ctl +-#define __NR_sys_epoll_wait __NR_epoll_wait +-#define __NR_remap_file_pages 410 +-#define __NR_set_tid_address 411 +-#define __NR_restart_syscall 412 +-#define __NR_fadvise64 413 +-#define __NR_timer_create 414 +-#define __NR_timer_settime 415 +-#define __NR_timer_gettime 416 +-#define __NR_timer_getoverrun 417 +-#define __NR_timer_delete 418 +-#define __NR_clock_settime 419 +-#define __NR_clock_gettime 420 +-#define __NR_clock_getres 421 +-#define __NR_clock_nanosleep 422 +-#define __NR_semtimedop 423 +-#define __NR_tgkill 424 +-#define __NR_stat64 425 +-#define __NR_lstat64 426 +-#define __NR_fstat64 427 +-#define __NR_vserver 428 +-#define __NR_mbind 429 +-#define __NR_get_mempolicy 430 +-#define __NR_set_mempolicy 431 +-#define __NR_mq_open 432 +-#define __NR_mq_unlink 433 +-#define __NR_mq_timedsend 434 +-#define __NR_mq_timedreceive 435 +-#define __NR_mq_notify 436 +-#define __NR_mq_getsetattr 437 +-#define __NR_waitid 438 +-#define __NR_add_key 439 +-#define __NR_request_key 440 +-#define __NR_keyctl 441 +-#define __NR_ioprio_set 442 +-#define __NR_ioprio_get 443 +-#define __NR_inotify_init 444 +-#define __NR_inotify_add_watch 445 +-#define __NR_inotify_rm_watch 446 +-#define __NR_fdatasync 447 +-#define __NR_kexec_load 448 +-#define __NR_migrate_pages 449 +-#define __NR_openat 450 +-#define __NR_mkdirat 451 +-#define __NR_mknodat 452 +-#define __NR_fchownat 453 +-#define __NR_futimesat 454 +-#define __NR_fstatat64 455 +-#define __NR_unlinkat 456 +-#define __NR_renameat 457 +-#define __NR_linkat 458 +-#define __NR_symlinkat 459 +-#define __NR_readlinkat 460 +-#define __NR_fchmodat 461 +-#define __NR_faccessat 462 +-#define __NR_pselect6 463 +-#define __NR_ppoll 464 +-#define __NR_unshare 465 +-#define __NR_set_robust_list 466 +-#define __NR_get_robust_list 467 +-#define __NR_splice 468 +-#define __NR_sync_file_range 469 +-#define __NR_tee 470 +-#define __NR_vmsplice 471 +-#define __NR_move_pages 472 +-#define __NR_getcpu 473 +-#define __NR_epoll_pwait 474 +-#define __NR_utimensat 475 +-#define __NR_signalfd 476 +-#define __NR_timerfd 477 +-#define __NR_eventfd 478 +-#define __NR_recvmmsg 479 +-#define __NR_fallocate 480 +-#define __NR_timerfd_create 481 +-#define __NR_timerfd_settime 482 +-#define __NR_timerfd_gettime 483 +-#define __NR_signalfd4 484 +-#define __NR_eventfd2 485 +-#define __NR_epoll_create1 486 +-#define __NR_dup3 487 +-#define __NR_pipe2 488 +-#define __NR_inotify_init1 489 +-#define __NR_preadv 490 +-#define __NR_pwritev 491 +-#define __NR_rt_tgsigqueueinfo 492 +-#define __NR_perf_event_open 493 +-#define __NR_fanotify_init 494 +-#define __NR_fanotify_mark 495 +-#define __NR_prlimit64 496 +-#define __NR_name_to_handle_at 497 +-#define __NR_open_by_handle_at 498 +-#define __NR_clock_adjtime 499 +-#define __NR_syncfs 500 +-#define __NR_setns 501 +-#define __NR_accept4 502 +-#define __NR_sendmmsg 503 +-#define __NR_process_vm_readv 504 +-#define __NR_process_vm_writev 505 +-#define __NR_kcmp 506 +-#define __NR_finit_module 507 +-#define __NR_sched_setattr 508 +-#define __NR_sched_getattr 509 +-#define __NR_renameat2 510 +-#define __NR_getrandom 511 +-#define __NR_memfd_create 512 +-#define __NR_execveat 513 +- +-#endif /* _SW_64_UNISTD_H */ +-- +2.43.5 + diff --git 
a/0315-hw-intc-add-extioi-ability-of-256-vcpu-interrupt-rou.patch b/0315-hw-intc-add-extioi-ability-of-256-vcpu-interrupt-rou.patch new file mode 100644 index 0000000..bc15515 --- /dev/null +++ b/0315-hw-intc-add-extioi-ability-of-256-vcpu-interrupt-rou.patch @@ -0,0 +1,208 @@ +From 928ff44b130beb4f06440655eed58eabb614d0ff Mon Sep 17 00:00:00 2001 +From: Xianglai Li +Date: Thu, 20 Feb 2025 19:24:18 +0800 +Subject: [PATCH] hw/intc: Add extioi ability of 256 vcpu interrupt routing + +Add the feature field for the CPU-encoded interrupt +route to extioi and the corresponding mechanism for +backup recovery. + +Signed-off-by: Xianglai Li +--- + hw/intc/loongarch_extioi_kvm.c | 61 +++++++++++++++++++++++++++++- + hw/loongarch/virt.c | 2 + + include/hw/intc/loongarch_extioi.h | 4 ++ + linux-headers/asm-loongarch/kvm.h | 16 ++++---- + 4 files changed, 73 insertions(+), 10 deletions(-) + +diff --git a/hw/intc/loongarch_extioi_kvm.c b/hw/intc/loongarch_extioi_kvm.c +index f5bbc33255..e7699ad2ea 100644 +--- a/hw/intc/loongarch_extioi_kvm.c ++++ b/hw/intc/loongarch_extioi_kvm.c +@@ -22,6 +22,30 @@ static void kvm_extioi_access_regs(int fd, uint64_t addr, + addr, val, is_write, &error_abort); + } + ++static void kvm_extioi_access_sw_status(int fd, uint64_t addr, ++ void *val, bool is_write) ++{ ++ kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS, ++ addr, val, is_write, &error_abort); ++} ++ ++static void kvm_extioi_save_load_sw_status(void *opaque, bool is_write) ++{ ++ KVMLoongArchExtIOI *s = (KVMLoongArchExtIOI *)opaque; ++ KVMLoongArchExtIOIClass *class = KVM_LOONGARCH_EXTIOI_GET_CLASS(s); ++ int fd = class->dev_fd; ++ int addr; ++ ++ addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU; ++ kvm_extioi_access_sw_status(fd, addr, (void *)&s->num_cpu, is_write); ++ ++ addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE; ++ kvm_extioi_access_sw_status(fd, addr, (void *)&s->features, is_write); ++ ++ addr = KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE; ++ kvm_extioi_access_sw_status(fd, addr, (void *)&s->status, is_write); ++} ++ + static int kvm_loongarch_extioi_pre_save(void *opaque) + { + KVMLoongArchExtIOI *s = (KVMLoongArchExtIOI *)opaque; +@@ -41,6 +65,8 @@ static int kvm_loongarch_extioi_pre_save(void *opaque) + kvm_extioi_access_regs(fd, EXTIOI_COREISR_START, + (void *)s->coreisr, false); + ++ kvm_extioi_save_load_sw_status(opaque, false); ++ + return 0; + } + +@@ -61,12 +87,19 @@ static int kvm_loongarch_extioi_post_load(void *opaque, int version_id) + (void *)s->sw_coremap, true); + kvm_extioi_access_regs(fd, EXTIOI_COREISR_START, (void *)s->coreisr, true); + ++ kvm_extioi_save_load_sw_status(opaque, true); ++ ++ kvm_device_access(fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, ++ KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED, ++ NULL, true, &error_abort); ++ + return 0; + } + + static void kvm_loongarch_extioi_realize(DeviceState *dev, Error **errp) + { + KVMLoongArchExtIOIClass *extioi_class = KVM_LOONGARCH_EXTIOI_GET_CLASS(dev); ++ KVMLoongArchExtIOI *s = KVM_LOONGARCH_EXTIOI(dev); + struct kvm_create_device cd = {0}; + Error *err = NULL; + int ret,i; +@@ -77,6 +110,10 @@ static void kvm_loongarch_extioi_realize(DeviceState *dev, Error **errp) + return; + } + ++ if (s->features & BIT(EXTIOI_HAS_VIRT_EXTENSION)) { ++ s->features |= EXTIOI_VIRT_HAS_FEATURES; ++ } ++ + if (!extioi_class->is_created) { + cd.type = KVM_DEV_TYPE_LA_EXTIOI; + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); +@@ -87,6 +124,15 @@ static void kvm_loongarch_extioi_realize(DeviceState *dev, Error **errp) + } + 
extioi_class->is_created = true; + extioi_class->dev_fd = cd.fd; ++ ++ kvm_device_access(cd.fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, ++ KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU, ++ &s->num_cpu, true, NULL); ++ ++ kvm_device_access(cd.fd, KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL, ++ KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE, ++ &s->features, true, NULL); ++ + fprintf(stdout, "Create LoongArch extioi irqchip in KVM done!\n"); + } + +@@ -102,8 +148,8 @@ static void kvm_loongarch_extioi_realize(DeviceState *dev, Error **errp) + + static const VMStateDescription vmstate_kvm_extioi_core = { + .name = "kvm-extioi-single", +- .version_id = 1, +- .minimum_version_id = 1, ++ .version_id = 2, ++ .minimum_version_id = 2, + .pre_save = kvm_loongarch_extioi_pre_save, + .post_load = kvm_loongarch_extioi_post_load, + .fields = (VMStateField[]) { +@@ -119,10 +165,20 @@ static const VMStateDescription vmstate_kvm_extioi_core = { + EXTIOI_IRQS_IPMAP_SIZE / 4), + VMSTATE_UINT32_ARRAY(coremap, KVMLoongArchExtIOI, EXTIOI_IRQS / 4), + VMSTATE_UINT8_ARRAY(sw_coremap, KVMLoongArchExtIOI, EXTIOI_IRQS), ++ VMSTATE_UINT32(num_cpu, KVMLoongArchExtIOI), ++ VMSTATE_UINT32(features, KVMLoongArchExtIOI), ++ VMSTATE_UINT32(status, KVMLoongArchExtIOI), + VMSTATE_END_OF_LIST() + } + }; + ++static Property extioi_properties[] = { ++ DEFINE_PROP_UINT32("num-cpu", KVMLoongArchExtIOI, num_cpu, 1), ++ DEFINE_PROP_BIT("has-virtualization-extension", KVMLoongArchExtIOI, ++ features, EXTIOI_HAS_VIRT_EXTENSION, 0), ++ DEFINE_PROP_END_OF_LIST(), ++}; ++ + static void kvm_loongarch_extioi_class_init(ObjectClass *oc, void *data) + { + DeviceClass *dc = DEVICE_CLASS(oc); +@@ -131,6 +187,7 @@ static void kvm_loongarch_extioi_class_init(ObjectClass *oc, void *data) + extioi_class->parent_realize = dc->realize; + dc->realize = kvm_loongarch_extioi_realize; + extioi_class->is_created = false; ++ device_class_set_props(dc, extioi_properties); + dc->vmsd = &vmstate_kvm_extioi_core; + } + +diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c +index d165c23801..f1953b9f82 100644 +--- a/hw/loongarch/virt.c ++++ b/hw/loongarch/virt.c +@@ -890,6 +890,8 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) + + if (kvm_enabled() && kvm_irqchip_in_kernel()) { + extioi = qdev_new(TYPE_KVM_LOONGARCH_EXTIOI); ++ qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.max_cpus); ++ qdev_prop_set_bit(extioi, "has-virtualization-extension", true); + sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); + } else { + extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); +diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h +index e8378f6083..e77cd752cf 100644 +--- a/include/hw/intc/loongarch_extioi.h ++++ b/include/hw/intc/loongarch_extioi.h +@@ -92,6 +92,10 @@ struct LoongArchExtIOI { + + struct KVMLoongArchExtIOI { + SysBusDevice parent_obj; ++ uint32_t num_cpu; ++ uint32_t features; ++ uint32_t status; ++ + /* hardware state */ + uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; + uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; +diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h +index f109ed42d9..c23c16f3ae 100644 +--- a/linux-headers/asm-loongarch/kvm.h ++++ b/linux-headers/asm-loongarch/kvm.h +@@ -142,16 +142,16 @@ struct kvm_iocsr_entry { + + #define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 + +-#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004 +-#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 +- +-#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005 ++#define 
KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000006
++#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0
++#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1
++#define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2
+
+-#define KVM_LOONGARCH_VM_HAVE_IRQCHIP 0x40000001
+-
+-#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002
++#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000007
++#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0
++#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1
++#define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3
+
+-#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003
+
+ #define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004
+ #define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0
+--
+2.43.5
+
diff --git a/0316-target-loongarch-fix-vcpu-reset-command-word-issue.patch b/0316-target-loongarch-fix-vcpu-reset-command-word-issue.patch
new file mode 100644
index 0000000..bac0400
--- /dev/null
+++ b/0316-target-loongarch-fix-vcpu-reset-command-word-issue.patch
@@ -0,0 +1,56 @@
+From 718fd219aeca9af3a0c5ee3dbd75d85c0d05ef88 Mon Sep 17 00:00:00 2001
+From: Xianglai Li
+Date: Wed, 5 Feb 2025 19:56:54 +0800
+Subject: [PATCH] target/loongarch: fix vcpu reset command word issue
+
+When the KVM_REG_LOONGARCH_VCPU_RESET command word
+is sent to the kernel through the kvm_set_one_reg interface,
+the source parameter must be a valid address;
+otherwise the kernel will return an error and sending
+the command word will fail.
+
+Signed-off-by: Xianglai Li
+---
+ target/loongarch/cpu.c | 2 +-
+ target/loongarch/kvm/kvm.c | 9 ++++++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
+index bfc7df3044..9aa9096555 100644
+--- a/target/loongarch/cpu.c
++++ b/target/loongarch/cpu.c
+@@ -628,8 +628,8 @@ static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
+
+ loongarch_cpu_register_gdb_regs_for_features(cs);
+
+- cpu_reset(cs);
+ qemu_init_vcpu(cs);
++ cpu_reset(cs);
+
+ lacc->parent_realize(dev, errp);
+ }
+diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
+index 719d6c2989..8f9f39b10a 100644
+--- a/target/loongarch/kvm/kvm.c
++++ b/target/loongarch/kvm/kvm.c
+@@ -590,9 +590,16 @@ static int kvm_loongarch_get_lbt(CPUState *cs)
+ void kvm_arch_reset_vcpu(CPUState *cs)
+ {
+ CPULoongArchState *env = cpu_env(cs);
++ int ret = 0;
++ uint64_t unused = 0;
+
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+- kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, 0);
++ ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_VCPU_RESET, &unused);
++ if (ret) {
++ error_report("Failed to set KVM_REG_LOONGARCH_VCPU_RESET: %s",
++ strerror(errno));
++ exit(EXIT_FAILURE);
++ }
+ }
+
+ static int kvm_loongarch_get_mpstate(CPUState *cs)
+--
+2.43.5
+
diff --git a/0317-target-loongarch-fix-the-cpu-unplug-resource-leak.patch b/0317-target-loongarch-fix-the-cpu-unplug-resource-leak.patch
new file mode 100644
index 0000000..016b49b
--- /dev/null
+++ b/0317-target-loongarch-fix-the-cpu-unplug-resource-leak.patch
@@ -0,0 +1,76 @@
+From 0773b52448cff6f1e7d9223994283df14cd5f8c8 Mon Sep 17 00:00:00 2001
+From: Xianglai Li
+Date: Fri, 21 Mar 2025 20:40:37 +0800
+Subject: [PATCH] target/loongarch: Fix the cpu unplug resource leak
+
+When the cpu is created, qemu_add_vm_change_state_handler
+is called in the kvm_arch_init_vcpu function to create
+the VMChangeStateEntry resource.
+
+However, the resource is not released when the cpu is destroyed.
+This results in a qemu process segmentation fault when the virtual
+machine restarts after the cpu is unplugged.
+
+This patch solves the problem by adding the corresponding resource
+release process to the kvm_arch_destroy_vcpu function.
+
+Signed-off-by: Xianglai Li
+---
+ target/loongarch/cpu.c | 2 +-
+ target/loongarch/cpu.h | 1 +
+ target/loongarch/kvm/kvm.c | 5 ++++-
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c
+index 9aa9096555..626d891dd5 100644
+--- a/target/loongarch/cpu.c
++++ b/target/loongarch/cpu.c
+@@ -563,7 +563,7 @@ static void loongarch_cpu_reset_hold(Object *obj)
+ env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, VS, 0);
+ env->CSR_ECFG = FIELD_DP64(env->CSR_ECFG, CSR_ECFG, LIE, 0);
+
+- env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2));
++ env->CSR_ESTAT = 0;
+ env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0);
+ env->CSR_CPUID = cs->cpu_index;
+ env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0);
+diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h
+index 9afa831e45..8f7c02cfed 100644
+--- a/target/loongarch/cpu.h
++++ b/target/loongarch/cpu.h
+@@ -422,6 +422,7 @@ struct ArchCPU {
+ const char *dtb_compatible;
+ /* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */
+ uint64_t kvm_state_counter;
++ VMChangeStateEntry *vmsentry;
+ };
+
+ /**
+diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c
+index 8f9f39b10a..22177b6220 100644
+--- a/target/loongarch/kvm/kvm.c
++++ b/target/loongarch/kvm/kvm.c
+@@ -905,9 +905,10 @@ int kvm_arch_init_vcpu(CPUState *cs)
+ uint64_t val;
+ int ret;
+ Error *local_err = NULL;
++ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
+
+ ret = 0;
+- qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);
++ cpu->vmsentry = qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs);
+
+ if (!kvm_get_one_reg(cs, KVM_REG_LOONGARCH_DEBUG_INST, &val)) {
+ brk_insn = val;
+@@ -928,6 +929,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
+
+ int kvm_arch_destroy_vcpu(CPUState *cs)
+ {
++ LoongArchCPU *cpu = LOONGARCH_CPU(cs);
++ qemu_del_vm_change_state_handler(cpu->vmsentry);
+ return 0;
+ }
+
+--
+2.43.5
+
diff --git a/0318-hw-loongarch-boot-adjust-the-loading-position-of-the.patch b/0318-hw-loongarch-boot-adjust-the-loading-position-of-the.patch
new file mode 100644
index 0000000..a836802
--- /dev/null
+++ b/0318-hw-loongarch-boot-adjust-the-loading-position-of-the.patch
@@ -0,0 +1,95 @@
+From e3b683aed05c772d30a323d61fc70b5907bec198 Mon Sep 17 00:00:00 2001
+From: Xianglai Li
+Date: Wed, 26 Mar 2025 17:02:37 +0800
+Subject: [PATCH] hw/loongarch/boot: Adjust the loading position of the initrd
+
+When only the -kernel parameter is used to load the ELF kernel,
+the initrd is loaded into RAM. If the initrd size is too large,
+the loading fails, resulting in a VM startup failure.
+This patch first loads the initrd near the kernel.
+When the memory space near the kernel is insufficient,
+it tries to load the initrd at the start of high memory.
+If there is still not enough space, qemu will report an error
+and ask the user to increase the memory space for the
+virtual machine to boot.
+
+Signed-off-by: Xianglai Li
+---
+ hw/loongarch/boot.c | 53 +++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 44 insertions(+), 9 deletions(-)
+
+diff --git a/hw/loongarch/boot.c b/hw/loongarch/boot.c
+index 53dcefbb55..13f0c37659 100644
+--- a/hw/loongarch/boot.c
++++ b/hw/loongarch/boot.c
+@@ -171,6 +171,48 @@ static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr)
+ return addr & MAKE_64BIT_MASK(0, TARGET_PHYS_ADDR_SPACE_BITS);
+ }
+
++static void find_initrd_loadoffset(struct loongarch_boot_info *info,
++ uint64_t kernel_high, ssize_t kernel_size)
++{
++ hwaddr base, size, gap, low_end;
++ ram_addr_t initrd_end, initrd_start;
++
++ base = VIRT_LOWMEM_BASE;
++ gap = VIRT_LOWMEM_SIZE;
++ initrd_start = ROUND_UP(kernel_high + 4 * kernel_size, 64 * KiB);
++ initrd_end = initrd_start + initrd_size;
++
++ size = info->ram_size;
++ low_end = base + MIN(size, gap);
++ if (initrd_end <= low_end) {
++ initrd_offset = initrd_start;
++ return ;
++ }
++
++ if (size <= gap) {
++ error_report("The low memory too small for initial ram disk '%s',"
++ "You need to expand the memory space",
++ info->initrd_filename);
++ exit(1);
++ }
++
++ /*
++ * Try to load initrd in the high memory
++ */
++ size -= gap;
++ base = VIRT_HIGHMEM_BASE;
++ initrd_start = ROUND_UP(base, 64 * KiB);
++ if (initrd_size <= size) {
++ initrd_offset = initrd_start;
++ return ;
++ }
++
++ error_report("The high memory too small for initial ram disk '%s',"
++ "You need to expand the memory space",
++ info->initrd_filename);
++ exit(1);
++}
++
+ static int64_t load_kernel_info(struct loongarch_boot_info *info)
+ {
+ uint64_t kernel_entry, kernel_low, kernel_high;
+@@ -192,16 +234,9 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info)
+ if (info->initrd_filename) {
+ initrd_size = get_image_size(info->initrd_filename);
+ if (initrd_size > 0) {
+- initrd_offset = ROUND_UP(kernel_high + 4 * kernel_size, 64 * KiB);
+-
+- if (initrd_offset + initrd_size > info->ram_size) {
+- error_report("memory too small for initial ram disk '%s'",
+- info->initrd_filename);
+- exit(1);
+- }
+-
++ find_initrd_loadoffset(info, kernel_high, kernel_size);
+ initrd_size = load_image_targphys(info->initrd_filename, initrd_offset,
+- info->ram_size - initrd_offset);
++ initrd_size);
+ }
+
+ if (initrd_size == (target_ulong)-1) {
+--
+2.43.5
+
diff --git a/0319-hw-rtc-fixed-loongson-rtc-emulation-errors.patch b/0319-hw-rtc-fixed-loongson-rtc-emulation-errors.patch
new file mode 100644
index 0000000..e00b91f
--- /dev/null
+++ b/0319-hw-rtc-fixed-loongson-rtc-emulation-errors.patch
@@ -0,0 +1,137 @@
+From 5cb54000e87f40ec91d9d8606ae1fb271f8a4ded Mon Sep 17 00:00:00 2001
+From: Xianglai Li
+Date: Mon, 7 Apr 2025 18:59:42 +0800
+Subject: [PATCH] hw/rtc: Fixed loongson rtc emulation errors
+
+The expire time is sent to the timer only
+when the expire time is greater than 0 or
+later than now. Otherwise, the timer
+would trigger interrupts continuously.
+
+Timer interrupts are now sent using the pulse function.
+ +Signed-off-by: Xianglai Li +--- + hw/loongarch/virt.c | 9 +++++++-- + hw/rtc/ls7a_rtc.c | 22 +++++++++++++--------- + 2 files changed, 20 insertions(+), 11 deletions(-) + +diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c +index f1953b9f82..12d34b19d7 100644 +--- a/hw/loongarch/virt.c ++++ b/hw/loongarch/virt.c +@@ -51,6 +51,11 @@ + #include "qemu/guest-random.h" + #include "sysemu/kvm.h" + ++#define FDT_IRQ_FLAGS_EDGE_LO_HI 1 ++#define FDT_IRQ_FLAGS_EDGE_HI_LO 2 ++#define FDT_IRQ_FLAGS_LEVEL_HI 4 ++#define FDT_IRQ_FLAGS_LEVEL_LO 8 ++ + static bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms) + { + if (lvms->veiointc == ON_OFF_AUTO_OFF) { +@@ -275,7 +280,7 @@ static void fdt_add_rtc_node(LoongArchVirtMachineState *lvms, + "loongson,ls7a-rtc"); + qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", +- VIRT_RTC_IRQ - VIRT_GSI_BASE , 0x4); ++ VIRT_RTC_IRQ - VIRT_GSI_BASE , FDT_IRQ_FLAGS_EDGE_LO_HI); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *pch_pic_phandle); + g_free(nodename); +@@ -334,7 +339,7 @@ static void fdt_add_uart_node(LoongArchVirtMachineState *lvms, + qemu_fdt_setprop_cell(ms->fdt, nodename, "clock-frequency", 100000000); + if (chosen) + qemu_fdt_setprop_string(ms->fdt, "/chosen", "stdout-path", nodename); +- qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", irq, 0x4); ++ qemu_fdt_setprop_cells(ms->fdt, nodename, "interrupts", irq, FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_cell(ms->fdt, nodename, "interrupt-parent", + *pch_pic_phandle); + g_free(nodename); +diff --git a/hw/rtc/ls7a_rtc.c b/hw/rtc/ls7a_rtc.c +index 1f9e38a735..be9546c850 100644 +--- a/hw/rtc/ls7a_rtc.c ++++ b/hw/rtc/ls7a_rtc.c +@@ -145,20 +145,22 @@ static void toymatch_write(LS7ARtcState *s, uint64_t val, int num) + now = qemu_clock_get_ms(rtc_clock); + toymatch_val_to_time(s, val, &tm); + expire_time = now + (qemu_timedate_diff(&tm) - s->offset_toy) * 1000; +- timer_mod(s->toy_timer[num], expire_time); ++ if (expire_time > now) ++ timer_mod(s->toy_timer[num], expire_time); + } + } + + static void rtcmatch_write(LS7ARtcState *s, uint64_t val, int num) + { +- uint64_t expire_ns; ++ int64_t expire_ns; + + /* it do not support write when toy disabled */ + if (rtc_enabled(s)) { + s->rtcmatch[num] = val; + /* calculate expire time */ + expire_ns = ticks_to_ns(val) - ticks_to_ns(s->offset_rtc); +- timer_mod_ns(s->rtc_timer[num], expire_ns); ++ if (expire_ns > 0) ++ timer_mod_ns(s->rtc_timer[num], expire_ns); + } + } + +@@ -185,7 +187,7 @@ static void ls7a_rtc_stop(LS7ARtcState *s) + static void ls7a_toy_start(LS7ARtcState *s) + { + int i; +- uint64_t expire_time, now; ++ int64_t expire_time, now; + struct tm tm = {}; + + now = qemu_clock_get_ms(rtc_clock); +@@ -194,19 +196,21 @@ static void ls7a_toy_start(LS7ARtcState *s) + for (i = 0; i < TIMER_NUMS; i++) { + toymatch_val_to_time(s, s->toymatch[i], &tm); + expire_time = now + (qemu_timedate_diff(&tm) - s->offset_toy) * 1000; +- timer_mod(s->toy_timer[i], expire_time); ++ if (expire_time > now) ++ timer_mod(s->toy_timer[i], expire_time); + } + } + + static void ls7a_rtc_start(LS7ARtcState *s) + { + int i; +- uint64_t expire_time; ++ int64_t expire_time; + + /* recalculate expire time and enable timer */ + for (i = 0; i < TIMER_NUMS; i++) { + expire_time = ticks_to_ns(s->rtcmatch[i]) - ticks_to_ns(s->offset_rtc); +- timer_mod_ns(s->rtc_timer[i], expire_time); ++ if (expire_time > 0) ++ timer_mod_ns(s->rtc_timer[i], expire_time); + } + } + +@@ 
-370,7 +374,7 @@ static void toy_timer_cb(void *opaque)
+ LS7ARtcState *s = opaque;
+
+ if (toy_enabled(s)) {
+- qemu_irq_raise(s->irq);
++ qemu_irq_pulse(s->irq);
+ }
+ }
+
+@@ -379,7 +383,7 @@ static void rtc_timer_cb(void *opaque)
+ LS7ARtcState *s = opaque;
+
+ if (rtc_enabled(s)) {
+- qemu_irq_raise(s->irq);
++ qemu_irq_pulse(s->irq);
+ }
+ }
+
+--
+2.43.5
+
diff --git a/0320-target-i386-introduce-sierraforest-v2-model.patch b/0320-target-i386-introduce-sierraforest-v2-model.patch
new file mode 100644
index 0000000..6fbca9f
--- /dev/null
+++ b/0320-target-i386-introduce-sierraforest-v2-model.patch
@@ -0,0 +1,62 @@
+From 549cac06fb517c2fe71c82e7ca66008a06120037 Mon Sep 17 00:00:00 2001
+From: Tao Su
+Date: Tue, 21 Jan 2025 10:06:47 +0800
+Subject: [PATCH] target/i386: Introduce SierraForest-v2 model
+
+commit c597ff5339a9918b00d9f4160126db0ac2a423cc upstream.
+
+Update SierraForest CPU model to add LAM, 4 bits indicating certain bits
+of IA32_SPEC_CTRL are supported (intel-psfd, ipred-ctrl, rrsba-ctrl,
+bhi-ctrl) and the missing features (ss, tsc-adjust, cldemote, movdiri,
+movdir64b).
+
+Also add GDS-NO and RFDS-NO to indicate the related vulnerabilities are
+mitigated in stepping 3.
+
+Intel-SIG: commit c597ff5339a9 target/i386: Introduce SierraForest-v2 model.
+backporting CWF model and new ISAs, plus new model changes
+
+Tested-by: Xuelian Guo
+Signed-off-by: Tao Su
+Reviewed-by: Zhao Liu
+Link: https://lore.kernel.org/r/20250121020650.1899618-2-tao1.su@linux.intel.com
+Signed-off-by: Paolo Bonzini
+[ Quanxian Wang: amend commit log ]
+Signed-off-by: Quanxian Wang
+---
+ target/i386/cpu.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/target/i386/cpu.c b/target/i386/cpu.c
+index 88a584b736..21329e71a0 100644
+--- a/target/i386/cpu.c
++++ b/target/i386/cpu.c
+@@ -4315,6 +4315,25 @@ static const X86CPUDefinition builtin_x86_defs[] = {
+ .model_id = "Intel Xeon Processor (SierraForest)",
+ .versions = (X86CPUVersionDefinition[]) {
+ { .version = 1 },
++ {
++ .version = 2,
++ .props = (PropValue[]) {
++ { "ss", "on" },
++ { "tsc-adjust", "on" },
++ { "cldemote", "on" },
++ { "movdiri", "on" },
++ { "movdir64b", "on" },
++ { "gds-no", "on" },
++ { "rfds-no", "on" },
++ { "lam", "on" },
++ { "intel-psfd", "on"},
++ { "ipred-ctrl", "on"},
++ { "rrsba-ctrl", "on"},
++ { "bhi-ctrl", "on"},
++ { "stepping", "3" },
++ { /* end of list */ }
++ }
++ },
+ { /* end of list */ },
+ },
+ },
+--
+2.43.5
+
diff --git a/0321-target-i386-export-bhi-no-bit-to-guests.patch b/0321-target-i386-export-bhi-no-bit-to-guests.patch
new file mode 100644
index 0000000..e9393ee
--- /dev/null
+++ b/0321-target-i386-export-bhi-no-bit-to-guests.patch
@@ -0,0 +1,45 @@
+From bf8d708c4781f9f8838f774cc8089f17d412a038 Mon Sep 17 00:00:00 2001
+From: Tao Su
+Date: Tue, 21 Jan 2025 10:06:48 +0800
+Subject: [PATCH] target/i386: Export BHI_NO bit to guests
+
+commit b611931d4f70b9a3e49e39c405c63b3b5e9c0df1 upstream.
+
+Branch History Injection (BHI) is a CPU side-channel vulnerability, where
+an attacker may manipulate branch history before transitioning from user
+to supervisor mode or from VMX non-root/guest to root mode. CPUs set the
+BHI_NO bit in MSR IA32_ARCH_CAPABILITIES to indicate that no additional
+mitigation is required to prevent BHI.
+
+Make the BHI_NO bit available to guests.
+
+Intel-SIG: commit b611931d4f70 target/i386: Export BHI_NO bit to guests.
+backporting CWF model and new ISAs, plus new model changes + +Tested-by: Xuelian Guo +Signed-off-by: Tao Su +Reviewed-by: Zhao Liu +Link: https://lore.kernel.org/r/20250121020650.1899618-3-tao1.su@linux.intel.com +Signed-off-by: Paolo Bonzini +[ Quanxian Wang: amend commit log ] +Signed-off-by: Quanxian Wang +--- + target/i386/cpu.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index 21329e71a0..ede1d722a2 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -1157,7 +1157,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { + "taa-no", NULL, NULL, NULL, + NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no", + NULL, "fb-clear", NULL, NULL, +- NULL, NULL, NULL, NULL, ++ "bhi-no", NULL, NULL, NULL, + "pbrsb-no", NULL, "gds-no", "rfds-no", + "rfds-clear", NULL, NULL, NULL, + }, +-- +2.43.5 + diff --git a/0322-target-i386-add-new-cpu-model-clearwaterforest.patch b/0322-target-i386-add-new-cpu-model-clearwaterforest.patch new file mode 100644 index 0000000..89b7a9c --- /dev/null +++ b/0322-target-i386-add-new-cpu-model-clearwaterforest.patch @@ -0,0 +1,271 @@ +From 8279164dabb4d95fb4a2cb95a5c3bd915d04f995 Mon Sep 17 00:00:00 2001 +From: Tao Su +Date: Tue, 21 Jan 2025 10:06:49 +0800 +Subject: [PATCH] target/i386: Add new CPU model ClearwaterForest + +commit 56e84d898f17606b5d88778726466540af96b234 upstream. + +According to table 1-2 in Intel Architecture Instruction Set Extensions +and Future Features (rev 056) [1], ClearwaterForest has the following new +features which have already been virtualized: + + - AVX-VNNI-INT16 CPUID.(EAX=7,ECX=1):EDX[bit 10] + - SHA512 CPUID.(EAX=7,ECX=1):EAX[bit 0] + - SM3 CPUID.(EAX=7,ECX=1):EAX[bit 1] + - SM4 CPUID.(EAX=7,ECX=1):EAX[bit 2] + +Add above features to new CPU model ClearwaterForest. Comparing with +SierraForest, ClearwaterForest bare-metal contains all features of +SierraForest-v2 CPU model and adds: + + - PREFETCHI CPUID.(EAX=7,ECX=1):EDX[bit 14] + - DDPD_U CPUID.(EAX=7,ECX=2):EDX[bit 3] + - BHI_NO IA32_ARCH_CAPABILITIES[bit 20] + +Add above and all features of SierraForest-v2 CPU model to new CPU model +ClearwaterForest. + +[1] https://cdrdv2.intel.com/v1/dl/getContent/671368 + +Intel-SIG: commit 56e84d898f17 target/i386: Add new CPU model ClearwaterForest. +backporting CWF model and new ISAs, plus new model changes + +Tested-by: Xuelian Guo +Signed-off-by: Tao Su +Reviewed-by: Zhao Liu +Link: https://lore.kernel.org/r/20250121020650.1899618-4-tao1.su@linux.intel.com +Signed-off-by: Paolo Bonzini +[ Quanxian Wang: amend commit log ] +Signed-off-by: Quanxian Wang +--- + target/i386/cpu.c | 135 ++++++++++++++++++++++++++++++++++++++++++++++ + target/i386/cpu.h | 35 +++++++++--- + 2 files changed, 164 insertions(+), 6 deletions(-) + +diff --git a/target/i386/cpu.c b/target/i386/cpu.c +index ede1d722a2..dc531b15cf 100644 +--- a/target/i386/cpu.c ++++ b/target/i386/cpu.c +@@ -4337,6 +4337,141 @@ static const X86CPUDefinition builtin_x86_defs[] = { + { /* end of list */ }, + }, + }, ++ { ++ .name = "ClearwaterForest", ++ .level = 0x23, ++ .xlevel = 0x80000008, ++ .vendor = CPUID_VENDOR_INTEL, ++ .family = 6, ++ .model = 221, ++ .stepping = 0, ++ /* ++ * please keep the ascending order so that we can have a clear view of ++ * bit position of each feature. 
++ */ ++ .features[FEAT_1_EDX] = ++ CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | ++ CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | ++ CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | ++ CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | ++ CPUID_SSE | CPUID_SSE2 | CPUID_SS, ++ .features[FEAT_1_ECX] = ++ CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | ++ CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | ++ CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | ++ CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | ++ CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, ++ .features[FEAT_8000_0001_EDX] = ++ CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | ++ CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, ++ .features[FEAT_8000_0001_ECX] = ++ CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, ++ .features[FEAT_8000_0008_EBX] = ++ CPUID_8000_0008_EBX_WBNOINVD, ++ .features[FEAT_7_0_EBX] = ++ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_TSC_ADJUST | ++ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | ++ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | ++ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | ++ CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | ++ CPUID_7_0_EBX_SHA_NI, ++ .features[FEAT_7_0_ECX] = ++ CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI | ++ CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | ++ CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT | ++ CPUID_7_0_ECX_CLDEMOTE | CPUID_7_0_ECX_MOVDIRI | ++ CPUID_7_0_ECX_MOVDIR64B, ++ .features[FEAT_7_0_EDX] = ++ CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | ++ CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | ++ CPUID_7_0_EDX_SPEC_CTRL_SSBD, ++ .features[FEAT_ARCH_CAPABILITIES] = ++ MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | ++ MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | ++ MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO | ++ MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO | ++ MSR_ARCH_CAP_BHI_NO | MSR_ARCH_CAP_PBRSB_NO | ++ MSR_ARCH_CAP_GDS_NO | MSR_ARCH_CAP_RFDS_NO, ++ .features[FEAT_XSAVE] = ++ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | ++ CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, ++ .features[FEAT_6_EAX] = ++ CPUID_6_EAX_ARAT, ++ .features[FEAT_7_1_EAX] = ++ CPUID_7_1_EAX_SHA512 | CPUID_7_1_EAX_SM3 | CPUID_7_1_EAX_SM4 | ++ CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD | ++ CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA | ++ CPUID_7_1_EAX_LAM, ++ .features[FEAT_7_1_EDX] = ++ CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT | ++ CPUID_7_1_EDX_AVX_VNNI_INT16 | CPUID_7_1_EDX_PREFETCHITI, ++ .features[FEAT_7_2_EDX] = ++ CPUID_7_2_EDX_PSFD | CPUID_7_2_EDX_IPRED_CTRL | ++ CPUID_7_2_EDX_RRSBA_CTRL | CPUID_7_2_EDX_DDPD_U | ++ CPUID_7_2_EDX_BHI_CTRL | CPUID_7_2_EDX_MCDT_NO, ++ .features[FEAT_VMX_BASIC] = ++ MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, ++ .features[FEAT_VMX_ENTRY_CTLS] = ++ VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | ++ VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | ++ VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, ++ .features[FEAT_VMX_EPT_VPID_CAPS] = ++ MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | ++ MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | ++ MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | ++ MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | ++ MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | ++ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | 
++ MSR_VMX_EPT_INVVPID_ALL_CONTEXT | ++ MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, ++ .features[FEAT_VMX_EXIT_CTLS] = ++ VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | ++ VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | ++ VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | ++ VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | ++ VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, ++ .features[FEAT_VMX_MISC] = ++ MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | ++ MSR_VMX_MISC_VMWRITE_VMEXIT, ++ .features[FEAT_VMX_PINBASED_CTLS] = ++ VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | ++ VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | ++ VMX_PIN_BASED_POSTED_INTR, ++ .features[FEAT_VMX_PROCBASED_CTLS] = ++ VMX_CPU_BASED_VIRTUAL_INTR_PENDING | ++ VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | ++ VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | ++ VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | ++ VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | ++ VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | ++ VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | ++ VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | ++ VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | ++ VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | ++ VMX_CPU_BASED_PAUSE_EXITING | ++ VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, ++ .features[FEAT_VMX_SECONDARY_CTLS] = ++ VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | ++ VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | ++ VMX_SECONDARY_EXEC_RDTSCP | ++ VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | ++ VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | ++ VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | ++ VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | ++ VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | ++ VMX_SECONDARY_EXEC_RDRAND_EXITING | ++ VMX_SECONDARY_EXEC_ENABLE_INVPCID | ++ VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | ++ VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | ++ VMX_SECONDARY_EXEC_XSAVES, ++ .features[FEAT_VMX_VMFUNC] = ++ MSR_VMX_VMFUNC_EPT_SWITCHING, ++ .model_id = "Intel Xeon Processor (ClearwaterForest)", ++ .versions = (X86CPUVersionDefinition[]) { ++ { .version = 1 }, ++ { /* end of list */ }, ++ }, ++ }, + { + .name = "Denverton", + .level = 21, +diff --git a/target/i386/cpu.h b/target/i386/cpu.h +index 1402ee5e9d..9b37b6a8c4 100644 +--- a/target/i386/cpu.h ++++ b/target/i386/cpu.h +@@ -800,6 +800,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + + /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ + #define CPUID_7_0_EBX_FSGSBASE (1U << 0) ++/* Support TSC adjust MSR */ ++#define CPUID_7_0_EBX_TSC_ADJUST (1U << 1) + /* Support SGX */ + #define CPUID_7_0_EBX_SGX (1U << 2) + /* 1st Group of Advanced Bit Manipulation Extensions */ +@@ -933,6 +935,12 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + /* Speculative Store Bypass Disable */ + #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) + ++/* SHA512 Instruction */ ++#define CPUID_7_1_EAX_SHA512 (1U << 0) ++/* SM3 Instruction */ ++#define CPUID_7_1_EAX_SM3 (1U << 1) ++/* SM4 Instruction */ ++#define CPUID_7_1_EAX_SM4 (1U << 2) + /* AVX VNNI Instruction */ + #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) + /* AVX512 BFloat16 Instruction */ +@@ -945,6 +953,12 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + #define 
CPUID_7_1_EAX_FSRS (1U << 11) + /* Fast Short REP CMPS/SCAS */ + #define CPUID_7_1_EAX_FSRC (1U << 12) ++/* Flexible return and event delivery (FRED) */ ++#define CPUID_7_1_EAX_FRED (1U << 17) ++/* Load into IA32_KERNEL_GS_BASE (LKGS) */ ++#define CPUID_7_1_EAX_LKGS (1U << 18) ++/* Non-Serializing Write to Model Specific Register (WRMSRNS) */ ++#define CPUID_7_1_EAX_WRMSRNS (1U << 19) + /* Support Tile Computational Operations on FP16 Numbers */ + #define CPUID_7_1_EAX_AMX_FP16 (1U << 21) + /* Support for VPMADD52[H,L]UQ */ +@@ -958,15 +972,21 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + #define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) + /* AMX COMPLEX Instructions */ + #define CPUID_7_1_EDX_AMX_COMPLEX (1U << 8) ++/* AVX-VNNI-INT16 Instructions */ ++#define CPUID_7_1_EDX_AVX_VNNI_INT16 (1U << 10) + /* PREFETCHIT0/1 Instructions */ + #define CPUID_7_1_EDX_PREFETCHITI (1U << 14) +-/* Flexible return and event delivery (FRED) */ +-#define CPUID_7_1_EAX_FRED (1U << 17) +-/* Load into IA32_KERNEL_GS_BASE (LKGS) */ +-#define CPUID_7_1_EAX_LKGS (1U << 18) +-/* Non-Serializing Write to Model Specific Register (WRMSRNS) */ +-#define CPUID_7_1_EAX_WRMSRNS (1U << 19) + ++/* Indicate bit 7 of the IA32_SPEC_CTRL MSR is supported */ ++#define CPUID_7_2_EDX_PSFD (1U << 0) ++/* Indicate bits 3 and 4 of the IA32_SPEC_CTRL MSR are supported */ ++#define CPUID_7_2_EDX_IPRED_CTRL (1U << 1) ++/* Indicate bits 5 and 6 of the IA32_SPEC_CTRL MSR are supported */ ++#define CPUID_7_2_EDX_RRSBA_CTRL (1U << 2) ++/* Indicate bit 8 of the IA32_SPEC_CTRL MSR is supported */ ++#define CPUID_7_2_EDX_DDPD_U (1U << 3) ++/* Indicate bit 10 of the IA32_SPEC_CTRL MSR is supported */ ++#define CPUID_7_2_EDX_BHI_CTRL (1U << 4) + /* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ + #define CPUID_7_2_EDX_MCDT_NO (1U << 5) + +@@ -1060,7 +1080,10 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); + #define MSR_ARCH_CAP_FBSDP_NO (1U << 14) + #define MSR_ARCH_CAP_PSDP_NO (1U << 15) + #define MSR_ARCH_CAP_FB_CLEAR (1U << 17) ++#define MSR_ARCH_CAP_BHI_NO (1U << 20) + #define MSR_ARCH_CAP_PBRSB_NO (1U << 24) ++#define MSR_ARCH_CAP_GDS_NO (1U << 26) ++#define MSR_ARCH_CAP_RFDS_NO (1U << 27) + + #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) + +-- +2.43.5 + diff --git a/0323-docs-add-gnr-srf-and-cwf-cpu-models.patch b/0323-docs-add-gnr-srf-and-cwf-cpu-models.patch new file mode 100644 index 0000000..f7b324c --- /dev/null +++ b/0323-docs-add-gnr-srf-and-cwf-cpu-models.patch @@ -0,0 +1,120 @@ +From 36be6715343e3ede1ad8a9d0735056973eb351b7 Mon Sep 17 00:00:00 2001 +From: Tao Su +Date: Tue, 21 Jan 2025 10:06:50 +0800 +Subject: [PATCH] docs: Add GNR, SRF and CWF CPU models + +commit 0a6dec6d11e5e392dcd6299548bf1514f1201707 upstream. + +Update GraniteRapids, SierraForest and ClearwaterForest CPU models in +section "Preferred CPU models for Intel x86 hosts". + +Also introduce bhi-no, gds-no and rfds-no in doc. + +Intel-SIG: commit 0a6dec6d11e5 docs: Add GNR, SRF and CWF CPU models. 
+backporting CWF model and new ISAs, plus new model changes + +Suggested-by: Zhao Liu +Signed-off-by: Tao Su +Reviewed-by: Zhao Liu +Link: https://lore.kernel.org/r/20250121020650.1899618-5-tao1.su@linux.intel.com +Signed-off-by: Paolo Bonzini +[ Quanxian Wang: amend commit log ] +Signed-off-by: Quanxian Wang +--- + docs/system/cpu-models-x86.rst.inc | 50 +++++++++++++++++++++++++++--- + 1 file changed, 46 insertions(+), 4 deletions(-) + +diff --git a/docs/system/cpu-models-x86.rst.inc b/docs/system/cpu-models-x86.rst.inc +index 7f6368f999..37fe1d0ac8 100644 +--- a/docs/system/cpu-models-x86.rst.inc ++++ b/docs/system/cpu-models-x86.rst.inc +@@ -71,6 +71,16 @@ mixture of host CPU models between machines, if live migration + compatibility is required, use the newest CPU model that is compatible + across all desired hosts. + ++``ClearwaterForest`` ++ Intel Xeon Processor (ClearwaterForest, 2025) ++ ++``SierraForest``, ``SierraForest-v2`` ++ Intel Xeon Processor (SierraForest, 2024), SierraForest-v2 mitigates ++ the GDS and RFDS vulnerabilities with stepping 3. ++ ++``GraniteRapids``, ``GraniteRapids-v2`` ++ Intel Xeon Processor (GraniteRapids, 2024) ++ + ``Cascadelake-Server``, ``Cascadelake-Server-noTSX`` + Intel Xeon Processor (Cascade Lake, 2019), with "stepping" levels 6 + or 7 only. (The Cascade Lake Xeon processor with *stepping 5 is +@@ -181,7 +191,7 @@ features are included if using "Host passthrough" or "Host model". + CVE-2018-12127, [MSBDS] CVE-2018-12126). + + This is an MSR (Model-Specific Register) feature rather than a CPUID feature, +- so it will not appear in the Linux ``/proc/cpuinfo`` in the host or ++ therefore it will not appear in the Linux ``/proc/cpuinfo`` in the host or + guest. Instead, the host kernel uses it to populate the MDS + vulnerability file in ``sysfs``. + +@@ -189,10 +199,10 @@ features are included if using "Host passthrough" or "Host model". + affected} in the ``/sys/devices/system/cpu/vulnerabilities/mds`` file. + + ``taa-no`` +- Recommended to inform that the guest that the host is ``not`` ++ Recommended to inform the guest that the host is ``not`` + vulnerable to CVE-2019-11135, TSX Asynchronous Abort (TAA). + +- This too is an MSR feature, so it does not show up in the Linux ++ This is also an MSR feature, therefore it does not show up in the Linux + ``/proc/cpuinfo`` in the host or guest. + + It should only be enabled for VMs if the host reports ``Not affected`` +@@ -214,7 +224,7 @@ features are included if using "Host passthrough" or "Host model". + By disabling TSX, KVM-based guests can avoid paying the price of + mitigating TSX-based attacks. + +- Note that ``tsx-ctrl`` too is an MSR feature, so it does not show ++ Note that ``tsx-ctrl`` is also an MSR feature, therefore it does not show + up in the Linux ``/proc/cpuinfo`` in the host or guest. + + To validate that Intel TSX is indeed disabled for the guest, there are +@@ -223,6 +233,38 @@ features are included if using "Host passthrough" or "Host model". + ``/sys/devices/system/cpu/vulnerabilities/tsx_async_abort`` file in + the guest should report ``Mitigation: TSX disabled``. + ++``bhi-no`` ++ Recommended to inform the guest that the host is ``not`` ++ vulnerable to CVE-2022-0001, Branch History Injection (BHI). ++ ++ This is also an MSR feature, therefore it does not show up in the Linux ++ ``/proc/cpuinfo`` in the host or guest. ++ ++ It should only be enabled for VMs if the host reports ++ ``BHI: Not affected`` in the ++ ``/sys/devices/system/cpu/vulnerabilities/spectre_v2`` file. 
++
++``gds-no``
++  Recommended to inform the guest that the host is ``not``
++  vulnerable to CVE-2022-40982, Gather Data Sampling (GDS).
++
++  This is also an MSR feature, therefore it does not show up in the Linux
++  ``/proc/cpuinfo`` in the host or guest.
++
++  It should only be enabled for VMs if the host reports ``Not affected``
++  in the ``/sys/devices/system/cpu/vulnerabilities/gather_data_sampling``
++  file.
++
++``rfds-no``
++  Recommended to inform the guest that the host is ``not``
++  vulnerable to CVE-2023-28746, Register File Data Sampling (RFDS).
++
++  This is also an MSR feature, therefore it does not show up in the Linux
++  ``/proc/cpuinfo`` in the host or guest.
++
++  It should only be enabled for VMs if the host reports ``Not affected``
++  in the ``/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling``
++  file.
+
+ Preferred CPU models for AMD x86 hosts
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+--
+2.43.5
+
diff --git a/0324-target-i386-add-sha512-sm3-sm4-feature-bits.patch b/0324-target-i386-add-sha512-sm3-sm4-feature-bits.patch
new file mode 100644
index 0000000..92e43dc
--- /dev/null
+++ b/0324-target-i386-add-sha512-sm3-sm4-feature-bits.patch
@@ -0,0 +1,41 @@
+From 74e1071344e04e45e4dc8f6e61b521147634b9a8 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini
+Date: Wed, 3 Jul 2024 13:42:49 +0200
+Subject: [PATCH] target/i386: add sha512, sm3, sm4 feature bits
+
+commit 78be258c0eeba3d5613c37888889e84f2ba9bd94 upstream.
+
+SHA512, SM3 and SM4 (CPUID[EAX=7,ECX=1].EAX bits 0 to 2) are supported by
+the Clearwater Forest processor; add them to QEMU, as they do not need any
+specific enablement.
+
+See https://lore.kernel.org/kvm/20241105054825.870939-1-tao1.su@linux.intel.com/
+for reference.
+
+Intel-SIG: commit 78be258c0eeb target/i386: add sha512, sm3, sm4 feature bits.
+backporting CWF model and new ISAs, plus new model changes
+
+Reviewed-by: Tao Su
+Signed-off-by: Paolo Bonzini
+[ Quanxian Wang: amend commit log ]
+Signed-off-by: Quanxian Wang
+---
+ target/i386/cpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/target/i386/cpu.c b/target/i386/cpu.c
+index dc531b15cf..6dbca44be1 100644
+--- a/target/i386/cpu.c
++++ b/target/i386/cpu.c
+@@ -962,7 +962,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
+     [FEAT_7_1_EAX] = {
+         .type = CPUID_FEATURE_WORD,
+         .feat_names = {
+-            NULL, NULL, NULL, NULL,
++            "sha512", "sm3", "sm4", NULL,
+             "avx-vnni", "avx512-bf16", NULL, "cmpccxadd",
+             NULL, NULL, "fzrm", "fsrs",
+             "fsrc", NULL, NULL, NULL,
+--
+2.43.5
+
diff --git a/0325-i386-kvm-move-architectural-cpuid-leaf-generation-to.patch b/0325-i386-kvm-move-architectural-cpuid-leaf-generation-to.patch
new file mode 100644
index 0000000..d648c14
--- /dev/null
+++ b/0325-i386-kvm-move-architectural-cpuid-leaf-generation-to.patch
@@ -0,0 +1,560 @@
+From e7defcc647901d458f58dd9556401afca1e04c9d Mon Sep 17 00:00:00 2001
+From: Sean Christopherson
+Date: Thu, 29 Feb 2024 01:36:43 -0500
+Subject: [PATCH] i386/kvm: Move architectural CPUID leaf generation to
+ separate helper
+
+commit a5acf4f26c208a05d05ef1bde65553ce2ab5e5d0 upstream
+
+Move the architectural (for lack of a better term) CPUID leaf generation
+to a separate helper so that the generation code can be reused by TDX,
+which needs to generate a canonical VM-scoped configuration.
+
+For now this is just a cleanup, so keep the function static.
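As a rough sketch of the refactoring pattern applied here (simplified types and leaf handling, not the actual QEMU code in the diff below), the leaf loop moves into a reusable helper that threads the entry count through and funnels every capacity check into a single overflow exit:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ENTRIES 100

    struct cpuid_entry {
        uint32_t function, index, eax, ebx, ecx, edx;
    };

    /* Fill 'entries' starting at index 'n' and return the new count.
     * One shared 'full' exit replaces the per-leaf fprintf()/abort()
     * pairs, which is what lets callers other than vCPU init (e.g. a
     * VM-scoped TDX configuration) reuse the generation loop. */
    static uint32_t build_cpuid(struct cpuid_entry *entries, uint32_t n)
    {
        uint32_t leaf;

        for (leaf = 0; leaf <= 0x1f; leaf++) {
            if (n == MAX_ENTRIES) {
                goto full;
            }
            entries[n] = (struct cpuid_entry){ .function = leaf };
            n++;
        }
        return n;

    full:
        fprintf(stderr, "cpuid table full at leaf 0x%x\n", leaf);
        abort();
    }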
+ +Signed-off-by: Sean Christopherson +Signed-off-by: Xiaoyao Li +Message-ID: <20240229063726.610065-23-xiaoyao.li@intel.com> +Reviewed-by: Xiaoyao Li +Signed-off-by: Paolo Bonzini +Signed-off-by: Hemanth Selam +Signed-off-by: mohanasv +--- + target/i386/kvm/kvm.c | 434 +++++++++++++++++++++--------------------- + 1 file changed, 218 insertions(+), 216 deletions(-) + +diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c +index 2df3ff99c3..1646766559 100644 +--- a/target/i386/kvm/kvm.c ++++ b/target/i386/kvm/kvm.c +@@ -1701,215 +1701,38 @@ static void kvm_init_nested_state(CPUX86State *env) + } + } + +-int kvm_arch_init_vcpu(CPUState *cs) ++static uint32_t kvm_x86_build_cpuid(CPUX86State *env, ++ struct kvm_cpuid_entry2 *entries, ++ uint32_t cpuid_i) + { +- struct { +- struct kvm_cpuid2 cpuid; +- struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; +- } cpuid_data; +- /* +- * The kernel defines these structs with padding fields so there +- * should be no extra padding in our cpuid_data struct. +- */ +- QEMU_BUILD_BUG_ON(sizeof(cpuid_data) != +- sizeof(struct kvm_cpuid2) + +- sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES); +- +- X86CPU *cpu = X86_CPU(cs); +- CPUX86State *env = &cpu->env; +- uint32_t limit, i, j, cpuid_i; ++ uint32_t limit, i, j; + uint32_t unused; + struct kvm_cpuid_entry2 *c; +- uint32_t signature[3]; +- int kvm_base = KVM_CPUID_SIGNATURE; +- int max_nested_state_len; +- int r; +- Error *local_err = NULL; +- +- memset(&cpuid_data, 0, sizeof(cpuid_data)); +- +- cpuid_i = 0; +- +- has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); +- +- r = kvm_arch_set_tsc_khz(cs); +- if (r < 0) { +- return r; +- } +- +- /* vcpu's TSC frequency is either specified by user, or following +- * the value used by KVM if the former is not present. In the +- * latter case, we query it from KVM and record in env->tsc_khz, +- * so that vcpu's TSC frequency can be migrated later via this field. +- */ +- if (!env->tsc_khz) { +- r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? +- kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : +- -ENOTSUP; +- if (r > 0) { +- env->tsc_khz = r; +- } +- } +- +- env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY; +- +- /* +- * kvm_hyperv_expand_features() is called here for the second time in case +- * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle +- * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to +- * check which Hyper-V enlightenments are supported and which are not, we +- * can still proceed and check/expand Hyper-V enlightenments here so legacy +- * behavior is preserved. 
+- */ +- if (!kvm_hyperv_expand_features(cpu, &local_err)) { +- error_report_err(local_err); +- return -ENOSYS; +- } +- +- if (hyperv_enabled(cpu)) { +- r = hyperv_init_vcpu(cpu); +- if (r) { +- return r; +- } +- +- cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries); +- kvm_base = KVM_CPUID_SIGNATURE_NEXT; +- has_msr_hv_hypercall = true; +- } +- +- if (cs->kvm_state->xen_version) { +-#ifdef CONFIG_XEN_EMU +- struct kvm_cpuid_entry2 *xen_max_leaf; +- +- memcpy(signature, "XenVMMXenVMM", 12); +- +- xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; +- c->function = kvm_base + XEN_CPUID_SIGNATURE; +- c->eax = kvm_base + XEN_CPUID_TIME; +- c->ebx = signature[0]; +- c->ecx = signature[1]; +- c->edx = signature[2]; +- +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = kvm_base + XEN_CPUID_VENDOR; +- c->eax = cs->kvm_state->xen_version; +- c->ebx = 0; +- c->ecx = 0; +- c->edx = 0; +- +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = kvm_base + XEN_CPUID_HVM_MSR; +- /* Number of hypercall-transfer pages */ +- c->eax = 1; +- /* Hypercall MSR base address */ +- if (hyperv_enabled(cpu)) { +- c->ebx = XEN_HYPERCALL_MSR_HYPERV; +- kvm_xen_init(cs->kvm_state, c->ebx); +- } else { +- c->ebx = XEN_HYPERCALL_MSR; +- } +- c->ecx = 0; +- c->edx = 0; +- +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = kvm_base + XEN_CPUID_TIME; +- c->eax = ((!!tsc_is_stable_and_known(env) << 1) | +- (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); +- /* default=0 (emulate if necessary) */ +- c->ebx = 0; +- /* guest tsc frequency */ +- c->ecx = env->user_tsc_khz; +- /* guest tsc incarnation (migration count) */ +- c->edx = 0; +- +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = kvm_base + XEN_CPUID_HVM; +- xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; +- if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { +- c->function = kvm_base + XEN_CPUID_HVM; +- +- if (cpu->xen_vapic) { +- c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; +- c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; +- } +- +- c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; +- +- if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { +- c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; +- c->ebx = cs->cpu_index; +- } +- +- if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { +- c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; +- } +- } +- +- r = kvm_xen_init_vcpu(cs); +- if (r) { +- return r; +- } +- +- kvm_base += 0x100; +-#else /* CONFIG_XEN_EMU */ +- /* This should never happen as kvm_arch_init() would have died first. 
*/ +- fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n"); +- abort(); +-#endif +- } else if (cpu->expose_kvm) { +- memcpy(signature, "KVMKVMKVM\0\0\0", 12); +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = KVM_CPUID_SIGNATURE | kvm_base; +- c->eax = KVM_CPUID_FEATURES | kvm_base; +- c->ebx = signature[0]; +- c->ecx = signature[1]; +- c->edx = signature[2]; +- +- c = &cpuid_data.entries[cpuid_i++]; +- c->function = KVM_CPUID_FEATURES | kvm_base; +- c->eax = env->features[FEAT_KVM]; +- c->edx = env->features[FEAT_KVM_HINTS]; +- } + + cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); + +- if (cpu->kvm_pv_enforce_cpuid) { +- r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1); +- if (r < 0) { +- fprintf(stderr, +- "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s", +- strerror(-r)); +- abort(); +- } +- } +- + for (i = 0; i <= limit; i++) { ++ j = 0; + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "unsupported level value: 0x%x\n", limit); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; +- ++ c = &entries[cpuid_i++]; + switch (i) { + case 2: { + /* Keep reading function 2 till all the input is received */ + int times; + + c->function = i; ++ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | ++ KVM_CPUID_FLAG_STATE_READ_NEXT; + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); + times = c->eax & 0xff; +- if (times > 1) { +- c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | +- KVM_CPUID_FLAG_STATE_READ_NEXT; +- } + + for (j = 1; j < times; ++j) { + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "cpuid_data is full, no space for " +- "cpuid(eax:2):eax & 0xf = 0x%x\n", times); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + c->function = i; + c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; + cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); +@@ -1926,6 +1749,10 @@ int kvm_arch_init_vcpu(CPUState *cs) + case 0xb: + case 0xd: + for (j = 0; ; j++) { ++ if (i == 0xd && j == 64) { ++ break; ++ } ++ + c->function = i; + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + c->index = j; +@@ -1941,19 +1768,12 @@ int kvm_arch_init_vcpu(CPUState *cs) + break; + } + if (i == 0xd && c->eax == 0) { +- if (j < 63) { +- continue; +- } else { +- cpuid_i--; +- break; +- } ++ continue; + } + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "cpuid_data is full, no space for " +- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + } + break; + case 0x12: +@@ -1968,11 +1788,9 @@ int kvm_arch_init_vcpu(CPUState *cs) + } + + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "cpuid_data is full, no space for " +- "cpuid(eax:0x12,ecx:0x%x)\n", j); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + } + break; + case 0x7: +@@ -1989,11 +1807,9 @@ int kvm_arch_init_vcpu(CPUState *cs) + + for (j = 1; j <= times; ++j) { + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "cpuid_data is full, no space for " +- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + c->function = i; + c->index = j; + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; +@@ -2046,11 +1862,11 @@ int kvm_arch_init_vcpu(CPUState *cs) + cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused); + + for (i = 0x80000000; i <= limit; i++) { ++ j = 0; + if (cpuid_i == 
KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + + switch (i) { + case 0x8000001d: +@@ -2065,11 +1881,9 @@ int kvm_arch_init_vcpu(CPUState *cs) + break; + } + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "cpuid_data is full, no space for " +- "cpuid(eax:0x%x,ecx:0x%x)\n", i, j); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + } + break; + default: +@@ -2092,11 +1906,11 @@ int kvm_arch_init_vcpu(CPUState *cs) + cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused); + + for (i = 0xC0000000; i <= limit; i++) { ++ j = 0; + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { +- fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit); +- abort(); ++ goto full; + } +- c = &cpuid_data.entries[cpuid_i++]; ++ c = &entries[cpuid_i++]; + + c->function = i; + c->flags = 0; +@@ -2104,6 +1918,194 @@ int kvm_arch_init_vcpu(CPUState *cs) + } + } + ++ return cpuid_i; ++ ++full: ++ fprintf(stderr, "cpuid_data is full, no space for " ++ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j); ++ abort(); ++} ++ ++int kvm_arch_init_vcpu(CPUState *cs) ++{ ++ struct { ++ struct kvm_cpuid2 cpuid; ++ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; ++ } cpuid_data; ++ /* ++ * The kernel defines these structs with padding fields so there ++ * should be no extra padding in our cpuid_data struct. ++ */ ++ QEMU_BUILD_BUG_ON(sizeof(cpuid_data) != ++ sizeof(struct kvm_cpuid2) + ++ sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES); ++ ++ X86CPU *cpu = X86_CPU(cs); ++ CPUX86State *env = &cpu->env; ++ uint32_t cpuid_i; ++ struct kvm_cpuid_entry2 *c; ++ uint32_t signature[3]; ++ int kvm_base = KVM_CPUID_SIGNATURE; ++ int max_nested_state_len; ++ int r; ++ Error *local_err = NULL; ++ ++ memset(&cpuid_data, 0, sizeof(cpuid_data)); ++ ++ cpuid_i = 0; ++ ++ has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); ++ ++ r = kvm_arch_set_tsc_khz(cs); ++ if (r < 0) { ++ return r; ++ } ++ ++ /* vcpu's TSC frequency is either specified by user, or following ++ * the value used by KVM if the former is not present. In the ++ * latter case, we query it from KVM and record in env->tsc_khz, ++ * so that vcpu's TSC frequency can be migrated later via this field. ++ */ ++ if (!env->tsc_khz) { ++ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? ++ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : ++ -ENOTSUP; ++ if (r > 0) { ++ env->tsc_khz = r; ++ } ++ } ++ ++ env->apic_bus_freq = KVM_APIC_BUS_FREQUENCY; ++ ++ /* ++ * kvm_hyperv_expand_features() is called here for the second time in case ++ * KVM_CAP_SYS_HYPERV_CPUID is not supported. While we can't possibly handle ++ * 'query-cpu-model-expansion' in this case as we don't have a KVM vCPU to ++ * check which Hyper-V enlightenments are supported and which are not, we ++ * can still proceed and check/expand Hyper-V enlightenments here so legacy ++ * behavior is preserved. 
++ */ ++ if (!kvm_hyperv_expand_features(cpu, &local_err)) { ++ error_report_err(local_err); ++ return -ENOSYS; ++ } ++ ++ if (hyperv_enabled(cpu)) { ++ r = hyperv_init_vcpu(cpu); ++ if (r) { ++ return r; ++ } ++ ++ cpuid_i = hyperv_fill_cpuids(cs, cpuid_data.entries); ++ kvm_base = KVM_CPUID_SIGNATURE_NEXT; ++ has_msr_hv_hypercall = true; ++ } ++ ++ if (cs->kvm_state->xen_version) { ++#ifdef CONFIG_XEN_EMU ++ struct kvm_cpuid_entry2 *xen_max_leaf; ++ ++ memcpy(signature, "XenVMMXenVMM", 12); ++ ++ xen_max_leaf = c = &cpuid_data.entries[cpuid_i++]; ++ c->function = kvm_base + XEN_CPUID_SIGNATURE; ++ c->eax = kvm_base + XEN_CPUID_TIME; ++ c->ebx = signature[0]; ++ c->ecx = signature[1]; ++ c->edx = signature[2]; ++ ++ c = &cpuid_data.entries[cpuid_i++]; ++ c->function = kvm_base + XEN_CPUID_VENDOR; ++ c->eax = cs->kvm_state->xen_version; ++ c->ebx = 0; ++ c->ecx = 0; ++ c->edx = 0; ++ ++ c = &cpuid_data.entries[cpuid_i++]; ++ c->function = kvm_base + XEN_CPUID_HVM_MSR; ++ /* Number of hypercall-transfer pages */ ++ c->eax = 1; ++ /* Hypercall MSR base address */ ++ if (hyperv_enabled(cpu)) { ++ c->ebx = XEN_HYPERCALL_MSR_HYPERV; ++ kvm_xen_init(cs->kvm_state, c->ebx); ++ } else { ++ c->ebx = XEN_HYPERCALL_MSR; ++ } ++ c->ecx = 0; ++ c->edx = 0; ++ ++ c = &cpuid_data.entries[cpuid_i++]; ++ c->function = kvm_base + XEN_CPUID_TIME; ++ c->eax = ((!!tsc_is_stable_and_known(env) << 1) | ++ (!!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP) << 2)); ++ /* default=0 (emulate if necessary) */ ++ c->ebx = 0; ++ /* guest tsc frequency */ ++ c->ecx = env->user_tsc_khz; ++ /* guest tsc incarnation (migration count) */ ++ c->edx = 0; ++ ++ c = &cpuid_data.entries[cpuid_i++]; ++ c->function = kvm_base + XEN_CPUID_HVM; ++ xen_max_leaf->eax = kvm_base + XEN_CPUID_HVM; ++ if (cs->kvm_state->xen_version >= XEN_VERSION(4, 5)) { ++ c->function = kvm_base + XEN_CPUID_HVM; ++ ++ if (cpu->xen_vapic) { ++ c->eax |= XEN_HVM_CPUID_APIC_ACCESS_VIRT; ++ c->eax |= XEN_HVM_CPUID_X2APIC_VIRT; ++ } ++ ++ c->eax |= XEN_HVM_CPUID_IOMMU_MAPPINGS; ++ ++ if (cs->kvm_state->xen_version >= XEN_VERSION(4, 6)) { ++ c->eax |= XEN_HVM_CPUID_VCPU_ID_PRESENT; ++ c->ebx = cs->cpu_index; ++ } ++ ++ if (cs->kvm_state->xen_version >= XEN_VERSION(4, 17)) { ++ c->eax |= XEN_HVM_CPUID_UPCALL_VECTOR; ++ } ++ } ++ ++ r = kvm_xen_init_vcpu(cs); ++ if (r) { ++ return r; ++ } ++ ++ kvm_base += 0x100; ++#else /* CONFIG_XEN_EMU */ ++ /* This should never happen as kvm_arch_init() would have died first. 
*/
++        fprintf(stderr, "Cannot enable Xen CPUID without Xen support\n");
++        abort();
++#endif
++    } else if (cpu->expose_kvm) {
++        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
++        c = &cpuid_data.entries[cpuid_i++];
++        c->function = KVM_CPUID_SIGNATURE | kvm_base;
++        c->eax = KVM_CPUID_FEATURES | kvm_base;
++        c->ebx = signature[0];
++        c->ecx = signature[1];
++        c->edx = signature[2];
++
++        c = &cpuid_data.entries[cpuid_i++];
++        c->function = KVM_CPUID_FEATURES | kvm_base;
++        c->eax = env->features[FEAT_KVM];
++        c->edx = env->features[FEAT_KVM_HINTS];
++    }
++
++    if (cpu->kvm_pv_enforce_cpuid) {
++        r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1);
++        if (r < 0) {
++            fprintf(stderr,
++                    "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s",
++                    strerror(-r));
++            abort();
++        }
++    }
++
++    cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
+     cpuid_data.cpuid.nent = cpuid_i;
+
+     if (((env->cpuid_version >> 8)&0xF) >= 6
+--
+2.43.5
+
diff --git a/0326-pci-host-q35-move-pam-initialization-above-smram-ini.patch b/0326-pci-host-q35-move-pam-initialization-above-smram-ini.patch
new file mode 100644
index 0000000..94dcd5d
--- /dev/null
+++ b/0326-pci-host-q35-move-pam-initialization-above-smram-ini.patch
@@ -0,0 +1,62 @@
+From c905e66ccab5b19b5b7d32b3eeed9b886da4d2d0 Mon Sep 17 00:00:00 2001
+From: Isaku Yamahata
+Date: Wed, 20 Mar 2024 03:39:13 -0500
+Subject: [PATCH] pci-host/q35: Move PAM initialization above SMRAM
+ initialization
+
+commit 42c11ae2416dcbcd694ec3ee574fe2f3e70099ae upstream
+
+In mch_realize(), process PAM initialization before SMRAM initialization so
+that a later patch can skip all the SMRAM-related setup with a single check.
+
+Signed-off-by: Isaku Yamahata
+Signed-off-by: Xiaoyao Li
+Signed-off-by: Michael Roth
+Message-ID: <20240320083945.991426-18-michael.roth@amd.com>
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Hemanth Selam
+Signed-off-by: mohanasv
+---
+ hw/pci-host/q35.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c
+index 08534bc7cc..4ac44975c7 100644
+--- a/hw/pci-host/q35.c
++++ b/hw/pci-host/q35.c
+@@ -568,6 +568,16 @@ static void mch_realize(PCIDevice *d, Error **errp)
+     /* setup pci memory mapping */
+     pc_pci_as_mapping_init(mch->system_memory, mch->pci_address_space);
+
++    /* PAM */
++    init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory,
++             mch->system_memory, mch->pci_address_space,
++             PAM_BIOS_BASE, PAM_BIOS_SIZE);
++    for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) {
++        init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory,
++                 mch->system_memory, mch->pci_address_space,
++                 PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
++    }
++
+     /* if *disabled* show SMRAM to all CPUs */
+     memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region",
+                              mch->pci_address_space, MCH_HOST_BRIDGE_SMRAM_C_BASE,
+@@ -634,15 +644,6 @@ static void mch_realize(PCIDevice *d, Error **errp)
+
+     object_property_add_const_link(qdev_get_machine(), "smram",
+                                    OBJECT(&mch->smram));
+-
+-    init_pam(&mch->pam_regions[0], OBJECT(mch), mch->ram_memory,
+-             mch->system_memory, mch->pci_address_space,
+-             PAM_BIOS_BASE, PAM_BIOS_SIZE);
+-    for (i = 0; i < ARRAY_SIZE(mch->pam_regions) - 1; ++i) {
+-        init_pam(&mch->pam_regions[i + 1], OBJECT(mch), mch->ram_memory,
+-                 mch->system_memory, mch->pci_address_space,
+-                 PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE);
+-    }
+ }
+
+ uint64_t mch_mcfg_base(void)
+--
+2.43.5
+
diff --git
a/0327-q35-introduce-smm-ranges-property-for-q35-pci-host.patch b/0327-q35-introduce-smm-ranges-property-for-q35-pci-host.patch new file mode 100644 index 0000000..099a062 --- /dev/null +++ b/0327-q35-introduce-smm-ranges-property-for-q35-pci-host.patch @@ -0,0 +1,158 @@ +From a14a40a3ba927db6e9d73eb54f9b1d403fd418fc Mon Sep 17 00:00:00 2001 +From: Isaku Yamahata +Date: Wed, 20 Mar 2024 03:39:14 -0500 +Subject: [PATCH] q35: Introduce smm_ranges property for q35-pci-host + +commit b07bf7b73fd02d24a7baa64a580f4974b86bbc86 upstream + +Add a q35 property to check whether or not SMM ranges, e.g. SMRAM, TSEG, +etc... exist for the target platform. TDX doesn't support SMM and doesn't +play nice with QEMU modifying related guest memory ranges. + +Signed-off-by: Isaku Yamahata +Co-developed-by: Sean Christopherson +Signed-off-by: Sean Christopherson +Signed-off-by: Xiaoyao Li +Signed-off-by: Michael Roth +Message-ID: <20240320083945.991426-19-michael.roth@amd.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Hemanth Selam +Signed-off-by: mohanasv +--- + hw/i386/pc_q35.c | 2 ++ + hw/pci-host/q35.c | 42 +++++++++++++++++++++++++++------------ + include/hw/i386/pc.h | 1 + + include/hw/pci-host/q35.h | 1 + + 4 files changed, 33 insertions(+), 13 deletions(-) + +diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c +index 4f3e5412f6..3392b0c110 100644 +--- a/hw/i386/pc_q35.c ++++ b/hw/i386/pc_q35.c +@@ -236,6 +236,8 @@ static void pc_q35_init(MachineState *machine) + x86ms->above_4g_mem_size, NULL); + object_property_set_bool(phb, PCI_HOST_BYPASS_IOMMU, + pcms->default_bus_bypass_iommu, NULL); ++ object_property_set_bool(phb, PCI_HOST_PROP_SMM_RANGES, ++ x86_machine_is_smm_enabled(x86ms), NULL); + sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal); + + /* pci */ +diff --git a/hw/pci-host/q35.c b/hw/pci-host/q35.c +index 4ac44975c7..8facd8b63f 100644 +--- a/hw/pci-host/q35.c ++++ b/hw/pci-host/q35.c +@@ -179,6 +179,8 @@ static Property q35_host_props[] = { + mch.below_4g_mem_size, 0), + DEFINE_PROP_SIZE(PCI_HOST_ABOVE_4G_MEM_SIZE, Q35PCIHost, + mch.above_4g_mem_size, 0), ++ DEFINE_PROP_BOOL(PCI_HOST_PROP_SMM_RANGES, Q35PCIHost, ++ mch.has_smm_ranges, true), + DEFINE_PROP_BOOL("x-pci-hole64-fix", Q35PCIHost, pci_hole64_fix, true), + DEFINE_PROP_END_OF_LIST(), + }; +@@ -214,6 +216,7 @@ static void q35_host_initfn(Object *obj) + /* mch's object_initialize resets the default value, set it again */ + qdev_prop_set_uint64(DEVICE(s), PCI_HOST_PROP_PCI_HOLE64_SIZE, + Q35_PCI_HOST_HOLE64_SIZE_DEFAULT); ++ + object_property_add(obj, PCI_HOST_PROP_PCI_HOLE_START, "uint32", + q35_host_get_pci_hole_start, + NULL, NULL, NULL); +@@ -476,6 +479,10 @@ static void mch_write_config(PCIDevice *d, + mch_update_pciexbar(mch); + } + ++ if (!mch->has_smm_ranges) { ++ return; ++ } ++ + if (ranges_overlap(address, len, MCH_HOST_BRIDGE_SMRAM, + MCH_HOST_BRIDGE_SMRAM_SIZE)) { + mch_update_smram(mch); +@@ -494,10 +501,13 @@ static void mch_write_config(PCIDevice *d, + static void mch_update(MCHPCIState *mch) + { + mch_update_pciexbar(mch); ++ + mch_update_pam(mch); +- mch_update_smram(mch); +- mch_update_ext_tseg_mbytes(mch); +- mch_update_smbase_smram(mch); ++ if (mch->has_smm_ranges) { ++ mch_update_smram(mch); ++ mch_update_ext_tseg_mbytes(mch); ++ mch_update_smbase_smram(mch); ++ } + + /* + * pci hole goes from end-of-low-ram to io-apic. 
+@@ -538,18 +548,20 @@ static void mch_reset(DeviceState *qdev) + pci_set_quad(d->config + MCH_HOST_BRIDGE_PCIEXBAR, + MCH_HOST_BRIDGE_PCIEXBAR_DEFAULT); + +- d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT; +- d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT; +- d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK; +- d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK; ++ if (mch->has_smm_ranges) { ++ d->config[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_DEFAULT; ++ d->config[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_DEFAULT; ++ d->wmask[MCH_HOST_BRIDGE_SMRAM] = MCH_HOST_BRIDGE_SMRAM_WMASK; ++ d->wmask[MCH_HOST_BRIDGE_ESMRAMC] = MCH_HOST_BRIDGE_ESMRAMC_WMASK; + +- if (mch->ext_tseg_mbytes > 0) { +- pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES, +- MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY); +- } ++ if (mch->ext_tseg_mbytes > 0) { ++ pci_set_word(d->config + MCH_HOST_BRIDGE_EXT_TSEG_MBYTES, ++ MCH_HOST_BRIDGE_EXT_TSEG_MBYTES_QUERY); ++ } + +- d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0; +- d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff; ++ d->config[MCH_HOST_BRIDGE_F_SMBASE] = 0; ++ d->wmask[MCH_HOST_BRIDGE_F_SMBASE] = 0xff; ++ } + + mch_update(mch); + } +@@ -578,6 +590,10 @@ static void mch_realize(PCIDevice *d, Error **errp) + PAM_EXPAN_BASE + i * PAM_EXPAN_SIZE, PAM_EXPAN_SIZE); + } + ++ if (!mch->has_smm_ranges) { ++ return; ++ } ++ + /* if *disabled* show SMRAM to all CPUs */ + memory_region_init_alias(&mch->smram_region, OBJECT(mch), "smram-region", + mch->pci_address_space, MCH_HOST_BRIDGE_SMRAM_C_BASE, +diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h +index a10ceeabbf..c49e2970f1 100644 +--- a/include/hw/i386/pc.h ++++ b/include/hw/i386/pc.h +@@ -165,6 +165,7 @@ void pc_guest_info_init(PCMachineState *pcms); + #define PCI_HOST_PROP_PCI_HOLE64_SIZE "pci-hole64-size" + #define PCI_HOST_BELOW_4G_MEM_SIZE "below-4g-mem-size" + #define PCI_HOST_ABOVE_4G_MEM_SIZE "above-4g-mem-size" ++#define PCI_HOST_PROP_SMM_RANGES "smm-ranges" + + + void pc_pci_as_mapping_init(MemoryRegion *system_memory, +diff --git a/include/hw/pci-host/q35.h b/include/hw/pci-host/q35.h +index bafcbe6752..22fadfa3ed 100644 +--- a/include/hw/pci-host/q35.h ++++ b/include/hw/pci-host/q35.h +@@ -50,6 +50,7 @@ struct MCHPCIState { + MemoryRegion tseg_blackhole, tseg_window; + MemoryRegion smbase_blackhole, smbase_window; + bool has_smram_at_smbase; ++ bool has_smm_ranges; + Range pci_hole; + uint64_t below_4g_mem_size; + uint64_t above_4g_mem_size; +-- +2.43.5 + diff --git a/0328-hw-i386-acpi-set-pcat-compat-bit-only-when-pic-is-no.patch b/0328-hw-i386-acpi-set-pcat-compat-bit-only-when-pic-is-no.patch new file mode 100644 index 0000000..a27c4a6 --- /dev/null +++ b/0328-hw-i386-acpi-set-pcat-compat-bit-only-when-pic-is-no.patch @@ -0,0 +1,45 @@ +From a2327cd68f0455406642c86a2de7593ebf81b4b4 Mon Sep 17 00:00:00 2001 +From: Xiaoyao Li +Date: Wed, 3 Apr 2024 10:59:53 -0400 +Subject: [PATCH] hw/i386/acpi: Set PCAT_COMPAT bit only when pic is not + disabled + +commit 292dd287e78e0cbafde9d1522c729349d132d844 upstream + +A value 1 of PCAT_COMPAT (bit 0) of MADT.Flags indicates that the system +also has a PC-AT-compatible dual-8259 setup, i.e., the PIC. When PIC +is not enabled (pic=off) for x86 machine, the PCAT_COMPAT bit needs to +be cleared. The PIC probe should then print: + + [ 0.155970] Using NULL legacy PIC + +However, no such log printed in guest kernel unless PCAT_COMPAT is +cleared. 
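For context, the guest side keys off bit 0 of MADT.Flags; a simplified sketch of the consumer's check (illustrative only, not the guest kernel's actual probe code):

    #include <stdbool.h>
    #include <stdint.h>

    #define ACPI_MADT_PCAT_COMPAT (1u << 0)

    /* A guest that finds PCAT_COMPAT clear can skip dual-8259 setup and
     * install a null legacy PIC, which is the "Using NULL legacy PIC"
     * path quoted above. */
    static bool madt_advertises_legacy_pic(uint32_t madt_flags)
    {
        return (madt_flags & ACPI_MADT_PCAT_COMPAT) != 0;
    }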
+
+Signed-off-by: Xiaoyao Li
+Message-ID: <20240403145953.3082491-1-xiaoyao.li@intel.com>
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Hemanth Selam
+Signed-off-by: mohanasv
+---
+ hw/i386/acpi-common.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c
+index 43dc23f7e0..57e81c7322 100644
+--- a/hw/i386/acpi-common.c
++++ b/hw/i386/acpi-common.c
+@@ -107,7 +107,9 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker,
+     acpi_table_begin(&table, table_data);
+     /* Local APIC Address */
+     build_append_int_noprefix(table_data, APIC_DEFAULT_ADDRESS, 4);
+-    build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */
++    /* Flags. bit 0: PCAT_COMPAT */
++    build_append_int_noprefix(table_data,
++                              x86ms->pic != ON_OFF_AUTO_OFF ? 1 : 0 , 4);
+
+     for (i = 0; i < apic_ids->len; i++) {
+         pc_madt_cpu_entry(i, apic_ids, table_data, false);
+--
+2.43.5
+
diff --git a/0329-confidential-guest-support-add-kvm-init-and-kvm-rese.patch b/0329-confidential-guest-support-add-kvm-init-and-kvm-rese.patch
new file mode 100644
index 0000000..a16d627
--- /dev/null
+++ b/0329-confidential-guest-support-add-kvm-init-and-kvm-rese.patch
@@ -0,0 +1,84 @@
+From ac642976c4c64bae9bec454a02e73daec9c46dd7 Mon Sep 17 00:00:00 2001
+From: Xiaoyao Li
+Date: Thu, 29 Feb 2024 01:00:35 -0500
+Subject: [PATCH] confidential guest support: Add kvm_init() and kvm_reset() in
+ class
+
+commit 41a605944e3fecae43ca18ded95ec31f28e0c7fe upstream
+
+Different confidential VMs in different architectures all have the same
+need to do their specific initialization (and maybe resetting) work
+with KVM. Currently each of them exposes an individual *_kvm_init()
+function and lets machine code or KVM code call it.
+
+To facilitate the introduction of confidential guest technology from
+different x86 vendors, add two virtual functions, kvm_init() and kvm_reset(),
+in ConfidentialGuestSupportClass, and expose two helper functions for
+invoking them.
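To illustrate how the new hooks are meant to be consumed (a sketch with a placeholder vendor name, relying on the QOM types added in the diff below), a subclass registers its hook in class_init and machine code only ever calls the generic wrapper:

    /* Sketch: vendor-specific subclass wiring. Patches 0330-0332 below
     * apply exactly this pattern for SEV, PEF and s390 PV. */
    static int myvendor_kvm_init(ConfidentialGuestSupport *cgs, Error **errp)
    {
        /* vendor-specific KVM setup would go here */
        return 0;
    }

    static void myvendor_guest_class_init(ObjectClass *oc, void *data)
    {
        ConfidentialGuestSupportClass *klass =
            CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);

        klass->kvm_init = myvendor_kvm_init;
    }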
+ +Signed-off-by: Xiaoyao Li +Message-Id: <20240229060038.606591-1-xiaoyao.li@intel.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Hemanth Selam +Signed-off-by: mohanasv +--- + include/exec/confidential-guest-support.h | 34 ++++++++++++++++++++++- + 1 file changed, 33 insertions(+), 1 deletion(-) + +diff --git a/include/exec/confidential-guest-support.h b/include/exec/confidential-guest-support.h +index 2cba27642f..765dab4f83 100644 +--- a/include/exec/confidential-guest-support.h ++++ b/include/exec/confidential-guest-support.h +@@ -23,7 +23,10 @@ + #include "qom/object.h" + + #define TYPE_CONFIDENTIAL_GUEST_SUPPORT "confidential-guest-support" +-OBJECT_DECLARE_SIMPLE_TYPE(ConfidentialGuestSupport, CONFIDENTIAL_GUEST_SUPPORT) ++OBJECT_DECLARE_TYPE(ConfidentialGuestSupport, ++ ConfidentialGuestSupportClass, ++ CONFIDENTIAL_GUEST_SUPPORT) ++ + + struct ConfidentialGuestSupport { + Object parent; +@@ -101,8 +104,37 @@ struct ConfidentialGuestMemoryEncryptionOps { + typedef struct ConfidentialGuestSupportClass { + ObjectClass parent; + struct ConfidentialGuestMemoryEncryptionOps *memory_encryption_ops; ++ ++ int (*kvm_init)(ConfidentialGuestSupport *cgs, Error **errp); ++ int (*kvm_reset)(ConfidentialGuestSupport *cgs, Error **errp); + } ConfidentialGuestSupportClass; + ++static inline int confidential_guest_kvm_init(ConfidentialGuestSupport *cgs, ++ Error **errp) ++{ ++ ConfidentialGuestSupportClass *klass; ++ ++ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); ++ if (klass->kvm_init) { ++ return klass->kvm_init(cgs, errp); ++ } ++ ++ return 0; ++} ++ ++static inline int confidential_guest_kvm_reset(ConfidentialGuestSupport *cgs, ++ Error **errp) ++{ ++ ConfidentialGuestSupportClass *klass; ++ ++ klass = CONFIDENTIAL_GUEST_SUPPORT_GET_CLASS(cgs); ++ if (klass->kvm_reset) { ++ return klass->kvm_reset(cgs, errp); ++ } ++ ++ return 0; ++} ++ + #endif /* !CONFIG_USER_ONLY */ + + #endif /* QEMU_CONFIDENTIAL_GUEST_SUPPORT_H */ +-- +2.43.5 + diff --git a/0330-i386-sev-switch-to-use-confidential-guest-kvm-init.patch b/0330-i386-sev-switch-to-use-confidential-guest-kvm-init.patch new file mode 100644 index 0000000..14f2b54 --- /dev/null +++ b/0330-i386-sev-switch-to-use-confidential-guest-kvm-init.patch @@ -0,0 +1,314 @@ +From 62f7d502c3a5eecfded8a269886252941afd18bf Mon Sep 17 00:00:00 2001 +From: Xiaoyao Li +Date: Thu, 29 Feb 2024 01:00:36 -0500 +Subject: [PATCH] i386/sev: Switch to use confidential_guest_kvm_init() +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 637c95b37b106c2eeba313e0abb38ec12e918a59 upstream + +Use confidential_guest_kvm_init() instead of calling SEV +specific sev_kvm_init(). This allows the introduction of multiple +confidential-guest-support subclasses for different x86 vendors. + +As a bonus, stubs are not needed anymore since there is no +direct call from target/i386/kvm/kvm.c to SEV code. + +[Backport changes] + +1. Retained file “target/i386/kvm/sev-stub.c” consisting of 13 +lines of code added by commits + +i) 6503910c546 (target/i386: get/set/migrate GHCB state). + +ii) 493ec7bb50b (kvm: Add support for SEV shared regions list and + KVM_EXIT_HYPERCALL). + +iii) bfa2537c9ae (kvm: Add support for userspace MSR filtering and + handling of MSR_KVM_MIGRATION_CONTROL). + +to continue support for shared regions, VM migration and GHCB. +Also, build configuration for file sev-stub.c was retained in +"target/i386/kvm/meson.build". + +2. 
In file "target/i386/sev.c" function sev_guest_class_init() was
+migrated along with the changes added by commits
+
+i) f49ae1db13f (target/i386: sev: Add support for reuse ASID for
+   different CSV guests).
+ii) 4f1bb1eec41 (qapi/qom,target/i386: csv-guest: Introduce
+    secret-header-file=str and secret-file=str options).
+
+as part of Hygon CSV support.
+
+Signed-off-by: Xiaoyao Li
+Message-Id: <20240229060038.606591-1-xiaoyao.li@intel.com>
+Signed-off-by: Paolo Bonzini
+Signed-off-by: Hemanth Selam
+Signed-off-by: mohanasv
+---
+ target/i386/kvm/kvm.c      |  10 ++-
+ target/i386/kvm/sev-stub.c |  22 ------
+ target/i386/sev.c          | 157 ++++++++++++++++++-------------------
+ target/i386/sev.h          |   2 -
+ 4 files changed, 84 insertions(+), 107 deletions(-)
+
+diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
+index 1646766559..59b0041427 100644
+--- a/target/i386/kvm/kvm.c
++++ b/target/i386/kvm/kvm.c
+@@ -2580,10 +2580,12 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
+      * mechanisms are supported in future (e.g. TDX), they'll need
+      * their own initialization either here or elsewhere.
+      */
+-    ret = sev_kvm_init(ms->cgs, &local_err);
+-    if (ret < 0) {
+-        error_report_err(local_err);
+-        return ret;
++    if (ms->cgs) {
++        ret = confidential_guest_kvm_init(ms->cgs, &local_err);
++        if (ret < 0) {
++            error_report_err(local_err);
++            return ret;
++        }
+     }
+
+     has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+diff --git a/target/i386/kvm/sev-stub.c b/target/i386/kvm/sev-stub.c
+index a0aac1117f..d291071f6b 100644
+--- a/target/i386/kvm/sev-stub.c
++++ b/target/i386/kvm/sev-stub.c
+@@ -1,27 +1,5 @@
+-/*
+- * QEMU SEV stub
+- *
+- * Copyright Advanced Micro Devices 2018
+- *
+- * Authors:
+- *      Brijesh Singh
+- *
+- * This work is licensed under the terms of the GNU GPL, version 2 or later.
+- * See the COPYING file in the top-level directory.
+- * +- */ +- +-#include "qemu/osdep.h" +-#include "sev.h" +- + bool sev_kvm_has_msr_ghcb; + +-int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) +-{ +- /* If we get here, cgs must be some non-SEV thing */ +- return 0; +-} +- + int sev_remove_shared_regions_list(unsigned long gfn_start, + unsigned long gfn_end) + { +diff --git a/target/i386/sev.c b/target/i386/sev.c +index 23122068f0..a185776a2d 100644 +--- a/target/i386/sev.c ++++ b/target/i386/sev.c +@@ -454,78 +454,6 @@ static void sev_guest_set_kernel_hashes(Object *obj, bool value, Error **errp) + sev->kernel_hashes = value; + } + +-static void +-sev_guest_class_init(ObjectClass *oc, void *data) +-{ +- object_class_property_add_str(oc, "sev-device", +- sev_guest_get_sev_device, +- sev_guest_set_sev_device); +- object_class_property_set_description(oc, "sev-device", +- "SEV device to use"); +- object_class_property_add_str(oc, "dh-cert-file", +- sev_guest_get_dh_cert_file, +- sev_guest_set_dh_cert_file); +- object_class_property_set_description(oc, "dh-cert-file", +- "guest owners DH certificate (encoded with base64)"); +- object_class_property_add_str(oc, "session-file", +- sev_guest_get_session_file, +- sev_guest_set_session_file); +- object_class_property_set_description(oc, "session-file", +- "guest owners session parameters (encoded with base64)"); +- object_class_property_add_bool(oc, "kernel-hashes", +- sev_guest_get_kernel_hashes, +- sev_guest_set_kernel_hashes); +- object_class_property_set_description(oc, "kernel-hashes", +- "add kernel hashes to guest firmware for measured Linux boot"); +- object_class_property_add_str(oc, "user-id", +- sev_guest_get_user_id, +- sev_guest_set_user_id); +- object_class_property_set_description(oc, "user-id", +- "user id of the guest owner"); +- object_class_property_add_str(oc, "secret-header-file", +- sev_guest_get_secret_header_file, +- sev_guest_set_secret_header_file); +- object_class_property_set_description(oc, "secret-header-file", +- "header file of the guest owner's secret"); +- object_class_property_add_str(oc, "secret-file", +- sev_guest_get_secret_file, +- sev_guest_set_secret_file); +- object_class_property_set_description(oc, "secret-file", +- "file of the guest owner's secret"); +-} +- +-static void +-sev_guest_instance_init(Object *obj) +-{ +- SevGuestState *sev = SEV_GUEST(obj); +- +- sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE); +- sev->policy = DEFAULT_GUEST_POLICY; +- object_property_add_uint32_ptr(obj, "policy", &sev->policy, +- OBJ_PROP_FLAG_READWRITE); +- object_property_add_uint32_ptr(obj, "handle", &sev->handle, +- OBJ_PROP_FLAG_READWRITE); +- object_property_add_uint32_ptr(obj, "cbitpos", &sev->cbitpos, +- OBJ_PROP_FLAG_READWRITE); +- object_property_add_uint32_ptr(obj, "reduced-phys-bits", +- &sev->reduced_phys_bits, +- OBJ_PROP_FLAG_READWRITE); +-} +- +-/* sev guest info */ +-static const TypeInfo sev_guest_info = { +- .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT, +- .name = TYPE_SEV_GUEST, +- .instance_size = sizeof(SevGuestState), +- .instance_finalize = sev_guest_finalize, +- .class_init = sev_guest_class_init, +- .instance_init = sev_guest_instance_init, +- .interfaces = (InterfaceInfo[]) { +- { TYPE_USER_CREATABLE }, +- { } +- } +-}; +- + bool + sev_enabled(void) + { +@@ -1163,20 +1091,15 @@ sev_migration_state_notifier(Notifier *notifier, void *data) + + static Notifier sev_migration_state; + +-int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) ++static int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + { +- SevGuestState 
*sev +- = (SevGuestState *)object_dynamic_cast(OBJECT(cgs), TYPE_SEV_GUEST); ++ SevGuestState *sev = SEV_GUEST(cgs); + char *devname; + int ret, fw_error, cmd; + uint32_t ebx; + uint32_t host_cbitpos; + struct sev_user_data_status status = {}; + +- if (!sev) { +- return 0; +- } +- + ConfidentialGuestSupportClass *cgs_class = + (ConfidentialGuestSupportClass *) object_get_class(OBJECT(cgs)); + +@@ -2799,6 +2722,82 @@ struct sev_ops sev_ops = { + .sev_receive_start = _sev_receive_start, + }; + ++static void ++sev_guest_class_init(ObjectClass *oc, void *data) ++{ ++ ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); ++ ++ klass->kvm_init = sev_kvm_init; ++ ++ object_class_property_add_str(oc, "sev-device", ++ sev_guest_get_sev_device, ++ sev_guest_set_sev_device); ++ object_class_property_set_description(oc, "sev-device", ++ "SEV device to use"); ++ object_class_property_add_str(oc, "dh-cert-file", ++ sev_guest_get_dh_cert_file, ++ sev_guest_set_dh_cert_file); ++ object_class_property_set_description(oc, "dh-cert-file", ++ "guest owners DH certificate (encoded with base64)"); ++ object_class_property_add_str(oc, "session-file", ++ sev_guest_get_session_file, ++ sev_guest_set_session_file); ++ object_class_property_set_description(oc, "session-file", ++ "guest owners session parameters (encoded with base64)"); ++ object_class_property_add_bool(oc, "kernel-hashes", ++ sev_guest_get_kernel_hashes, ++ sev_guest_set_kernel_hashes); ++ object_class_property_set_description(oc, "kernel-hashes", ++ "add kernel hashes to guest firmware for measured Linux boot"); ++ object_class_property_add_str(oc, "user-id", ++ sev_guest_get_user_id, ++ sev_guest_set_user_id); ++ object_class_property_set_description(oc, "user-id", ++ "user id of the guest owner"); ++ object_class_property_add_str(oc, "secret-header-file", ++ sev_guest_get_secret_header_file, ++ sev_guest_set_secret_header_file); ++ object_class_property_set_description(oc, "secret-header-file", ++ "header file of the guest owner's secret"); ++ object_class_property_add_str(oc, "secret-file", ++ sev_guest_get_secret_file, ++ sev_guest_set_secret_file); ++ object_class_property_set_description(oc, "secret-file", ++ "file of the guest owner's secret"); ++} ++ ++static void ++sev_guest_instance_init(Object *obj) ++{ ++ SevGuestState *sev = SEV_GUEST(obj); ++ ++ sev->sev_device = g_strdup(DEFAULT_SEV_DEVICE); ++ sev->policy = DEFAULT_GUEST_POLICY; ++ object_property_add_uint32_ptr(obj, "policy", &sev->policy, ++ OBJ_PROP_FLAG_READWRITE); ++ object_property_add_uint32_ptr(obj, "handle", &sev->handle, ++ OBJ_PROP_FLAG_READWRITE); ++ object_property_add_uint32_ptr(obj, "cbitpos", &sev->cbitpos, ++ OBJ_PROP_FLAG_READWRITE); ++ object_property_add_uint32_ptr(obj, "reduced-phys-bits", ++ &sev->reduced_phys_bits, ++ OBJ_PROP_FLAG_READWRITE); ++} ++ ++/* sev guest info */ ++static const TypeInfo sev_guest_info = { ++ .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT, ++ .name = TYPE_SEV_GUEST, ++ .instance_size = sizeof(SevGuestState), ++ .instance_finalize = sev_guest_finalize, ++ .class_init = sev_guest_class_init, ++ .instance_init = sev_guest_instance_init, ++ .interfaces = (InterfaceInfo[]) { ++ { TYPE_USER_CREATABLE }, ++ { } ++ } ++}; ++ + static void + sev_register_types(void) + { +diff --git a/target/i386/sev.h b/target/i386/sev.h +index 647b426b16..0436c966cc 100644 +--- a/target/i386/sev.h ++++ b/target/i386/sev.h +@@ -76,8 +76,6 @@ int sev_load_incoming_shared_regions_list(QEMUFile *f); + bool sev_is_gfn_in_unshared_region(unsigned 
long gfn); + void sev_del_migrate_blocker(void); + +-int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); +- + extern bool sev_kvm_has_msr_ghcb; + + struct sev_ops { +-- +2.43.5 + diff --git a/0331-ppc-pef-switch-to-use-confidential-guest-kvm-init-re.patch b/0331-ppc-pef-switch-to-use-confidential-guest-kvm-init-re.patch new file mode 100644 index 0000000..ba8a672 --- /dev/null +++ b/0331-ppc-pef-switch-to-use-confidential-guest-kvm-init-re.patch @@ -0,0 +1,133 @@ +From e679e82fd66307aab01339da9525485b659a1cf4 Mon Sep 17 00:00:00 2001 +From: Xiaoyao Li +Date: Thu, 29 Feb 2024 01:00:37 -0500 +Subject: [PATCH] ppc/pef: switch to use confidential_guest_kvm_init/reset() + +commit 00a238b1a845fd5f0acd771664c5e184a63ed9b6 upstream + +Use the unified interface to call confidential guest related kvm_init() +and kvm_reset(), to avoid exposing pef specific functions. + +As a bonus, pef.h goes away since there is no direct call from sPAPR +board code to PEF code anymore. + +Signed-off-by: Xiaoyao Li +Signed-off-by: Paolo Bonzini +Signed-off-by: Hemanth Selam +Signed-off-by: mohanasv +--- + hw/ppc/pef.c | 9 ++++++--- + hw/ppc/spapr.c | 10 +++++++--- + include/hw/ppc/pef.h | 17 ----------------- + 3 files changed, 13 insertions(+), 23 deletions(-) + delete mode 100644 include/hw/ppc/pef.h + +diff --git a/hw/ppc/pef.c b/hw/ppc/pef.c +index d28ed3ba73..47553348b1 100644 +--- a/hw/ppc/pef.c ++++ b/hw/ppc/pef.c +@@ -15,7 +15,6 @@ + #include "sysemu/kvm.h" + #include "migration/blocker.h" + #include "exec/confidential-guest-support.h" +-#include "hw/ppc/pef.h" + + #define TYPE_PEF_GUEST "pef-guest" + OBJECT_DECLARE_SIMPLE_TYPE(PefGuest, PEF_GUEST) +@@ -93,7 +92,7 @@ static int kvmppc_svm_off(Error **errp) + #endif + } + +-int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) ++static int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + { + if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { + return 0; +@@ -107,7 +106,7 @@ int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + return kvmppc_svm_init(cgs, errp); + } + +-int pef_kvm_reset(ConfidentialGuestSupport *cgs, Error **errp) ++static int pef_kvm_reset(ConfidentialGuestSupport *cgs, Error **errp) + { + if (!object_dynamic_cast(OBJECT(cgs), TYPE_PEF_GUEST)) { + return 0; +@@ -131,6 +130,10 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(PefGuest, + + static void pef_guest_class_init(ObjectClass *oc, void *data) + { ++ ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc); ++ ++ klass->kvm_init = pef_kvm_init; ++ klass->kvm_reset = pef_kvm_reset; + } + + static void pef_guest_init(Object *obj) +diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c +index df09aa9d6a..df1a93a58b 100644 +--- a/hw/ppc/spapr.c ++++ b/hw/ppc/spapr.c +@@ -74,6 +74,7 @@ + #include "hw/virtio/vhost-scsi-common.h" + + #include "exec/ram_addr.h" ++#include "exec/confidential-guest-support.h" + #include "hw/usb.h" + #include "qemu/config-file.h" + #include "qemu/error-report.h" +@@ -86,7 +87,6 @@ + #include "hw/ppc/spapr_tpm_proxy.h" + #include "hw/ppc/spapr_nvdimm.h" + #include "hw/ppc/spapr_numa.h" +-#include "hw/ppc/pef.h" + + #include "monitor/monitor.h" + +@@ -1687,7 +1687,9 @@ static void spapr_machine_reset(MachineState *machine, ShutdownCause reason) + qemu_guest_getrandom_nofail(spapr->fdt_rng_seed, 32); + } + +- pef_kvm_reset(machine->cgs, &error_fatal); ++ if (machine->cgs) { ++ confidential_guest_kvm_reset(machine->cgs, &error_fatal); ++ } + spapr_caps_apply(spapr); + + first_ppc_cpu = POWERPC_CPU(first_cpu); +@@ -2810,7 
+2812,9 @@ static void spapr_machine_init(MachineState *machine) + /* + * if Secure VM (PEF) support is configured, then initialize it + */ +- pef_kvm_init(machine->cgs, &error_fatal); ++ if (machine->cgs) { ++ confidential_guest_kvm_init(machine->cgs, &error_fatal); ++ } + + msi_nonbroken = true; + +diff --git a/include/hw/ppc/pef.h b/include/hw/ppc/pef.h +deleted file mode 100644 +index 707dbe524c..0000000000 +--- a/include/hw/ppc/pef.h ++++ /dev/null +@@ -1,17 +0,0 @@ +-/* +- * PEF (Protected Execution Facility) for POWER support +- * +- * Copyright Red Hat. +- * +- * This work is licensed under the terms of the GNU GPL, version 2 or later. +- * See the COPYING file in the top-level directory. +- * +- */ +- +-#ifndef HW_PPC_PEF_H +-#define HW_PPC_PEF_H +- +-int pef_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); +-int pef_kvm_reset(ConfidentialGuestSupport *cgs, Error **errp); +- +-#endif /* HW_PPC_PEF_H */ +-- +2.43.5 + diff --git a/0332-s390-switch-to-use-confidential-guest-kvm-init.patch b/0332-s390-switch-to-use-confidential-guest-kvm-init.patch new file mode 100644 index 0000000..ea92267 --- /dev/null +++ b/0332-s390-switch-to-use-confidential-guest-kvm-init.patch @@ -0,0 +1,103 @@ +From 17b9ff196ad3885e34de4792dbef823de8d8f6f6 Mon Sep 17 00:00:00 2001 +From: Xiaoyao Li +Date: Thu, 29 Feb 2024 01:00:38 -0500 +Subject: [PATCH] s390: Switch to use confidential_guest_kvm_init() + +commit a14a2b0148e657cc526b7a75f2a1937628764e7a upstream + +Use unified confidential_guest_kvm_init() for consistency with +other architectures. + +Signed-off-by: Xiaoyao Li +Message-Id: <20240229060038.606591-1-xiaoyao.li@intel.com> +Signed-off-by: Paolo Bonzini +Signed-off-by: Hemanth Selam +Signed-off-by: mohanasv +--- + hw/s390x/s390-virtio-ccw.c | 5 ++++- + target/s390x/kvm/pv.c | 10 +++++++++- + target/s390x/kvm/pv.h | 14 -------------- + 3 files changed, 13 insertions(+), 16 deletions(-) + +diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c +index 2d6b86624f..57ee3be6b1 100644 +--- a/hw/s390x/s390-virtio-ccw.c ++++ b/hw/s390x/s390-virtio-ccw.c +@@ -14,6 +14,7 @@ + #include "qemu/osdep.h" + #include "qapi/error.h" + #include "exec/ram_addr.h" ++#include "exec/confidential-guest-support.h" + #include "hw/s390x/s390-virtio-hcall.h" + #include "hw/s390x/sclp.h" + #include "hw/s390x/s390_flic.h" +@@ -267,7 +268,9 @@ static void ccw_init(MachineState *machine) + s390_init_cpus(machine); + + /* Need CPU model to be determined before we can set up PV */ +- s390_pv_init(machine->cgs, &error_fatal); ++ if (machine->cgs) { ++ confidential_guest_kvm_init(machine->cgs, &error_fatal); ++ } + + s390_flic_init(); + +diff --git a/target/s390x/kvm/pv.c b/target/s390x/kvm/pv.c +index 6a69be7e5c..902dbb6f34 100644 +--- a/target/s390x/kvm/pv.c ++++ b/target/s390x/kvm/pv.c +@@ -319,12 +319,17 @@ static bool s390_pv_guest_check(ConfidentialGuestSupport *cgs, Error **errp) + return s390_pv_check_cpus(errp); + } + +-int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) ++static int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) + { + if (!object_dynamic_cast(OBJECT(cgs), TYPE_S390_PV_GUEST)) { + return 0; + } + ++ if (!kvm_enabled()) { ++ error_setg(errp, "Protected Virtualization requires KVM"); ++ return -1; ++ } ++ + if (!s390_has_feat(S390_FEAT_UNPACK)) { + error_setg(errp, + "CPU model does not support Protected Virtualization"); +@@ -349,6 +354,9 @@ OBJECT_DEFINE_TYPE_WITH_INTERFACES(S390PVGuest, + + static void s390_pv_guest_class_init(ObjectClass *oc, void *data) + { 
++    ConfidentialGuestSupportClass *klass = CONFIDENTIAL_GUEST_SUPPORT_CLASS(oc);
++
++    klass->kvm_init = s390_pv_kvm_init;
+ }
+
+ static void s390_pv_guest_init(Object *obj)
+diff --git a/target/s390x/kvm/pv.h b/target/s390x/kvm/pv.h
+index 7b935e2246..5e1527a849 100644
+--- a/target/s390x/kvm/pv.h
++++ b/target/s390x/kvm/pv.h
+@@ -79,18 +79,4 @@ static inline int kvm_s390_dump_mem_state(uint64_t addr, size_t len,
+ static inline int kvm_s390_dump_completion_data(void *buff) { return 0; }
+ #endif /* CONFIG_KVM */
+
+-int s390_pv_kvm_init(ConfidentialGuestSupport *cgs, Error **errp);
+-static inline int s390_pv_init(ConfidentialGuestSupport *cgs, Error **errp)
+-{
+-    if (!cgs) {
+-        return 0;
+-    }
+-    if (kvm_enabled()) {
+-        return s390_pv_kvm_init(cgs, errp);
+-    }
+-
+-    error_setg(errp, "Protected Virtualization requires KVM");
+-    return -1;
+-}
+-
+ #endif /* HW_S390_PV_H */
+--
+2.43.5
+
diff --git a/0333-scripts-update-linux-headers-add-setup-data-h-to-imp.patch b/0333-scripts-update-linux-headers-add-setup-data-h-to-imp.patch
new file mode 100644
index 0000000..d846914
--- /dev/null
+++ b/0333-scripts-update-linux-headers-add-setup-data-h-to-imp.patch
@@ -0,0 +1,77 @@
+From 0f867faa5aa1ac1d6a3157d1248ce87d31c91507 Mon Sep 17 00:00:00 2001
+From: Michael Roth
+Date: Sun, 18 Feb 2024 23:35:02 -0600
+Subject: [PATCH] scripts/update-linux-headers: Add setup_data.h to import list
+
+commit 66210a1a30f2384bb59f9dad8d769dba56dd30f1 upstream
+
+Data structures like struct setup_data have been moved to a separate
+setup_data.h header which bootparam.h relies on. Add setup_data.h to
+the cp_portable() list and sync it along with the other header files.
+
+Note that currently struct setup_data is stripped away as part of
+generating bootparam.h, but that handling is not currently needed for
+setup_data.h since it doesn't pull in many external
+headers/dependencies. However, QEMU currently redefines struct
+setup_data in hw/i386/x86.c, so that will need to be removed as part of
+any header update that pulls in the new setup_data.h to avoid build
+bisect breakage.
+
+Because <asm/setup_data.h> is the first architecture-specific #include
+in include/standard-headers/, add a new sed substitution to rewrite
+asm/ includes to the standard-headers/asm-* subdirectory for the current
+architecture.
+
+And while at it, remove asm-generic/kvm_para.h from the list of
+allowed includes: it does not have a matching substitution, and therefore
+it would not be possible to use it on non-Linux systems where there is
+no /usr/include/asm-generic/ directory.
+
+Signed-off-by: Michael Roth
+Signed-off-by: Paolo Bonzini
+Signed-off-by: priyanka-mani
+Signed-off-by: mohanasv
+---
+ scripts/update-linux-headers.sh | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
+index 88c76b8f69..77f5f063b0 100755
+--- a/scripts/update-linux-headers.sh
++++ b/scripts/update-linux-headers.sh
+@@ -61,7 +61,7 @@ cp_portable() {
+                  -e 'linux/const' \
+                  -e 'linux/kernel' \
+                  -e 'linux/sysinfo' \
+-                 -e 'asm-generic/kvm_para' \
++                 -e 'asm/setup_data.h' \
+                  > /dev/null
+     then
+         echo "Unexpected #include in input file $f".
+@@ -77,6 +77,7 @@ cp_portable() {
+         -e 's/__be\([0-9][0-9]*\)/uint\1_t/g' \
+         -e 's/"\(input-event-codes\.h\)"/"standard-headers\/linux\/\1"/' \
+         -e 's/<linux\/\([^>]*\)>/"standard-headers\/linux\/\1"/' \
++        -e 's/<asm\/\([^>]*\)>/"standard-headers\/asm-'$arch'\/\1"/' \
+         -e 's/__bitwise//' \
+         -e 's/__attribute__((packed))/QEMU_PACKED/' \
+         -e 's/__inline__/inline/' \
+@@ -155,12 +156,15 @@ for arch in $ARCHLIST; do
+             "$tmpdir/include/asm/bootparam.h" > "$tmpdir/bootparam.h"
+         cp_portable "$tmpdir/bootparam.h" \
+                     "$output/include/standard-headers/asm-$arch"
++        cp_portable "$tmpdir/include/asm/setup_data.h" \
++                    "$output/standard-headers/asm-x86"
+     fi
+     if [ $arch = loongarch ]; then
+         cp "$hdrdir/include/asm/kvm_para.h" "$output/linux-headers/asm-loongarch/"
+         cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-loongarch/"
+     fi
+ done
++arch=
+
+ rm -rf "$output/linux-headers/linux"
+ mkdir -p "$output/linux-headers/linux"
+--
+2.43.5
+
diff --git a/0334-scripts-update-linux-headers-add-bits-h-to-file-impo.patch b/0334-scripts-update-linux-headers-add-bits-h-to-file-impo.patch
new file mode 100644
index 0000000..78318ad
--- /dev/null
+++ b/0334-scripts-update-linux-headers-add-bits-h-to-file-impo.patch
@@ -0,0 +1,31 @@
+From 03fcf6c8655176318602ad9c30a1da3d0d4a9ae6 Mon Sep 17 00:00:00 2001
+From: Michael Roth
+Date: Wed, 21 Feb 2024 10:51:38 -0600
+Subject: [PATCH] scripts/update-linux-headers: Add bits.h to file imports
+
+commit b40b8eb609d3549ac14aab43849b20f5cba951c9 upstream
+
+Signed-off-by: Michael Roth
+Signed-off-by: Paolo Bonzini
+Signed-off-by: priyanka-mani
+Signed-off-by: mohanasv
+---
+ scripts/update-linux-headers.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
+index 77f5f063b0..4153a00957 100755
+--- a/scripts/update-linux-headers.sh
++++ b/scripts/update-linux-headers.sh
+@@ -170,7 +170,7 @@ rm -rf "$output/linux-headers/linux"
+ mkdir -p "$output/linux-headers/linux"
+ for header in const.h stddef.h kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \
+               psci.h psp-sev.h userfaultfd.h memfd.h mman.h nvme_ioctl.h \
+-              vduse.h iommufd.h; do
++              vduse.h iommufd.h bits.h; do
+     cp "$tmpdir/include/linux/$header" "$output/linux-headers/linux"
+ done
+
+--
+2.43.5
+
diff --git a/0335-linux-headers-update-to-linux-v6-8-rc6.patch b/0335-linux-headers-update-to-linux-v6-8-rc6.patch
new file mode 100644
index 0000000..8d99070
--- /dev/null
+++ b/0335-linux-headers-update-to-linux-v6-8-rc6.patch
@@ -0,0 +1,1010 @@
+From 7355fcd7fd30adbe56c6e1c7e2c2bc95fd19e91e Mon Sep 17 00:00:00 2001
+From: Daniel Henrique Barboza
+Date: Mon, 4 Mar 2024 10:47:31 -0300
+Subject: [PATCH] linux-headers: Update to Linux v6.8-rc6
+
+commit 6a02465f917d549c7ea8e0ca799724fb818e1120 upstream
+
+The idea with this update is to get the latest KVM caps for RISC-V.
+
+[Backport changes]
+
+1. In the linux-headers/linux/kvm.h file, the number 0x49 is already
+assigned to KVM_LOAD_USER_DATA. However, in the upstream source code,
+the same number (0x49) has been assigned to KVM_SET_USER_MEMORY_REGION2.
+To resolve this conflict, the command number for KVM_SET_USER_MEMORY_REGION2
+has been changed to the next available number, 0x4A.
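The renumbering is forced by how KVM ioctl numbers bake in a command byte; a self-contained sketch (the struct here is a stand-in mirroring the field layout of the header this update imports, not the imported header itself):

    #include <stdint.h>
    #include <sys/ioctl.h>

    #define KVMIO 0xAE

    /* Stand-in for the imported linux-headers definition. */
    struct kvm_userspace_memory_region2 {
        uint32_t slot;
        uint32_t flags;
        uint64_t guest_phys_addr;
        uint64_t memory_size;
        uint64_t userspace_addr;
        uint64_t guest_memfd_offset;
        uint32_t guest_memfd;
        uint32_t pad1;
        uint64_t pad2[14];
    };

    /* Byte 0x49 is already taken downstream by KVM_LOAD_USER_DATA, and
     * two requests sharing one command byte would collide in the
     * kernel's ioctl dispatch, hence the move to the next free byte. */
    #define KVM_SET_USER_MEMORY_REGION2 \
            _IOW(KVMIO, 0x4a, struct kvm_userspace_memory_region2)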
+ +Signed-off-by: Daniel Henrique Barboza +Acked-by: Alistair Francis +Message-ID: <20240304134732.386590-2-dbarboza@ventanamicro.com> +Signed-off-by: Alistair Francis +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + include/standard-headers/drm/drm_fourcc.h | 10 +- + include/standard-headers/linux/ethtool.h | 41 +++-- + .../standard-headers/linux/virtio_config.h | 8 +- + include/standard-headers/linux/virtio_pci.h | 68 +++++++++ + include/standard-headers/linux/virtio_pmem.h | 7 + + linux-headers/asm-generic/unistd.h | 15 +- + linux-headers/asm-mips/mman.h | 2 +- + linux-headers/asm-mips/unistd_n32.h | 5 + + linux-headers/asm-mips/unistd_n64.h | 5 + + linux-headers/asm-mips/unistd_o32.h | 5 + + linux-headers/asm-powerpc/unistd_32.h | 5 + + linux-headers/asm-powerpc/unistd_64.h | 5 + + linux-headers/asm-riscv/kvm.h | 40 +++++ + linux-headers/asm-s390/unistd_32.h | 5 + + linux-headers/asm-s390/unistd_64.h | 5 + + linux-headers/asm-x86/kvm.h | 3 + + linux-headers/asm-x86/unistd_32.h | 5 + + linux-headers/asm-x86/unistd_64.h | 5 + + linux-headers/asm-x86/unistd_x32.h | 5 + + linux-headers/linux/iommufd.h | 79 ++++++++++ + linux-headers/linux/kvm.h | 141 +++++++----------- + linux-headers/linux/userfaultfd.h | 29 +++- + linux-headers/linux/vfio.h | 1 + + 23 files changed, 382 insertions(+), 112 deletions(-) + +diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h +index 3afb70160f..b72917073d 100644 +--- a/include/standard-headers/drm/drm_fourcc.h ++++ b/include/standard-headers/drm/drm_fourcc.h +@@ -53,7 +53,7 @@ extern "C" { + * Format modifiers may change any property of the buffer, including the number + * of planes and/or the required allocation size. Format modifiers are + * vendor-namespaced, and as such the relationship between a fourcc code and a +- * modifier is specific to the modifer being used. For example, some modifiers ++ * modifier is specific to the modifier being used. For example, some modifiers + * may preserve meaning - such as number of planes - from the fourcc code, + * whereas others may not. + * +@@ -78,7 +78,7 @@ extern "C" { + * format. + * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users + * see modifiers as opaque tokens they can check for equality and intersect. +- * These users musn't need to know to reason about the modifier value ++ * These users mustn't need to know to reason about the modifier value + * (i.e. they are not expected to extract information out of the modifier). + * + * Vendors should document their modifier usage in as much detail as +@@ -539,7 +539,7 @@ extern "C" { + * This is a tiled layout using 4Kb tiles in row-major layout. + * Within the tile pixels are laid out in 16 256 byte units / sub-tiles which + * are arranged in four groups (two wide, two high) with column-major layout. +- * Each group therefore consits out of four 256 byte units, which are also laid ++ * Each group therefore consists out of four 256 byte units, which are also laid + * out as 2x2 column-major. + * 256 byte units are made out of four 64 byte blocks of pixels, producing + * either a square block or a 2:1 unit. +@@ -1102,7 +1102,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) + */ + + /* +- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific ++ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific + * modifiers) denote the category for modifiers. 
Currently we have three + * categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of + * sixteen different categories. +@@ -1418,7 +1418,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(uint64_t modifier) + * Amlogic FBC Memory Saving mode + * + * Indicates the storage is packed when pixel size is multiple of word +- * boudaries, i.e. 8bit should be stored in this mode to save allocation ++ * boundaries, i.e. 8bit should be stored in this mode to save allocation + * memory. + * + * This mode reduces body layout to 3072 bytes per 64x32 superblock with +diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h +index 99fcddf04f..dfb54eff6f 100644 +--- a/include/standard-headers/linux/ethtool.h ++++ b/include/standard-headers/linux/ethtool.h +@@ -1266,6 +1266,8 @@ struct ethtool_rxfh_indir { + * hardware hash key. + * @hfunc: Defines the current RSS hash function used by HW (or to be set to). + * Valid values are one of the %ETH_RSS_HASH_*. ++ * @input_xfrm: Defines how the input data is transformed. Valid values are one ++ * of %RXH_XFRM_*. + * @rsvd8: Reserved for future use; see the note on reserved space. + * @rsvd32: Reserved for future use; see the note on reserved space. + * @rss_config: RX ring/queue index for each hash value i.e., indirection table +@@ -1285,7 +1287,8 @@ struct ethtool_rxfh { + uint32_t indir_size; + uint32_t key_size; + uint8_t hfunc; +- uint8_t rsvd8[3]; ++ uint8_t input_xfrm; ++ uint8_t rsvd8[2]; + uint32_t rsvd32; + uint32_t rss_config[]; + }; +@@ -1992,6 +1995,15 @@ static inline int ethtool_validate_duplex(uint8_t duplex) + + #define WOL_MODE_COUNT 8 + ++/* RSS hash function data ++ * XOR the corresponding source and destination fields of each specified ++ * protocol. Both copies of the XOR'ed fields are fed into the RSS and RXHASH ++ * calculation. Note that this XORing reduces the input set entropy and could ++ * be exploited to reduce the RSS queue spread. ++ */ ++#define RXH_XFRM_SYM_XOR (1 << 0) ++#define RXH_XFRM_NO_CHANGE 0xff ++ + /* L2-L4 network traffic flow types */ + #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ + #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ +@@ -2128,18 +2140,6 @@ enum ethtool_reset_flags { + * refused. For drivers: ignore this field (use kernel's + * __ETHTOOL_LINK_MODE_MASK_NBITS instead), any change to it will + * be overwritten by kernel. +- * @supported: Bitmap with each bit meaning given by +- * %ethtool_link_mode_bit_indices for the link modes, physical +- * connectors and other link features for which the interface +- * supports autonegotiation or auto-detection. Read-only. +- * @advertising: Bitmap with each bit meaning given by +- * %ethtool_link_mode_bit_indices for the link modes, physical +- * connectors and other link features that are advertised through +- * autonegotiation or enabled for auto-detection. +- * @lp_advertising: Bitmap with each bit meaning given by +- * %ethtool_link_mode_bit_indices for the link modes, and other +- * link features that the link partner advertised through +- * autonegotiation; 0 if unknown or not applicable. Read-only. + * @transceiver: Used to distinguish different possible PHY types, + * reported consistently by PHYLIB. Read-only. + * @master_slave_cfg: Master/slave port mode. 
+@@ -2181,6 +2181,21 @@ enum ethtool_reset_flags { + * %set_link_ksettings() should validate all fields other than @cmd + * and @link_mode_masks_nwords that are not described as read-only or + * deprecated, and must ignore all fields described as read-only. ++ * ++ * @link_mode_masks is divided into three bitfields, each of length ++ * @link_mode_masks_nwords: ++ * - supported: Bitmap with each bit meaning given by ++ * %ethtool_link_mode_bit_indices for the link modes, physical ++ * connectors and other link features for which the interface ++ * supports autonegotiation or auto-detection. Read-only. ++ * - advertising: Bitmap with each bit meaning given by ++ * %ethtool_link_mode_bit_indices for the link modes, physical ++ * connectors and other link features that are advertised through ++ * autonegotiation or enabled for auto-detection. ++ * - lp_advertising: Bitmap with each bit meaning given by ++ * %ethtool_link_mode_bit_indices for the link modes, and other ++ * link features that the link partner advertised through ++ * autonegotiation; 0 if unknown or not applicable. Read-only. + */ + struct ethtool_link_settings { + uint32_t cmd; +diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h +index bfd1ca643e..45be0fa1bc 100644 +--- a/include/standard-headers/linux/virtio_config.h ++++ b/include/standard-headers/linux/virtio_config.h +@@ -52,7 +52,7 @@ + * rest are per-device feature bits. + */ + #define VIRTIO_TRANSPORT_F_START 28 +-#define VIRTIO_TRANSPORT_F_END 41 ++#define VIRTIO_TRANSPORT_F_END 42 + + #ifndef VIRTIO_CONFIG_NO_LEGACY + /* Do we get callbacks when the ring is completely used, even if we've +@@ -112,4 +112,10 @@ + * This feature indicates that the driver can reset a queue individually. + */ + #define VIRTIO_F_RING_RESET 40 ++ ++/* ++ * This feature indicates that the device support administration virtqueues. ++ */ ++#define VIRTIO_F_ADMIN_VQ 41 ++ + #endif /* _LINUX_VIRTIO_CONFIG_H */ +diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h +index b7fdfd0668..3e2bc2c97e 100644 +--- a/include/standard-headers/linux/virtio_pci.h ++++ b/include/standard-headers/linux/virtio_pci.h +@@ -175,6 +175,9 @@ struct virtio_pci_modern_common_cfg { + + uint16_t queue_notify_data; /* read-write */ + uint16_t queue_reset; /* read-write */ ++ ++ uint16_t admin_queue_index; /* read-only */ ++ uint16_t admin_queue_num; /* read-only */ + }; + + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ +@@ -215,7 +218,72 @@ struct virtio_pci_cfg_cap { + #define VIRTIO_PCI_COMMON_Q_USEDHI 52 + #define VIRTIO_PCI_COMMON_Q_NDATA 56 + #define VIRTIO_PCI_COMMON_Q_RESET 58 ++#define VIRTIO_PCI_COMMON_ADM_Q_IDX 60 ++#define VIRTIO_PCI_COMMON_ADM_Q_NUM 62 + + #endif /* VIRTIO_PCI_NO_MODERN */ + ++/* Admin command status. */ ++#define VIRTIO_ADMIN_STATUS_OK 0 ++ ++/* Admin command opcode. */ ++#define VIRTIO_ADMIN_CMD_LIST_QUERY 0x0 ++#define VIRTIO_ADMIN_CMD_LIST_USE 0x1 ++ ++/* Admin command group type. */ ++#define VIRTIO_ADMIN_GROUP_TYPE_SRIOV 0x1 ++ ++/* Transitional device admin command. 
*/ ++#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE 0x2 ++#define VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ 0x3 ++#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE 0x4 ++#define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 ++#define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 ++ ++struct QEMU_PACKED virtio_admin_cmd_hdr { ++ uint16_t opcode; ++ /* ++ * 1 - SR-IOV ++ * 2-65535 - reserved ++ */ ++ uint16_t group_type; ++ /* Unused, reserved for future extensions. */ ++ uint8_t reserved1[12]; ++ uint64_t group_member_id; ++}; ++ ++struct QEMU_PACKED virtio_admin_cmd_status { ++ uint16_t status; ++ uint16_t status_qualifier; ++ /* Unused, reserved for future extensions. */ ++ uint8_t reserved2[4]; ++}; ++ ++struct QEMU_PACKED virtio_admin_cmd_legacy_wr_data { ++ uint8_t offset; /* Starting offset of the register(s) to write. */ ++ uint8_t reserved[7]; ++ uint8_t registers[]; ++}; ++ ++struct QEMU_PACKED virtio_admin_cmd_legacy_rd_data { ++ uint8_t offset; /* Starting offset of the register(s) to read. */ ++}; ++ ++#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_END 0 ++#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_DEV 0x1 ++#define VIRTIO_ADMIN_CMD_NOTIFY_INFO_FLAGS_OWNER_MEM 0x2 ++ ++#define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4 ++ ++struct QEMU_PACKED virtio_admin_cmd_notify_info_data { ++ uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */ ++ uint8_t bar; /* BAR of the member or the owner device */ ++ uint8_t padding[6]; ++ uint64_t offset; /* Offset within bar. */ ++}; ++ ++struct virtio_admin_cmd_notify_info_result { ++ struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; ++}; ++ + #endif +diff --git a/include/standard-headers/linux/virtio_pmem.h b/include/standard-headers/linux/virtio_pmem.h +index fc029de798..1a2576d017 100644 +--- a/include/standard-headers/linux/virtio_pmem.h ++++ b/include/standard-headers/linux/virtio_pmem.h +@@ -14,6 +14,13 @@ + #include "standard-headers/linux/virtio_ids.h" + #include "standard-headers/linux/virtio_config.h" + ++/* Feature bits */ ++/* guest physical address range will be indicated as shared memory region 0 */ ++#define VIRTIO_PMEM_F_SHMEM_REGION 0 ++ ++/* shmid of the shared memory region corresponding to the pmem */ ++#define VIRTIO_PMEM_SHMEM_REGION_ID 0 ++ + struct virtio_pmem_config { + uint64_t start; + uint64_t size; +diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h +index 756b013fb8..75f00965ab 100644 +--- a/linux-headers/asm-generic/unistd.h ++++ b/linux-headers/asm-generic/unistd.h +@@ -829,8 +829,21 @@ __SYSCALL(__NR_futex_wait, sys_futex_wait) + #define __NR_futex_requeue 456 + __SYSCALL(__NR_futex_requeue, sys_futex_requeue) + ++#define __NR_statmount 457 ++__SYSCALL(__NR_statmount, sys_statmount) ++ ++#define __NR_listmount 458 ++__SYSCALL(__NR_listmount, sys_listmount) ++ ++#define __NR_lsm_get_self_attr 459 ++__SYSCALL(__NR_lsm_get_self_attr, sys_lsm_get_self_attr) ++#define __NR_lsm_set_self_attr 460 ++__SYSCALL(__NR_lsm_set_self_attr, sys_lsm_set_self_attr) ++#define __NR_lsm_list_modules 461 ++__SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) ++ + #undef __NR_syscalls +-#define __NR_syscalls 457 ++#define __NR_syscalls 462 + + /* + * 32 bit systems traditionally used different +diff --git a/linux-headers/asm-mips/mman.h b/linux-headers/asm-mips/mman.h +index c6e1fc77c9..9c48d9a21a 100644 +--- a/linux-headers/asm-mips/mman.h ++++ b/linux-headers/asm-mips/mman.h +@@ -88,7 +88,7 @@ + #define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ + #define 
MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + +-#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, ++#define MADV_DONTDUMP 16 /* Explicitly exclude from core dump, + overrides the coredump filter bits */ + #define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ + +diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h +index 994b6f008f..ce2e050a9b 100644 +--- a/linux-headers/asm-mips/unistd_n32.h ++++ b/linux-headers/asm-mips/unistd_n32.h +@@ -385,5 +385,10 @@ + #define __NR_futex_wake (__NR_Linux + 454) + #define __NR_futex_wait (__NR_Linux + 455) + #define __NR_futex_requeue (__NR_Linux + 456) ++#define __NR_statmount (__NR_Linux + 457) ++#define __NR_listmount (__NR_Linux + 458) ++#define __NR_lsm_get_self_attr (__NR_Linux + 459) ++#define __NR_lsm_set_self_attr (__NR_Linux + 460) ++#define __NR_lsm_list_modules (__NR_Linux + 461) + + #endif /* _ASM_UNISTD_N32_H */ +diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h +index 41dcf5877a..5bfb3733ff 100644 +--- a/linux-headers/asm-mips/unistd_n64.h ++++ b/linux-headers/asm-mips/unistd_n64.h +@@ -361,5 +361,10 @@ + #define __NR_futex_wake (__NR_Linux + 454) + #define __NR_futex_wait (__NR_Linux + 455) + #define __NR_futex_requeue (__NR_Linux + 456) ++#define __NR_statmount (__NR_Linux + 457) ++#define __NR_listmount (__NR_Linux + 458) ++#define __NR_lsm_get_self_attr (__NR_Linux + 459) ++#define __NR_lsm_set_self_attr (__NR_Linux + 460) ++#define __NR_lsm_list_modules (__NR_Linux + 461) + + #endif /* _ASM_UNISTD_N64_H */ +diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h +index ae9d334d96..02eaecd020 100644 +--- a/linux-headers/asm-mips/unistd_o32.h ++++ b/linux-headers/asm-mips/unistd_o32.h +@@ -431,5 +431,10 @@ + #define __NR_futex_wake (__NR_Linux + 454) + #define __NR_futex_wait (__NR_Linux + 455) + #define __NR_futex_requeue (__NR_Linux + 456) ++#define __NR_statmount (__NR_Linux + 457) ++#define __NR_listmount (__NR_Linux + 458) ++#define __NR_lsm_get_self_attr (__NR_Linux + 459) ++#define __NR_lsm_set_self_attr (__NR_Linux + 460) ++#define __NR_lsm_list_modules (__NR_Linux + 461) + + #endif /* _ASM_UNISTD_O32_H */ +diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h +index b9b23d66d7..bbab08d6ec 100644 +--- a/linux-headers/asm-powerpc/unistd_32.h ++++ b/linux-headers/asm-powerpc/unistd_32.h +@@ -438,6 +438,11 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + + #endif /* _ASM_UNISTD_32_H */ +diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h +index cbb4b3e8f7..af34cde70f 100644 +--- a/linux-headers/asm-powerpc/unistd_64.h ++++ b/linux-headers/asm-powerpc/unistd_64.h +@@ -410,6 +410,11 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + + #endif /* _ASM_UNISTD_64_H */ +diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h +index 60d3b21dea..7499e88a94 100644 +--- a/linux-headers/asm-riscv/kvm.h ++++ b/linux-headers/asm-riscv/kvm.h +@@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID { + 
KVM_RISCV_ISA_EXT_ZIHPM, + KVM_RISCV_ISA_EXT_SMSTATEEN, + KVM_RISCV_ISA_EXT_ZICOND, ++ KVM_RISCV_ISA_EXT_ZBC, ++ KVM_RISCV_ISA_EXT_ZBKB, ++ KVM_RISCV_ISA_EXT_ZBKC, ++ KVM_RISCV_ISA_EXT_ZBKX, ++ KVM_RISCV_ISA_EXT_ZKND, ++ KVM_RISCV_ISA_EXT_ZKNE, ++ KVM_RISCV_ISA_EXT_ZKNH, ++ KVM_RISCV_ISA_EXT_ZKR, ++ KVM_RISCV_ISA_EXT_ZKSED, ++ KVM_RISCV_ISA_EXT_ZKSH, ++ KVM_RISCV_ISA_EXT_ZKT, ++ KVM_RISCV_ISA_EXT_ZVBB, ++ KVM_RISCV_ISA_EXT_ZVBC, ++ KVM_RISCV_ISA_EXT_ZVKB, ++ KVM_RISCV_ISA_EXT_ZVKG, ++ KVM_RISCV_ISA_EXT_ZVKNED, ++ KVM_RISCV_ISA_EXT_ZVKNHA, ++ KVM_RISCV_ISA_EXT_ZVKNHB, ++ KVM_RISCV_ISA_EXT_ZVKSED, ++ KVM_RISCV_ISA_EXT_ZVKSH, ++ KVM_RISCV_ISA_EXT_ZVKT, ++ KVM_RISCV_ISA_EXT_ZFH, ++ KVM_RISCV_ISA_EXT_ZFHMIN, ++ KVM_RISCV_ISA_EXT_ZIHINTNTL, ++ KVM_RISCV_ISA_EXT_ZVFH, ++ KVM_RISCV_ISA_EXT_ZVFHMIN, ++ KVM_RISCV_ISA_EXT_ZFA, + KVM_RISCV_ISA_EXT_MAX, + }; + +@@ -157,9 +184,16 @@ enum KVM_RISCV_SBI_EXT_ID { + KVM_RISCV_SBI_EXT_EXPERIMENTAL, + KVM_RISCV_SBI_EXT_VENDOR, + KVM_RISCV_SBI_EXT_DBCN, ++ KVM_RISCV_SBI_EXT_STA, + KVM_RISCV_SBI_EXT_MAX, + }; + ++/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ ++struct kvm_riscv_sbi_sta { ++ unsigned long shmem_lo; ++ unsigned long shmem_hi; ++}; ++ + /* Possible states for kvm_riscv_timer */ + #define KVM_RISCV_TIMER_STATE_OFF 0 + #define KVM_RISCV_TIMER_STATE_ON 1 +@@ -241,6 +275,12 @@ enum KVM_RISCV_SBI_EXT_ID { + #define KVM_REG_RISCV_VECTOR_REG(n) \ + ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) + ++/* Registers for specific SBI extensions are mapped as type 10 */ ++#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT) ++#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) ++#define KVM_REG_RISCV_SBI_STA_REG(name) \ ++ (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long)) ++ + /* Device Control API: RISC-V AIA */ + #define KVM_DEV_RISCV_APLIC_ALIGN 0x1000 + #define KVM_DEV_RISCV_APLIC_SIZE 0x4000 +diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h +index c093e6d5f9..a3ece69d82 100644 +--- a/linux-headers/asm-s390/unistd_32.h ++++ b/linux-headers/asm-s390/unistd_32.h +@@ -429,5 +429,10 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + #endif /* _ASM_S390_UNISTD_32_H */ +diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h +index 114c0569a4..8c5fd93495 100644 +--- a/linux-headers/asm-s390/unistd_64.h ++++ b/linux-headers/asm-s390/unistd_64.h +@@ -377,5 +377,10 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + #endif /* _ASM_S390_UNISTD_64_H */ +diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h +index 2b3a8f7bd2..003fb74534 100644 +--- a/linux-headers/asm-x86/kvm.h ++++ b/linux-headers/asm-x86/kvm.h +@@ -560,4 +560,7 @@ struct kvm_pmu_event_filter { + /* x86-specific KVM_EXIT_HYPERCALL flags. 
*/ + #define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0) + ++#define KVM_X86_DEFAULT_VM 0 ++#define KVM_X86_SW_PROTECTED_VM 1 ++ + #endif /* _ASM_X86_KVM_H */ +diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h +index 329649c377..5c9c329e93 100644 +--- a/linux-headers/asm-x86/unistd_32.h ++++ b/linux-headers/asm-x86/unistd_32.h +@@ -447,6 +447,11 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + + #endif /* _ASM_UNISTD_32_H */ +diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h +index 4583606ce6..d9aab7ae87 100644 +--- a/linux-headers/asm-x86/unistd_64.h ++++ b/linux-headers/asm-x86/unistd_64.h +@@ -369,6 +369,11 @@ + #define __NR_futex_wake 454 + #define __NR_futex_wait 455 + #define __NR_futex_requeue 456 ++#define __NR_statmount 457 ++#define __NR_listmount 458 ++#define __NR_lsm_get_self_attr 459 ++#define __NR_lsm_set_self_attr 460 ++#define __NR_lsm_list_modules 461 + + + #endif /* _ASM_UNISTD_64_H */ +diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h +index 146d74d8e4..63cdd1ee43 100644 +--- a/linux-headers/asm-x86/unistd_x32.h ++++ b/linux-headers/asm-x86/unistd_x32.h +@@ -321,6 +321,11 @@ + #define __NR_futex_wake (__X32_SYSCALL_BIT + 454) + #define __NR_futex_wait (__X32_SYSCALL_BIT + 455) + #define __NR_futex_requeue (__X32_SYSCALL_BIT + 456) ++#define __NR_statmount (__X32_SYSCALL_BIT + 457) ++#define __NR_listmount (__X32_SYSCALL_BIT + 458) ++#define __NR_lsm_get_self_attr (__X32_SYSCALL_BIT + 459) ++#define __NR_lsm_set_self_attr (__X32_SYSCALL_BIT + 460) ++#define __NR_lsm_list_modules (__X32_SYSCALL_BIT + 461) + #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) + #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) + #define __NR_ioctl (__X32_SYSCALL_BIT + 514) +diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h +index 806d98d09c..72e8f4b9dd 100644 +--- a/linux-headers/linux/iommufd.h ++++ b/linux-headers/linux/iommufd.h +@@ -49,6 +49,7 @@ enum { + IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, ++ IOMMUFD_CMD_HWPT_INVALIDATE, + }; + + /** +@@ -613,4 +614,82 @@ struct iommu_hwpt_get_dirty_bitmap { + #define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + ++/** ++ * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation ++ * Data Type ++ * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1 ++ */ ++enum iommu_hwpt_invalidate_data_type { ++ IOMMU_HWPT_INVALIDATE_DATA_VTD_S1, ++}; ++ ++/** ++ * enum iommu_hwpt_vtd_s1_invalidate_flags - Flags for Intel VT-d ++ * stage-1 cache invalidation ++ * @IOMMU_VTD_INV_FLAGS_LEAF: Indicates whether the invalidation applies ++ * to all-levels page structure cache or just ++ * the leaf PTE cache. ++ */ ++enum iommu_hwpt_vtd_s1_invalidate_flags { ++ IOMMU_VTD_INV_FLAGS_LEAF = 1 << 0, ++}; ++ ++/** ++ * struct iommu_hwpt_vtd_s1_invalidate - Intel VT-d cache invalidation ++ * (IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) ++ * @addr: The start address of the range to be invalidated. It needs to ++ * be 4KB aligned. ++ * @npages: Number of contiguous 4K pages to be invalidated. 
++ * @flags: Combination of enum iommu_hwpt_vtd_s1_invalidate_flags ++ * @__reserved: Must be 0 ++ * ++ * The Intel VT-d specific invalidation data for user-managed stage-1 cache ++ * invalidation in nested translation. Userspace uses this structure to ++ * tell the impacted cache scope after modifying the stage-1 page table. ++ * ++ * Invalidating all the caches related to the page table by setting @addr ++ * to be 0 and @npages to be U64_MAX. ++ * ++ * The device TLB will be invalidated automatically if ATS is enabled. ++ */ ++struct iommu_hwpt_vtd_s1_invalidate { ++ __aligned_u64 addr; ++ __aligned_u64 npages; ++ __u32 flags; ++ __u32 __reserved; ++}; ++ ++/** ++ * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE) ++ * @size: sizeof(struct iommu_hwpt_invalidate) ++ * @hwpt_id: ID of a nested HWPT for cache invalidation ++ * @data_uptr: User pointer to an array of driver-specific cache invalidation ++ * data. ++ * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data ++ * type of all the entries in the invalidation request array. It ++ * should be a type supported by the hwpt pointed by @hwpt_id. ++ * @entry_len: Length (in bytes) of a request entry in the request array ++ * @entry_num: Input the number of cache invalidation requests in the array. ++ * Output the number of requests successfully handled by kernel. ++ * @__reserved: Must be 0. ++ * ++ * Invalidate the iommu cache for user-managed page table. Modifications on a ++ * user-managed page table should be followed by this operation to sync cache. ++ * Each ioctl can support one or more cache invalidation requests in the array ++ * that has a total size of @entry_len * @entry_num. ++ * ++ * An empty invalidation request array by setting @entry_num==0 is allowed, and ++ * @entry_len and @data_uptr would be ignored in this case. This can be used to ++ * check if the given @data_type is supported or not by kernel. 
++ */ ++struct iommu_hwpt_invalidate { ++ __u32 size; ++ __u32 hwpt_id; ++ __aligned_u64 data_uptr; ++ __u32 data_type; ++ __u32 entry_len; ++ __u32 entry_num; ++ __u32 __reserved; ++}; ++#define IOMMU_HWPT_INVALIDATE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_INVALIDATE) + #endif +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index f0abf968b2..a1bbb080c3 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -18,76 +18,6 @@ + + #define KVM_API_VERSION 12 + +-/* *** Deprecated interfaces *** */ +- +-#define KVM_TRC_SHIFT 16 +- +-#define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT) +-#define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1)) +- +-#define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01) +-#define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02) +-#define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01) +- +-#define KVM_TRC_HEAD_SIZE 12 +-#define KVM_TRC_CYCLE_SIZE 8 +-#define KVM_TRC_EXTRA_MAX 7 +- +-#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02) +-#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03) +-#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04) +-#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05) +-#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06) +-#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07) +-#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08) +-#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09) +-#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A) +-#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B) +-#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C) +-#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D) +-#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E) +-#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F) +-#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10) +-#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11) +-#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12) +-#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13) +-#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14) +-#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15) +-#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16) +-#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17) +-#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) +-#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) +- +-struct kvm_user_trace_setup { +- __u32 buf_size; +- __u32 buf_nr; +-}; +- +-#define __KVM_DEPRECATED_MAIN_W_0x06 \ +- _IOW(KVMIO, 0x06, struct kvm_user_trace_setup) +-#define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07) +-#define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08) +- +-#define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq) +- +-struct kvm_breakpoint { +- __u32 enabled; +- __u32 padding; +- __u64 address; +-}; +- +-struct kvm_debug_guest { +- __u32 enabled; +- __u32 pad; +- struct kvm_breakpoint breakpoints[4]; +- __u32 singlestep; +-}; +- +-#define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest) +- +-/* *** End of deprecated interfaces *** */ +- +- + /* for KVM_SET_USER_MEMORY_REGION */ + struct kvm_userspace_memory_region { + __u32 slot; +@@ -97,6 +27,19 @@ struct kvm_userspace_memory_region { + __u64 userspace_addr; /* start of the userspace allocated memory */ + }; + ++/* for KVM_SET_USER_MEMORY_REGION2 */ ++struct kvm_userspace_memory_region2 { ++ __u32 slot; ++ __u32 flags; ++ __u64 guest_phys_addr; ++ __u64 memory_size; ++ __u64 userspace_addr; ++ __u64 guest_memfd_offset; ++ __u32 guest_memfd; ++ __u32 pad1; ++ __u64 pad2[14]; ++}; ++ + /* + * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for + * userspace, other bits are reserved for kvm internal use which are defined +@@ 
-104,6 +47,7 @@ struct kvm_userspace_memory_region { + */ + #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) + #define KVM_MEM_READONLY (1UL << 1) ++#define KVM_MEM_GUEST_MEMFD (1UL << 2) + + /* for KVM_IRQ_LINE */ + struct kvm_irq_level { +@@ -267,6 +211,7 @@ struct kvm_xen_exit { + #define KVM_EXIT_RISCV_CSR 36 + #define KVM_EXIT_NOTIFY 37 + #define KVM_EXIT_LOONGARCH_IOCSR 38 ++#define KVM_EXIT_MEMORY_FAULT 39 + + /* For KVM_EXIT_INTERNAL_ERROR */ + /* Emulate instruction failed. */ +@@ -517,6 +462,13 @@ struct kvm_run { + #define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) + __u32 flags; + } notify; ++ /* KVM_EXIT_MEMORY_FAULT */ ++ struct { ++#define KVM_MEMORY_EXIT_FLAG_PRIVATE (1ULL << 3) ++ __u64 flags; ++ __u64 gpa; ++ __u64 size; ++ } memory_fault; + /* Fix the size of the union. */ + char padding[256]; + }; +@@ -944,9 +896,6 @@ struct kvm_ppc_resize_hpt { + */ + #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */ + #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2) +-#define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06 +-#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07 +-#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08 + #define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2) + #define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list) + +@@ -1200,6 +1149,11 @@ struct kvm_ppc_resize_hpt { + #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 + #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 + #define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 ++#define KVM_CAP_USER_MEMORY2 231 ++#define KVM_CAP_MEMORY_FAULT_INFO 232 ++#define KVM_CAP_MEMORY_ATTRIBUTES 233 ++#define KVM_CAP_GUEST_MEMFD 234 ++#define KVM_CAP_VM_TYPES 235 + + #define KVM_CAP_SEV_ES_GHCB 500 + #define KVM_CAP_HYGON_COCO_EXT 501 +@@ -1303,6 +1257,7 @@ struct kvm_x86_mce { + #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) + #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) + #define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) ++#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) + + struct kvm_xen_hvm_config { + __u32 flags; +@@ -1528,6 +1483,9 @@ struct kvm_user_data { + #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) + #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) + #define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) ++#define KVM_SET_USER_MEMORY_REGION2 _IOW(KVMIO, 0x4a, \ ++ struct kvm_userspace_memory_region2) ++ + /* enable ucontrol for s390 */ + struct kvm_s390_ucas_mapping { + __u64 user_addr; +@@ -1551,20 +1509,8 @@ struct kvm_s390_ucas_mapping { + _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) + #define KVM_UNREGISTER_COALESCED_MMIO \ + _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) +-#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ +- struct kvm_assigned_pci_dev) + #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) +-/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ +-#define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70 +-#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) + #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) +-#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ +- struct kvm_assigned_pci_dev) +-#define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \ +- struct kvm_assigned_msix_nr) +-#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \ +- struct kvm_assigned_msix_entry) +-#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) + #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd) + #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config) + #define 
KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78) +@@ -1581,9 +1527,6 @@ struct kvm_s390_ucas_mapping { + * KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */ + #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2) + #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3) +-/* Available with KVM_CAP_PCI_2_3 */ +-#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ +- struct kvm_assigned_pci_dev) + /* Available with KVM_CAP_SIGNAL_MSI */ + #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi) + /* Available with KVM_CAP_PPC_GET_SMMU_INFO */ +@@ -1640,8 +1583,6 @@ struct kvm_s390_ucas_mapping { + #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs) + #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation) + #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt) +-/* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */ +-#define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87 + #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs) + #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs) + #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid) +@@ -2418,4 +2359,24 @@ struct kvm_s390_zpci_op { + /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ + #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) + ++/* Available with KVM_CAP_MEMORY_ATTRIBUTES */ ++#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes) ++ ++struct kvm_memory_attributes { ++ __u64 address; ++ __u64 size; ++ __u64 attributes; ++ __u64 flags; ++}; ++ ++#define KVM_MEMORY_ATTRIBUTE_PRIVATE (1ULL << 3) ++ ++#define KVM_CREATE_GUEST_MEMFD _IOWR(KVMIO, 0xd4, struct kvm_create_guest_memfd) ++ ++struct kvm_create_guest_memfd { ++ __u64 size; ++ __u64 flags; ++ __u64 reserved[6]; ++}; ++ + #endif /* __LINUX_KVM_H */ +diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h +index 953c75feda..4283de22d5 100644 +--- a/linux-headers/linux/userfaultfd.h ++++ b/linux-headers/linux/userfaultfd.h +@@ -41,7 +41,8 @@ + UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ + UFFD_FEATURE_WP_UNPOPULATED | \ + UFFD_FEATURE_POISON | \ +- UFFD_FEATURE_WP_ASYNC) ++ UFFD_FEATURE_WP_ASYNC | \ ++ UFFD_FEATURE_MOVE) + #define UFFD_API_IOCTLS \ + ((__u64)1 << _UFFDIO_REGISTER | \ + (__u64)1 << _UFFDIO_UNREGISTER | \ +@@ -50,6 +51,7 @@ + ((__u64)1 << _UFFDIO_WAKE | \ + (__u64)1 << _UFFDIO_COPY | \ + (__u64)1 << _UFFDIO_ZEROPAGE | \ ++ (__u64)1 << _UFFDIO_MOVE | \ + (__u64)1 << _UFFDIO_WRITEPROTECT | \ + (__u64)1 << _UFFDIO_CONTINUE | \ + (__u64)1 << _UFFDIO_POISON) +@@ -73,6 +75,7 @@ + #define _UFFDIO_WAKE (0x02) + #define _UFFDIO_COPY (0x03) + #define _UFFDIO_ZEROPAGE (0x04) ++#define _UFFDIO_MOVE (0x05) + #define _UFFDIO_WRITEPROTECT (0x06) + #define _UFFDIO_CONTINUE (0x07) + #define _UFFDIO_POISON (0x08) +@@ -92,6 +95,8 @@ + struct uffdio_copy) + #define UFFDIO_ZEROPAGE _IOWR(UFFDIO, _UFFDIO_ZEROPAGE, \ + struct uffdio_zeropage) ++#define UFFDIO_MOVE _IOWR(UFFDIO, _UFFDIO_MOVE, \ ++ struct uffdio_move) + #define UFFDIO_WRITEPROTECT _IOWR(UFFDIO, _UFFDIO_WRITEPROTECT, \ + struct uffdio_writeprotect) + #define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \ +@@ -222,6 +227,9 @@ struct uffdio_api { + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. ++ * ++ * UFFD_FEATURE_MOVE indicates that the kernel supports moving an ++ * existing page contents from userspace. 
+ */
+ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0)
+ #define UFFD_FEATURE_EVENT_FORK (1<<1)
+@@ -239,6 +247,7 @@ struct uffdio_api {
+ #define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
+ #define UFFD_FEATURE_POISON (1<<14)
+ #define UFFD_FEATURE_WP_ASYNC (1<<15)
++#define UFFD_FEATURE_MOVE (1<<16)
+ __u64 features;
+
+ __u64 ioctls;
+@@ -347,6 +356,24 @@ struct uffdio_poison {
+ __s64 updated;
+ };
+
++struct uffdio_move {
++ __u64 dst;
++ __u64 src;
++ __u64 len;
++ /*
++ * Especially if used to atomically remove memory from the
++ * address space the wake on the dst range is not needed.
++ */
++#define UFFDIO_MOVE_MODE_DONTWAKE ((__u64)1<<0)
++#define UFFDIO_MOVE_MODE_ALLOW_SRC_HOLES ((__u64)1<<1)
++ __u64 mode;
++ /*
++ * "move" is written by the ioctl and must be at the end: the
++ * copy_from_user will not read the last 8 bytes.
++ */
++ __s64 move;
++};
++
+ /*
+ * Flags for the userfaultfd(2) system call itself.
+ */
+diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h
+index 8e175ece31..b4be37b225 100644
+--- a/linux-headers/linux/vfio.h
++++ b/linux-headers/linux/vfio.h
+@@ -1219,6 +1219,7 @@ enum vfio_device_mig_state {
+ VFIO_DEVICE_STATE_RUNNING_P2P = 5,
+ VFIO_DEVICE_STATE_PRE_COPY = 6,
+ VFIO_DEVICE_STATE_PRE_COPY_P2P = 7,
++ VFIO_DEVICE_STATE_NR,
+ };
+
+ /**
+--
+2.43.5
+
diff --git a/0336-linux-headers-update-to-current-kvm-next.patch b/0336-linux-headers-update-to-current-kvm-next.patch
new file mode 100644
index 0000000..cc632c8
--- /dev/null
+++ b/0336-linux-headers-update-to-current-kvm-next.patch
@@ -0,0 +1,2494 @@
+From cd27fb84d74231353a1d953a3f63826fec31714b Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini
+Date: Tue, 23 Apr 2024 11:46:47 +0200
+Subject: [PATCH] linux-headers: update to current kvm/next
+
+commit ab0c7fb22b56523f24d6e127cd4d10ecff67bf85 upstream
+
+[Backport changes]
+
+1. The upstream patch moves the enum sev_cmd_id structure from the
+linux-headers/linux/kvm.h file to the linux-headers/asm-x86/kvm.h file.
+The Hygon-related changes found in the enum sev_cmd_id structure in
+linux-headers/linux/kvm.h have been moved into the enum sev_cmd_id
+structure in the backported linux-headers/asm-x86/kvm.h file.
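+
+[Editor's aside, illustration only] The rule being applied is: when upstream
+relocates a UAPI enum, any downstream members appended to the old copy must
+be re-appended to the relocated copy. A schematic sketch follows; the Hygon
+member name is a placeholder, not the actual CSV command list, and only the
+first and last upstream members are shown:
+
+    /* linux-headers/asm-x86/kvm.h after the backport (schematic) */
+    enum sev_cmd_id {
+        KVM_SEV_INIT = 0,
+        KVM_SEV_ES_INIT,
+        /* ... remaining upstream SEV/SEV-ES commands elided ... */
+        KVM_SEV_NR_MAX,
+
+        /* Hygon CSV additions carried over from the old linux/kvm.h copy;
+         * the identifier below is a placeholder for illustration. */
+        KVM_CSV_COMMAND_PLACEHOLDER = KVM_SEV_NR_MAX,
+    };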
+ +Signed-off-by: Paolo Bonzini +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + hw/i386/x86.c | 8 - + include/standard-headers/asm-x86/bootparam.h | 17 +- + include/standard-headers/asm-x86/kvm_para.h | 3 +- + include/standard-headers/asm-x86/setup_data.h | 83 +++ + include/standard-headers/linux/ethtool.h | 48 ++ + include/standard-headers/linux/fuse.h | 39 +- + .../linux/input-event-codes.h | 1 + + include/standard-headers/linux/virtio_gpu.h | 2 + + include/standard-headers/linux/virtio_pci.h | 10 +- + include/standard-headers/linux/virtio_snd.h | 154 ++++ + linux-headers/asm-arm64/kvm.h | 15 +- + linux-headers/asm-arm64/sve_context.h | 11 + + linux-headers/asm-generic/bitsperlong.h | 4 + + linux-headers/asm-loongarch/kvm.h | 2 - + linux-headers/asm-mips/kvm.h | 2 - + linux-headers/asm-powerpc/kvm.h | 45 +- + linux-headers/asm-riscv/kvm.h | 3 +- + linux-headers/asm-s390/kvm.h | 315 +++++++- + linux-headers/asm-x86/kvm.h | 331 ++++++++- + linux-headers/linux/bits.h | 15 + + linux-headers/linux/kvm.h | 692 +----------------- + linux-headers/linux/psp-sev.h | 59 ++ + linux-headers/linux/vhost.h | 7 + + 23 files changed, 1123 insertions(+), 743 deletions(-) + create mode 100644 include/standard-headers/asm-x86/setup_data.h + create mode 100644 linux-headers/linux/bits.h + +diff --git a/hw/i386/x86.c b/hw/i386/x86.c +index 2b6291ad8d..d6f9e25abe 100644 +--- a/hw/i386/x86.c ++++ b/hw/i386/x86.c +@@ -676,14 +676,6 @@ DeviceState *ioapic_init_secondary(GSIState *gsi_state) + return dev; + } + +-struct setup_data { +- uint64_t next; +- uint32_t type; +- uint32_t len; +- uint8_t data[]; +-} __attribute__((packed)); +- +- + /* + * The entry point into the kernel for PVH boot is different from + * the native entry point. The PVH entry is defined by the x86/HVM +diff --git a/include/standard-headers/asm-x86/bootparam.h b/include/standard-headers/asm-x86/bootparam.h +index 0b06d2bff1..b582a105c0 100644 +--- a/include/standard-headers/asm-x86/bootparam.h ++++ b/include/standard-headers/asm-x86/bootparam.h +@@ -2,21 +2,7 @@ + #ifndef _ASM_X86_BOOTPARAM_H + #define _ASM_X86_BOOTPARAM_H + +-/* setup_data/setup_indirect types */ +-#define SETUP_NONE 0 +-#define SETUP_E820_EXT 1 +-#define SETUP_DTB 2 +-#define SETUP_PCI 3 +-#define SETUP_EFI 4 +-#define SETUP_APPLE_PROPERTIES 5 +-#define SETUP_JAILHOUSE 6 +-#define SETUP_CC_BLOB 7 +-#define SETUP_IMA 8 +-#define SETUP_RNG_SEED 9 +-#define SETUP_ENUM_MAX SETUP_RNG_SEED +- +-#define SETUP_INDIRECT (1<<31) +-#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT) ++#include "standard-headers/asm-x86/setup_data.h" + + /* ram_size flags */ + #define RAMDISK_IMAGE_START_MASK 0x07FF +@@ -38,6 +24,7 @@ + #define XLF_EFI_KEXEC (1<<4) + #define XLF_5LEVEL (1<<5) + #define XLF_5LEVEL_ENABLED (1<<6) ++#define XLF_MEM_ENCRYPTION (1<<7) + + + #endif /* _ASM_X86_BOOTPARAM_H */ +diff --git a/include/standard-headers/asm-x86/kvm_para.h b/include/standard-headers/asm-x86/kvm_para.h +index f0235e58a1..9a011d20f0 100644 +--- a/include/standard-headers/asm-x86/kvm_para.h ++++ b/include/standard-headers/asm-x86/kvm_para.h +@@ -92,7 +92,7 @@ struct kvm_clock_pairing { + #define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3) + + /* MSR_KVM_ASYNC_PF_INT */ +-#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0) ++#define KVM_ASYNC_PF_VEC_MASK __GENMASK(7, 0) + + /* MSR_KVM_MIGRATION_CONTROL */ + #define KVM_MIGRATION_READY (1 << 0) +@@ -142,7 +142,6 @@ struct kvm_vcpu_pv_apf_data { + uint32_t token; + + uint8_t pad[56]; +- uint32_t enabled; + }; + + #define KVM_PV_EOI_BIT 0 +diff --git 
a/include/standard-headers/asm-x86/setup_data.h b/include/standard-headers/asm-x86/setup_data.h +new file mode 100644 +index 0000000000..09355f54c5 +--- /dev/null ++++ b/include/standard-headers/asm-x86/setup_data.h +@@ -0,0 +1,83 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++#ifndef _ASM_X86_SETUP_DATA_H ++#define _ASM_X86_SETUP_DATA_H ++ ++/* setup_data/setup_indirect types */ ++#define SETUP_NONE 0 ++#define SETUP_E820_EXT 1 ++#define SETUP_DTB 2 ++#define SETUP_PCI 3 ++#define SETUP_EFI 4 ++#define SETUP_APPLE_PROPERTIES 5 ++#define SETUP_JAILHOUSE 6 ++#define SETUP_CC_BLOB 7 ++#define SETUP_IMA 8 ++#define SETUP_RNG_SEED 9 ++#define SETUP_ENUM_MAX SETUP_RNG_SEED ++ ++#define SETUP_INDIRECT (1<<31) ++#define SETUP_TYPE_MAX (SETUP_ENUM_MAX | SETUP_INDIRECT) ++ ++#ifndef __ASSEMBLY__ ++ ++#include "standard-headers/linux/types.h" ++ ++/* extensible setup data list node */ ++struct setup_data { ++ uint64_t next; ++ uint32_t type; ++ uint32_t len; ++ uint8_t data[]; ++}; ++ ++/* extensible setup indirect data node */ ++struct setup_indirect { ++ uint32_t type; ++ uint32_t reserved; /* Reserved, must be set to zero. */ ++ uint64_t len; ++ uint64_t addr; ++}; ++ ++/* ++ * The E820 memory region entry of the boot protocol ABI: ++ */ ++struct boot_e820_entry { ++ uint64_t addr; ++ uint64_t size; ++ uint32_t type; ++} QEMU_PACKED; ++ ++/* ++ * The boot loader is passing platform information via this Jailhouse-specific ++ * setup data structure. ++ */ ++struct jailhouse_setup_data { ++ struct { ++ uint16_t version; ++ uint16_t compatible_version; ++ } QEMU_PACKED hdr; ++ struct { ++ uint16_t pm_timer_address; ++ uint16_t num_cpus; ++ uint64_t pci_mmconfig_base; ++ uint32_t tsc_khz; ++ uint32_t apic_khz; ++ uint8_t standard_ioapic; ++ uint8_t cpu_ids[255]; ++ } QEMU_PACKED v1; ++ struct { ++ uint32_t flags; ++ } QEMU_PACKED v2; ++} QEMU_PACKED; ++ ++/* ++ * IMA buffer setup data information from the previous kernel during kexec ++ */ ++struct ima_setup_data { ++ uint64_t addr; ++ uint64_t size; ++} QEMU_PACKED; ++ ++#endif /* __ASSEMBLY__ */ ++ ++#endif /* _ASM_X86_SETUP_DATA_H */ +diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h +index dfb54eff6f..01503784d2 100644 +--- a/include/standard-headers/linux/ethtool.h ++++ b/include/standard-headers/linux/ethtool.h +@@ -2023,6 +2023,53 @@ static inline int ethtool_validate_duplex(uint8_t duplex) + #define IPV4_FLOW 0x10 /* hash only */ + #define IPV6_FLOW 0x11 /* hash only */ + #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ ++ ++/* Used for GTP-U IPv4 and IPv6. ++ * The format of GTP packets only includes ++ * elements such as TEID and GTP version. ++ * It is primarily intended for data communication of the UE. ++ */ ++#define GTPU_V4_FLOW 0x13 /* hash only */ ++#define GTPU_V6_FLOW 0x14 /* hash only */ ++ ++/* Use for GTP-C IPv4 and v6. ++ * The format of these GTP packets does not include TEID. ++ * Primarily expected to be used for communication ++ * to create sessions for UE data communication, ++ * commonly referred to as CSR (Create Session Request). ++ */ ++#define GTPC_V4_FLOW 0x15 /* hash only */ ++#define GTPC_V6_FLOW 0x16 /* hash only */ ++ ++/* Use for GTP-C IPv4 and v6. ++ * Unlike GTPC_V4_FLOW, the format of these GTP packets includes TEID. ++ * After session creation, it becomes this packet. ++ * This is mainly used for requests to realize UE handover. 
++ */ ++#define GTPC_TEID_V4_FLOW 0x17 /* hash only */ ++#define GTPC_TEID_V6_FLOW 0x18 /* hash only */ ++ ++/* Use for GTP-U and extended headers for the PSC (PDU Session Container). ++ * The format of these GTP packets includes TEID and QFI. ++ * In 5G communication using UPF (User Plane Function), ++ * data communication with this extended header is performed. ++ */ ++#define GTPU_EH_V4_FLOW 0x19 /* hash only */ ++#define GTPU_EH_V6_FLOW 0x1a /* hash only */ ++ ++/* Use for GTP-U IPv4 and v6 PSC (PDU Session Container) extended headers. ++ * This differs from GTPU_EH_V(4|6)_FLOW in that it is distinguished by ++ * UL/DL included in the PSC. ++ * There are differences in the data included based on Downlink/Uplink, ++ * and can be used to distinguish packets. ++ * The functions described so far are useful when you want to ++ * handle communication from the mobile network in UPF, PGW, etc. ++ */ ++#define GTPU_UL_V4_FLOW 0x1b /* hash only */ ++#define GTPU_UL_V6_FLOW 0x1c /* hash only */ ++#define GTPU_DL_V4_FLOW 0x1d /* hash only */ ++#define GTPU_DL_V6_FLOW 0x1e /* hash only */ ++ + /* Flag to enable additional fields in struct ethtool_rx_flow_spec */ + #define FLOW_EXT 0x80000000 + #define FLOW_MAC_EXT 0x40000000 +@@ -2037,6 +2084,7 @@ static inline int ethtool_validate_duplex(uint8_t duplex) + #define RXH_IP_DST (1 << 5) + #define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ + #define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ ++#define RXH_GTP_TEID (1 << 8) /* teid in case of GTP */ + #define RXH_DISCARD (1 << 31) + + #define RX_CLS_FLOW_DISC 0xffffffffffffffffULL +diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h +index fc0dcd10ae..bac9dbc49f 100644 +--- a/include/standard-headers/linux/fuse.h ++++ b/include/standard-headers/linux/fuse.h +@@ -211,6 +211,12 @@ + * 7.39 + * - add FUSE_DIRECT_IO_ALLOW_MMAP + * - add FUSE_STATX and related structures ++ * ++ * 7.40 ++ * - add max_stack_depth to fuse_init_out, add FUSE_PASSTHROUGH init flag ++ * - add backing_id to fuse_open_out, add FOPEN_PASSTHROUGH open flag ++ * - add FUSE_NO_EXPORT_SUPPORT init flag ++ * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag + */ + + #ifndef _LINUX_FUSE_H +@@ -242,7 +248,7 @@ + #define FUSE_KERNEL_VERSION 7 + + /** Minor version number of this interface */ +-#define FUSE_KERNEL_MINOR_VERSION 39 ++#define FUSE_KERNEL_MINOR_VERSION 40 + + /** The node ID of the root inode */ + #define FUSE_ROOT_ID 1 +@@ -349,6 +355,7 @@ struct fuse_file_lock { + * FOPEN_STREAM: the file is stream-like (no file position at all) + * FOPEN_NOFLUSH: don't flush data cache on close (unless FUSE_WRITEBACK_CACHE) + * FOPEN_PARALLEL_DIRECT_WRITES: Allow concurrent direct writes on the same inode ++ * FOPEN_PASSTHROUGH: passthrough read/write io for this open file + */ + #define FOPEN_DIRECT_IO (1 << 0) + #define FOPEN_KEEP_CACHE (1 << 1) +@@ -357,6 +364,7 @@ struct fuse_file_lock { + #define FOPEN_STREAM (1 << 4) + #define FOPEN_NOFLUSH (1 << 5) + #define FOPEN_PARALLEL_DIRECT_WRITES (1 << 6) ++#define FOPEN_PASSTHROUGH (1 << 7) + + /** + * INIT request/reply flags +@@ -406,6 +414,9 @@ struct fuse_file_lock { + * symlink and mknod (single group that matches parent) + * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
++ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support ++ * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit ++ * of the request ID indicates resend requests + */ + #define FUSE_ASYNC_READ (1 << 0) + #define FUSE_POSIX_LOCKS (1 << 1) +@@ -445,6 +456,9 @@ struct fuse_file_lock { + #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) + #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) + #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) ++#define FUSE_PASSTHROUGH (1ULL << 37) ++#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) ++#define FUSE_HAS_RESEND (1ULL << 39) + + /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ + #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP +@@ -631,6 +645,7 @@ enum fuse_notify_code { + FUSE_NOTIFY_STORE = 4, + FUSE_NOTIFY_RETRIEVE = 5, + FUSE_NOTIFY_DELETE = 6, ++ FUSE_NOTIFY_RESEND = 7, + FUSE_NOTIFY_CODE_MAX, + }; + +@@ -757,7 +772,7 @@ struct fuse_create_in { + struct fuse_open_out { + uint64_t fh; + uint32_t open_flags; +- uint32_t padding; ++ int32_t backing_id; + }; + + struct fuse_release_in { +@@ -873,7 +888,8 @@ struct fuse_init_out { + uint16_t max_pages; + uint16_t map_alignment; + uint32_t flags2; +- uint32_t unused[7]; ++ uint32_t max_stack_depth; ++ uint32_t unused[6]; + }; + + #define CUSE_INIT_INFO_MAX 4096 +@@ -956,6 +972,14 @@ struct fuse_fallocate_in { + uint32_t padding; + }; + ++/** ++ * FUSE request unique ID flag ++ * ++ * Indicates whether this is a resend request. The receiver should handle this ++ * request accordingly. ++ */ ++#define FUSE_UNIQUE_RESEND (1ULL << 63) ++ + struct fuse_in_header { + uint32_t len; + uint32_t opcode; +@@ -1045,9 +1069,18 @@ struct fuse_notify_retrieve_in { + uint64_t dummy4; + }; + ++struct fuse_backing_map { ++ int32_t fd; ++ uint32_t flags; ++ uint64_t padding; ++}; ++ + /* Device ioctls: */ + #define FUSE_DEV_IOC_MAGIC 229 + #define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t) ++#define FUSE_DEV_IOC_BACKING_OPEN _IOW(FUSE_DEV_IOC_MAGIC, 1, \ ++ struct fuse_backing_map) ++#define FUSE_DEV_IOC_BACKING_CLOSE _IOW(FUSE_DEV_IOC_MAGIC, 2, uint32_t) + + struct fuse_lseek_in { + uint64_t fh; +diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h +index f6bab08540..2221b0c383 100644 +--- a/include/standard-headers/linux/input-event-codes.h ++++ b/include/standard-headers/linux/input-event-codes.h +@@ -602,6 +602,7 @@ + + #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ + #define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */ ++#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */ + + #define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */ + #define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */ +diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h +index 2da48d3d4c..2db643ed8f 100644 +--- a/include/standard-headers/linux/virtio_gpu.h ++++ b/include/standard-headers/linux/virtio_gpu.h +@@ -309,6 +309,8 @@ struct virtio_gpu_cmd_submit { + + #define VIRTIO_GPU_CAPSET_VIRGL 1 + #define VIRTIO_GPU_CAPSET_VIRGL2 2 ++/* 3 is reserved for gfxstream */ ++#define VIRTIO_GPU_CAPSET_VENUS 4 + + /* VIRTIO_GPU_CMD_GET_CAPSET_INFO */ + struct virtio_gpu_get_capset_info { +diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h +index 3e2bc2c97e..4010216103 100644 +--- a/include/standard-headers/linux/virtio_pci.h ++++ b/include/standard-headers/linux/virtio_pci.h +@@ -240,7 +240,7 @@ struct 
virtio_pci_cfg_cap { + #define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 + #define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 + +-struct QEMU_PACKED virtio_admin_cmd_hdr { ++struct virtio_admin_cmd_hdr { + uint16_t opcode; + /* + * 1 - SR-IOV +@@ -252,20 +252,20 @@ struct QEMU_PACKED virtio_admin_cmd_hdr { + uint64_t group_member_id; + }; + +-struct QEMU_PACKED virtio_admin_cmd_status { ++struct virtio_admin_cmd_status { + uint16_t status; + uint16_t status_qualifier; + /* Unused, reserved for future extensions. */ + uint8_t reserved2[4]; + }; + +-struct QEMU_PACKED virtio_admin_cmd_legacy_wr_data { ++struct virtio_admin_cmd_legacy_wr_data { + uint8_t offset; /* Starting offset of the register(s) to write. */ + uint8_t reserved[7]; + uint8_t registers[]; + }; + +-struct QEMU_PACKED virtio_admin_cmd_legacy_rd_data { ++struct virtio_admin_cmd_legacy_rd_data { + uint8_t offset; /* Starting offset of the register(s) to read. */ + }; + +@@ -275,7 +275,7 @@ struct QEMU_PACKED virtio_admin_cmd_legacy_rd_data { + + #define VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO 4 + +-struct QEMU_PACKED virtio_admin_cmd_notify_info_data { ++struct virtio_admin_cmd_notify_info_data { + uint8_t flags; /* 0 = end of list, 1 = owner device, 2 = member device */ + uint8_t bar; /* BAR of the member or the owner device */ + uint8_t padding[6]; +diff --git a/include/standard-headers/linux/virtio_snd.h b/include/standard-headers/linux/virtio_snd.h +index 1af96b9fc6..860f12e0a4 100644 +--- a/include/standard-headers/linux/virtio_snd.h ++++ b/include/standard-headers/linux/virtio_snd.h +@@ -7,6 +7,14 @@ + + #include "standard-headers/linux/virtio_types.h" + ++/******************************************************************************* ++ * FEATURE BITS ++ */ ++enum { ++ /* device supports control elements */ ++ VIRTIO_SND_F_CTLS = 0 ++}; ++ + /******************************************************************************* + * CONFIGURATION SPACE + */ +@@ -17,6 +25,8 @@ struct virtio_snd_config { + uint32_t streams; + /* # of available channel maps */ + uint32_t chmaps; ++ /* # of available control elements */ ++ uint32_t controls; + }; + + enum { +@@ -55,6 +65,15 @@ enum { + /* channel map control request types */ + VIRTIO_SND_R_CHMAP_INFO = 0x0200, + ++ /* control element request types */ ++ VIRTIO_SND_R_CTL_INFO = 0x0300, ++ VIRTIO_SND_R_CTL_ENUM_ITEMS, ++ VIRTIO_SND_R_CTL_READ, ++ VIRTIO_SND_R_CTL_WRITE, ++ VIRTIO_SND_R_CTL_TLV_READ, ++ VIRTIO_SND_R_CTL_TLV_WRITE, ++ VIRTIO_SND_R_CTL_TLV_COMMAND, ++ + /* jack event types */ + VIRTIO_SND_EVT_JACK_CONNECTED = 0x1000, + VIRTIO_SND_EVT_JACK_DISCONNECTED, +@@ -63,6 +82,9 @@ enum { + VIRTIO_SND_EVT_PCM_PERIOD_ELAPSED = 0x1100, + VIRTIO_SND_EVT_PCM_XRUN, + ++ /* control element event types */ ++ VIRTIO_SND_EVT_CTL_NOTIFY = 0x1200, ++ + /* common status codes */ + VIRTIO_SND_S_OK = 0x8000, + VIRTIO_SND_S_BAD_MSG, +@@ -331,4 +353,136 @@ struct virtio_snd_chmap_info { + uint8_t positions[VIRTIO_SND_CHMAP_MAX_SIZE]; + }; + ++/******************************************************************************* ++ * CONTROL ELEMENTS MESSAGES ++ */ ++struct virtio_snd_ctl_hdr { ++ /* VIRTIO_SND_R_CTL_XXX */ ++ struct virtio_snd_hdr hdr; ++ /* 0 ... 
virtio_snd_config::controls - 1 */ ++ uint32_t control_id; ++}; ++ ++/* supported roles for control elements */ ++enum { ++ VIRTIO_SND_CTL_ROLE_UNDEFINED = 0, ++ VIRTIO_SND_CTL_ROLE_VOLUME, ++ VIRTIO_SND_CTL_ROLE_MUTE, ++ VIRTIO_SND_CTL_ROLE_GAIN ++}; ++ ++/* supported value types for control elements */ ++enum { ++ VIRTIO_SND_CTL_TYPE_BOOLEAN = 0, ++ VIRTIO_SND_CTL_TYPE_INTEGER, ++ VIRTIO_SND_CTL_TYPE_INTEGER64, ++ VIRTIO_SND_CTL_TYPE_ENUMERATED, ++ VIRTIO_SND_CTL_TYPE_BYTES, ++ VIRTIO_SND_CTL_TYPE_IEC958 ++}; ++ ++/* supported access rights for control elements */ ++enum { ++ VIRTIO_SND_CTL_ACCESS_READ = 0, ++ VIRTIO_SND_CTL_ACCESS_WRITE, ++ VIRTIO_SND_CTL_ACCESS_VOLATILE, ++ VIRTIO_SND_CTL_ACCESS_INACTIVE, ++ VIRTIO_SND_CTL_ACCESS_TLV_READ, ++ VIRTIO_SND_CTL_ACCESS_TLV_WRITE, ++ VIRTIO_SND_CTL_ACCESS_TLV_COMMAND ++}; ++ ++struct virtio_snd_ctl_info { ++ /* common header */ ++ struct virtio_snd_info hdr; ++ /* element role (VIRTIO_SND_CTL_ROLE_XXX) */ ++ uint32_t role; ++ /* element value type (VIRTIO_SND_CTL_TYPE_XXX) */ ++ uint32_t type; ++ /* element access right bit map (1 << VIRTIO_SND_CTL_ACCESS_XXX) */ ++ uint32_t access; ++ /* # of members in the element value */ ++ uint32_t count; ++ /* index for an element with a non-unique name */ ++ uint32_t index; ++ /* name identifier string for the element */ ++ uint8_t name[44]; ++ /* additional information about the element's value */ ++ union { ++ /* VIRTIO_SND_CTL_TYPE_INTEGER */ ++ struct { ++ /* minimum supported value */ ++ uint32_t min; ++ /* maximum supported value */ ++ uint32_t max; ++ /* fixed step size for value (0 = variable size) */ ++ uint32_t step; ++ } integer; ++ /* VIRTIO_SND_CTL_TYPE_INTEGER64 */ ++ struct { ++ /* minimum supported value */ ++ uint64_t min; ++ /* maximum supported value */ ++ uint64_t max; ++ /* fixed step size for value (0 = variable size) */ ++ uint64_t step; ++ } integer64; ++ /* VIRTIO_SND_CTL_TYPE_ENUMERATED */ ++ struct { ++ /* # of options supported for value */ ++ uint32_t items; ++ } enumerated; ++ } value; ++}; ++ ++struct virtio_snd_ctl_enum_item { ++ /* option name */ ++ uint8_t item[64]; ++}; ++ ++struct virtio_snd_ctl_iec958 { ++ /* AES/IEC958 channel status bits */ ++ uint8_t status[24]; ++ /* AES/IEC958 subcode bits */ ++ uint8_t subcode[147]; ++ /* nothing */ ++ uint8_t pad; ++ /* AES/IEC958 subframe bits */ ++ uint8_t dig_subframe[4]; ++}; ++ ++struct virtio_snd_ctl_value { ++ union { ++ /* VIRTIO_SND_CTL_TYPE_BOOLEAN|INTEGER value */ ++ uint32_t integer[128]; ++ /* VIRTIO_SND_CTL_TYPE_INTEGER64 value */ ++ uint64_t integer64[64]; ++ /* VIRTIO_SND_CTL_TYPE_ENUMERATED value (option indexes) */ ++ uint32_t enumerated[128]; ++ /* VIRTIO_SND_CTL_TYPE_BYTES value */ ++ uint8_t bytes[512]; ++ /* VIRTIO_SND_CTL_TYPE_IEC958 value */ ++ struct virtio_snd_ctl_iec958 iec958; ++ } value; ++}; ++ ++/* supported event reason types */ ++enum { ++ /* element's value has changed */ ++ VIRTIO_SND_CTL_EVT_MASK_VALUE = 0, ++ /* element's information has changed */ ++ VIRTIO_SND_CTL_EVT_MASK_INFO, ++ /* element's metadata has changed */ ++ VIRTIO_SND_CTL_EVT_MASK_TLV ++}; ++ ++struct virtio_snd_ctl_event { ++ /* VIRTIO_SND_EVT_CTL_NOTIFY */ ++ struct virtio_snd_hdr hdr; ++ /* 0 ... 
virtio_snd_config::controls - 1 */ ++ uint16_t control_id; ++ /* event reason bit map (1 << VIRTIO_SND_CTL_EVT_MASK_XXX) */ ++ uint16_t mask; ++}; ++ + #endif /* VIRTIO_SND_IF_H */ +diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h +index 2b040b5d60..11611f2b18 100644 +--- a/linux-headers/asm-arm64/kvm.h ++++ b/linux-headers/asm-arm64/kvm.h +@@ -37,9 +37,7 @@ + #include + #include + +-#define __KVM_HAVE_GUEST_DEBUG + #define __KVM_HAVE_IRQ_LINE +-#define __KVM_HAVE_READONLY_MEM + #define __KVM_HAVE_VCPU_EVENTS + + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +@@ -76,11 +74,11 @@ struct kvm_regs { + + /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ + #define KVM_ARM_DEVICE_TYPE_SHIFT 0 +-#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \ +- KVM_ARM_DEVICE_TYPE_SHIFT) ++#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \ ++ KVM_ARM_DEVICE_TYPE_SHIFT) + #define KVM_ARM_DEVICE_ID_SHIFT 16 +-#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \ +- KVM_ARM_DEVICE_ID_SHIFT) ++#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \ ++ KVM_ARM_DEVICE_ID_SHIFT) + + /* Supported device IDs */ + #define KVM_ARM_DEVICE_VGIC_V2 0 +@@ -163,6 +161,11 @@ struct kvm_sync_regs { + __u64 device_irq_level; + }; + ++/* Bits for run->s.regs.device_irq_level */ ++#define KVM_ARM_DEV_EL1_VTIMER (1 << 0) ++#define KVM_ARM_DEV_EL1_PTIMER (1 << 1) ++#define KVM_ARM_DEV_PMU (1 << 2) ++ + /* + * PMU filter structure. Describe a range of events with a particular + * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER. +diff --git a/linux-headers/asm-arm64/sve_context.h b/linux-headers/asm-arm64/sve_context.h +index 1d0e3e1d09..d1b1ec8cb1 100644 +--- a/linux-headers/asm-arm64/sve_context.h ++++ b/linux-headers/asm-arm64/sve_context.h +@@ -13,6 +13,17 @@ + + #define __SVE_VQ_BYTES 16 /* number of bytes per quadword */ + ++/* ++ * Yes, __SVE_VQ_MAX is 512 QUADWORDS. ++ * ++ * To help ensure forward portability, this is much larger than the ++ * current maximum value defined by the SVE architecture. While arrays ++ * or static allocations can be sized based on this value, watch out! ++ * It will waste a surprisingly large amount of memory. ++ * ++ * Dynamic sizing based on the actual runtime vector length is likely to ++ * be preferable for most purposes. ++ */ + #define __SVE_VQ_MIN 1 + #define __SVE_VQ_MAX 512 + +diff --git a/linux-headers/asm-generic/bitsperlong.h b/linux-headers/asm-generic/bitsperlong.h +index 75f320fa91..1fb4f0c9f2 100644 +--- a/linux-headers/asm-generic/bitsperlong.h ++++ b/linux-headers/asm-generic/bitsperlong.h +@@ -24,4 +24,8 @@ + #endif + #endif + ++#ifndef __BITS_PER_LONG_LONG ++#define __BITS_PER_LONG_LONG 64 ++#endif ++ + #endif /* __ASM_GENERIC_BITS_PER_LONG */ +diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h +index c23c16f3ae..92c32b7ec9 100644 +--- a/linux-headers/asm-loongarch/kvm.h ++++ b/linux-headers/asm-loongarch/kvm.h +@@ -14,9 +14,7 @@ + * Some parts derived from the x86 version of this file. + */ + +-#define __KVM_HAVE_READONLY_MEM + #define __KVM_HAVE_GUEST_DEBUG +- + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + #define KVM_DIRTY_LOG_PAGE_OFFSET 64 + #define __KVM_HAVE_IRQ_LINE +diff --git a/linux-headers/asm-mips/kvm.h b/linux-headers/asm-mips/kvm.h +index edcf717c43..9673dc9cb3 100644 +--- a/linux-headers/asm-mips/kvm.h ++++ b/linux-headers/asm-mips/kvm.h +@@ -20,8 +20,6 @@ + * Some parts derived from the x86 version of this file. 
+ */ + +-#define __KVM_HAVE_READONLY_MEM +- + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + + /* +diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h +index 9f18fa090f..1691297a76 100644 +--- a/linux-headers/asm-powerpc/kvm.h ++++ b/linux-headers/asm-powerpc/kvm.h +@@ -28,7 +28,6 @@ + #define __KVM_HAVE_PPC_SMT + #define __KVM_HAVE_IRQCHIP + #define __KVM_HAVE_IRQ_LINE +-#define __KVM_HAVE_GUEST_DEBUG + + /* Not always available, but if it is, this is the correct offset. */ + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +@@ -733,4 +732,48 @@ struct kvm_ppc_xive_eq { + #define KVM_XIVE_TIMA_PAGE_OFFSET 0 + #define KVM_XIVE_ESB_PAGE_OFFSET 4 + ++/* for KVM_PPC_GET_PVINFO */ ++ ++#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0) ++ ++struct kvm_ppc_pvinfo { ++ /* out */ ++ __u32 flags; ++ __u32 hcall[4]; ++ __u8 pad[108]; ++}; ++ ++/* for KVM_PPC_GET_SMMU_INFO */ ++#define KVM_PPC_PAGE_SIZES_MAX_SZ 8 ++ ++struct kvm_ppc_one_page_size { ++ __u32 page_shift; /* Page shift (or 0) */ ++ __u32 pte_enc; /* Encoding in the HPTE (>>12) */ ++}; ++ ++struct kvm_ppc_one_seg_page_size { ++ __u32 page_shift; /* Base page shift of segment (or 0) */ ++ __u32 slb_enc; /* SLB encoding for BookS */ ++ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ]; ++}; ++ ++#define KVM_PPC_PAGE_SIZES_REAL 0x00000001 ++#define KVM_PPC_1T_SEGMENTS 0x00000002 ++#define KVM_PPC_NO_HASH 0x00000004 ++ ++struct kvm_ppc_smmu_info { ++ __u64 flags; ++ __u32 slb_size; ++ __u16 data_keys; /* # storage keys supported for data */ ++ __u16 instr_keys; /* # storage keys supported for instructions */ ++ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; ++}; ++ ++/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */ ++struct kvm_ppc_resize_hpt { ++ __u64 flags; ++ __u32 shift; ++ __u32 pad; ++}; ++ + #endif /* __LINUX_KVM_POWERPC_H */ +diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h +index 7499e88a94..b1c503c295 100644 +--- a/linux-headers/asm-riscv/kvm.h ++++ b/linux-headers/asm-riscv/kvm.h +@@ -16,7 +16,6 @@ + #include + + #define __KVM_HAVE_IRQ_LINE +-#define __KVM_HAVE_READONLY_MEM + + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +@@ -166,6 +165,8 @@ enum KVM_RISCV_ISA_EXT_ID { + KVM_RISCV_ISA_EXT_ZVFH, + KVM_RISCV_ISA_EXT_ZVFHMIN, + KVM_RISCV_ISA_EXT_ZFA, ++ KVM_RISCV_ISA_EXT_ZTSO, ++ KVM_RISCV_ISA_EXT_ZACAS, + KVM_RISCV_ISA_EXT_MAX, + }; + +diff --git a/linux-headers/asm-s390/kvm.h b/linux-headers/asm-s390/kvm.h +index 023a2763a9..684c4e1205 100644 +--- a/linux-headers/asm-s390/kvm.h ++++ b/linux-headers/asm-s390/kvm.h +@@ -12,7 +12,320 @@ + #include + + #define __KVM_S390 +-#define __KVM_HAVE_GUEST_DEBUG ++ ++struct kvm_s390_skeys { ++ __u64 start_gfn; ++ __u64 count; ++ __u64 skeydata_addr; ++ __u32 flags; ++ __u32 reserved[9]; ++}; ++ ++#define KVM_S390_CMMA_PEEK (1 << 0) ++ ++/** ++ * kvm_s390_cmma_log - Used for CMMA migration. ++ * ++ * Used both for input and output. ++ * ++ * @start_gfn: Guest page number to start from. ++ * @count: Size of the result buffer. ++ * @flags: Control operation mode via KVM_S390_CMMA_* flags ++ * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty ++ * pages are still remaining. ++ * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set ++ * in the PGSTE. ++ * @values: Pointer to the values buffer. ++ * ++ * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls. 
++ */ ++struct kvm_s390_cmma_log { ++ __u64 start_gfn; ++ __u32 count; ++ __u32 flags; ++ union { ++ __u64 remaining; ++ __u64 mask; ++ }; ++ __u64 values; ++}; ++ ++#define KVM_S390_RESET_POR 1 ++#define KVM_S390_RESET_CLEAR 2 ++#define KVM_S390_RESET_SUBSYSTEM 4 ++#define KVM_S390_RESET_CPU_INIT 8 ++#define KVM_S390_RESET_IPL 16 ++ ++/* for KVM_S390_MEM_OP */ ++struct kvm_s390_mem_op { ++ /* in */ ++ __u64 gaddr; /* the guest address */ ++ __u64 flags; /* flags */ ++ __u32 size; /* amount of bytes */ ++ __u32 op; /* type of operation */ ++ __u64 buf; /* buffer in userspace */ ++ union { ++ struct { ++ __u8 ar; /* the access register number */ ++ __u8 key; /* access key, ignored if flag unset */ ++ __u8 pad1[6]; /* ignored */ ++ __u64 old_addr; /* ignored if cmpxchg flag unset */ ++ }; ++ __u32 sida_offset; /* offset into the sida */ ++ __u8 reserved[32]; /* ignored */ ++ }; ++}; ++/* types for kvm_s390_mem_op->op */ ++#define KVM_S390_MEMOP_LOGICAL_READ 0 ++#define KVM_S390_MEMOP_LOGICAL_WRITE 1 ++#define KVM_S390_MEMOP_SIDA_READ 2 ++#define KVM_S390_MEMOP_SIDA_WRITE 3 ++#define KVM_S390_MEMOP_ABSOLUTE_READ 4 ++#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 ++#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6 ++ ++/* flags for kvm_s390_mem_op->flags */ ++#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) ++#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) ++#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) ++ ++/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */ ++#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0) ++#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1) ++ ++struct kvm_s390_psw { ++ __u64 mask; ++ __u64 addr; ++}; ++ ++/* valid values for type in kvm_s390_interrupt */ ++#define KVM_S390_SIGP_STOP 0xfffe0000u ++#define KVM_S390_PROGRAM_INT 0xfffe0001u ++#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u ++#define KVM_S390_RESTART 0xfffe0003u ++#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u ++#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u ++#define KVM_S390_MCHK 0xfffe1000u ++#define KVM_S390_INT_CLOCK_COMP 0xffff1004u ++#define KVM_S390_INT_CPU_TIMER 0xffff1005u ++#define KVM_S390_INT_VIRTIO 0xffff2603u ++#define KVM_S390_INT_SERVICE 0xffff2401u ++#define KVM_S390_INT_EMERGENCY 0xffff1201u ++#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u ++/* Anything below 0xfffe0000u is taken by INT_IO */ ++#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \ ++ (((schid)) | \ ++ ((ssid) << 16) | \ ++ ((cssid) << 18) | \ ++ ((ai) << 26)) ++#define KVM_S390_INT_IO_MIN 0x00000000u ++#define KVM_S390_INT_IO_MAX 0xfffdffffu ++#define KVM_S390_INT_IO_AI_MASK 0x04000000u ++ ++ ++struct kvm_s390_interrupt { ++ __u32 type; ++ __u32 parm; ++ __u64 parm64; ++}; ++ ++struct kvm_s390_io_info { ++ __u16 subchannel_id; ++ __u16 subchannel_nr; ++ __u32 io_int_parm; ++ __u32 io_int_word; ++}; ++ ++struct kvm_s390_ext_info { ++ __u32 ext_params; ++ __u32 pad; ++ __u64 ext_params2; ++}; ++ ++struct kvm_s390_pgm_info { ++ __u64 trans_exc_code; ++ __u64 mon_code; ++ __u64 per_address; ++ __u32 data_exc_code; ++ __u16 code; ++ __u16 mon_class_nr; ++ __u8 per_code; ++ __u8 per_atmid; ++ __u8 exc_access_id; ++ __u8 per_access_id; ++ __u8 op_access_id; ++#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01 ++#define KVM_S390_PGM_FLAGS_ILC_0 0x02 ++#define KVM_S390_PGM_FLAGS_ILC_1 0x04 ++#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06 ++#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08 ++ __u8 flags; ++ __u8 pad[2]; ++}; ++ ++struct kvm_s390_prefix_info { ++ __u32 address; ++}; ++ ++struct kvm_s390_extcall_info { ++ __u16 code; ++}; ++ 
++struct kvm_s390_emerg_info { ++ __u16 code; ++}; ++ ++#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01 ++struct kvm_s390_stop_info { ++ __u32 flags; ++}; ++ ++struct kvm_s390_mchk_info { ++ __u64 cr14; ++ __u64 mcic; ++ __u64 failing_storage_address; ++ __u32 ext_damage_code; ++ __u32 pad; ++ __u8 fixed_logout[16]; ++}; ++ ++struct kvm_s390_irq { ++ __u64 type; ++ union { ++ struct kvm_s390_io_info io; ++ struct kvm_s390_ext_info ext; ++ struct kvm_s390_pgm_info pgm; ++ struct kvm_s390_emerg_info emerg; ++ struct kvm_s390_extcall_info extcall; ++ struct kvm_s390_prefix_info prefix; ++ struct kvm_s390_stop_info stop; ++ struct kvm_s390_mchk_info mchk; ++ char reserved[64]; ++ } u; ++}; ++ ++struct kvm_s390_irq_state { ++ __u64 buf; ++ __u32 flags; /* will stay unused for compatibility reasons */ ++ __u32 len; ++ __u32 reserved[4]; /* will stay unused for compatibility reasons */ ++}; ++ ++struct kvm_s390_ucas_mapping { ++ __u64 user_addr; ++ __u64 vcpu_addr; ++ __u64 length; ++}; ++ ++struct kvm_s390_pv_sec_parm { ++ __u64 origin; ++ __u64 length; ++}; ++ ++struct kvm_s390_pv_unp { ++ __u64 addr; ++ __u64 size; ++ __u64 tweak; ++}; ++ ++enum pv_cmd_dmp_id { ++ KVM_PV_DUMP_INIT, ++ KVM_PV_DUMP_CONFIG_STOR_STATE, ++ KVM_PV_DUMP_COMPLETE, ++ KVM_PV_DUMP_CPU, ++}; ++ ++struct kvm_s390_pv_dmp { ++ __u64 subcmd; ++ __u64 buff_addr; ++ __u64 buff_len; ++ __u64 gaddr; /* For dump storage state */ ++ __u64 reserved[4]; ++}; ++ ++enum pv_cmd_info_id { ++ KVM_PV_INFO_VM, ++ KVM_PV_INFO_DUMP, ++}; ++ ++struct kvm_s390_pv_info_dump { ++ __u64 dump_cpu_buffer_len; ++ __u64 dump_config_mem_buffer_per_1m; ++ __u64 dump_config_finalize_len; ++}; ++ ++struct kvm_s390_pv_info_vm { ++ __u64 inst_calls_list[4]; ++ __u64 max_cpus; ++ __u64 max_guests; ++ __u64 max_guest_addr; ++ __u64 feature_indication; ++}; ++ ++struct kvm_s390_pv_info_header { ++ __u32 id; ++ __u32 len_max; ++ __u32 len_written; ++ __u32 reserved; ++}; ++ ++struct kvm_s390_pv_info { ++ struct kvm_s390_pv_info_header header; ++ union { ++ struct kvm_s390_pv_info_dump dump; ++ struct kvm_s390_pv_info_vm vm; ++ }; ++}; ++ ++enum pv_cmd_id { ++ KVM_PV_ENABLE, ++ KVM_PV_DISABLE, ++ KVM_PV_SET_SEC_PARMS, ++ KVM_PV_UNPACK, ++ KVM_PV_VERIFY, ++ KVM_PV_PREP_RESET, ++ KVM_PV_UNSHARE_ALL, ++ KVM_PV_INFO, ++ KVM_PV_DUMP, ++ KVM_PV_ASYNC_CLEANUP_PREPARE, ++ KVM_PV_ASYNC_CLEANUP_PERFORM, ++}; ++ ++struct kvm_pv_cmd { ++ __u32 cmd; /* Command to be executed */ ++ __u16 rc; /* Ultravisor return code */ ++ __u16 rrc; /* Ultravisor return reason code */ ++ __u64 data; /* Data or address */ ++ __u32 flags; /* flags for future extensions. 
Must be 0 for now */ ++ __u32 reserved[3]; ++}; ++ ++struct kvm_s390_zpci_op { ++ /* in */ ++ __u32 fh; /* target device */ ++ __u8 op; /* operation to perform */ ++ __u8 pad[3]; ++ union { ++ /* for KVM_S390_ZPCIOP_REG_AEN */ ++ struct { ++ __u64 ibv; /* Guest addr of interrupt bit vector */ ++ __u64 sb; /* Guest addr of summary bit */ ++ __u32 flags; ++ __u32 noi; /* Number of interrupts */ ++ __u8 isc; /* Guest interrupt subclass */ ++ __u8 sbo; /* Offset of guest summary bit vector */ ++ __u16 pad; ++ } reg_aen; ++ __u64 reserved[8]; ++ } u; ++}; ++ ++/* types for kvm_s390_zpci_op->op */ ++#define KVM_S390_ZPCIOP_REG_AEN 0 ++#define KVM_S390_ZPCIOP_DEREG_AEN 1 ++ ++/* flags for kvm_s390_zpci_op->u.reg_aen.flags */ ++#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) + + /* Device control API: s390-specific devices */ + #define KVM_DEV_FLIC_GET_ALL_IRQS 1 +diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h +index 003fb74534..c7a4ac53ce 100644 +--- a/linux-headers/asm-x86/kvm.h ++++ b/linux-headers/asm-x86/kvm.h +@@ -7,6 +7,8 @@ + * + */ + ++#include ++#include + #include + #include + #include +@@ -40,7 +42,6 @@ + #define __KVM_HAVE_IRQ_LINE + #define __KVM_HAVE_MSI + #define __KVM_HAVE_USER_NMI +-#define __KVM_HAVE_GUEST_DEBUG + #define __KVM_HAVE_MSIX + #define __KVM_HAVE_MCE + #define __KVM_HAVE_PIT_STATE2 +@@ -49,7 +50,6 @@ + #define __KVM_HAVE_DEBUGREGS + #define __KVM_HAVE_XSAVE + #define __KVM_HAVE_XCRS +-#define __KVM_HAVE_READONLY_MEM + + /* Architectural interrupt line count. */ + #define KVM_NR_INTERRUPTS 256 +@@ -455,8 +455,13 @@ struct kvm_sync_regs { + + #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 + +-/* attributes for system fd (group 0) */ +-#define KVM_X86_XCOMP_GUEST_SUPP 0 ++/* vendor-independent attributes for system fd (group 0) */ ++#define KVM_X86_GRP_SYSTEM 0 ++# define KVM_X86_XCOMP_GUEST_SUPP 0 ++ ++/* vendor-specific groups and attributes for system fd */ ++#define KVM_X86_GRP_SEV 1 ++# define KVM_X86_SEV_VMSA_FEATURES 0 + + struct kvm_vmx_nested_state_data { + __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; +@@ -524,9 +529,313 @@ struct kvm_pmu_event_filter { + #define KVM_PMU_EVENT_ALLOW 0 + #define KVM_PMU_EVENT_DENY 1 + +-#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0) ++#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0) + #define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS) + ++/* for KVM_CAP_MCE */ ++struct kvm_x86_mce { ++ __u64 status; ++ __u64 addr; ++ __u64 misc; ++ __u64 mcg_status; ++ __u8 bank; ++ __u8 pad1[7]; ++ __u64 pad2[3]; ++}; ++ ++/* for KVM_CAP_XEN_HVM */ ++#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) ++#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) ++#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) ++#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) ++#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) ++#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) ++#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) ++#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) ++#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8) ++ ++struct kvm_xen_hvm_config { ++ __u32 flags; ++ __u32 msr; ++ __u64 blob_addr_32; ++ __u64 blob_addr_64; ++ __u8 blob_size_32; ++ __u8 blob_size_64; ++ __u8 pad2[30]; ++}; ++ ++struct kvm_xen_hvm_attr { ++ __u16 type; ++ __u16 pad[3]; ++ union { ++ __u8 long_mode; ++ __u8 vector; ++ __u8 runstate_update_flag; ++ union { ++ __u64 gfn; ++#define KVM_XEN_INVALID_GFN ((__u64)-1) ++ __u64 hva; ++ } shared_info; ++ struct { ++ __u32 send_port; ++ __u32 type; /* EVTCHNSTAT_ipi 
/ EVTCHNSTAT_interdomain */ ++ __u32 flags; ++#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0) ++#define KVM_XEN_EVTCHN_UPDATE (1 << 1) ++#define KVM_XEN_EVTCHN_RESET (1 << 2) ++ /* ++ * Events sent by the guest are either looped back to ++ * the guest itself (potentially on a different port#) ++ * or signalled via an eventfd. ++ */ ++ union { ++ struct { ++ __u32 port; ++ __u32 vcpu; ++ __u32 priority; ++ } port; ++ struct { ++ __u32 port; /* Zero for eventfd */ ++ __s32 fd; ++ } eventfd; ++ __u32 padding[4]; ++ } deliver; ++ } evtchn; ++ __u32 xen_version; ++ __u64 pad[8]; ++ } u; ++}; ++ ++ ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ ++#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 ++#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 ++#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ ++#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 ++#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4 ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */ ++#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5 ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */ ++#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6 ++ ++struct kvm_xen_vcpu_attr { ++ __u16 type; ++ __u16 pad[3]; ++ union { ++ __u64 gpa; ++#define KVM_XEN_INVALID_GPA ((__u64)-1) ++ __u64 hva; ++ __u64 pad[8]; ++ struct { ++ __u64 state; ++ __u64 state_entry_time; ++ __u64 time_running; ++ __u64 time_runnable; ++ __u64 time_blocked; ++ __u64 time_offline; ++ } runstate; ++ __u32 vcpu_id; ++ struct { ++ __u32 port; ++ __u32 priority; ++ __u64 expires_ns; ++ } timer; ++ __u8 vector; ++ } u; ++}; ++ ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ ++#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 ++#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 ++#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2 ++#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 ++#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 ++#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ ++#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 ++#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7 ++#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8 ++/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */ ++#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9 ++ ++/* Secure Encrypted Virtualization command */ ++enum sev_cmd_id { ++ /* Guest initialization commands */ ++ KVM_SEV_INIT = 0, ++ KVM_SEV_ES_INIT, ++ /* Guest launch commands */ ++ KVM_SEV_LAUNCH_START, ++ KVM_SEV_LAUNCH_UPDATE_DATA, ++ KVM_SEV_LAUNCH_UPDATE_VMSA, ++ KVM_SEV_LAUNCH_SECRET, ++ KVM_SEV_LAUNCH_MEASURE, ++ KVM_SEV_LAUNCH_FINISH, ++ /* Guest migration commands (outgoing) */ ++ KVM_SEV_SEND_START, ++ KVM_SEV_SEND_UPDATE_DATA, ++ KVM_SEV_SEND_UPDATE_VMSA, ++ KVM_SEV_SEND_FINISH, ++ /* Guest migration commands (incoming) */ ++ KVM_SEV_RECEIVE_START, ++ KVM_SEV_RECEIVE_UPDATE_DATA, ++ KVM_SEV_RECEIVE_UPDATE_VMSA, ++ KVM_SEV_RECEIVE_FINISH, ++ /* Guest status and debug commands */ ++ KVM_SEV_GUEST_STATUS, ++ KVM_SEV_DBG_DECRYPT, ++ KVM_SEV_DBG_ENCRYPT, ++ /* Guest certificates commands */ ++ KVM_SEV_CERT_EXPORT, ++ /* Attestation report */ ++ KVM_SEV_GET_ATTESTATION_REPORT, ++ /* Guest Migration Extension */ ++ KVM_SEV_SEND_CANCEL, ++ ++ /* Second time is the charm; improved versions of the above ioctls. 
*/ ++ KVM_SEV_INIT2, ++ ++ /* Hygon CSV batch command */ ++ KVM_CSV_COMMAND_BATCH = 0x18, ++ ++ KVM_SEV_NR_MAX, ++}; ++ ++struct kvm_sev_cmd { ++ __u32 id; ++ __u32 pad0; ++ __u64 data; ++ __u32 error; ++ __u32 sev_fd; ++}; ++ ++struct kvm_sev_init { ++ __u64 vmsa_features; ++ __u32 flags; ++ __u32 pad[9]; ++}; ++ ++struct kvm_sev_launch_start { ++ __u32 handle; ++ __u32 policy; ++ __u64 dh_uaddr; ++ __u32 dh_len; ++ __u32 pad0; ++ __u64 session_uaddr; ++ __u32 session_len; ++ __u32 pad1; ++}; ++ ++struct kvm_sev_launch_update_data { ++ __u64 uaddr; ++ __u32 len; ++ __u32 pad0; ++}; ++ ++ ++struct kvm_sev_launch_secret { ++ __u64 hdr_uaddr; ++ __u32 hdr_len; ++ __u32 pad0; ++ __u64 guest_uaddr; ++ __u32 guest_len; ++ __u32 pad1; ++ __u64 trans_uaddr; ++ __u32 trans_len; ++ __u32 pad2; ++}; ++ ++struct kvm_sev_launch_measure { ++ __u64 uaddr; ++ __u32 len; ++ __u32 pad0; ++}; ++ ++struct kvm_sev_guest_status { ++ __u32 handle; ++ __u32 policy; ++ __u32 state; ++}; ++ ++struct kvm_sev_dbg { ++ __u64 src_uaddr; ++ __u64 dst_uaddr; ++ __u32 len; ++ __u32 pad0; ++}; ++ ++struct kvm_sev_attestation_report { ++ __u8 mnonce[16]; ++ __u64 uaddr; ++ __u32 len; ++ __u32 pad0; ++}; ++ ++struct kvm_sev_send_start { ++ __u32 policy; ++ __u32 pad0; ++ __u64 pdh_cert_uaddr; ++ __u32 pdh_cert_len; ++ __u32 pad1; ++ __u64 plat_certs_uaddr; ++ __u32 plat_certs_len; ++ __u32 pad2; ++ __u64 amd_certs_uaddr; ++ __u32 amd_certs_len; ++ __u32 pad3; ++ __u64 session_uaddr; ++ __u32 session_len; ++ __u32 pad4; ++}; ++ ++struct kvm_sev_send_update_data { ++ __u64 hdr_uaddr; ++ __u32 hdr_len; ++ __u32 pad0; ++ __u64 guest_uaddr; ++ __u32 guest_len; ++ __u32 pad1; ++ __u64 trans_uaddr; ++ __u32 trans_len; ++ __u32 pad2; ++}; ++ ++struct kvm_sev_receive_start { ++ __u32 handle; ++ __u32 policy; ++ __u64 pdh_uaddr; ++ __u32 pdh_len; ++ __u32 pad0; ++ __u64 session_uaddr; ++ __u32 session_len; ++ __u32 pad1; ++}; ++ ++struct kvm_sev_receive_update_data { ++ __u64 hdr_uaddr; ++ __u32 hdr_len; ++ __u32 pad0; ++ __u64 guest_uaddr; ++ __u32 guest_len; ++ __u32 pad1; ++ __u64 trans_uaddr; ++ __u32 trans_len; ++ __u32 pad2; ++}; ++ ++#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0) ++#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1) ++ ++struct kvm_hyperv_eventfd { ++ __u32 conn_id; ++ __s32 fd; ++ __u32 flags; ++ __u32 padding[3]; ++}; ++ ++#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff ++#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) ++ + /* + * Masked event layout. + * Bits Description +@@ -547,10 +856,10 @@ struct kvm_pmu_event_filter { + ((__u64)(!!(exclude)) << 55)) + + #define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \ +- (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32)) +-#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56)) +-#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8)) +-#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55)) ++ (__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32)) ++#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (__GENMASK_ULL(63, 56)) ++#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (__GENMASK_ULL(15, 8)) ++#define KVM_PMU_MASKED_ENTRY_EXCLUDE (_BITULL(55)) + #define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56) + + /* for KVM_{GET,SET,HAS}_DEVICE_ATTR */ +@@ -558,9 +867,11 @@ struct kvm_pmu_event_filter { + #define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */ + + /* x86-specific KVM_EXIT_HYPERCALL flags. 
*/ +-#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0) ++#define KVM_EXIT_HYPERCALL_LONG_MODE _BITULL(0) + + #define KVM_X86_DEFAULT_VM 0 + #define KVM_X86_SW_PROTECTED_VM 1 ++#define KVM_X86_SEV_VM 2 ++#define KVM_X86_SEV_ES_VM 3 + + #endif /* _ASM_X86_KVM_H */ +diff --git a/linux-headers/linux/bits.h b/linux-headers/linux/bits.h +new file mode 100644 +index 0000000000..d9897771be +--- /dev/null ++++ b/linux-headers/linux/bits.h +@@ -0,0 +1,15 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* bits.h: Macros for dealing with bitmasks. */ ++ ++#ifndef _LINUX_BITS_H ++#define _LINUX_BITS_H ++ ++#define __GENMASK(h, l) \ ++ (((~_UL(0)) - (_UL(1) << (l)) + 1) & \ ++ (~_UL(0) >> (__BITS_PER_LONG - 1 - (h)))) ++ ++#define __GENMASK_ULL(h, l) \ ++ (((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \ ++ (~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h)))) ++ ++#endif /* _LINUX_BITS_H */ +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index a1bbb080c3..d2f6007fb8 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h +@@ -18,6 +18,11 @@ + + #define KVM_API_VERSION 12 + ++/* ++ * Backwards-compatible definitions. ++ */ ++#define __KVM_HAVE_GUEST_DEBUG ++ + /* for KVM_SET_USER_MEMORY_REGION */ + struct kvm_userspace_memory_region { + __u32 slot; +@@ -87,43 +92,6 @@ struct kvm_pit_config { + + #define KVM_PIT_SPEAKER_DUMMY 1 + +-struct kvm_s390_skeys { +- __u64 start_gfn; +- __u64 count; +- __u64 skeydata_addr; +- __u32 flags; +- __u32 reserved[9]; +-}; +- +-#define KVM_S390_CMMA_PEEK (1 << 0) +- +-/** +- * kvm_s390_cmma_log - Used for CMMA migration. +- * +- * Used both for input and output. +- * +- * @start_gfn: Guest page number to start from. +- * @count: Size of the result buffer. +- * @flags: Control operation mode via KVM_S390_CMMA_* flags +- * @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty +- * pages are still remaining. +- * @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set +- * in the PGSTE. +- * @values: Pointer to the values buffer. +- * +- * Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls. 
+- */ +-struct kvm_s390_cmma_log { +- __u64 start_gfn; +- __u32 count; +- __u32 flags; +- union { +- __u64 remaining; +- __u64 mask; +- }; +- __u64 values; +-}; +- + struct kvm_hyperv_exit { + #define KVM_EXIT_HYPERV_SYNIC 1 + #define KVM_EXIT_HYPERV_HCALL 2 +@@ -316,11 +284,6 @@ struct kvm_run { + __u32 ipb; + } s390_sieic; + /* KVM_EXIT_S390_RESET */ +-#define KVM_S390_RESET_POR 1 +-#define KVM_S390_RESET_CLEAR 2 +-#define KVM_S390_RESET_SUBSYSTEM 4 +-#define KVM_S390_RESET_CPU_INIT 8 +-#define KVM_S390_RESET_IPL 16 + __u64 s390_reset_flags; + /* KVM_EXIT_S390_UCONTROL */ + struct { +@@ -535,43 +498,6 @@ struct kvm_translation { + __u8 pad[5]; + }; + +-/* for KVM_S390_MEM_OP */ +-struct kvm_s390_mem_op { +- /* in */ +- __u64 gaddr; /* the guest address */ +- __u64 flags; /* flags */ +- __u32 size; /* amount of bytes */ +- __u32 op; /* type of operation */ +- __u64 buf; /* buffer in userspace */ +- union { +- struct { +- __u8 ar; /* the access register number */ +- __u8 key; /* access key, ignored if flag unset */ +- __u8 pad1[6]; /* ignored */ +- __u64 old_addr; /* ignored if cmpxchg flag unset */ +- }; +- __u32 sida_offset; /* offset into the sida */ +- __u8 reserved[32]; /* ignored */ +- }; +-}; +-/* types for kvm_s390_mem_op->op */ +-#define KVM_S390_MEMOP_LOGICAL_READ 0 +-#define KVM_S390_MEMOP_LOGICAL_WRITE 1 +-#define KVM_S390_MEMOP_SIDA_READ 2 +-#define KVM_S390_MEMOP_SIDA_WRITE 3 +-#define KVM_S390_MEMOP_ABSOLUTE_READ 4 +-#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 +-#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6 +- +-/* flags for kvm_s390_mem_op->flags */ +-#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) +-#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) +-#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) +- +-/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */ +-#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0) +-#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1) +- + /* for KVM_INTERRUPT */ + struct kvm_interrupt { + /* in */ +@@ -636,124 +562,6 @@ struct kvm_mp_state { + __u32 mp_state; + }; + +-struct kvm_s390_psw { +- __u64 mask; +- __u64 addr; +-}; +- +-/* valid values for type in kvm_s390_interrupt */ +-#define KVM_S390_SIGP_STOP 0xfffe0000u +-#define KVM_S390_PROGRAM_INT 0xfffe0001u +-#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u +-#define KVM_S390_RESTART 0xfffe0003u +-#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u +-#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u +-#define KVM_S390_MCHK 0xfffe1000u +-#define KVM_S390_INT_CLOCK_COMP 0xffff1004u +-#define KVM_S390_INT_CPU_TIMER 0xffff1005u +-#define KVM_S390_INT_VIRTIO 0xffff2603u +-#define KVM_S390_INT_SERVICE 0xffff2401u +-#define KVM_S390_INT_EMERGENCY 0xffff1201u +-#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u +-/* Anything below 0xfffe0000u is taken by INT_IO */ +-#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \ +- (((schid)) | \ +- ((ssid) << 16) | \ +- ((cssid) << 18) | \ +- ((ai) << 26)) +-#define KVM_S390_INT_IO_MIN 0x00000000u +-#define KVM_S390_INT_IO_MAX 0xfffdffffu +-#define KVM_S390_INT_IO_AI_MASK 0x04000000u +- +- +-struct kvm_s390_interrupt { +- __u32 type; +- __u32 parm; +- __u64 parm64; +-}; +- +-struct kvm_s390_io_info { +- __u16 subchannel_id; +- __u16 subchannel_nr; +- __u32 io_int_parm; +- __u32 io_int_word; +-}; +- +-struct kvm_s390_ext_info { +- __u32 ext_params; +- __u32 pad; +- __u64 ext_params2; +-}; +- +-struct kvm_s390_pgm_info { +- __u64 trans_exc_code; +- __u64 mon_code; +- __u64 per_address; +- __u32 data_exc_code; +- __u16 code; +- __u16 mon_class_nr; +- __u8 per_code; 
+- __u8 per_atmid; +- __u8 exc_access_id; +- __u8 per_access_id; +- __u8 op_access_id; +-#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01 +-#define KVM_S390_PGM_FLAGS_ILC_0 0x02 +-#define KVM_S390_PGM_FLAGS_ILC_1 0x04 +-#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06 +-#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08 +- __u8 flags; +- __u8 pad[2]; +-}; +- +-struct kvm_s390_prefix_info { +- __u32 address; +-}; +- +-struct kvm_s390_extcall_info { +- __u16 code; +-}; +- +-struct kvm_s390_emerg_info { +- __u16 code; +-}; +- +-#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01 +-struct kvm_s390_stop_info { +- __u32 flags; +-}; +- +-struct kvm_s390_mchk_info { +- __u64 cr14; +- __u64 mcic; +- __u64 failing_storage_address; +- __u32 ext_damage_code; +- __u32 pad; +- __u8 fixed_logout[16]; +-}; +- +-struct kvm_s390_irq { +- __u64 type; +- union { +- struct kvm_s390_io_info io; +- struct kvm_s390_ext_info ext; +- struct kvm_s390_pgm_info pgm; +- struct kvm_s390_emerg_info emerg; +- struct kvm_s390_extcall_info extcall; +- struct kvm_s390_prefix_info prefix; +- struct kvm_s390_stop_info stop; +- struct kvm_s390_mchk_info mchk; +- char reserved[64]; +- } u; +-}; +- +-struct kvm_s390_irq_state { +- __u64 buf; +- __u32 flags; /* will stay unused for compatibility reasons */ +- __u32 len; +- __u32 reserved[4]; /* will stay unused for compatibility reasons */ +-}; +- + /* for KVM_SET_GUEST_DEBUG */ + + #define KVM_GUESTDBG_ENABLE 0x00000001 +@@ -809,50 +617,6 @@ struct kvm_enable_cap { + __u8 pad[64]; + }; + +-/* for KVM_PPC_GET_PVINFO */ +- +-#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0) +- +-struct kvm_ppc_pvinfo { +- /* out */ +- __u32 flags; +- __u32 hcall[4]; +- __u8 pad[108]; +-}; +- +-/* for KVM_PPC_GET_SMMU_INFO */ +-#define KVM_PPC_PAGE_SIZES_MAX_SZ 8 +- +-struct kvm_ppc_one_page_size { +- __u32 page_shift; /* Page shift (or 0) */ +- __u32 pte_enc; /* Encoding in the HPTE (>>12) */ +-}; +- +-struct kvm_ppc_one_seg_page_size { +- __u32 page_shift; /* Base page shift of segment (or 0) */ +- __u32 slb_enc; /* SLB encoding for BookS */ +- struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ]; +-}; +- +-#define KVM_PPC_PAGE_SIZES_REAL 0x00000001 +-#define KVM_PPC_1T_SEGMENTS 0x00000002 +-#define KVM_PPC_NO_HASH 0x00000004 +- +-struct kvm_ppc_smmu_info { +- __u64 flags; +- __u32 slb_size; +- __u16 data_keys; /* # storage keys supported for data */ +- __u16 instr_keys; /* # storage keys supported for instructions */ +- struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; +-}; +- +-/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */ +-struct kvm_ppc_resize_hpt { +- __u64 flags; +- __u32 shift; +- __u32 pad; +-}; +- + #define KVMIO 0xAE + + /* machine type bits, to be used as argument to KVM_CREATE_VM */ +@@ -922,9 +686,7 @@ struct kvm_ppc_resize_hpt { + /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ + #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 + #define KVM_CAP_USER_NMI 22 +-#ifdef __KVM_HAVE_GUEST_DEBUG + #define KVM_CAP_SET_GUEST_DEBUG 23 +-#endif + #ifdef __KVM_HAVE_PIT + #define KVM_CAP_REINJECT_CONTROL 24 + #endif +@@ -1168,8 +930,6 @@ struct kvm_ppc_resize_hpt { + + #define KVM_CAP_ARM_TMM 300 + +-#ifdef KVM_CAP_IRQ_ROUTING +- + struct kvm_irq_routing_irqchip { + __u32 irqchip; + __u32 pin; +@@ -1234,42 +994,6 @@ struct kvm_irq_routing { + struct kvm_irq_routing_entry entries[]; + }; + +-#endif +- +-#ifdef KVM_CAP_MCE +-/* x86 MCE */ +-struct kvm_x86_mce { +- __u64 status; +- __u64 addr; +- __u64 misc; +- __u64 mcg_status; +- __u8 bank; +- __u8 pad1[7]; +- __u64 pad2[3]; +-}; +-#endif +- +-#ifdef 
KVM_CAP_XEN_HVM +-#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0) +-#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1) +-#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2) +-#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) +-#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) +-#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) +-#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) +-#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7) +- +-struct kvm_xen_hvm_config { +- __u32 flags; +- __u32 msr; +- __u64 blob_addr_32; +- __u64 blob_addr_64; +- __u8 blob_size_32; +- __u8 blob_size_64; +- __u8 pad2[30]; +-}; +-#endif +- + #define KVM_IRQFD_FLAG_DEASSIGN (1 << 0) + /* + * Available with KVM_CAP_IRQFD_RESAMPLE +@@ -1487,11 +1211,6 @@ struct kvm_user_data { + struct kvm_userspace_memory_region2) + + /* enable ucontrol for s390 */ +-struct kvm_s390_ucas_mapping { +- __u64 user_addr; +- __u64 vcpu_addr; +- __u64 length; +-}; + #define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping) + #define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping) + #define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long) +@@ -1690,89 +1409,6 @@ struct kvm_enc_region { + #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) + #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) + +-struct kvm_s390_pv_sec_parm { +- __u64 origin; +- __u64 length; +-}; +- +-struct kvm_s390_pv_unp { +- __u64 addr; +- __u64 size; +- __u64 tweak; +-}; +- +-enum pv_cmd_dmp_id { +- KVM_PV_DUMP_INIT, +- KVM_PV_DUMP_CONFIG_STOR_STATE, +- KVM_PV_DUMP_COMPLETE, +- KVM_PV_DUMP_CPU, +-}; +- +-struct kvm_s390_pv_dmp { +- __u64 subcmd; +- __u64 buff_addr; +- __u64 buff_len; +- __u64 gaddr; /* For dump storage state */ +- __u64 reserved[4]; +-}; +- +-enum pv_cmd_info_id { +- KVM_PV_INFO_VM, +- KVM_PV_INFO_DUMP, +-}; +- +-struct kvm_s390_pv_info_dump { +- __u64 dump_cpu_buffer_len; +- __u64 dump_config_mem_buffer_per_1m; +- __u64 dump_config_finalize_len; +-}; +- +-struct kvm_s390_pv_info_vm { +- __u64 inst_calls_list[4]; +- __u64 max_cpus; +- __u64 max_guests; +- __u64 max_guest_addr; +- __u64 feature_indication; +-}; +- +-struct kvm_s390_pv_info_header { +- __u32 id; +- __u32 len_max; +- __u32 len_written; +- __u32 reserved; +-}; +- +-struct kvm_s390_pv_info { +- struct kvm_s390_pv_info_header header; +- union { +- struct kvm_s390_pv_info_dump dump; +- struct kvm_s390_pv_info_vm vm; +- }; +-}; +- +-enum pv_cmd_id { +- KVM_PV_ENABLE, +- KVM_PV_DISABLE, +- KVM_PV_SET_SEC_PARMS, +- KVM_PV_UNPACK, +- KVM_PV_VERIFY, +- KVM_PV_PREP_RESET, +- KVM_PV_UNSHARE_ALL, +- KVM_PV_INFO, +- KVM_PV_DUMP, +- KVM_PV_ASYNC_CLEANUP_PREPARE, +- KVM_PV_ASYNC_CLEANUP_PERFORM, +-}; +- +-struct kvm_pv_cmd { +- __u32 cmd; /* Command to be executed */ +- __u16 rc; /* Ultravisor return code */ +- __u16 rrc; /* Ultravisor return reason code */ +- __u64 data; /* Data or address */ +- __u32 flags; /* flags for future extensions. 
Must be 0 for now */ +- __u32 reserved[3]; +-}; +- + /* Available with KVM_CAP_S390_PROTECTED */ + #define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd) + +@@ -1786,58 +1422,6 @@ struct kvm_pv_cmd { + #define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr) + #define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr) + +-struct kvm_xen_hvm_attr { +- __u16 type; +- __u16 pad[3]; +- union { +- __u8 long_mode; +- __u8 vector; +- __u8 runstate_update_flag; +- struct { +- __u64 gfn; +-#define KVM_XEN_INVALID_GFN ((__u64)-1) +- } shared_info; +- struct { +- __u32 send_port; +- __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */ +- __u32 flags; +-#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0) +-#define KVM_XEN_EVTCHN_UPDATE (1 << 1) +-#define KVM_XEN_EVTCHN_RESET (1 << 2) +- /* +- * Events sent by the guest are either looped back to +- * the guest itself (potentially on a different port#) +- * or signalled via an eventfd. +- */ +- union { +- struct { +- __u32 port; +- __u32 vcpu; +- __u32 priority; +- } port; +- struct { +- __u32 port; /* Zero for eventfd */ +- __s32 fd; +- } eventfd; +- __u32 padding[4]; +- } deliver; +- } evtchn; +- __u32 xen_version; +- __u64 pad[8]; +- } u; +-}; +- +- +-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ +-#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 +-#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 +-#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2 +-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +-#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 +-#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4 +-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */ +-#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5 +- + /* Per-vCPU Xen attributes */ + #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) + #define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr) +@@ -1848,157 +1432,6 @@ struct kvm_xen_hvm_attr { + #define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2) + #define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2) + +-struct kvm_xen_vcpu_attr { +- __u16 type; +- __u16 pad[3]; +- union { +- __u64 gpa; +-#define KVM_XEN_INVALID_GPA ((__u64)-1) +- __u64 pad[8]; +- struct { +- __u64 state; +- __u64 state_entry_time; +- __u64 time_running; +- __u64 time_runnable; +- __u64 time_blocked; +- __u64 time_offline; +- } runstate; +- __u32 vcpu_id; +- struct { +- __u32 port; +- __u32 priority; +- __u64 expires_ns; +- } timer; +- __u8 vector; +- } u; +-}; +- +-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ +-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0 +-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1 +-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2 +-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3 +-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4 +-#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5 +-/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ +-#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6 +-#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7 +-#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8 +- +-/* Secure Encrypted Virtualization command */ +-enum sev_cmd_id { +- /* Guest initialization commands */ +- KVM_SEV_INIT = 0, +- KVM_SEV_ES_INIT, +- /* Guest launch commands */ +- KVM_SEV_LAUNCH_START, +- KVM_SEV_LAUNCH_UPDATE_DATA, +- KVM_SEV_LAUNCH_UPDATE_VMSA, +- KVM_SEV_LAUNCH_SECRET, +- KVM_SEV_LAUNCH_MEASURE, +- KVM_SEV_LAUNCH_FINISH, +- /* Guest migration commands (outgoing) */ +- KVM_SEV_SEND_START, +- 
KVM_SEV_SEND_UPDATE_DATA, +- KVM_SEV_SEND_UPDATE_VMSA, +- KVM_SEV_SEND_FINISH, +- /* Guest migration commands (incoming) */ +- KVM_SEV_RECEIVE_START, +- KVM_SEV_RECEIVE_UPDATE_DATA, +- KVM_SEV_RECEIVE_UPDATE_VMSA, +- KVM_SEV_RECEIVE_FINISH, +- /* Guest status and debug commands */ +- KVM_SEV_GUEST_STATUS, +- KVM_SEV_DBG_DECRYPT, +- KVM_SEV_DBG_ENCRYPT, +- /* Guest certificates commands */ +- KVM_SEV_CERT_EXPORT, +- /* Attestation report */ +- KVM_SEV_GET_ATTESTATION_REPORT, +- /* Guest Migration Extension */ +- KVM_SEV_SEND_CANCEL, +- +- /* Hygon CSV batch command */ +- KVM_CSV_COMMAND_BATCH = 0x18, +- +- KVM_SEV_NR_MAX, +-}; +- +-struct kvm_sev_cmd { +- __u32 id; +- __u64 data; +- __u32 error; +- __u32 sev_fd; +-}; +- +-struct kvm_sev_launch_start { +- __u32 handle; +- __u32 policy; +- __u64 dh_uaddr; +- __u32 dh_len; +- __u64 session_uaddr; +- __u32 session_len; +-}; +- +-struct kvm_sev_launch_update_data { +- __u64 uaddr; +- __u32 len; +-}; +- +- +-struct kvm_sev_launch_secret { +- __u64 hdr_uaddr; +- __u32 hdr_len; +- __u64 guest_uaddr; +- __u32 guest_len; +- __u64 trans_uaddr; +- __u32 trans_len; +-}; +- +-struct kvm_sev_launch_measure { +- __u64 uaddr; +- __u32 len; +-}; +- +-struct kvm_sev_guest_status { +- __u32 handle; +- __u32 policy; +- __u32 state; +-}; +- +-struct kvm_sev_dbg { +- __u64 src_uaddr; +- __u64 dst_uaddr; +- __u32 len; +-}; +- +-struct kvm_sev_attestation_report { +- __u8 mnonce[16]; +- __u64 uaddr; +- __u32 len; +-}; +- +-struct kvm_sev_send_start { +- __u32 policy; +- __u64 pdh_cert_uaddr; +- __u32 pdh_cert_len; +- __u64 plat_certs_uaddr; +- __u32 plat_certs_len; +- __u64 amd_certs_uaddr; +- __u32 amd_certs_len; +- __u64 session_uaddr; +- __u32 session_len; +-}; +- +-struct kvm_sev_send_update_data { +- __u64 hdr_uaddr; +- __u32 hdr_len; +- __u64 guest_uaddr; +- __u32 guest_len; +- __u64 trans_uaddr; +- __u32 trans_len; +-}; +- + struct kvm_sev_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; +@@ -2007,24 +1440,6 @@ struct kvm_sev_send_update_vmsa { + __u32 trans_len; + }; + +-struct kvm_sev_receive_start { +- __u32 handle; +- __u32 policy; +- __u64 pdh_uaddr; +- __u32 pdh_len; +- __u64 session_uaddr; +- __u32 session_len; +-}; +- +-struct kvm_sev_receive_update_data { +- __u64 hdr_uaddr; +- __u32 hdr_len; +- __u64 guest_uaddr; +- __u32 guest_len; +- __u64 trans_uaddr; +- __u32 trans_len; +-}; +- + struct kvm_sev_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; +@@ -2117,76 +1532,6 @@ struct kvm_csv3_handle_memory { + __u32 opcode; + }; + +-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) +-#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) +-#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) +- +-struct kvm_assigned_pci_dev { +- __u32 assigned_dev_id; +- __u32 busnr; +- __u32 devfn; +- __u32 flags; +- __u32 segnr; +- union { +- __u32 reserved[11]; +- }; +-}; +- +-#define KVM_DEV_IRQ_HOST_INTX (1 << 0) +-#define KVM_DEV_IRQ_HOST_MSI (1 << 1) +-#define KVM_DEV_IRQ_HOST_MSIX (1 << 2) +- +-#define KVM_DEV_IRQ_GUEST_INTX (1 << 8) +-#define KVM_DEV_IRQ_GUEST_MSI (1 << 9) +-#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) +- +-#define KVM_DEV_IRQ_HOST_MASK 0x00ff +-#define KVM_DEV_IRQ_GUEST_MASK 0xff00 +- +-struct kvm_assigned_irq { +- __u32 assigned_dev_id; +- __u32 host_irq; /* ignored (legacy field) */ +- __u32 guest_irq; +- __u32 flags; +- union { +- __u32 reserved[12]; +- }; +-}; +- +-struct kvm_assigned_msix_nr { +- __u32 assigned_dev_id; +- __u16 entry_nr; +- __u16 padding; +-}; +- +-#define KVM_MAX_MSIX_PER_DEV 256 +-struct kvm_assigned_msix_entry { +- __u32 assigned_dev_id; +- 
__u32 gsi; +- __u16 entry; /* The index of entry in the MSI-X table */ +- __u16 padding[3]; +-}; +- +-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0) +-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1) +- +-/* Available with KVM_CAP_ARM_USER_IRQ */ +- +-/* Bits for run->s.regs.device_irq_level */ +-#define KVM_ARM_DEV_EL1_VTIMER (1 << 0) +-#define KVM_ARM_DEV_EL1_PTIMER (1 << 1) +-#define KVM_ARM_DEV_PMU (1 << 2) +- +-struct kvm_hyperv_eventfd { +- __u32 conn_id; +- __s32 fd; +- __u32 flags; +- __u32 padding[3]; +-}; +- +-#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff +-#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) +- + #define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0) + #define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1) + +@@ -2332,33 +1677,6 @@ struct kvm_stats_desc { + /* Available with KVM_CAP_S390_ZPCI_OP */ + #define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op) + +-struct kvm_s390_zpci_op { +- /* in */ +- __u32 fh; /* target device */ +- __u8 op; /* operation to perform */ +- __u8 pad[3]; +- union { +- /* for KVM_S390_ZPCIOP_REG_AEN */ +- struct { +- __u64 ibv; /* Guest addr of interrupt bit vector */ +- __u64 sb; /* Guest addr of summary bit */ +- __u32 flags; +- __u32 noi; /* Number of interrupts */ +- __u8 isc; /* Guest interrupt subclass */ +- __u8 sbo; /* Offset of guest summary bit vector */ +- __u16 pad; +- } reg_aen; +- __u64 reserved[8]; +- } u; +-}; +- +-/* types for kvm_s390_zpci_op->op */ +-#define KVM_S390_ZPCIOP_REG_AEN 0 +-#define KVM_S390_ZPCIOP_DEREG_AEN 1 +- +-/* flags for kvm_s390_zpci_op->u.reg_aen.flags */ +-#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +- + /* Available with KVM_CAP_MEMORY_ATTRIBUTES */ + #define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes) + +diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h +index bcb21339ee..c3046c6bff 100644 +--- a/linux-headers/linux/psp-sev.h ++++ b/linux-headers/linux/psp-sev.h +@@ -28,6 +28,9 @@ enum { + SEV_PEK_CERT_IMPORT, + SEV_GET_ID, /* This command is deprecated, use SEV_GET_ID2 */ + SEV_GET_ID2, ++ SNP_PLATFORM_STATUS, ++ SNP_COMMIT, ++ SNP_SET_CONFIG, + + SEV_MAX, + }; +@@ -69,6 +72,12 @@ typedef enum { + SEV_RET_RESOURCE_LIMIT, + SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, ++ SEV_RET_INVALID_PAGE_SIZE, ++ SEV_RET_INVALID_PAGE_STATE, ++ SEV_RET_INVALID_MDATA_ENTRY, ++ SEV_RET_INVALID_PAGE_OWNER, ++ SEV_RET_INVALID_PAGE_AEAD_OFLOW, ++ SEV_RET_RMP_INIT_REQUIRED, + SEV_RET_MAX, + } sev_ret_code; + +@@ -155,6 +164,56 @@ struct sev_user_data_get_id2 { + __u32 length; /* In/Out */ + } __attribute__((packed)); + ++/** ++ * struct sev_user_data_snp_status - SNP status ++ * ++ * @api_major: API major version ++ * @api_minor: API minor version ++ * @state: current platform state ++ * @is_rmp_initialized: whether RMP is initialized or not ++ * @rsvd: reserved ++ * @build_id: firmware build id for the API version ++ * @mask_chip_id: whether chip id is present in attestation reports or not ++ * @mask_chip_key: whether attestation reports are signed or not ++ * @vlek_en: VLEK (Version Loaded Endorsement Key) hashstick is loaded ++ * @rsvd1: reserved ++ * @guest_count: the number of guest currently managed by the firmware ++ * @current_tcb_version: current TCB version ++ * @reported_tcb_version: reported TCB version ++ */ ++struct sev_user_data_snp_status { ++ __u8 api_major; /* Out */ ++ __u8 api_minor; /* Out */ ++ __u8 state; /* Out */ ++ __u8 is_rmp_initialized:1; /* Out */ ++ __u8 rsvd:7; ++ __u32 build_id; /* Out */ ++ __u32 
mask_chip_id:1; /* Out */ ++ __u32 mask_chip_key:1; /* Out */ ++ __u32 vlek_en:1; /* Out */ ++ __u32 rsvd1:29; ++ __u32 guest_count; /* Out */ ++ __u64 current_tcb_version; /* Out */ ++ __u64 reported_tcb_version; /* Out */ ++} __attribute__((packed)); ++ ++/** ++ * struct sev_user_data_snp_config - system wide configuration value for SNP. ++ * ++ * @reported_tcb: the TCB version to report in the guest attestation report. ++ * @mask_chip_id: whether chip id is present in attestation reports or not ++ * @mask_chip_key: whether attestation reports are signed or not ++ * @rsvd: reserved ++ * @rsvd1: reserved ++ */ ++struct sev_user_data_snp_config { ++ __u64 reported_tcb ; /* In */ ++ __u32 mask_chip_id:1; /* In */ ++ __u32 mask_chip_key:1; /* In */ ++ __u32 rsvd:30; /* In */ ++ __u8 rsvd1[52]; ++} __attribute__((packed)); ++ + /** + * struct sev_issue_cmd - SEV ioctl parameters + * +diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h +index 649560c685..bea6973906 100644 +--- a/linux-headers/linux/vhost.h ++++ b/linux-headers/linux/vhost.h +@@ -227,4 +227,11 @@ + */ + #define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) ++ ++/* Get the queue size of a specific virtqueue. ++ * userspace set the vring index in vhost_vring_state.index ++ * kernel set the queue size in vhost_vring_state.num ++ */ ++#define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x80, \ ++ struct vhost_vring_state) + #endif +-- +2.43.5 + diff --git a/0337-cpus-vm-was-suspended.patch b/0337-cpus-vm-was-suspended.patch new file mode 100644 index 0000000..87e1710 --- /dev/null +++ b/0337-cpus-vm-was-suspended.patch @@ -0,0 +1,63 @@ +From 9c592f2b3619a8a47cbf0f26ea7b926ed9ef4c34 Mon Sep 17 00:00:00 2001 +From: Steve Sistare +Date: Wed, 3 Jan 2024 12:05:30 -0800 +Subject: [PATCH] cpus: vm_was_suspended + +commit f06f316d3e7f100cbbeaf0ea001d6332c12e7ab5 upstream + +Add a state variable to remember if a vm previously transitioned into a +suspended state. + +Signed-off-by: Steve Sistare +Reviewed-by: Peter Xu +Link: https://lore.kernel.org/r/1704312341-66640-2-git-send-email-steven.sistare@oracle.com +Signed-off-by: Peter Xu +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + include/sysemu/runstate.h | 2 ++ + system/cpus.c | 15 +++++++++++++++ + 2 files changed, 17 insertions(+) + +diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h +index c8c2bd8a61..88a67e22b0 100644 +--- a/include/sysemu/runstate.h ++++ b/include/sysemu/runstate.h +@@ -51,6 +51,8 @@ int vm_prepare_start(bool step_pending); + int vm_stop(RunState state); + int vm_stop_force_state(RunState state); + int vm_shutdown(void); ++void vm_set_suspended(bool suspended); ++bool vm_get_suspended(void); + + typedef enum WakeupReason { + /* Always keep QEMU_WAKEUP_REASON_NONE = 0 */ +diff --git a/system/cpus.c b/system/cpus.c +index cbeec13f39..67e6350207 100644 +--- a/system/cpus.c ++++ b/system/cpus.c +@@ -273,6 +273,21 @@ void cpu_interrupt(CPUState *cpu, int mask) + } + } + ++/* ++ * True if the vm was previously suspended, and has not been woken or reset. 
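++ * Written via vm_set_suspended(); later patches in this series clear
++ * it when the VM is (re)started and on system reset.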
++ */ ++static int vm_was_suspended; ++ ++void vm_set_suspended(bool suspended) ++{ ++ vm_was_suspended = suspended; ++} ++ ++bool vm_get_suspended(void) ++{ ++ return vm_was_suspended; ++} ++ + static int do_vm_stop(RunState state, bool send_stop) + { + int ret = 0; +-- +2.43.5 + diff --git a/0338-cpus-stop-vm-in-suspended-runstate.patch b/0338-cpus-stop-vm-in-suspended-runstate.patch new file mode 100644 index 0000000..3e8bcfa --- /dev/null +++ b/0338-cpus-stop-vm-in-suspended-runstate.patch @@ -0,0 +1,243 @@ +From 6c242ff0ac6f0bde07212e7ee2a08d563c16d5d6 Mon Sep 17 00:00:00 2001 +From: Steve Sistare +Date: Wed, 3 Jan 2024 12:05:31 -0800 +Subject: [PATCH] cpus: stop vm in suspended runstate + +commit b9ae473d80302519a7b89f98795a80abfea1deea upstream + +Currently, a vm in the suspended state is not completely stopped. The VCPUs +have been paused, but the cpu clock still runs, and runstate notifiers for +the transition to stopped have not been called. This causes problems for +live migration. Stale cpu timers_state is saved to the migration stream, +causing time errors in the guest when it wakes from suspend, and state that +would have been modified by runstate notifiers is wrong. + +Modify vm_stop to completely stop the vm if the current state is suspended, +transition to RUN_STATE_PAUSED, and remember that the machine was suspended. +Modify vm_start to restore the suspended state. + +This affects all callers of vm_stop and vm_start, notably, the qapi stop and +cont commands: + + old behavior: + RUN_STATE_SUSPENDED --> stop --> RUN_STATE_SUSPENDED + + new behavior: + RUN_STATE_SUSPENDED --> stop --> RUN_STATE_PAUSED + RUN_STATE_PAUSED --> cont --> RUN_STATE_SUSPENDED + +For example: + + (qemu) info status + VM status: paused (suspended) + + (qemu) stop + (qemu) info status + VM status: paused + + (qemu) system_wakeup + Error: Unable to wake up: guest is not in suspended state + + (qemu) cont + (qemu) info status + VM status: paused (suspended) + + (qemu) system_wakeup + (qemu) info status + VM status: running + +Suggested-by: Peter Xu +Signed-off-by: Steve Sistare +Reviewed-by: Peter Xu +Link: https://lore.kernel.org/r/1704312341-66640-3-git-send-email-steven.sistare@oracle.com +Signed-off-by: Peter Xu +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + include/sysemu/runstate.h | 9 +++++++++ + qapi/misc.json | 11 +++++++++-- + qapi/run-state.json | 6 +++--- + system/cpus.c | 23 +++++++++++++++-------- + system/runstate.c | 3 +++ + 5 files changed, 39 insertions(+), 13 deletions(-) + +diff --git a/include/sysemu/runstate.h b/include/sysemu/runstate.h +index 88a67e22b0..618eb491af 100644 +--- a/include/sysemu/runstate.h ++++ b/include/sysemu/runstate.h +@@ -40,6 +40,15 @@ static inline bool shutdown_caused_by_guest(ShutdownCause cause) + return cause >= SHUTDOWN_CAUSE_GUEST_SHUTDOWN; + } + ++/* ++ * In a "live" state, the vcpu clock is ticking, and the runstate notifiers ++ * think we are running. ++ */ ++static inline bool runstate_is_live(RunState state) ++{ ++ return state == RUN_STATE_RUNNING || state == RUN_STATE_SUSPENDED; ++} ++ + void vm_start(void); + + /** +diff --git a/qapi/misc.json b/qapi/misc.json +index cda2effa81..3622d98d01 100644 +--- a/qapi/misc.json ++++ b/qapi/misc.json +@@ -134,7 +134,7 @@ + ## + # @stop: + # +-# Stop all guest VCPU execution. ++# Stop guest VM execution. + # + # Since: 0.14 + # +@@ -143,6 +143,9 @@ + # the guest remains paused once migration finishes, as if the -S + # option was passed on the command line. 
+ # ++# In the "suspended" state, it will completely stop the VM and ++# cause a transition to the "paused" state. (Since 9.0) ++# + # Example: + # + # -> { "execute": "stop" } +@@ -153,7 +156,7 @@ + ## + # @cont: + # +-# Resume guest VCPU execution. ++# Resume guest VM execution. + # + # Since: 0.14 + # +@@ -165,6 +168,10 @@ + # guest starts once migration finishes, removing the effect of the + # -S command line option if it was passed. + # ++# If the VM was previously suspended, and not been reset or woken, ++# this command will transition back to the "suspended" state. ++# (Since 9.0) ++# + # Example: + # + # -> { "execute": "cont" } +diff --git a/qapi/run-state.json b/qapi/run-state.json +index f216ba54ec..ca05502e0a 100644 +--- a/qapi/run-state.json ++++ b/qapi/run-state.json +@@ -102,7 +102,7 @@ + ## + # @StatusInfo: + # +-# Information about VCPU run state ++# Information about VM run state + # + # @running: true if all VCPUs are runnable, false if not runnable + # +@@ -130,9 +130,9 @@ + ## + # @query-status: + # +-# Query the run status of all VCPUs ++# Query the run status of the VM + # +-# Returns: @StatusInfo reflecting all VCPUs ++# Returns: @StatusInfo reflecting the VM + # + # Since: 0.14 + # +diff --git a/system/cpus.c b/system/cpus.c +index 67e6350207..061d1509ee 100644 +--- a/system/cpus.c ++++ b/system/cpus.c +@@ -291,11 +291,15 @@ bool vm_get_suspended(void) + static int do_vm_stop(RunState state, bool send_stop) + { + int ret = 0; ++ RunState oldstate = runstate_get(); + +- if (runstate_is_running()) { ++ if (runstate_is_live(oldstate)) { ++ vm_was_suspended = (oldstate == RUN_STATE_SUSPENDED); + runstate_set(state); + cpu_disable_ticks(); +- pause_all_vcpus(); ++ if (oldstate == RUN_STATE_RUNNING) { ++ pause_all_vcpus(); ++ } + vm_state_notify(0, state); + if (send_stop) { + qapi_event_send_stop(); +@@ -708,11 +712,13 @@ int vm_stop(RunState state) + + /** + * Prepare for (re)starting the VM. +- * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already +- * running or in case of an error condition), 0 otherwise. ++ * Returns 0 if the vCPUs should be restarted, -1 on an error condition, ++ * and 1 otherwise. + */ + int vm_prepare_start(bool step_pending) + { ++ int ret = vm_was_suspended ? 1 : 0; ++ RunState state = vm_was_suspended ? 
RUN_STATE_SUSPENDED : RUN_STATE_RUNNING; + RunState requested; + + qemu_vmstop_requested(&requested); +@@ -743,9 +749,10 @@ int vm_prepare_start(bool step_pending) + qapi_event_send_resume(); + + cpu_enable_ticks(); +- runstate_set(RUN_STATE_RUNNING); +- vm_state_notify(1, RUN_STATE_RUNNING); +- return 0; ++ runstate_set(state); ++ vm_state_notify(1, state); ++ vm_was_suspended = false; ++ return ret; + } + + void vm_start(void) +@@ -759,7 +766,7 @@ void vm_start(void) + current state is forgotten forever */ + int vm_stop_force_state(RunState state) + { +- if (runstate_is_running()) { ++ if (runstate_is_live(runstate_get())) { + return vm_stop(state); + } else { + int ret; +diff --git a/system/runstate.c b/system/runstate.c +index 365f2f44b9..0465505447 100644 +--- a/system/runstate.c ++++ b/system/runstate.c +@@ -108,6 +108,7 @@ static const RunStateTransition runstate_transitions_def[] = { + { RUN_STATE_PAUSED, RUN_STATE_POSTMIGRATE }, + { RUN_STATE_PAUSED, RUN_STATE_PRELAUNCH }, + { RUN_STATE_PAUSED, RUN_STATE_COLO}, ++ { RUN_STATE_PAUSED, RUN_STATE_SUSPENDED}, + + { RUN_STATE_POSTMIGRATE, RUN_STATE_RUNNING }, + { RUN_STATE_POSTMIGRATE, RUN_STATE_FINISH_MIGRATE }, +@@ -161,6 +162,7 @@ static const RunStateTransition runstate_transitions_def[] = { + { RUN_STATE_SUSPENDED, RUN_STATE_FINISH_MIGRATE }, + { RUN_STATE_SUSPENDED, RUN_STATE_PRELAUNCH }, + { RUN_STATE_SUSPENDED, RUN_STATE_COLO}, ++ { RUN_STATE_SUSPENDED, RUN_STATE_PAUSED}, + + { RUN_STATE_WATCHDOG, RUN_STATE_RUNNING }, + { RUN_STATE_WATCHDOG, RUN_STATE_FINISH_MIGRATE }, +@@ -506,6 +508,7 @@ void qemu_system_reset(ShutdownCause reason) + cpu_synchronize_all_post_reset(); + + cpus_control_post_system_reset(); ++ vm_set_suspended(false); + } + + /* +-- +2.43.5 + diff --git a/0339-runstate-skip-initial-cpu-reset-if-reset-is-not-actu.patch b/0339-runstate-skip-initial-cpu-reset-if-reset-is-not-actu.patch new file mode 100644 index 0000000..1db3650 --- /dev/null +++ b/0339-runstate-skip-initial-cpu-reset-if-reset-is-not-actu.patch @@ -0,0 +1,63 @@ +From a15277902f2e1486f986211fb5a8d2e67afaf008 Mon Sep 17 00:00:00 2001 +From: Paolo Bonzini +Date: Mon, 18 Mar 2024 17:45:56 -0400 +Subject: [PATCH] runstate: skip initial CPU reset if reset is not actually + possible +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 08b2d15cdd0d3fbbe37ce23bf192b770db3a7539 upstream + +Right now, the system reset is concluded by a call to +cpu_synchronize_all_post_reset() in order to sync any changes +that the machine reset callback applied to the CPU state. + +However, for VMs with encrypted state such as SEV-ES guests (currently +the only case of guests with non-resettable CPUs) this cannot be done, +because guest state has already been finalized by machine-init-done notifiers. +cpu_synchronize_all_post_reset() does nothing on these guests, and actually +we would like to make it fail if called once guest has been encrypted. +So, assume that boards that support non-resettable CPUs do not touch +CPU state and that all such setup is done before, at the time of +cpu_synchronize_all_post_init(). 
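+
+As an illustration only (not part of this change), a board reset hook of
+the kind that does touch CPU state, and therefore depends on
+cpu_synchronize_all_post_reset(), looks roughly like the sketch below;
+"myboard" and MYBOARD_FIRMWARE_ENTRY are made-up names, and the usual
+QEMU machine/reset/cpu headers are assumed:
+
+    static void myboard_machine_reset(MachineState *ms, ShutdownCause reason)
+    {
+        /* Reset all devices on the board first. */
+        qemu_devices_reset(reason);
+        /*
+         * Point the boot CPU at the firmware entry point.  This vCPU
+         * register write is precisely the kind of state that
+         * cpu_synchronize_all_post_reset() must push back into KVM,
+         * and that an encrypted guest cannot accept once its state
+         * has been finalized.
+         */
+        cpu_set_pc(first_cpu, MYBOARD_FIRMWARE_ENTRY);
+    }
+
+Boards for confidential guests must not touch CPU state this way, which
+is what makes skipping the post-reset synchronization safe for them.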
+ +Reviewed-by: Philippe Mathieu-Daudé +Signed-off-by: Paolo Bonzini +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + system/runstate.c | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +diff --git a/system/runstate.c b/system/runstate.c +index 0465505447..cbdac6597a 100644 +--- a/system/runstate.c ++++ b/system/runstate.c +@@ -505,9 +505,22 @@ void qemu_system_reset(ShutdownCause reason) + default: + qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); + } +- cpu_synchronize_all_post_reset(); + + cpus_control_post_system_reset(); ++ ++ /* ++ * Some boards use the machine reset callback to point CPUs to the firmware ++ * entry point. Assume that this is not the case for boards that support ++ * non-resettable CPUs (currently used only for confidential guests), in ++ * which case cpu_synchronize_all_post_init() is enough because ++ * it does _more_ than cpu_synchronize_all_post_reset(). ++ */ ++ if (cpus_are_resettable()) { ++ cpu_synchronize_all_post_reset(); ++ } else { ++ assert(runstate_check(RUN_STATE_PRELAUNCH)); ++ } ++ + vm_set_suspended(false); + } + +-- +2.43.5 + diff --git a/0340-migration-prevent-migration-when-vm-has-poisoned-mem.patch b/0340-migration-prevent-migration-when-vm-has-poisoned-mem.patch new file mode 100644 index 0000000..53f0a83 --- /dev/null +++ b/0340-migration-prevent-migration-when-vm-has-poisoned-mem.patch @@ -0,0 +1,113 @@ +From 6cba231a7e727f8e86fbfd3a8ffe97b6859ef3fe Mon Sep 17 00:00:00 2001 +From: William Roche +Date: Tue, 30 Jan 2024 19:06:40 +0000 +Subject: [PATCH] migration: prevent migration when VM has poisoned memory + +commit 06152b89db64bc5ccec1e54576706ba891654df9 upstream + +A memory page poisoned from the hypervisor level is no longer readable. +The migration of a VM will crash Qemu when it tries to read the +memory address space and stumbles on the poisoned page with a similar +stack trace: + +Program terminated with signal SIGBUS, Bus error. + +To avoid this VM crash during the migration, prevent the migration +when a known hardware poison exists on the VM. 
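+
+With this check in place, a management client gets a clean QMP error
+instead of a crashed QEMU; a sketch of the exchange (the URI is
+illustrative):
+
+    -> { "execute": "migrate",
+         "arguments": { "uri": "tcp:192.168.1.2:4444" } }
+    <- { "error": { "class": "GenericError", "desc": "Can't migrate this vm with hardware poisoned memory, please reboot the vm and try again" } }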
+ +Signed-off-by: William Roche +Link: https://lore.kernel.org/r/20240130190640.139364-2-william.roche@oracle.com +Signed-off-by: Peter Xu +Signed-off-by: priyanka-mani +Signed-off-by: mohanasv +--- + accel/kvm/kvm-all.c | 10 ++++++++++ + accel/stubs/kvm-stub.c | 5 +++++ + include/sysemu/kvm.h | 6 ++++++ + migration/migration.c | 7 +++++++ + 4 files changed, 28 insertions(+) + +diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c +index 0fddb20921..aaee470b8a 100644 +--- a/accel/kvm/kvm-all.c ++++ b/accel/kvm/kvm-all.c +@@ -1168,6 +1168,11 @@ int kvm_vm_check_extension(KVMState *s, unsigned int extension) + return ret; + } + ++/* ++ * We track the poisoned pages to be able to: ++ * - replace them on VM reset ++ * - block a migration for a VM with a poisoned page ++ */ + typedef struct HWPoisonPage { + ram_addr_t ram_addr; + QLIST_ENTRY(HWPoisonPage) list; +@@ -1201,6 +1206,11 @@ void kvm_hwpoison_page_add(ram_addr_t ram_addr) + QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); + } + ++bool kvm_hwpoisoned_mem(void) ++{ ++ return !QLIST_EMPTY(&hwpoison_page_list); ++} ++ + static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size) + { + #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN +diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c +index b90d516755..c2e7c2f660 100644 +--- a/accel/stubs/kvm-stub.c ++++ b/accel/stubs/kvm-stub.c +@@ -127,3 +127,8 @@ uint32_t kvm_dirty_ring_size(void) + { + return 0; + } ++ ++bool kvm_hwpoisoned_mem(void) ++{ ++ return false; ++} +diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h +index 1f7d36f4d3..fa1e42f085 100644 +--- a/include/sysemu/kvm.h ++++ b/include/sysemu/kvm.h +@@ -589,4 +589,10 @@ uint32_t kvm_dirty_ring_size(void); + + int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, + struct kvm_numa_info *numa_info); ++ ++/** ++ * kvm_hwpoisoned_mem - indicate if there is any hwpoisoned page ++ * reported for the VM. 
++ */ ++bool kvm_hwpoisoned_mem(void); + #endif +diff --git a/migration/migration.c b/migration/migration.c +index 982ab85f04..8b1b47836f 100644 +--- a/migration/migration.c ++++ b/migration/migration.c +@@ -67,6 +67,7 @@ + #include "options.h" + #include "sysemu/dirtylimit.h" + #include "qemu/sockets.h" ++#include "sysemu/kvm.h" + + static NotifierList migration_state_notifiers = + NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); +@@ -1900,6 +1901,12 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, + return false; + } + ++ if (kvm_hwpoisoned_mem()) { ++ error_setg(errp, "Can't migrate this vm with hardware poisoned memory, " ++ "please reboot the vm and try again"); ++ return false; ++ } ++ + if (migration_is_blocked(errp)) { + return false; + } +-- +2.43.5 + diff --git a/0341-scripts-update-linux-header-sh-be-more-src-tree-frie.patch b/0341-scripts-update-linux-header-sh-be-more-src-tree-frie.patch new file mode 100644 index 0000000..e925aef --- /dev/null +++ b/0341-scripts-update-linux-header-sh-be-more-src-tree-frie.patch @@ -0,0 +1,181 @@ +From 63419b20345b2d2d69efad4d7412c2e84db24cae Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Alex=20Benn=C3=A9e?= +Date: Tue, 14 May 2024 18:42:44 +0100 +Subject: [PATCH] scripts/update-linux-header.sh: be more src tree friendly +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit b51ddd937f11f76614d4b36d14d8778df242661c upstream + +Running "install_headers" in the Linux source tree is fairly +unfriendly as out-of-tree builds will start complaining about the +kernel source being non-pristine. As we have a temporary directory for +the install we should also do the build step here. So now we have: + + $tmpdir/ + $blddir/ + $hdrdir/ + +Reviewed-by: Pierrick Bouvier +Reviewed-by: Michael S. Tsirkin +Signed-off-by: Alex Bennée +Message-Id: <20240514174253.694591-3-alex.bennee@linaro.org> +--- + scripts/update-linux-headers.sh | 81 +++++++++++++++++---------------- + 1 file changed, 43 insertions(+), 38 deletions(-) + +diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh +index 4153a00957..746573d80c 100755 +--- a/scripts/update-linux-headers.sh ++++ b/scripts/update-linux-headers.sh +@@ -27,6 +27,8 @@ + # types like "__u64". This work is done in the cp_portable function. 
+ + tmpdir=$(mktemp -d) ++hdrdir="$tmpdir/headers" ++blddir="$tmpdir/build" + linux="$1" + output="$2" + +@@ -111,58 +113,61 @@ for arch in $ARCHLIST; do + arch_var=ARCH + fi + +- make -C "$linux" INSTALL_HDR_PATH="$tmpdir" $arch_var=$arch headers_install ++ make -C "$linux" O="$blddir" INSTALL_HDR_PATH="$hdrdir" $arch_var=$arch headers_install + + rm -rf "$output/linux-headers/asm-$arch" + mkdir -p "$output/linux-headers/asm-$arch" + for header in kvm.h unistd.h bitsperlong.h mman.h; do +- cp "$tmpdir/include/asm/$header" "$output/linux-headers/asm-$arch" ++ cp "$hdrdir/include/asm/$header" "$output/linux-headers/asm-$arch" + done + + if [ $arch = mips ]; then +- cp "$tmpdir/include/asm/sgidefs.h" "$output/linux-headers/asm-mips/" +- cp "$tmpdir/include/asm/unistd_o32.h" "$output/linux-headers/asm-mips/" +- cp "$tmpdir/include/asm/unistd_n32.h" "$output/linux-headers/asm-mips/" +- cp "$tmpdir/include/asm/unistd_n64.h" "$output/linux-headers/asm-mips/" ++ cp "$hdrdir/include/asm/sgidefs.h" "$output/linux-headers/asm-mips/" ++ cp "$hdrdir/include/asm/unistd_o32.h" "$output/linux-headers/asm-mips/" ++ cp "$hdrdir/include/asm/unistd_n32.h" "$output/linux-headers/asm-mips/" ++ cp "$hdrdir/include/asm/unistd_n64.h" "$output/linux-headers/asm-mips/" + fi + if [ $arch = powerpc ]; then +- cp "$tmpdir/include/asm/unistd_32.h" "$output/linux-headers/asm-powerpc/" +- cp "$tmpdir/include/asm/unistd_64.h" "$output/linux-headers/asm-powerpc/" ++ cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-powerpc/" ++ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-powerpc/" + fi + + rm -rf "$output/include/standard-headers/asm-$arch" + mkdir -p "$output/include/standard-headers/asm-$arch" + if [ $arch = s390 ]; then +- cp_portable "$tmpdir/include/asm/virtio-ccw.h" "$output/include/standard-headers/asm-s390/" +- cp "$tmpdir/include/asm/unistd_32.h" "$output/linux-headers/asm-s390/" +- cp "$tmpdir/include/asm/unistd_64.h" "$output/linux-headers/asm-s390/" ++ cp_portable "$hdrdir/include/asm/virtio-ccw.h" "$output/include/standard-headers/asm-s390/" ++ cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-s390/" ++ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-s390/" + fi + if [ $arch = arm ]; then +- cp "$tmpdir/include/asm/unistd-eabi.h" "$output/linux-headers/asm-arm/" +- cp "$tmpdir/include/asm/unistd-oabi.h" "$output/linux-headers/asm-arm/" +- cp "$tmpdir/include/asm/unistd-common.h" "$output/linux-headers/asm-arm/" ++ cp "$hdrdir/include/asm/unistd-eabi.h" "$output/linux-headers/asm-arm/" ++ cp "$hdrdir/include/asm/unistd-oabi.h" "$output/linux-headers/asm-arm/" ++ cp "$hdrdir/include/asm/unistd-common.h" "$output/linux-headers/asm-arm/" + fi + if [ $arch = arm64 ]; then +- cp "$tmpdir/include/asm/sve_context.h" "$output/linux-headers/asm-arm64/" ++ cp "$hdrdir/include/asm/sve_context.h" "$output/linux-headers/asm-arm64/" + fi + if [ $arch = x86 ]; then +- cp "$tmpdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/" +- cp "$tmpdir/include/asm/unistd_x32.h" "$output/linux-headers/asm-x86/" +- cp "$tmpdir/include/asm/unistd_64.h" "$output/linux-headers/asm-x86/" +- cp_portable "$tmpdir/include/asm/kvm_para.h" "$output/include/standard-headers/asm-$arch" ++ cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-x86/" ++ cp "$hdrdir/include/asm/unistd_x32.h" "$output/linux-headers/asm-x86/" ++ cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-x86/" ++ cp_portable "$hdrdir/include/asm/kvm_para.h" 
"$output/include/standard-headers/asm-$arch" + # Remove everything except the macros from bootparam.h avoiding the + # unnecessary import of several video/ist/etc headers + sed -e '/__ASSEMBLY__/,/__ASSEMBLY__/d' \ +- "$tmpdir/include/asm/bootparam.h" > "$tmpdir/bootparam.h" +- cp_portable "$tmpdir/bootparam.h" \ ++ "$hdrdir/include/asm/bootparam.h" > "$hdrdir/bootparam.h" ++ cp_portable "$hdrdir/bootparam.h" \ + "$output/include/standard-headers/asm-$arch" +- cp_portable "$tmpdir/include/asm/setup_data.h" \ ++ cp_portable "$hdrdir/include/asm/setup_data.h" \ + "$output/standard-headers/asm-x86" + fi + if [ $arch = loongarch ]; then + cp "$hdrdir/include/asm/kvm_para.h" "$output/linux-headers/asm-loongarch/" + cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-loongarch/" + fi ++ if [ $arch = riscv ]; then ++ cp "$hdrdir/include/asm/ptrace.h" "$output/linux-headers/asm-riscv/" ++ fi + done + arch= + +@@ -171,13 +176,13 @@ mkdir -p "$output/linux-headers/linux" + for header in const.h stddef.h kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \ + psci.h psp-sev.h userfaultfd.h memfd.h mman.h nvme_ioctl.h \ + vduse.h iommufd.h bits.h; do +- cp "$tmpdir/include/linux/$header" "$output/linux-headers/linux" ++ cp "$hdrdir/include/linux/$header" "$output/linux-headers/linux" + done + + rm -rf "$output/linux-headers/asm-generic" + mkdir -p "$output/linux-headers/asm-generic" + for header in unistd.h bitsperlong.h mman-common.h mman.h hugetlb_encode.h; do +- cp "$tmpdir/include/asm-generic/$header" "$output/linux-headers/asm-generic" ++ cp "$hdrdir/include/asm-generic/$header" "$output/linux-headers/asm-generic" + done + + if [ -L "$linux/source" ]; then +@@ -212,23 +217,23 @@ EOF + + rm -rf "$output/include/standard-headers/linux" + mkdir -p "$output/include/standard-headers/linux" +-for i in "$tmpdir"/include/linux/*virtio*.h \ +- "$tmpdir/include/linux/qemu_fw_cfg.h" \ +- "$tmpdir/include/linux/fuse.h" \ +- "$tmpdir/include/linux/input.h" \ +- "$tmpdir/include/linux/input-event-codes.h" \ +- "$tmpdir/include/linux/udmabuf.h" \ +- "$tmpdir/include/linux/pci_regs.h" \ +- "$tmpdir/include/linux/ethtool.h" \ +- "$tmpdir/include/linux/const.h" \ +- "$tmpdir/include/linux/kernel.h" \ +- "$tmpdir/include/linux/vhost_types.h" \ +- "$tmpdir/include/linux/sysinfo.h" \ +- "$tmpdir/include/misc/pvpanic.h"; do ++for i in "$hdrdir"/include/linux/*virtio*.h \ ++ "$hdrdir/include/linux/qemu_fw_cfg.h" \ ++ "$hdrdir/include/linux/fuse.h" \ ++ "$hdrdir/include/linux/input.h" \ ++ "$hdrdir/include/linux/input-event-codes.h" \ ++ "$hdrdir/include/linux/udmabuf.h" \ ++ "$hdrdir/include/linux/pci_regs.h" \ ++ "$hdrdir/include/linux/ethtool.h" \ ++ "$hdrdir/include/linux/const.h" \ ++ "$hdrdir/include/linux/kernel.h" \ ++ "$hdrdir/include/linux/vhost_types.h" \ ++ "$hdrdir/include/linux/sysinfo.h" \ ++ "$hdrdir/include/misc/pvpanic.h"; do + cp_portable "$i" "$output/include/standard-headers/linux" + done + mkdir -p "$output/include/standard-headers/drm" +-cp_portable "$tmpdir/include/drm/drm_fourcc.h" \ ++cp_portable "$hdrdir/include/drm/drm_fourcc.h" \ + "$output/include/standard-headers/drm" + + rm -rf "$output/include/standard-headers/drivers/infiniband/hw/vmw_pvrdma" +-- +2.43.5 + diff --git a/0342-scripts-update-linux-headers-sh-remove-temporary-dir.patch b/0342-scripts-update-linux-headers-sh-remove-temporary-dir.patch new file mode 100644 index 0000000..5eccc1c --- /dev/null +++ b/0342-scripts-update-linux-headers-sh-remove-temporary-dir.patch @@ -0,0 +1,34 @@ +From 
2a406ef928b7ed9f40217396f99be24ec9a5009e Mon Sep 17 00:00:00 2001
+From: Thomas Huth
+Date: Mon, 27 May 2024 08:02:43 +0200
+Subject: [PATCH] scripts/update-linux-headers.sh: Remove temporary directory
+ inbetween
+
+We are reusing the same temporary directory for installing the headers
+of all targets, so there could be stale files here when switching from
+one target to another. Make sure to delete the folder before installing
+a new set of target headers into it.
+
+Message-ID: <20240527060243.12647-1-thuth@redhat.com>
+Reviewed-by: Michael S. Tsirkin
+Acked-by: Cornelia Huck
+Signed-off-by: Thomas Huth
+---
+ scripts/update-linux-headers.sh | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
+index 746573d80c..2c9b787740 100755
+--- a/scripts/update-linux-headers.sh
++++ b/scripts/update-linux-headers.sh
+@@ -113,6 +113,7 @@ for arch in $ARCHLIST; do
+ arch_var=ARCH
+ fi
+
++ rm -rf "$hdrdir"
+ make -C "$linux" O="$blddir" INSTALL_HDR_PATH="$hdrdir" $arch_var=$arch headers_install
+
+ rm -rf "$output/linux-headers/asm-$arch"
+--
+2.43.5
+
diff --git a/0343-scripts-update-linux-headers-sh-fix-the-path-of-setu.patch b/0343-scripts-update-linux-headers-sh-fix-the-path-of-setu.patch
new file mode 100644
index 0000000..61a657b
--- /dev/null
+++ b/0343-scripts-update-linux-headers-sh-fix-the-path-of-setu.patch
@@ -0,0 +1,36 @@
+From 5c975b3bafda772be20b21b121706503ab696532 Mon Sep 17 00:00:00 2001
+From: Thomas Huth
+Date: Mon, 27 May 2024 08:01:26 +0200
+Subject: [PATCH] scripts/update-linux-headers.sh: Fix the path of setup_data.h
+
+When running the update-linux-headers.sh script, it currently fails with:
+
+scripts/update-linux-headers.sh: line 73: .../qemu/standard-headers/asm-x86/setup_data.h: No such file or directory
+
+The "include" folder is obviously missing here - no clue how this could
+have worked before?
+
+Fixes: 66210a1a30 ("scripts/update-linux-headers: Add setup_data.h to import list")
+Message-ID: <20240527060126.12578-1-thuth@redhat.com>
+Reviewed-by: Cornelia Huck
+Signed-off-by: Thomas Huth
+---
+ scripts/update-linux-headers.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh
+index 2c9b787740..895a2c1722 100755
+--- a/scripts/update-linux-headers.sh
++++ b/scripts/update-linux-headers.sh
+@@ -160,7 +160,7 @@ for arch in $ARCHLIST; do
+ cp_portable "$hdrdir/bootparam.h" \
+ "$output/include/standard-headers/asm-$arch"
+ cp_portable "$hdrdir/include/asm/setup_data.h" \
+- "$output/standard-headers/asm-x86"
++ "$output/include/standard-headers/asm-x86"
+ fi
+ if [ $arch = loongarch ]; then
+ cp "$hdrdir/include/asm/kvm_para.h" "$output/linux-headers/asm-loongarch/"
+--
+2.43.5
+
diff --git a/0344-util-add-interfaces-to-read-midr-on-aarch64.patch b/0344-util-add-interfaces-to-read-midr-on-aarch64.patch
new file mode 100644
index 0000000..f2dd1c3
--- /dev/null
+++ b/0344-util-add-interfaces-to-read-midr-on-aarch64.patch
@@ -0,0 +1,171 @@
+From 7827ed7eb6a2e0aeaae9ee28bb0d9ee54f5eaf3d Mon Sep 17 00:00:00 2001
+From: Peng Mengguang
+Date: Fri, 10 Jan 2025 15:47:42 -0500
+Subject: [PATCH] util: add interfaces to read midr on aarch64
+
+Add interfaces to identify the CPU platform.
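+
+A worked example of the decoding these helpers build on, using the macros
+added below (standalone sketch, assuming the new header is on the include
+path):
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include "qemu/aarch64-cpuid.h"
+
+    int main(void)
+    {
+        uint64_t midr = 0x701f6622;    /* an FTC662-style MIDR value */
+
+        /* prints: implementer 0x70 part 0x662 variant 1 revision 2 */
+        printf("implementer 0x%x part 0x%x variant %u revision %u\n",
+               (unsigned)MIDR_IMPLEMENTOR(midr),
+               (unsigned)MIDR_PARTNUM(midr),
+               (unsigned)MIDR_VARIANT(midr),
+               (unsigned)MIDR_REVISION(midr));
+        return 0;
+    }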
+ +Signed-off-by: Peng Mengguang +--- + include/qemu/aarch64-cpuid.h | 51 ++++++++++++++++++++++++ + util/aarch64-cpuid.c | 77 ++++++++++++++++++++++++++++++++++++ + util/meson.build | 1 + + 3 files changed, 129 insertions(+) + create mode 100644 include/qemu/aarch64-cpuid.h + create mode 100644 util/aarch64-cpuid.c + +diff --git a/include/qemu/aarch64-cpuid.h b/include/qemu/aarch64-cpuid.h +new file mode 100644 +index 0000000000..ec68e6d575 +--- /dev/null ++++ b/include/qemu/aarch64-cpuid.h +@@ -0,0 +1,51 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * aarch64-cpuid.h: Macros to identify the MIDR of aarch64. ++ * ++ * This work is licensed under the terms of the GNU GPL, version 2 or later. ++ * See the COPYING file in the top-level directory. ++ */ ++ ++#ifndef QEMU_AARCH64_CPUID_H ++#define QEMU_AARCH64_CPUID_H ++ ++#define MIDR_REVISION_MASK 0xf ++#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK) ++#define MIDR_PARTNUM_SHIFT 4 ++#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT) ++#define MIDR_PARTNUM(midr) \ ++ (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT) ++#define MIDR_ARCHITECTURE_SHIFT 16 ++#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT) ++#define MIDR_ARCHITECTURE(midr) \ ++ (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT) ++#define MIDR_VARIANT_SHIFT 20 ++#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT) ++#define MIDR_VARIANT(midr) \ ++ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT) ++#define MIDR_IMPLEMENTOR_SHIFT 24 ++#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT) ++#define MIDR_IMPLEMENTOR(midr) \ ++ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT) ++#define MIDR_CPU_MODEL(imp, partnum) \ ++ (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \ ++ (0xf << MIDR_ARCHITECTURE_SHIFT) | \ ++ ((partnum) << MIDR_PARTNUM_SHIFT)) ++ ++#define MIDR_CPU_VAR_REV(var, rev) \ ++ (((var) << MIDR_VARIANT_SHIFT) | (rev)) ++ ++#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ ++ MIDR_ARCHITECTURE_MASK) ++ ++#define ARM_CPU_IMP_PHYTIUM 0x70 ++#define PHYTIUM_CPU_PART_FTC662 0x662 ++#define PHYTIUM_CPU_PART_FTC663 0x663 ++#define PHYTIUM_CPU_PART_FTC862 0x862 ++ ++uint64_t qemu_read_cpuid_id(void); ++uint8_t qemu_read_cpuid_implementor(void); ++uint16_t qemu_read_cpuid_part_number(void); ++bool is_phytium_cpu(void); ++ ++#endif +diff --git a/util/aarch64-cpuid.c b/util/aarch64-cpuid.c +new file mode 100644 +index 0000000000..568f28b283 +--- /dev/null ++++ b/util/aarch64-cpuid.c +@@ -0,0 +1,77 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Dealing with arm cpu identification information. ++ * ++ * Copyright (C) 2024 Phytium, Inc. ++ * ++ * Authors: ++ * Peng Meng Guang ++ * ++ * This work is licensed under the terms of the GNU LGPL, version 2.1 ++ * or later. See the COPYING.LIB file in the top-level directory. 
++ */
++
++#include 
++#include "qemu/osdep.h"
++#include "qemu/cutils.h"
++#include "qemu/aarch64-cpuid.h"
++
++#if defined(__aarch64__)
++uint64_t qemu_read_cpuid_id(void)
++{
++#ifdef CONFIG_LINUX
++ const char *file = "/sys/devices/system/cpu/cpu0/regs/identification/midr_el1";
++ char *buf;
++ uint64_t midr = 0;
++
++#define BUF_SIZE 32
++ buf = g_malloc0(BUF_SIZE);
++ if (!buf) {
++ return 0;
++ }
++
++ if (!g_file_get_contents(file, &buf, 0, NULL)) {
++ goto out;
++ }
++
++ if (qemu_strtoul(buf, NULL, 0, &midr) < 0) {
++ goto out;
++ }
++
++out:
++ g_free(buf);
++
++ return midr;
++#else
++ return 0;
++#endif
++}
++
++uint8_t qemu_read_cpuid_implementor(void)
++{
++#ifdef CONFIG_LINUX
++ uint64_t aarch64_midr = qemu_read_cpuid_id();
++
++ return MIDR_IMPLEMENTOR(aarch64_midr);
++#else
++ return 0;
++#endif
++}
++
++uint16_t qemu_read_cpuid_part_number(void)
++{
++#ifdef CONFIG_LINUX
++ uint64_t aarch64_midr = qemu_read_cpuid_id();
++
++ return MIDR_PARTNUM(aarch64_midr);
++#else
++ return 0;
++#endif
++}
++
++bool is_phytium_cpu(void)
++{
++ return qemu_read_cpuid_implementor() == ARM_CPU_IMP_PHYTIUM;
++}
++
++#endif
+diff --git a/util/meson.build b/util/meson.build
+index c2322ef6e7..5ca44750da 100644
+--- a/util/meson.build
++++ b/util/meson.build
+@@ -63,6 +63,7 @@ util_ss.add(files('int128.c'))
+ util_ss.add(files('memalign.c'))
+ util_ss.add(files('interval-tree.c'))
+ util_ss.add(files('lockcnt.c'))
++util_ss.add(files('aarch64-cpuid.c'))
+
+ if have_user
+ util_ss.add(files('selfmap.c'))
+--
+2.43.5
+
diff --git a/0345-cpu-add-phytium-v-cpu-support.patch b/0345-cpu-add-phytium-v-cpu-support.patch
new file mode 100644
index 0000000..33c0f1b
--- /dev/null
+++ b/0345-cpu-add-phytium-v-cpu-support.patch
@@ -0,0 +1,184 @@
+From be9e40ef448b4cd2ba906cc9189b10fcabd38b5e Mon Sep 17 00:00:00 2001
+From: Peng Mengguang
+Date: Mon, 13 Jan 2025 18:03:16 -0500
+Subject: [PATCH] cpu: add phytium-v cpu support
+
+phytium-v provides a minimal set of Phytium server features.
+This model is only used for live migration.
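+
+For example (hypothetical invocation), both the source and the
+destination host would start QEMU with the model selected explicitly, so
+that only the reduced feature set is exposed to the guest:
+
+    qemu-system-aarch64 -machine virt -accel kvm -cpu phytium-v ...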
+ +Signed-off-by: Peng Mengguang +--- + hw/arm/virt.c | 1 + + target/arm/cpu64.c | 13 ++++++ + target/arm/kvm64.c | 101 +++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 115 insertions(+) + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index 6087207f38..eaf29f5c94 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -222,6 +222,7 @@ static const char *valid_cpus[] = { + ARM_CPU_TYPE_NAME("cortex-a57"), + ARM_CPU_TYPE_NAME("host"), + ARM_CPU_TYPE_NAME("max"), ++ ARM_CPU_TYPE_NAME("phytium-v"), + }; + + static bool cpu_type_valid(const char *cpu) +diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c +index 1e9c6c85ae..b4326ce8ba 100644 +--- a/target/arm/cpu64.c ++++ b/target/arm/cpu64.c +@@ -741,10 +741,23 @@ static void aarch64_max_initfn(Object *obj) + } + } + ++static void aarch64_phytium_v_initfn(Object *obj) ++{ ++ ARMCPU *cpu = ARM_CPU(obj); ++ ++ if (kvm_enabled()) { ++ kvm_arm_set_cpu_features_from_host(cpu); ++ } else { ++ aarch64_a53_initfn(obj); ++ cpu->midr = 0x701f6622; ++ } ++} ++ + static const ARMCPUInfo aarch64_cpus[] = { + { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, + { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, + { .name = "max", .initfn = aarch64_max_initfn }, ++ { .name = "phytium-v", .initfn = aarch64_phytium_v_initfn }, + #if defined(CONFIG_KVM) || defined(CONFIG_HVF) + { .name = "host", .initfn = aarch64_host_initfn }, + #endif +diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c +index a09347254f..45493065ef 100644 +--- a/target/arm/kvm64.c ++++ b/target/arm/kvm64.c +@@ -31,6 +31,7 @@ + #include "cpu-features.h" + #include "hw/acpi/acpi.h" + #include "hw/acpi/ghes.h" ++#include "qemu/aarch64-cpuid.h" + + static bool have_guest_debug; + +@@ -553,6 +554,92 @@ static int kvm_arm_sve_set_vls(CPUState *cs) + + #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 + ++#define SYS_ID_PFR0_EL1 ARM64_SYS_REG(3, 0, 0, 1, 0) ++#define SYS_ID_PFR1_EL1 ARM64_SYS_REG(3, 0, 0, 1, 1) ++#define SYS_ID_PFR2_EL1 ARM64_SYS_REG(3, 0, 0, 3, 4) ++#define SYS_ID_DFR0_EL1 ARM64_SYS_REG(3, 0, 0, 1, 2) ++#define SYS_ID_MMFR0_EL1 ARM64_SYS_REG(3, 0, 0, 1, 4) ++#define SYS_ID_MMFR1_EL1 ARM64_SYS_REG(3, 0, 0, 1, 5) ++#define SYS_ID_MMFR2_EL1 ARM64_SYS_REG(3, 0, 0, 1, 6) ++#define SYS_ID_MMFR3_EL1 ARM64_SYS_REG(3, 0, 0, 1, 7) ++#define SYS_ID_MMFR4_EL1 ARM64_SYS_REG(3, 0, 0, 2, 6) ++#define SYS_ID_ISAR0_EL1 ARM64_SYS_REG(3, 0, 0, 2, 0) ++#define SYS_ID_ISAR1_EL1 ARM64_SYS_REG(3, 0, 0, 2, 1) ++#define SYS_ID_ISAR2_EL1 ARM64_SYS_REG(3, 0, 0, 2, 2) ++#define SYS_ID_ISAR3_EL1 ARM64_SYS_REG(3, 0, 0, 2, 3) ++#define SYS_ID_ISAR4_EL1 ARM64_SYS_REG(3, 0, 0, 2, 4) ++#define SYS_ID_ISAR5_EL1 ARM64_SYS_REG(3, 0, 0, 2, 5) ++#define SYS_ID_ISAR6_EL1 ARM64_SYS_REG(3, 0, 0, 2, 7) ++#define SYS_MVFR0_EL1 ARM64_SYS_REG(3, 0, 0, 3, 0) ++#define SYS_MVFR1_EL1 ARM64_SYS_REG(3, 0, 0, 3, 1) ++#define SYS_MVFR2_EL1 ARM64_SYS_REG(3, 0, 0, 3, 2) ++#define SYS_ID_AA64PFR0_EL1 ARM64_SYS_REG(3, 0, 0, 4, 0) ++#define SYS_ID_AA64PFR1_EL1 ARM64_SYS_REG(3, 0, 0, 4, 1) ++#define SYS_ID_AA64DFR0_EL1 ARM64_SYS_REG(3, 0, 0, 5, 0) ++#define SYS_ID_AA64ISAR0_EL1 ARM64_SYS_REG(3, 0, 0, 6, 0) ++#define SYS_ID_AA64ISAR1_EL1 ARM64_SYS_REG(3, 0, 0, 6, 1) ++#define SYS_ID_AA64MMFR0_EL1 ARM64_SYS_REG(3, 0, 0, 7, 0) ++#define SYS_ID_AA64MMFR1_EL1 ARM64_SYS_REG(3, 0, 0, 7, 1) ++#define SYS_ID_AA64MMFR2_EL1 ARM64_SYS_REG(3, 0, 0, 7, 2) ++ ++struct SysRegInfo { ++ const char *name; ++ uint64_t reg; ++ uint64_t value; ++}; ++ ++const struct SysRegInfo sys_regs_info[] = { ++ { "ID_PFR0_EL1", SYS_ID_PFR0_EL1, 0 }, ++ { 
"ID_PFR1_EL1", SYS_ID_PFR1_EL1, 0 }, ++ { "ID_PFR2_EL1", SYS_ID_PFR2_EL1, 0 }, ++ { "ID_DFR0_EL1", SYS_ID_DFR0_EL1, 0 }, ++ { "ID_MMFR0_EL1", SYS_ID_MMFR0_EL1, 0 }, ++ { "ID_MMFR1_EL1", SYS_ID_MMFR1_EL1, 0 }, ++ { "ID_MMFR2_EL1", SYS_ID_MMFR2_EL1, 0 }, ++ { "ID_MMFR3_EL1", SYS_ID_MMFR3_EL1, 0 }, ++ { "ID_MMFR4_EL1", SYS_ID_MMFR4_EL1, 0 }, ++ { "ID_ISAR0_EL1", SYS_ID_ISAR0_EL1, 0 }, ++ { "ID_ISAR1_EL1", SYS_ID_ISAR1_EL1, 0 }, ++ { "ID_ISAR2_EL1", SYS_ID_ISAR2_EL1, 0 }, ++ { "ID_ISAR3_EL1", SYS_ID_ISAR3_EL1, 0 }, ++ { "ID_ISAR4_EL1", SYS_ID_ISAR4_EL1, 0 }, ++ { "ID_ISAR5_EL1", SYS_ID_ISAR5_EL1, 0 }, ++ { "ID_ISAR6_EL1", SYS_ID_ISAR6_EL1, 0 }, ++ { "MVFR0_EL1", SYS_MVFR0_EL1, 0 }, ++ { "MVFR1_EL1", SYS_MVFR1_EL1, 0 }, ++ { "MVFR2_EL1", SYS_MVFR2_EL1, 0 }, ++ { "ID_AA64PFR0_EL1", SYS_ID_AA64PFR0_EL1, 0x01001111 }, ++ { "ID_AA64PFR1_EL1", SYS_ID_AA64PFR1_EL1, 0 }, ++ { "ID_AA64DFR0_EL1", SYS_ID_AA64DFR0_EL1, 0x10305106 }, ++ { "ID_AA64ISAR0_EL1", SYS_ID_AA64ISAR0_EL1, 0x10000 }, ++ { "ID_AA64ISAR1_EL1", SYS_ID_AA64ISAR1_EL1, 0 }, ++ { "ID_AA64MMFR0_EL1", SYS_ID_AA64MMFR0_EL1, 0x1124 }, ++ { "ID_AA64MMFR1_EL1", SYS_ID_AA64MMFR1_EL1, 0 }, ++ { "ID_AA64MMFR2_EL1", SYS_ID_AA64MMFR2_EL1, 0 }, ++}; ++ ++/* PHYTIUM : modify sys_regs for phytium-v. */ ++static int modify_arm_vcpu_regs_for_phytium_v(ARMCPU *cpu) ++{ ++ int ret = 0; ++ CPUState *cs = CPU(cpu); ++ Object *obj = OBJECT(cpu); ++ ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); ++ ++ if (NULL != acc->info && 0 == strcmp(acc->info->name, "phytium-v")) { ++ uint64_t val = 0; ++ for (int i = 0; i < ARRAY_SIZE(sys_regs_info); i++) { ++ val = sys_regs_info[i].value; ++ ret = kvm_set_one_reg(cs, sys_regs_info[i].reg, &val); ++ if (ret) { ++ break; ++ } ++ } ++ } ++ ++ return ret; ++} ++ + int kvm_arch_init_vcpu(CPUState *cs) + { + int ret; +@@ -604,6 +691,20 @@ int kvm_arch_init_vcpu(CPUState *cs) + return ret; + } + ++ /* ++ * For Phytium only, we'll modify registers' value like ID_AA64ISAR0_EL1 ++ * before the virtual machine used for live-migration is started to ensure ++ * that the virtual machine is successfully migrated between different ++ * models of Phytium servers. ++ * Of course, the above will only happen if the CPU model "phytium-v" ++ * is selected during live migration. ++ */ ++ if (is_phytium_cpu()) { ++ ret = modify_arm_vcpu_regs_for_phytium_v(cpu); ++ if (ret < 0) ++ return ret; ++ } ++ + if (cpu_isar_feature(aa64_sve, cpu)) { + ret = kvm_arm_sve_set_vls(cs); + if (ret) { +-- +2.43.5 + diff --git a/0346-target-arm-support-vm-live-migration-between-phytium.patch b/0346-target-arm-support-vm-live-migration-between-phytium.patch new file mode 100644 index 0000000..6eeb860 --- /dev/null +++ b/0346-target-arm-support-vm-live-migration-between-phytium.patch @@ -0,0 +1,154 @@ +From 8dcf74338705adc39055f4d55d9cbccbf54578e0 Mon Sep 17 00:00:00 2001 +From: Peng Mengguang +Date: Tue, 14 Jan 2025 10:14:10 -0500 +Subject: [PATCH] target/arm: support vm live migration between phytium and + kp920 + +Support for migration between phytium and kp920 +when using phytium-v cpu model. +Support for migration from low to high versions +when using host-passthrough mode. 
+ +Signed-off-by: Peng Mengguang +--- + hw/intc/arm_gicv3_common.c | 5 +++ + hw/intc/arm_gicv3_kvm.c | 8 +++++ + include/hw/intc/arm_gicv3_common.h | 1 + + target/arm/kvm.c | 53 +++++++++++++++++++++++++++++- + 4 files changed, 66 insertions(+), 1 deletion(-) + +diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c +index 2ebf880ead..8b9c4b2f1f 100644 +--- a/hw/intc/arm_gicv3_common.c ++++ b/hw/intc/arm_gicv3_common.c +@@ -88,6 +88,11 @@ static int gicv3_post_load(void *opaque, int version_id) + gicv3_gicd_no_migration_shift_bug_post_load(s); + + if (c->post_load) { ++ /* load origin value of reg icc_ctrl_el1 when migrate vm */ ++ for (int ncpu = 0; ncpu < s->num_cpu; ncpu++) { ++ GICv3CPUState *cs = &s->cpu[ncpu]; ++ cs->icc_ctlr_el1[GICV3_NS] = cs->icc_ctlr_el1_origin[GICV3_NS]; ++ } + c->post_load(s); + } + return 0; +diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c +index 77eb37e131..367589141c 100644 +--- a/hw/intc/arm_gicv3_kvm.c ++++ b/hw/intc/arm_gicv3_kvm.c +@@ -700,6 +700,8 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri) + KVM_VGIC_ATTR(ICC_CTLR_EL1, c->gicr_typer), + &c->icc_ctlr_el1[GICV3_NS], false, &error_abort); + ++ /* save origin value of reg icc_ctrl_el1 for vm migration to use */ ++ c->icc_ctlr_el1_origin[GICV3_S] = c->icc_ctlr_el1[GICV3_NS]; + c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS]; + } + +@@ -720,6 +722,12 @@ static void kvm_arm_gicv3_reset_hold(Object *obj) + } + + kvm_arm_gicv3_put(s); ++ ++ /* save origin value of reg icc_ctrl_el1 */ ++ for (int ncpu = 0; ncpu < s->num_cpu; ncpu++) { ++ GICv3CPUState *c = &s->cpu[ncpu]; ++ c->icc_ctlr_el1_origin[GICV3_NS] = c->icc_ctlr_el1[GICV3_NS]; ++ } + } + + /* +diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h +index 4e2fb518e7..a9b6d7f7b0 100644 +--- a/include/hw/intc/arm_gicv3_common.h ++++ b/include/hw/intc/arm_gicv3_common.h +@@ -181,6 +181,7 @@ struct GICv3CPUState { + /* CPU interface */ + uint64_t icc_sre_el1; + uint64_t icc_ctlr_el1[2]; ++ uint64_t icc_ctlr_el1_origin[2]; + uint64_t icc_pmr_el1; + uint64_t icc_bpr[3]; + uint64_t icc_apr[3][4]; +diff --git a/target/arm/kvm.c b/target/arm/kvm.c +index a42ddcc855..fb0ced115f 100644 +--- a/target/arm/kvm.c ++++ b/target/arm/kvm.c +@@ -32,6 +32,7 @@ + #include "hw/irq.h" + #include "qapi/visitor.h" + #include "qemu/log.h" ++#include "qemu/aarch64-cpuid.h" + + const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_LAST_INFO +@@ -469,6 +470,56 @@ static uint64_t *kvm_arm_get_cpreg_ptr(ARMCPU *cpu, uint64_t regidx) + return &cpu->cpreg_values[res - cpu->cpreg_indexes]; + } + ++/* PHYTIUM: check compatibility for live migration. 
*/ ++static bool check_compatibility_for_phytium(ARMCPU *cpu) ++{ ++ Object *obj = OBJECT(cpu); ++ ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); ++ ++ int i; ++ bool ret = true; ++ uint8_t src_impl = 0; ++ uint16_t src_partnum = 0; ++ uint64_t src_midr = 0; ++ ++ if (NULL != acc->info && 0 == strcmp(acc->info->name, "phytium-v")) ++ ret = true; ++ else { ++ for (i = 0; i < cpu->cpreg_array_len; i++) { ++ uint64_t regidx = cpu->cpreg_indexes[i]; ++ if (regidx == ARM64_SYS_REG(3, 0, 0, 0, 0)) { ++ src_midr = cpu->cpreg_values[i]; ++ src_impl = (src_midr >> 24) & 0xff; ++ src_partnum = (src_midr >> 4) & 0x0fff; ++ break; ++ } ++ } ++ ++ if (src_impl == ARM_CPU_IMP_PHYTIUM) { ++ if (is_phytium_cpu()) { ++ if (qemu_read_cpuid_part_number() >= src_partnum) { ++ ret = true; ++ } else { ++ ret = false; ++ } ++ } else if (qemu_read_cpuid_implementor() == 0x48) { ++ if (src_partnum == PHYTIUM_CPU_PART_FTC662 ++ || src_partnum == PHYTIUM_CPU_PART_FTC663) { ++ ret = true; ++ } else { ++ ret = false; ++ } ++ } else { ++ ret = false; ++ } ++ } else { ++ ret = false; ++ } ++ } ++ ++ return ret; ++} ++ + /* Initialize the ARMCPU cpreg list according to the kernel's + * definition of what CPU registers it knows about (and throw away + * the previous TCG-created cpreg list). +@@ -612,7 +663,7 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) + * "you tried to set a register which is constant with + * a different value from what it actually contains". + */ +- ok = false; ++ ok = check_compatibility_for_phytium(cpu); + } + } + return ok; +-- +2.43.5 + diff --git a/0347-cpu-add-tengyun-s5000c-cpu-support.patch b/0347-cpu-add-tengyun-s5000c-cpu-support.patch new file mode 100644 index 0000000..ab1913a --- /dev/null +++ b/0347-cpu-add-tengyun-s5000c-cpu-support.patch @@ -0,0 +1,62 @@ +From aba69912bf0a2eb0fa99a8e6bf6773c235159384 Mon Sep 17 00:00:00 2001 +From: wangzhimin +Date: Tue, 15 Apr 2025 11:13:52 +0800 +Subject: [PATCH] cpu: add Tengyun-S5000C cpu support + +Add the Tengyun-S5000C CPU model. 
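+
+The TCG fallback MIDR 0x700f8620 decodes, with the macros from the earlier
+aarch64-cpuid.h patch, to implementer 0x70 (Phytium) and part number 0x862;
+a compile-time check of that reading (illustrative, not in the patch):
+
+    #include "qemu/aarch64-cpuid.h"
+
+    _Static_assert(MIDR_IMPLEMENTOR(0x700f8620ULL) == ARM_CPU_IMP_PHYTIUM,
+                   "implementer is Phytium");
+    _Static_assert(MIDR_PARTNUM(0x700f8620ULL) == PHYTIUM_CPU_PART_FTC862,
+                   "part number is FTC862");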
+ +Signed-off-by: Peng Mengguang +Signed-off-by: Wang Zhimin +--- + hw/arm/virt.c | 1 + + target/arm/cpu64.c | 17 +++++++++++++++++ + 2 files changed, 18 insertions(+) + +diff --git a/hw/arm/virt.c b/hw/arm/virt.c +index eaf29f5c94..9b9fe821d5 100644 +--- a/hw/arm/virt.c ++++ b/hw/arm/virt.c +@@ -220,6 +220,7 @@ static const char *valid_cpus[] = { + #endif + ARM_CPU_TYPE_NAME("cortex-a53"), + ARM_CPU_TYPE_NAME("cortex-a57"), ++ ARM_CPU_TYPE_NAME("Tengyun-S5000C"), + ARM_CPU_TYPE_NAME("host"), + ARM_CPU_TYPE_NAME("max"), + ARM_CPU_TYPE_NAME("phytium-v"), +diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c +index b4326ce8ba..71c11ae49e 100644 +--- a/target/arm/cpu64.c ++++ b/target/arm/cpu64.c +@@ -753,11 +753,28 @@ static void aarch64_phytium_v_initfn(Object *obj) + } + } + ++static void aarch64_tengyun_s5000c_initfn(Object *obj) ++{ ++ ARMCPU *cpu = ARM_CPU(obj); ++ ++ if (kvm_enabled()) { ++ kvm_arm_set_cpu_features_from_host(cpu); ++ } else { ++ aarch64_a53_initfn(obj); ++ cpu->midr = 0x700f8620; ++ } ++ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { ++ aarch64_add_sve_properties(obj); ++ aarch64_add_pauth_properties(obj); ++ } ++} ++ + static const ARMCPUInfo aarch64_cpus[] = { + { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, + { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, + { .name = "max", .initfn = aarch64_max_initfn }, + { .name = "phytium-v", .initfn = aarch64_phytium_v_initfn }, ++ { .name = "Tengyun-S5000C", .initfn = aarch64_tengyun_s5000c_initfn }, + #if defined(CONFIG_KVM) || defined(CONFIG_HVF) + { .name = "host", .initfn = aarch64_host_initfn }, + #endif +-- +2.43.5 + diff --git a/0348-sync-header-file-from-upstream.patch b/0348-sync-header-file-from-upstream.patch new file mode 100644 index 0000000..5e75eaa --- /dev/null +++ b/0348-sync-header-file-from-upstream.patch @@ -0,0 +1,138 @@ +From f4c27a80b5fb0a9a004397b64347adc92378e3f7 Mon Sep 17 00:00:00 2001 +From: Xianglai Li +Date: Mon, 26 May 2025 16:58:25 +0800 +Subject: [PATCH] sync header file from upstream + +The local interrupt controller simulation header file is inconsistent +with the upstream header file. To ensure uapi compatibility, +the upstream interrupt controller simulation header file is now +synchronized. 
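+
+The practical failure mode of a mismatched device type value is that
+device creation is refused at run time; a simplified sketch of the probe
+pattern used by the code below:
+
+    struct kvm_create_device cd = {
+        .type = KVM_DEV_TYPE_LOONGARCH_EIOINTC,  /* must match kernel uAPI */
+        .flags = 0,
+    };
+    /* the ioctl fails with ENODEV when the running kernel numbers the
+     * device differently, e.g. still uses the old KVM_DEV_TYPE_LA_EXTIOI */
+    int ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd);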
+ +Signed-off-by: Xianglai Li +--- + hw/intc/loongarch_extioi_kvm.c | 2 +- + hw/intc/loongarch_ipi_kvm.c | 2 +- + hw/intc/loongarch_pch_pic_kvm.c | 2 +- + linux-headers/asm-loongarch/kvm.h | 15 ++++++--------- + linux-headers/linux/kvm.h | 13 +++++++------ + target/loongarch/kvm/kvm.c | 4 ---- + 6 files changed, 16 insertions(+), 22 deletions(-) + +diff --git a/hw/intc/loongarch_extioi_kvm.c b/hw/intc/loongarch_extioi_kvm.c +index e7699ad2ea..a7ee0a8ad7 100644 +--- a/hw/intc/loongarch_extioi_kvm.c ++++ b/hw/intc/loongarch_extioi_kvm.c +@@ -115,7 +115,7 @@ static void kvm_loongarch_extioi_realize(DeviceState *dev, Error **errp) + } + + if (!extioi_class->is_created) { +- cd.type = KVM_DEV_TYPE_LA_EXTIOI; ++ cd.type = KVM_DEV_TYPE_LOONGARCH_EIOINTC; + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); + if (ret < 0) { + error_setg_errno(errp, errno, +diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c +index fd308eb0c0..57fc05db77 100644 +--- a/hw/intc/loongarch_ipi_kvm.c ++++ b/hw/intc/loongarch_ipi_kvm.c +@@ -128,7 +128,7 @@ static void kvm_loongarch_ipi_realize(DeviceState *dev, Error **errp) + } + + if (!ipi_class->is_created) { +- cd.type = KVM_DEV_TYPE_LA_IPI; ++ cd.type = KVM_DEV_TYPE_LOONGARCH_IPI; + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); + if (ret < 0) { + error_setg_errno(errp, errno, "Creating the KVM device failed"); +diff --git a/hw/intc/loongarch_pch_pic_kvm.c b/hw/intc/loongarch_pch_pic_kvm.c +index 8f66d9a01f..e9cef02f9a 100644 +--- a/hw/intc/loongarch_pch_pic_kvm.c ++++ b/hw/intc/loongarch_pch_pic_kvm.c +@@ -113,7 +113,7 @@ static void kvm_loongarch_pch_pic_realize(DeviceState *dev, Error **errp) + } + + if (!pch_pic_class->is_created) { +- cd.type = KVM_DEV_TYPE_LA_PCH_PIC; ++ cd.type = KVM_DEV_TYPE_LOONGARCH_PCHPIC; + ret = kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd); + if (ret < 0) { + error_setg_errno(errp, errno, +diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h +index 92c32b7ec9..bc37a986f2 100644 +--- a/linux-headers/asm-loongarch/kvm.h ++++ b/linux-headers/asm-loongarch/kvm.h +@@ -134,26 +134,23 @@ struct kvm_iocsr_entry { + #define KVM_IRQCHIP_NUM_PINS 64 + #define KVM_MAX_CORES 256 + +-#define KVM_LOONGARCH_VM_HAVE_IRQCHIP 0x40000001 ++#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000001 + +-#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002 ++#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000002 + +-#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 +- +-#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000006 ++#define KVM_DEV_LOONGARCH_EXTIOI_GRP_SW_STATUS 0x40000003 + #define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_NUM_CPU 0x0 + #define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_FEATURE 0x1 + #define KVM_DEV_LOONGARCH_EXTIOI_SW_STATUS_STATE 0x2 + +-#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000007 ++#define KVM_DEV_LOONGARCH_EXTIOI_GRP_CTRL 0x40000004 + #define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_NUM_CPU 0x0 + #define KVM_DEV_LOONGARCH_EXTIOI_CTRL_INIT_FEATURE 0x1 + #define KVM_DEV_LOONGARCH_EXTIOI_CTRL_LOAD_FINISHED 0x3 + + +-#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004 +-#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 +- + #define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005 ++#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000006 ++#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 + + #endif /* __UAPI_ASM_LOONGARCH_KVM_H */ +diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h +index d2f6007fb8..656b6163e6 100644 +--- a/linux-headers/linux/kvm.h ++++ b/linux-headers/linux/kvm.h 
+@@ -1154,12 +1154,13 @@ enum kvm_device_type { + #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME + KVM_DEV_TYPE_RISCV_AIA, + #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA +- KVM_DEV_TYPE_LA_PCH_PIC = 0x100, +-#define KVM_DEV_TYPE_LA_PCH_PIC KVM_DEV_TYPE_LA_PCH_PIC +- KVM_DEV_TYPE_LA_IPI, +-#define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI +- KVM_DEV_TYPE_LA_EXTIOI, +-#define KVM_DEV_TYPE_LA_EXTIOI KVM_DEV_TYPE_LA_EXTIOI ++ KVM_DEV_TYPE_LOONGARCH_IPI, ++#define KVM_DEV_TYPE_LOONGARCH_IPI KVM_DEV_TYPE_LOONGARCH_IPI ++ KVM_DEV_TYPE_LOONGARCH_EIOINTC, ++#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC ++ KVM_DEV_TYPE_LOONGARCH_PCHPIC, ++#define KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC ++ + KVM_DEV_TYPE_MAX, + }; + +diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c +index 22177b6220..f42b92d7c8 100644 +--- a/target/loongarch/kvm/kvm.c ++++ b/target/loongarch/kvm/kvm.c +@@ -973,10 +973,6 @@ int kvm_arch_get_default_type(MachineState *ms) + int kvm_arch_init(MachineState *ms, KVMState *s) + { + cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); +- if(!kvm_vm_check_attr(kvm_state, KVM_LOONGARCH_VM_HAVE_IRQCHIP, KVM_LOONGARCH_VM_HAVE_IRQCHIP)) { +- s->kernel_irqchip_allowed = false; +- } +- + return 0; + } + +-- +2.43.5 + diff --git a/qemu.spec b/qemu.spec index 13f07bf..0b24594 100644 --- a/qemu.spec +++ b/qemu.spec @@ -1,4 +1,4 @@ -%define anolis_release 25 +%define anolis_release 26 %bcond_with check %global all_system_emu_support 0 @@ -566,7 +566,71 @@ Patch0280: 0280-enable-the-irqchip-simulation-function.patch Patch0281: 0281-hw-i386-pc-add-mem2-option-for-qemu.patch Patch0282: 0282-hw-misc-psp-support-tkm-use-mem2-memory.patch Patch0283: 0283-hw-misc-psp-Pin-the-hugepage-memory-specified-by-mem.patch -Patch0284: 0284-fix-potential-use-after-free-with-dbus-shared-memory.patch +Patch0284: 0284-fix-potential-use-after-free-with-dbus-shared-memory.patch +Patch0285: 0285-add-sw64arch-support.patch +Patch0286: 0286-virtio-snd-add-max-size-bounds-check-in-input-cb.patch +Patch0287: 0287-virtio-snd-check-for-invalid-param-shift-operands.patch +Patch0288: 0288-add-support-for-the-virtcca-cvm-feature.patch +Patch0289: 0289-cvm-bug-fix-for-incorrect-device-name-check-for-vhos.patch +Patch0290: 0290-cvm-implement-command-blacklist-for-cvm-security-enh.patch +Patch0291: 0291-cvm-bug-fix-for-undefined-reference-to-virtcca-cvm-a.patch +Patch0292: 0292-qapi-qom-target-i386-csv-guest-introduce-secret-head.patch +Patch0293: 0293-target-i386-kvm-support-to-get-and-enable-extensions.patch +Patch0294: 0294-target-i386-csv-request-to-set-private-memory-of-csv.patch +Patch0295: 0295-target-i386-csv-support-load-kernel-hashes-for-csv3-.patch +Patch0296: 0296-target-i386-csv-support-inject-secret-for-csv3-guest.patch +Patch0297: 0297-target-i386-add-more-features-enumerated-by-cpuid-7-.patch +Patch0298: 0298-target-i386-fix-feature-dependency-for-waitpkg.patch +Patch0299: 0299-target-i386-add-support-for-fred-in-cpuid-enumeratio.patch +Patch0300: 0300-target-i386-mark-cr4-fred-not-reserved.patch +Patch0301: 0301-vmxcap-add-support-for-vmx-fred-controls.patch +Patch0302: 0302-target-i386-enumerate-vmx-nested-exception-support.patch +Patch0303: 0303-target-i386-add-get-set-migrate-support-for-fred-msr.patch +Patch0304: 0304-target-i386-delete-duplicated-macro-definition-cr4-f.patch +Patch0305: 0305-target-i386-add-vmx-control-bits-for-nested-fred-sup.patch +Patch0306: 0306-target-i386-raise-the-highest-index-value-used-for-a.patch 
+Patch0307: 0307-target-i386-pass-x86cpu-to-x86-cpu-get-supported-fea.patch +Patch0308: 0308-i386-cpuid-remove-subleaf-constraint-on-cpuid-leaf-1.patch +Patch0309: 0309-target-i386-don-t-construct-a-all-zero-entry-for-cpu.patch +Patch0310: 0310-target-i386-enable-fdp-excptn-only-and-zero-fcs-fds.patch +Patch0311: 0311-target-i386-construct-cpuid-2-as-stateful-iff-times-.patch +Patch0312: 0312-target-i386-make-invtsc-migratable-when-user-sets-ts.patch +Patch0313: 0313-target-i386-cpu-fix-notes-for-cpu-models.patch +Patch0314: 0314-sw64-add-the-migration-of-rtc-and-memb-instructions.patch +Patch0315: 0315-hw-intc-add-extioi-ability-of-256-vcpu-interrupt-rou.patch +Patch0316: 0316-target-loongarch-fix-vcpu-reset-command-word-issue.patch +Patch0317: 0317-target-loongarch-fix-the-cpu-unplug-resource-leak.patch +Patch0318: 0318-hw-loongarch-boot-adjust-the-loading-position-of-the.patch +Patch0319: 0319-hw-rtc-fixed-loongson-rtc-emulation-errors.patch +Patch0320: 0320-target-i386-introduce-sierraforest-v2-model.patch +Patch0321: 0321-target-i386-export-bhi-no-bit-to-guests.patch +Patch0322: 0322-target-i386-add-new-cpu-model-clearwaterforest.patch +Patch0323: 0323-docs-add-gnr-srf-and-cwf-cpu-models.patch +Patch0324: 0324-target-i386-add-sha512-sm3-sm4-feature-bits.patch +Patch0325: 0325-i386-kvm-move-architectural-cpuid-leaf-generation-to.patch +Patch0326: 0326-pci-host-q35-move-pam-initialization-above-smram-ini.patch +Patch0327: 0327-q35-introduce-smm-ranges-property-for-q35-pci-host.patch +Patch0328: 0328-hw-i386-acpi-set-pcat-compat-bit-only-when-pic-is-no.patch +Patch0329: 0329-confidential-guest-support-add-kvm-init-and-kvm-rese.patch +Patch0330: 0330-i386-sev-switch-to-use-confidential-guest-kvm-init.patch +Patch0331: 0331-ppc-pef-switch-to-use-confidential-guest-kvm-init-re.patch +Patch0332: 0332-s390-switch-to-use-confidential-guest-kvm-init.patch +Patch0333: 0333-scripts-update-linux-headers-add-setup-data-h-to-imp.patch +Patch0334: 0334-scripts-update-linux-headers-add-bits-h-to-file-impo.patch +Patch0335: 0335-linux-headers-update-to-linux-v6-8-rc6.patch +Patch0336: 0336-linux-headers-update-to-current-kvm-next.patch +Patch0337: 0337-cpus-vm-was-suspended.patch +Patch0338: 0338-cpus-stop-vm-in-suspended-runstate.patch +Patch0339: 0339-runstate-skip-initial-cpu-reset-if-reset-is-not-actu.patch +Patch0340: 0340-migration-prevent-migration-when-vm-has-poisoned-mem.patch +Patch0341: 0341-scripts-update-linux-header-sh-be-more-src-tree-frie.patch +Patch0342: 0342-scripts-update-linux-headers-sh-remove-temporary-dir.patch +Patch0343: 0343-scripts-update-linux-headers-sh-fix-the-path-of-setu.patch +Patch0344: 0344-util-add-interfaces-to-read-midr-on-aarch64.patch +Patch0345: 0345-cpu-add-phytium-v-cpu-support.patch +Patch0346: 0346-target-arm-support-vm-live-migration-between-phytium.patch +Patch0347: 0347-cpu-add-tengyun-s5000c-cpu-support.patch +Patch0348: 0348-sync-header-file-from-upstream.patch ExclusiveArch: x86_64 aarch64 loongarch64 @@ -1695,6 +1759,14 @@ rm -rf %{buildroot}%{_datadir}/%{name}/efi*rom # Provided by package seavgabios rm -rf %{buildroot}%{_datadir}/%{name}/vgabios*bin %endif +%ifnarch sw_64 +rm -rf %{buildroot}%{_datadir}/%{name}/c3-uefi-bios-sw +rm -rf %{buildroot}%{_datadir}/%{name}/c4-uefi-bios-sw +rm -rf %{buildroot}%{_datadir}/%{name}/core3-hmcode +rm -rf %{buildroot}%{_datadir}/%{name}/core3-reset +rm -rf %{buildroot}%{_datadir}/%{name}/core4-hmcode +rm -rf %{buildroot}%{_datadir}/%{name}/core4-reset +%endif # Provided by package seabios rm -rf 
%{buildroot}%{_datadir}/%{name}/bios*.bin # Provided by package sgabios @@ -1911,6 +1983,15 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %{_datadir}/%{name}/pvh.bin %{_datadir}/%{name}/qboot.rom +%ifarch sw_64 +%{_datadir}/%{name}/c3-uefi-bios-sw +%{_datadir}/%{name}/c4-uefi-bios-sw +%{_datadir}/%{name}/core3-hmcode +%{_datadir}/%{name}/core3-reset +%{_datadir}/%{name}/core4-hmcode +%{_datadir}/%{name}/core4-reset +%endif + %ifarch loongarch64 # Provided by package seavgabios %{_datadir}/%{name}/vgabios-ati.bin @@ -2111,6 +2192,9 @@ useradd -r -u 107 -g qemu -G kvm -d / -s /sbin/nologin \ %endif %changelog +* Wed May 28 2025 wh02252983 - 2:8.2.0-26 +- Add sw64arch support and Add patches to fix some bugs/cves + * Sat Nov 30 2024 Xianglai Li - 2:8.2.0-25 - Remove loongarch qemu's dependency on the seavgabios package. -- Gitee