diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index eecd8031cf6c326f1816da179cf104c9c6dfb983..fbd6f5fcc2356bbbcfbb04b7e458472d22aee199 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -2423,6 +2423,18 @@ static int kvm_init(MachineState *ms)
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
+#if defined(HOST_PPC64) || defined(HOST_LOONGARCH64)
+ /*
+ * On POWER and LoongArch, the kernel advertises a soft limit based
+ * on the number of CPU threads on the host. We want to allow
+ * exceeding this for testing purposes, so we don't want to set hard
+ * limit to soft limit as on x86.
+ */
+#else
+ /* RHEL doesn't support nr_vcpus > soft_vcpus_limit */
+ hard_vcpus_limit = soft_vcpus_limit;
+#endif
+
while (nc->name) {
if (nc->num > soft_vcpus_limit) {
warn_report("Number of %s cpus requested (%d) exceeds "
diff --git a/configs/devices/loongarch64-softmmu/default.mak b/configs/devices/loongarch64-softmmu/default.mak
new file mode 100644
index 0000000000000000000000000000000000000000..b4994d8a6c3dfc4a79623e2e483885158382916c
--- /dev/null
+++ b/configs/devices/loongarch64-softmmu/default.mak
@@ -0,0 +1,163 @@
+# Default configuration for loongarch-softmmu
+
+CONFIG_PCI=y
+CONFIG_ACPI_PCI=y
+# For now, CONFIG_IDE_CORE requires ISA, so we enable it here
+CONFIG_ISA_BUS=y
+CONFIG_VIRTIO_PCI=y
+
+CONFIG_VGA_PCI=y
+CONFIG_ACPI_SMBUS=y
+#CONFIG_VHOST_USER_SCSI=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_SCSI=y
+#CONFIG_VHOST_USER_BLK=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_BLK=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CRYPTO=y
+CONFIG_VIRTIO_GPU=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_RNG=y
+CONFIG_SCSI=y
+CONFIG_VIRTIO_SCSI=y
+CONFIG_VIRTIO_SERIAL=y
+
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_OHCI_PCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_AHCI_ICH9=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+
+CONFIG_SPICE=y
+CONFIG_QXL=y
+CONFIG_ESP=y
+CONFIG_SCSI=y
+CONFIG_VGA_ISA=y
+CONFIG_VGA_ISA_MM=y
+CONFIG_VGA_CIRRUS=y
+CONFIG_VMWARE_VGA=y
+CONFIG_VIRTIO_VGA=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_PARALLEL=y
+CONFIG_I8254=y
+CONFIG_PCSPK=y
+CONFIG_PCKBD=y
+CONFIG_FDC=y
+CONFIG_ACPI=y
+CONFIG_ACPI_MEMORY_HOTPLUG=y
+CONFIG_ACPI_NVDIMM=y
+CONFIG_ACPI_CPU_HOTPLUG=y
+CONFIG_APM=y
+CONFIG_I8257=y
+CONFIG_PIIX4=y
+CONFIG_IDE_ISA=y
+CONFIG_IDE_PIIX=y
+#CONFIG_NE2000_ISA=y
+CONFIG_MIPSNET=y
+CONFIG_PFLASH_CFI01=y
+CONFIG_I8259=y
+CONFIG_MC146818RTC=y
+CONFIG_ISA_TESTDEV=y
+CONFIG_EMPTY_SLOT=y
+CONFIG_I2C=y
+CONFIG_DIMM=y
+CONFIG_MEM_DEVICE=y
+
+# Arch Specified CONFIG defines
+CONFIG_IDE_VIA=y
+CONFIG_VT82C686=y
+CONFIG_RC4030=y
+CONFIG_DP8393X=y
+CONFIG_DS1225Y=y
+CONFIG_FITLOADER=y
+CONFIG_SMBIOS=y
+
+CONFIG_PCIE_PORT=y
+CONFIG_I82801B11=y
+CONFIG_XIO3130=y
+CONFIG_PCI_EXPRESS=y
+CONFIG_MSI_NONBROKEN=y
+CONFIG_IOH3420=y
+CONFIG_SD=y
+CONFIG_SDHCI=y
+CONFIG_VIRTFS=y
+CONFIG_VIRTIO_9P=y
+CONFIG_USB_EHCI=y
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_EHCI_SYSBUS=y
+CONFIG_USB_STORAGE_BOT=y
+CONFIG_TPM_EMULATOR=y
+CONFIG_TPM_TIS=y
+CONFIG_PLATFORM_BUS=y
+CONFIG_TPM_TIS_SYSBUS=y
+CONFIG_ACPI_LOONGARCH=y
+CONFIG_LS7A_RTC=y
+
+#vfio config
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_PLATFORM=y
+CONFIG_VFIO_XGMAC=y
+CONFIG_VFIO_AMD_XGBE=y
+
+
diff --git a/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak b/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak
new file mode 100644
index 0000000000000000000000000000000000000000..15fc2d00f92438ed7efb5c35989062afc133ea52
--- /dev/null
+++ b/configs/devices/loongarch64-softmmu/loongarch64-rh-devices.mak
@@ -0,0 +1,165 @@
+
+include ../rh-virtio.mak
+# Default configuration for loongarch-softmmu
+
+CONFIG_PCI=y
+CONFIG_ACPI_PCI=y
+# For now, CONFIG_IDE_CORE requires ISA, so we enable it here
+CONFIG_ISA_BUS=y
+CONFIG_VIRTIO_PCI=y
+
+CONFIG_VGA_PCI=y
+CONFIG_ACPI_SMBUS=y
+#CONFIG_VHOST_USER_SCSI=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_SCSI=y
+#CONFIG_VHOST_USER_BLK=$(call land,$(CONFIG_VHOST_USER),$(CONFIG_LINUX))
+CONFIG_VHOST_USER_BLK=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_CRYPTO=y
+CONFIG_VIRTIO_GPU=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_RNG=y
+CONFIG_SCSI=y
+CONFIG_VIRTIO_SCSI=y
+CONFIG_VIRTIO_SERIAL=y
+
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_OHCI_PCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_AHCI_ICH9=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+CONFIG_USB_UHCI=y
+CONFIG_USB_OHCI=y
+CONFIG_USB_XHCI=y
+CONFIG_USB_XHCI_NEC=y
+CONFIG_NE2000_PCI=y
+CONFIG_EEPRO100_PCI=y
+CONFIG_PCNET_PCI=y
+CONFIG_PCNET_COMMON=y
+CONFIG_AC97=y
+CONFIG_HDA=y
+CONFIG_ES1370=y
+CONFIG_SCSI=y
+CONFIG_LSI_SCSI_PCI=y
+CONFIG_VMW_PVSCSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
+CONFIG_MPTSAS_SCSI_PCI=y
+CONFIG_RTL8139_PCI=y
+CONFIG_E1000_PCI=y
+CONFIG_IDE_CORE=y
+CONFIG_IDE_QDEV=y
+CONFIG_IDE_PCI=y
+CONFIG_AHCI=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
+CONFIG_SERIAL=y
+CONFIG_SERIAL_ISA=y
+CONFIG_SERIAL_PCI=y
+CONFIG_CAN_BUS=y
+CONFIG_CAN_SJA1000=y
+CONFIG_CAN_PCI=y
+
+CONFIG_SPICE=y
+CONFIG_QXL=y
+CONFIG_ESP=y
+CONFIG_SCSI=y
+CONFIG_VGA_ISA=y
+CONFIG_VGA_ISA_MM=y
+CONFIG_VGA_CIRRUS=y
+CONFIG_VMWARE_VGA=y
+CONFIG_VIRTIO_VGA=y
+CONFIG_SERIAL_ISA=y
+CONFIG_PARALLEL=y
+CONFIG_I8254=y
+CONFIG_PCSPK=y
+CONFIG_PCKBD=y
+CONFIG_FDC=y
+CONFIG_ACPI=y
+CONFIG_ACPI_MEMORY_HOTPLUG=y
+CONFIG_ACPI_NVDIMM=y
+CONFIG_ACPI_CPU_HOTPLUG=y
+CONFIG_APM=y
+CONFIG_I8257=y
+CONFIG_PIIX4=y
+CONFIG_IDE_ISA=y
+CONFIG_IDE_PIIX=y
+#CONFIG_NE2000_ISA=y
+CONFIG_MIPSNET=y
+CONFIG_PFLASH_CFI01=y
+CONFIG_I8259=y
+CONFIG_MC146818RTC=y
+CONFIG_ISA_TESTDEV=y
+CONFIG_EMPTY_SLOT=y
+CONFIG_I2C=y
+CONFIG_DIMM=y
+CONFIG_MEM_DEVICE=y
+
+# Arch Specified CONFIG defines
+CONFIG_IDE_VIA=y
+CONFIG_VT82C686=y
+CONFIG_RC4030=y
+CONFIG_DP8393X=y
+CONFIG_DS1225Y=y
+CONFIG_FITLOADER=y
+CONFIG_SMBIOS=y
+
+CONFIG_PCIE_PORT=y
+CONFIG_I82801B11=y
+CONFIG_XIO3130=y
+CONFIG_PCI_EXPRESS=y
+CONFIG_MSI_NONBROKEN=y
+CONFIG_IOH3420=y
+CONFIG_SD=y
+CONFIG_SDHCI=y
+CONFIG_VIRTFS=y
+CONFIG_VIRTIO_9P=y
+CONFIG_USB_EHCI=y
+CONFIG_USB_EHCI_PCI=y
+CONFIG_USB_EHCI_SYSBUS=y
+CONFIG_USB_STORAGE_BOT=y
+CONFIG_USB_STORAGE_CORE=y
+CONFIG_USB_STORAGE_CLASSIC=y
+CONFIG_TPM_EMULATOR=y
+CONFIG_TPM_TIS=y
+CONFIG_PLATFORM_BUS=y
+CONFIG_TPM_TIS_SYSBUS=y
+CONFIG_ACPI_LOONGARCH=y
+CONFIG_LS7A_RTC=y
+
+#vfio config
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VFIO_PLATFORM=y
+CONFIG_VFIO_XGMAC=y
+CONFIG_VFIO_AMD_XGBE=y
+
diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak
new file mode 100644
index 0000000000000000000000000000000000000000..dc5ab396618416614aa09ed0f2ca9155a3282949
--- /dev/null
+++ b/configs/targets/loongarch64-softmmu.mak
@@ -0,0 +1,4 @@
+TARGET_ARCH=loongarch64
+TARGET_SUPPORTS_MTTCG=y
+TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu64.xml
+
diff --git a/configure b/configure
index 48c21775f3a90c91631d90bd6e3ec6b060215465..1f932f7eeb8c59f7f635659b12801366d3fbeb6a 100755
--- a/configure
+++ b/configure
@@ -581,6 +581,8 @@ elif check_define __arm__ ; then
cpu="arm"
elif check_define __aarch64__ ; then
cpu="aarch64"
+elif check_define __loongarch__ ; then
+ cpu="loongarch64"
else
cpu=$(uname -m)
fi
@@ -606,6 +608,9 @@ case "$cpu" in
aarch64)
cpu="aarch64"
;;
+ loongarch64)
+ cpu="loongarch64"
+ ;;
mips*)
cpu="mips"
;;
diff --git a/disas/loongarch.c b/disas/loongarch.c
new file mode 100644
index 0000000000000000000000000000000000000000..14dd131e2e304011eb014ca88a5f9068ae618fd2
--- /dev/null
+++ b/disas/loongarch.c
@@ -0,0 +1,2748 @@
+/*
+ * QEMU Loongarch Disassembler
+ *
+ * Copyright (c) 2020-2021.
+ * Author: Song Gao, gaosong@loongson.cn
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/dis-asm.h"
+
+#define INSNLEN 4
+
+/* types */
+
+typedef uint16_t la_opcode;
+
+/* enums */
+
+typedef enum {
+ la_op_illegal = 0,
+ la_op_gr2scr = 1,
+ la_op_scr2gr = 2,
+ la_op_clo_w = 3,
+ la_op_clz_w = 4,
+ la_op_cto_w = 5,
+ la_op_ctz_w = 6,
+ la_op_clo_d = 7,
+ la_op_clz_d = 8,
+ la_op_cto_d = 9,
+ la_op_ctz_d = 10,
+ la_op_revb_2h = 11,
+ la_op_revb_4h = 12,
+ la_op_revb_2w = 13,
+ la_op_revb_d = 14,
+ la_op_revh_2w = 15,
+ la_op_revh_d = 16,
+ la_op_bitrev_4b = 17,
+ la_op_bitrev_8b = 18,
+ la_op_bitrev_w = 19,
+ la_op_bitrev_d = 20,
+ la_op_ext_w_h = 21,
+ la_op_ext_w_b = 22,
+ la_op_rdtime_d = 23,
+ la_op_cpucfg = 24,
+ la_op_asrtle_d = 25,
+ la_op_asrtgt_d = 26,
+ la_op_alsl_w = 27,
+ la_op_alsl_wu = 28,
+ la_op_bytepick_w = 29,
+ la_op_bytepick_d = 30,
+ la_op_add_w = 31,
+ la_op_add_d = 32,
+ la_op_sub_w = 33,
+ la_op_sub_d = 34,
+ la_op_slt = 35,
+ la_op_sltu = 36,
+ la_op_maskeqz = 37,
+ la_op_masknez = 38,
+ la_op_nor = 39,
+ la_op_and = 40,
+ la_op_or = 41,
+ la_op_xor = 42,
+ la_op_orn = 43,
+ la_op_andn = 44,
+ la_op_sll_w = 45,
+ la_op_srl_w = 46,
+ la_op_sra_w = 47,
+ la_op_sll_d = 48,
+ la_op_srl_d = 49,
+ la_op_sra_d = 50,
+ la_op_rotr_w = 51,
+ la_op_rotr_d = 52,
+ la_op_mul_w = 53,
+ la_op_mulh_w = 54,
+ la_op_mulh_wu = 55,
+ la_op_mul_d = 56,
+ la_op_mulh_d = 57,
+ la_op_mulh_du = 58,
+ la_op_mulw_d_w = 59,
+ la_op_mulw_d_wu = 60,
+ la_op_div_w = 61,
+ la_op_mod_w = 62,
+ la_op_div_wu = 63,
+ la_op_mod_wu = 64,
+ la_op_div_d = 65,
+ la_op_mod_d = 66,
+ la_op_div_du = 67,
+ la_op_mod_du = 68,
+ la_op_crc_w_b_w = 69,
+ la_op_crc_w_h_w = 70,
+ la_op_crc_w_w_w = 71,
+ la_op_crc_w_d_w = 72,
+ la_op_crcc_w_b_w = 73,
+ la_op_crcc_w_h_w = 74,
+ la_op_crcc_w_w_w = 75,
+ la_op_crcc_w_d_w = 76,
+ la_op_break = 77,
+ la_op_dbcl = 78,
+ la_op_syscall = 79,
+ la_op_alsl_d = 80,
+ la_op_slli_w = 81,
+ la_op_slli_d = 82,
+ la_op_srli_w = 83,
+ la_op_srli_d = 84,
+ la_op_srai_w = 85,
+ la_op_srai_d = 86,
+ la_op_rotri_w = 87,
+ la_op_rotri_d = 88,
+ la_op_bstrins_w = 89,
+ la_op_bstrpick_w = 90,
+ la_op_bstrins_d = 91,
+ la_op_bstrpick_d = 92,
+ la_op_fadd_s = 93,
+ la_op_fadd_d = 94,
+ la_op_fsub_s = 95,
+ la_op_fsub_d = 96,
+ la_op_fmul_s = 97,
+ la_op_fmul_d = 98,
+ la_op_fdiv_s = 99,
+ la_op_fdiv_d = 100,
+ la_op_fmax_s = 101,
+ la_op_fmax_d = 102,
+ la_op_fmin_s = 103,
+ la_op_fmin_d = 104,
+ la_op_fmaxa_s = 105,
+ la_op_fmaxa_d = 106,
+ la_op_fmina_s = 107,
+ la_op_fmina_d = 108,
+ la_op_fscaleb_s = 109,
+ la_op_fscaleb_d = 110,
+ la_op_fcopysign_s = 111,
+ la_op_fcopysign_d = 112,
+ la_op_fabs_s = 113,
+ la_op_fabs_d = 114,
+ la_op_fneg_s = 115,
+ la_op_fneg_d = 116,
+ la_op_flogb_s = 117,
+ la_op_flogb_d = 118,
+ la_op_fclass_s = 119,
+ la_op_fclass_d = 120,
+ la_op_fsqrt_s = 121,
+ la_op_fsqrt_d = 122,
+ la_op_frecip_s = 123,
+ la_op_frecip_d = 124,
+ la_op_frsqrt_s = 125,
+ la_op_frsqrt_d = 126,
+ la_op_fmov_s = 127,
+ la_op_fmov_d = 128,
+ la_op_movgr2fr_w = 129,
+ la_op_movgr2fr_d = 130,
+ la_op_movgr2frh_w = 131,
+ la_op_movfr2gr_s = 132,
+ la_op_movfr2gr_d = 133,
+ la_op_movfrh2gr_s = 134,
+ la_op_movgr2fcsr = 135,
+ la_op_movfcsr2gr = 136,
+ la_op_movfr2cf = 137,
+ la_op_movcf2fr = 138,
+ la_op_movgr2cf = 139,
+ la_op_movcf2gr = 140,
+ la_op_fcvt_s_d = 141,
+ la_op_fcvt_d_s = 142,
+
+ la_op_ftintrm_w_s = 143,
+ la_op_ftintrm_w_d = 144,
+ la_op_ftintrm_l_s = 145,
+ la_op_ftintrm_l_d = 146,
+ la_op_ftintrp_w_s = 147,
+ la_op_ftintrp_w_d = 148,
+ la_op_ftintrp_l_s = 149,
+ la_op_ftintrp_l_d = 150,
+ la_op_ftintrz_w_s = 151,
+ la_op_ftintrz_w_d = 152,
+ la_op_ftintrz_l_s = 153,
+ la_op_ftintrz_l_d = 154,
+ la_op_ftintrne_w_s = 155,
+ la_op_ftintrne_w_d = 156,
+ la_op_ftintrne_l_s = 157,
+ la_op_ftintrne_l_d = 158,
+ la_op_ftint_w_s = 159,
+ la_op_ftint_w_d = 160,
+ la_op_ftint_l_s = 161,
+ la_op_ftint_l_d = 162,
+ la_op_ffint_s_w = 163,
+ la_op_ffint_s_l = 164,
+ la_op_ffint_d_w = 165,
+ la_op_ffint_d_l = 166,
+ la_op_frint_s = 167,
+ la_op_frint_d = 168,
+
+ la_op_slti = 169,
+ la_op_sltui = 170,
+ la_op_addi_w = 171,
+ la_op_addi_d = 172,
+ la_op_lu52i_d = 173,
+ la_op_addi = 174,
+ la_op_ori = 175,
+ la_op_xori = 176,
+
+ la_op_csrxchg = 177,
+ la_op_cacop = 178,
+ la_op_lddir = 179,
+ la_op_ldpte = 180,
+ la_op_iocsrrd_b = 181,
+ la_op_iocsrrd_h = 182,
+ la_op_iocsrrd_w = 183,
+ la_op_iocsrrd_d = 184,
+ la_op_iocsrwr_b = 185,
+ la_op_iocsrwr_h = 186,
+ la_op_iocsrwr_w = 187,
+ la_op_iocsrwr_d = 188,
+ la_op_tlbclr = 189,
+ la_op_tlbflush = 190,
+ la_op_tlbsrch = 191,
+ la_op_tlbrd = 192,
+ la_op_tlbwr = 193,
+ la_op_tlbfill = 194,
+ la_op_ertn = 195,
+ la_op_idle = 196,
+ la_op_invtlb = 197,
+
+ la_op_fmadd_s = 198,
+ la_op_fmadd_d = 199,
+ la_op_fmsub_s = 200,
+ la_op_fmsub_d = 201,
+ la_op_fnmadd_s = 202,
+ la_op_fnmadd_d = 203,
+ la_op_fnmsub_s = 204,
+ la_op_fnmsub_d = 205,
+ la_op_fcmp_cond_s = 206,
+ la_op_fcmp_cond_d = 207,
+ la_op_fsel = 208,
+ la_op_addu16i_d = 209,
+ la_op_lu12i_w = 210,
+ la_op_lu32i_d = 211,
+ la_op_pcaddi = 212,
+ la_op_pcalau12i = 213,
+ la_op_pcaddu12i = 214,
+ la_op_pcaddu18i = 215,
+
+ la_op_ll_w = 216,
+ la_op_sc_w = 217,
+ la_op_ll_d = 218,
+ la_op_sc_d = 219,
+ la_op_ldptr_w = 220,
+ la_op_stptr_w = 221,
+ la_op_ldptr_d = 222,
+ la_op_stptr_d = 223,
+ la_op_ld_b = 224,
+ la_op_ld_h = 225,
+ la_op_ld_w = 226,
+ la_op_ld_d = 227,
+ la_op_st_b = 228,
+ la_op_st_h = 229,
+ la_op_st_w = 230,
+ la_op_st_d = 231,
+ la_op_ld_bu = 232,
+ la_op_ld_hu = 233,
+ la_op_ld_wu = 234,
+ la_op_preld = 235,
+ la_op_fld_s = 236,
+ la_op_fst_s = 237,
+ la_op_fld_d = 238,
+ la_op_fst_d = 239,
+ la_op_ldl_w = 240,
+ la_op_ldr_w = 241,
+ la_op_ldl_d = 242,
+ la_op_ldr_d = 243,
+ la_op_stl_d = 244,
+ la_op_str_d = 245,
+ la_op_ldx_b = 246,
+ la_op_ldx_h = 247,
+ la_op_ldx_w = 248,
+ la_op_ldx_d = 249,
+ la_op_stx_b = 250,
+ la_op_stx_h = 251,
+ la_op_stx_w = 252,
+ la_op_stx_d = 253,
+ la_op_ldx_bu = 254,
+ la_op_ldx_hu = 255,
+ la_op_ldx_wu = 256,
+ la_op_fldx_s = 257,
+ la_op_fldx_d = 258,
+ la_op_fstx_s = 259,
+ la_op_fstx_d = 260,
+
+ la_op_amswap_w = 261,
+ la_op_amswap_d = 262,
+ la_op_amadd_w = 263,
+ la_op_amadd_d = 264,
+ la_op_amand_w = 265,
+ la_op_amand_d = 266,
+ la_op_amor_w = 267,
+ la_op_amor_d = 268,
+ la_op_amxor_w = 269,
+ la_op_amxor_d = 270,
+ la_op_ammax_w = 271,
+ la_op_ammax_d = 272,
+ la_op_ammin_w = 273,
+ la_op_ammin_d = 274,
+ la_op_ammax_wu = 275,
+ la_op_ammax_du = 276,
+ la_op_ammin_wu = 277,
+ la_op_ammin_du = 278,
+ la_op_amswap_db_w = 279,
+ la_op_amswap_db_d = 280,
+ la_op_amadd_db_w = 281,
+ la_op_amadd_db_d = 282,
+ la_op_amand_db_w = 283,
+ la_op_amand_db_d = 284,
+ la_op_amor_db_w = 285,
+ la_op_amor_db_d = 286,
+ la_op_amxor_db_w = 287,
+ la_op_amxor_db_d = 288,
+ la_op_ammax_db_w = 289,
+ la_op_ammax_db_d = 290,
+ la_op_ammin_db_w = 291,
+ la_op_ammin_db_d = 292,
+ la_op_ammax_db_wu = 293,
+ la_op_ammax_db_du = 294,
+ la_op_ammin_db_wu = 295,
+ la_op_ammin_db_du = 296,
+ la_op_dbar = 297,
+ la_op_ibar = 298,
+ la_op_fldgt_s = 299,
+ la_op_fldgt_d = 300,
+ la_op_fldle_s = 301,
+ la_op_fldle_d = 302,
+ la_op_fstgt_s = 303,
+ la_op_fstgt_d = 304,
+ ls_op_fstle_s = 305,
+ la_op_fstle_d = 306,
+ la_op_ldgt_b = 307,
+ la_op_ldgt_h = 308,
+ la_op_ldgt_w = 309,
+ la_op_ldgt_d = 310,
+ la_op_ldle_b = 311,
+ la_op_ldle_h = 312,
+ la_op_ldle_w = 313,
+ la_op_ldle_d = 314,
+ la_op_stgt_b = 315,
+ la_op_stgt_h = 316,
+ la_op_stgt_w = 317,
+ la_op_stgt_d = 318,
+ la_op_stle_b = 319,
+ la_op_stle_h = 320,
+ la_op_stle_w = 321,
+ la_op_stle_d = 322,
+ la_op_beqz = 323,
+ la_op_bnez = 324,
+ la_op_bceqz = 325,
+ la_op_bcnez = 326,
+ la_op_jirl = 327,
+ la_op_b = 328,
+ la_op_bl = 329,
+ la_op_beq = 330,
+ la_op_bne = 331,
+ la_op_blt = 332,
+ la_op_bge = 333,
+ la_op_bltu = 334,
+ la_op_bgeu = 335,
+
+ /* vz insn */
+ la_op_hvcl = 336,
+
+} la_op;
+
+typedef enum {
+ la_codec_illegal,
+ la_codec_empty,
+ la_codec_2r,
+ la_codec_2r_u5,
+ la_codec_2r_u6,
+ la_codec_2r_2bw,
+ la_codec_2r_2bd,
+ la_codec_3r,
+ la_codec_3r_rd0,
+ la_codec_3r_sa2,
+ la_codec_3r_sa3,
+ la_codec_4r,
+ la_codec_r_im20,
+ la_codec_2r_im16,
+ la_codec_2r_im14,
+ la_codec_2r_im12,
+ la_codec_im5_r_im12,
+ la_codec_2r_im8,
+ la_codec_r_sd,
+ la_codec_r_sj,
+ la_codec_r_cd,
+ la_codec_r_cj,
+ la_codec_r_seq,
+ la_codec_code,
+ la_codec_whint,
+ la_codec_invtlb,
+ la_codec_r_ofs21,
+ la_codec_cj_ofs21,
+ la_codec_ofs26,
+ la_codec_cond,
+ la_codec_sel,
+
+} la_codec;
+
+#define la_fmt_illegal "nte"
+#define la_fmt_empty "nt"
+#define la_fmt_sd_rj "ntA,1"
+#define la_fmt_rd_sj "nt0,B"
+#define la_fmt_rd_rj "nt0,1"
+#define la_fmt_rj_rk "nt1,2"
+#define la_fmt_rj_seq "nt1,x"
+#define la_fmt_rd_si20 "nt0,i(x)"
+#define la_fmt_rd_rj_ui5 "nt0,1,C"
+#define la_fmt_rd_rj_ui6 "nt0,1.C"
+#define la_fmt_rd_rj_level "nt0,1,x"
+#define la_fmt_rd_rj_msbw_lsbw "nt0,1,C,D"
+#define la_fmt_rd_rj_msbd_lsbd "nt0,1,C,D"
+#define la_fmt_rd_rj_si12 "nt0,1,i(x)"
+#define la_fmt_hint_rj_si12 "ntE,1,i(x)"
+#define la_fmt_rd_rj_csr "nt0,1,x"
+#define la_fmt_rd_rj_si14 "nt0,1,i(x)"
+#define la_fmt_rd_rj_si16 "nt0,1,i(x)"
+#define la_fmt_rd_rj_rk "nt0,1,2"
+#define la_fmt_fd_rj_rk "nt3,1,2"
+#define la_fmt_rd_rj_rk_sa2 "nt0,1,2,D"
+#define la_fmt_rd_rj_rk_sa3 "nt0,1,2,D"
+#define la_fmt_fd_rj "nt3,1"
+#define la_fmt_rd_fj "nt0,4"
+#define la_fmt_fd_fj "nt3,4"
+#define la_fmt_fd_fj_si12 "nt3,4,i(x)"
+#define la_fmt_fcsrd_rj "ntF,1"
+#define la_fmt_rd_fcsrs "nt0,G"
+#define la_fmt_cd_fj "ntH,4"
+#define la_fmt_fd_cj "nt3,I"
+#define la_fmt_fd_fj_fk "nt3,4,5"
+#define la_fmt_code "ntJ"
+#define la_fmt_whint "ntx"
+#define la_fmt_invtlb "ntx,1,2" /* op,rj,rk */
+#define la_fmt_offs26 "nto(X)p"
+#define la_fmt_rj_offs21 "nt1,o(X)p"
+#define la_fmt_cj_offs21 "ntQ,o(X)p"
+#define la_fmt_rd_rj_offs16 "nt0,1,o(X)"
+#define la_fmt_rj_rd_offs16 "nt1,0,o(X)p"
+#define la_fmt_s_cd_fj_fk "K.stH,4,5"
+#define la_fmt_d_cd_fj_fk "K.dtH,4,5"
+#define la_fmt_fd_fj_fk_fa "nt3,4,5,6"
+#define la_fmt_fd_fj_fk_ca "nt3,4,5,L"
+#define la_fmt_cop_rj_si12 "ntM,1,i(x)"
+
+/* structures */
+
+typedef struct {
+ uint32_t pc;
+ uint32_t insn;
+ int32_t imm;
+ int32_t imm2;
+ uint16_t op;
+ uint16_t code;
+ uint8_t codec;
+ uint8_t r1;
+ uint8_t r2;
+ uint8_t r3;
+ uint8_t r4;
+ uint8_t bit;
+} la_decode;
+
+typedef struct {
+ const char * const name;
+ const la_codec codec;
+ const char * const format;
+} la_opcode_data;
+
+/* reg names */
+
+const char * const loongarch_r_normal_name[32] = {
+ "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15",
+ "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23",
+ "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31",
+};
+
+const char * const loongarch_f_normal_name[32] = {
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
+};
+
+const char * const loongarch_cr_normal_name[4] = {
+ "$scr0", "$scr1", "$scr2", "$scr3",
+};
+
+const char * const loongarch_c_normal_name[8] = {
+ "$fcc0", "$fcc1", "$fcc2", "$fcc3", "$fcc4", "$fcc5", "$fcc6", "$fcc7",
+};
+
+/* instruction data */
+
+const la_opcode_data opcode_data[] = {
+ { "illegal", la_codec_illegal, la_fmt_illegal },
+ { "gr2scr", la_codec_r_sd, la_fmt_sd_rj },
+ { "scr2gr", la_codec_r_sj, la_fmt_rd_sj },
+ { "clo.w", la_codec_2r, la_fmt_rd_rj },
+ { "clz.w", la_codec_2r, la_fmt_rd_rj },
+ { "cto.w", la_codec_2r, la_fmt_rd_rj },
+ { "ctz.w", la_codec_2r, la_fmt_rd_rj },
+ { "clo.d", la_codec_2r, la_fmt_rd_rj },
+ { "clz.d", la_codec_2r, la_fmt_rd_rj },
+ { "cto.d", la_codec_2r, la_fmt_rd_rj },
+ { "ctz.d", la_codec_2r, la_fmt_rd_rj },
+ { "revb.2h", la_codec_2r, la_fmt_rd_rj },
+ { "revb.4h", la_codec_2r, la_fmt_rd_rj },
+ { "revb.2w", la_codec_2r, la_fmt_rd_rj },
+ { "revb.d", la_codec_2r, la_fmt_rd_rj },
+ { "revh.2w", la_codec_2r, la_fmt_rd_rj },
+ { "revh.d", la_codec_2r, la_fmt_rd_rj },
+ { "bitrev.4b", la_codec_2r, la_fmt_rd_rj },
+ { "bitrev.8b", la_codec_2r, la_fmt_rd_rj },
+ { "bitrev.w", la_codec_2r, la_fmt_rd_rj },
+ { "bitrev.d", la_codec_2r, la_fmt_rd_rj },
+ { "ext.w.h", la_codec_2r, la_fmt_rd_rj },
+ { "ext.w.b", la_codec_2r, la_fmt_rd_rj },
+ { "rdtime.d", la_codec_2r, la_fmt_rd_rj },
+ { "cpucfg", la_codec_2r, la_fmt_rd_rj },
+ { "asrtle.d", la_codec_3r_rd0, la_fmt_rj_rk },
+ { "asrtgt.d", la_codec_3r_rd0, la_fmt_rj_rk },
+ { "alsl.w", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 },
+ { "alsl.wu", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 },
+ { "bytepick.w", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 },
+ { "bytepick.d", la_codec_3r_sa3, la_fmt_rd_rj_rk_sa3 },
+ { "add.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "add.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sub.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sub.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "slt", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sltu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "maskeqz", la_codec_3r, la_fmt_rd_rj_rk },
+ { "masknez", la_codec_3r, la_fmt_rd_rj_rk },
+ { "nor", la_codec_3r, la_fmt_rd_rj_rk },
+ { "and", la_codec_3r, la_fmt_rd_rj_rk },
+ { "or", la_codec_3r, la_fmt_rd_rj_rk },
+ { "xor", la_codec_3r, la_fmt_rd_rj_rk },
+ { "orn", la_codec_3r, la_fmt_rd_rj_rk },
+ { "andn", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sll.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "srl.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sra.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sll.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "srl.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "sra.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "rotr.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "rotr.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mul.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulh.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulh.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mul.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulh.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulh.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulw.d.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mulw.d.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "div.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mod.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "div.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mod.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "div.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mod.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "div.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "mod.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crc.w.b.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crc.w.h.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crc.w.w.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crc.w.d.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crcc.w.b.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crcc.w.h.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crcc.w.w.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "crcc.w.d.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "break", la_codec_code, la_fmt_code },
+ { "dbcl", la_codec_code, la_fmt_code },
+ { "syscall", la_codec_code, la_fmt_code },
+ { "alsl.d", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 },
+ { "slli.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 },
+ { "slli.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 },
+ { "srli.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 },
+ { "srli.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 },
+ { "srai.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 },
+ { "srai.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 },
+ { "rotri.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 },
+ { "rotri.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 },
+ { "bstrins.w", la_codec_2r_2bw, la_fmt_rd_rj_msbw_lsbw },
+ { "bstrpick.w", la_codec_2r_2bw, la_fmt_rd_rj_msbw_lsbw },
+ { "bstrins.d", la_codec_2r_2bd, la_fmt_rd_rj_msbd_lsbd },
+ { "bstrpick.d", la_codec_2r_2bd, la_fmt_rd_rj_msbd_lsbd },
+ { "fadd.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fadd.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fsub.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fsub.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmul.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmul.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fdiv.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fdiv.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmax.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmax.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmin.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmin.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmaxa.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmaxa.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmina.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fmina.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fscaleb.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fscaleb.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fcopysign.s", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fcopysign.d", la_codec_3r, la_fmt_fd_fj_fk },
+ { "fabs.s", la_codec_2r, la_fmt_fd_fj },
+ { "fabs.d", la_codec_2r, la_fmt_fd_fj },
+ { "fneg.s", la_codec_2r, la_fmt_fd_fj },
+ { "fneg.d", la_codec_2r, la_fmt_fd_fj },
+ { "flogb.s", la_codec_2r, la_fmt_fd_fj },
+ { "flogb.d", la_codec_2r, la_fmt_fd_fj },
+ { "fclass.s", la_codec_2r, la_fmt_fd_fj },
+ { "fclass.d", la_codec_2r, la_fmt_fd_fj },
+ { "fsqrt.s", la_codec_2r, la_fmt_fd_fj },
+ { "fsqrt.d", la_codec_2r, la_fmt_fd_fj },
+ { "frecip.s", la_codec_2r, la_fmt_fd_fj },
+ { "frecip.d", la_codec_2r, la_fmt_fd_fj },
+ { "frsqrt.s", la_codec_2r, la_fmt_fd_fj },
+ { "frsqrt.d", la_codec_2r, la_fmt_fd_fj },
+ { "fmov.s", la_codec_2r, la_fmt_fd_fj },
+ { "fmov.d", la_codec_2r, la_fmt_fd_fj },
+ { "movgr2fr.w", la_codec_2r, la_fmt_fd_rj },
+ { "movgr2fr.d", la_codec_2r, la_fmt_fd_rj },
+ { "movgr2frh.w", la_codec_2r, la_fmt_fd_rj },
+ { "movfr2gr.s", la_codec_2r, la_fmt_rd_fj },
+ { "movfr2gr.d", la_codec_2r, la_fmt_rd_fj },
+ { "movfrh2gr.s", la_codec_2r, la_fmt_rd_fj },
+ { "movgr2fcsr", la_codec_2r, la_fmt_fcsrd_rj },
+ { "movfcsr2gr", la_codec_2r, la_fmt_rd_fcsrs },
+ { "movfr2cf", la_codec_r_cd, la_fmt_cd_fj },
+ { "movcf2fr", la_codec_r_cj, la_fmt_fd_cj },
+ { "movgr2cf", la_codec_r_cd, la_fmt_cd_fj },
+ { "movcf2gr", la_codec_r_cj, la_fmt_fd_cj },
+ { "fcvt.s.d", la_codec_2r, la_fmt_fd_fj },
+ { "fcvt.d.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrm.w.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrm.w.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrm.l.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrm.l.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrp.w.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrp.w.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrp.l.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrp.l.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrz.w.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrz.w.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrz.l.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrz.l.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrne.w.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrne.w.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrne.l.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftintrne.l.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftint.w.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftint.w.d", la_codec_2r, la_fmt_fd_fj },
+ { "ftint.l.s", la_codec_2r, la_fmt_fd_fj },
+ { "ftint.l.d", la_codec_2r, la_fmt_fd_fj },
+ { "ffint.s.w", la_codec_2r, la_fmt_fd_fj },
+ { "ffint.s.l", la_codec_2r, la_fmt_fd_fj },
+ { "ffint.d.w", la_codec_2r, la_fmt_fd_fj },
+ { "ffint.d.l", la_codec_2r, la_fmt_fd_fj },
+ { "frint.s", la_codec_2r, la_fmt_fd_fj },
+ { "frint.d", la_codec_2r, la_fmt_fd_fj },
+ { "slti", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "sltui", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "addi.w", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "addi.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "lu52i.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "andi", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ori", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "xori", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "csrxchg", la_codec_2r_im14, la_fmt_rd_rj_csr },
+ { "cacop", la_codec_im5_r_im12, la_fmt_cop_rj_si12 },
+ { "lddir", la_codec_2r_im8, la_fmt_rd_rj_level },
+ { "ldpte", la_codec_r_seq, la_fmt_rj_seq },
+ { "iocsrrd.b", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrrd.h", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrrd.w", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrrd.d", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrwr.b", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrwr.h", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrwr.w", la_codec_2r, la_fmt_rd_rj },
+ { "iocsrwr.d", la_codec_2r, la_fmt_rd_rj },
+ { "tlbclr", la_codec_empty, la_fmt_empty },
+ { "tlbflush", la_codec_empty, la_fmt_empty },
+ { "tlbsrch", la_codec_empty, la_fmt_empty },
+ { "tlbrd", la_codec_empty, la_fmt_empty },
+ { "tlbwr", la_codec_empty, la_fmt_empty },
+ { "tlbfill", la_codec_empty, la_fmt_empty },
+ { "ertn", la_codec_empty, la_fmt_empty },
+ { "idle", la_codec_whint, la_fmt_whint },
+ { "invtlb", la_codec_invtlb, la_fmt_invtlb },
+ { "fmadd.s", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fmadd.d", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fmsub.s", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fmsub.d", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fnmadd.s", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fnmadd.d", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fnmsub.s", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fnmsub.d", la_codec_4r, la_fmt_fd_fj_fk_fa },
+ { "fcmp.cond.s", la_codec_cond, la_fmt_s_cd_fj_fk },
+ { "fcmp.cond.d", la_codec_cond, la_fmt_d_cd_fj_fk },
+ { "fsel", la_codec_sel, la_fmt_fd_fj_fk_ca },
+ { "addu16i.d", la_codec_2r_im16, la_fmt_rd_rj_si16 },
+ { "lu12i.w", la_codec_r_im20, la_fmt_rd_si20 },
+ { "lu32i.d", la_codec_r_im20, la_fmt_rd_si20 },
+ { "pcaddi", la_codec_r_im20, la_fmt_rd_si20 },
+ { "pcalau12i", la_codec_r_im20, la_fmt_rd_si20 },
+ { "pcaddu12i", la_codec_r_im20, la_fmt_rd_si20 },
+ { "pcaddu18i", la_codec_r_im20, la_fmt_rd_si20 },
+ { "ll.w", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "sc.w", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "ll.d", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "sc.d", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "ldptr.w", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "stptr.w", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "ldptr.d", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "stptr.d", la_codec_2r_im14, la_fmt_rd_rj_si14 },
+ { "ld.b", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.h", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.w", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "st.b", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "st.h", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "st.w", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "st.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.bu", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.hu", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ld.wu", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "preld", la_codec_2r_im12, la_fmt_hint_rj_si12 },
+ { "fld.s", la_codec_2r_im12, la_fmt_fd_fj_si12 },
+ { "fst.s", la_codec_2r_im12, la_fmt_fd_fj_si12 },
+ { "fld.d", la_codec_2r_im12, la_fmt_fd_fj_si12 },
+ { "fst.d", la_codec_2r_im12, la_fmt_fd_fj_si12 },
+ { "ldl.w", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ldr.w", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ldl.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ldr.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "stl.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "str.d", la_codec_2r_im12, la_fmt_rd_rj_si12 },
+ { "ldx.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stx.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stx.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stx.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stx.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.bu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.hu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldx.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "fldx.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fldx.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstx.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstx.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "amswap.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amswap.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amadd.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amadd.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amand.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amand.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amor.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amor.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amxor.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amxor.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amswap.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amswap.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amadd.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amadd.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amand.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amand.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amor.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amor.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amxor.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "amxor.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.db.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.db.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.db.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammax.db.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.db.wu", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ammin.db.du", la_codec_3r, la_fmt_rd_rj_rk },
+ { "dbar", la_codec_whint, la_fmt_whint },
+ { "ibar", la_codec_whint, la_fmt_whint },
+ { "fldgt.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fldgt.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fldle.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fldle.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstgt.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstgt.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstle.s", la_codec_3r, la_fmt_fd_rj_rk },
+ { "fstle.d", la_codec_3r, la_fmt_fd_rj_rk },
+ { "ldgt.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldgt.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldgt.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldgt.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldle.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldle.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldle.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "ldle.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stgt.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stgt.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stgt.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stgt.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stle.b", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stle.h", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stle.w", la_codec_3r, la_fmt_rd_rj_rk },
+ { "stle.d", la_codec_3r, la_fmt_rd_rj_rk },
+ { "beqz", la_codec_r_ofs21, la_fmt_rj_offs21 },
+ { "bnez", la_codec_r_ofs21, la_fmt_rj_offs21 },
+ { "bceqz", la_codec_cj_ofs21, la_fmt_cj_offs21 },
+ { "bcnez", la_codec_cj_ofs21, la_fmt_cj_offs21 },
+ { "jirl", la_codec_2r_im16, la_fmt_rd_rj_offs16 },
+ { "b", la_codec_ofs26, la_fmt_offs26 },
+ { "bl", la_codec_ofs26, la_fmt_offs26 },
+ { "beq", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+ { "bne", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+ { "blt", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+ { "bge", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+ { "bltu", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+ { "bgeu", la_codec_2r_im16, la_fmt_rj_rd_offs16 },
+
+ /* vz insn */
+ { "hvcl", la_codec_code, la_fmt_code },
+
+};
+
+
+/* decode opcode */
+
+static void decode_insn_opcode(la_decode *dec)
+{
+ uint32_t insn = dec->insn;
+ uint16_t op = la_op_illegal;
+ switch ((insn >> 26) & 0x3f) {
+ case 0x0:
+ switch ((insn >> 22) & 0xf) {
+ case 0x0:
+ switch ((insn >> 18) & 0xf) {
+ case 0x0:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x2:
+ switch ((insn >> 2) & 0x7) {
+ case 0x0:
+ op = la_op_gr2scr;
+ break;
+ }
+ break;
+ case 0x3:
+ switch ((insn >> 7) & 0x7) {
+ case 0x0:
+ op = la_op_scr2gr;
+ break;
+ }
+ break;
+ case 0x4:
+ op = la_op_clo_w;
+ break;
+ case 0x5:
+ op = la_op_clz_w;
+ break;
+ case 0x6:
+ op = la_op_cto_w;
+ break;
+ case 0x7:
+ op = la_op_ctz_w;
+ break;
+ case 0x8:
+ op = la_op_clo_d;
+ break;
+ case 0x9:
+ op = la_op_clz_d;
+ break;
+ case 0xa:
+ op = la_op_cto_d;
+ break;
+ case 0xb:
+ op = la_op_ctz_d;
+ break;
+ case 0xc:
+ op = la_op_revb_2h;
+ break;
+ case 0xd:
+ op = la_op_revb_4h;
+ break;
+ case 0xe:
+ op = la_op_revb_2w;
+ break;
+ case 0xf:
+ op = la_op_revb_d;
+ break;
+ case 0x10:
+ op = la_op_revh_2w;
+ break;
+ case 0x11:
+ op = la_op_revh_d;
+ break;
+ case 0x12:
+ op = la_op_bitrev_4b;
+ break;
+ case 0x13:
+ op = la_op_bitrev_8b;
+ break;
+ case 0x14:
+ op = la_op_bitrev_w;
+ break;
+ case 0x15:
+ op = la_op_bitrev_d;
+ break;
+ case 0x16:
+ op = la_op_ext_w_h;
+ break;
+ case 0x17:
+ op = la_op_ext_w_b;
+ break;
+ case 0x1a:
+ op = la_op_rdtime_d;
+ break;
+ case 0x1b:
+ op = la_op_cpucfg;
+ break;
+ }
+ break;
+ case 0x2:
+ switch (insn & 0x0000001f) {
+ case 0x00000000:
+ op = la_op_asrtle_d;
+ break;
+ }
+ break;
+ case 0x3:
+ switch (insn & 0x0000001f) {
+ case 0x00000000:
+ op = la_op_asrtgt_d;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x1:
+ switch ((insn >> 17) & 0x1) {
+ case 0x0:
+ op = la_op_alsl_w;
+ break;
+ case 0x1:
+ op = la_op_alsl_wu;
+ break;
+ }
+ break;
+ case 0x2:
+ switch ((insn >> 17) & 0x1) {
+ case 0x0:
+ op = la_op_bytepick_w;
+ break;
+ }
+ break;
+ case 0x3:
+ op = la_op_bytepick_d;
+ break;
+ case 0x4:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_add_w;
+ break;
+ case 0x1:
+ op = la_op_add_d;
+ break;
+ case 0x2:
+ op = la_op_sub_w;
+ break;
+ case 0x3:
+ op = la_op_sub_d;
+ break;
+ case 0x4:
+ op = la_op_slt;
+ break;
+ case 0x5:
+ op = la_op_sltu;
+ break;
+ case 0x6:
+ op = la_op_maskeqz;
+ break;
+ case 0x7:
+ op = la_op_masknez;
+ break;
+ }
+ break;
+ case 0x5:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_nor;
+ break;
+ case 0x1:
+ op = la_op_and;
+ break;
+ case 0x2:
+ op = la_op_or;
+ break;
+ case 0x3:
+ op = la_op_xor;
+ break;
+ case 0x4:
+ op = la_op_orn;
+ break;
+ case 0x5:
+ op = la_op_andn;
+ break;
+ case 0x6:
+ op = la_op_sll_w;
+ break;
+ case 0x7:
+ op = la_op_srl_w;
+ break;
+ }
+ break;
+ case 0x6:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_sra_w;
+ break;
+ case 0x1:
+ op = la_op_sll_d;
+ break;
+ case 0x2:
+ op = la_op_srl_d;
+ break;
+ case 0x3:
+ op = la_op_sra_d;
+ break;
+ case 0x6:
+ op = la_op_rotr_w;
+ break;
+ case 0x7:
+ op = la_op_rotr_d;
+ break;
+ }
+ break;
+ case 0x7:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_mul_w;
+ break;
+ case 0x1:
+ op = la_op_mulh_w;
+ break;
+ case 0x2:
+ op = la_op_mulh_wu;
+ break;
+ case 0x3:
+ op = la_op_mul_d;
+ break;
+ case 0x4:
+ op = la_op_mulh_d;
+ break;
+ case 0x5:
+ op = la_op_mulh_du;
+ break;
+ case 0x6:
+ op = la_op_mulw_d_w;
+ break;
+ case 0x7:
+ op = la_op_mulw_d_wu;
+ break;
+ }
+ break;
+ case 0x8:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_div_w;
+ break;
+ case 0x1:
+ op = la_op_mod_w;
+ break;
+ case 0x2:
+ op = la_op_div_wu;
+ break;
+ case 0x3:
+ op = la_op_mod_wu;
+ break;
+ case 0x4:
+ op = la_op_div_d;
+ break;
+ case 0x5:
+ op = la_op_mod_d;
+ break;
+ case 0x6:
+ op = la_op_div_du;
+ break;
+ case 0x7:
+ op = la_op_mod_du;
+ break;
+ }
+ break;
+ case 0x9:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ op = la_op_crc_w_b_w;
+ break;
+ case 0x1:
+ op = la_op_crc_w_h_w;
+ break;
+ case 0x2:
+ op = la_op_crc_w_w_w;
+ break;
+ case 0x3:
+ op = la_op_crc_w_d_w;
+ break;
+ case 0x4:
+ op = la_op_crcc_w_b_w;
+ break;
+ case 0x5:
+ op = la_op_crcc_w_h_w;
+ break;
+ case 0x6:
+ op = la_op_crcc_w_w_w;
+ break;
+ case 0x7:
+ op = la_op_crcc_w_d_w;
+ break;
+ }
+ break;
+ case 0xa:
+ switch ((insn >> 15) & 0x7) {
+ case 0x4:
+ op = la_op_break;
+ break;
+ case 0x5:
+ op = la_op_dbcl;
+ break;
+ case 0x6:
+ op = la_op_syscall;
+ break;
+ case 0x7:
+ op = la_op_hvcl;
+ break;
+ }
+ break;
+ case 0xb:
+ switch ((insn >> 17) & 0x1) {
+ case 0x0:
+ op = la_op_alsl_d;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x1:
+ switch ((insn >> 21) & 0x1) {
+ case 0x0:
+ switch ((insn >> 16) & 0x1f) {
+ case 0x0:
+ switch ((insn >> 15) & 0x1) {
+ case 0x1:
+ op = la_op_slli_w;
+ break;
+ }
+ break;
+ case 0x1:
+ op = la_op_slli_d;
+ break;
+ case 0x4:
+ switch ((insn >> 15) & 0x1) {
+ case 0x1:
+ op = la_op_srli_w;
+ break;
+ }
+ break;
+ case 0x5:
+ op = la_op_srli_d;
+ break;
+ case 0x8:
+ switch ((insn >> 15) & 0x1) {
+ case 0x1:
+ op = la_op_srai_w;
+ break;
+ }
+ break;
+ case 0x9:
+ op = la_op_srai_d;
+ break;
+ case 0xc:
+ switch ((insn >> 15) & 0x1) {
+ case 0x1:
+ op = la_op_rotri_w;
+ break;
+ }
+ break;
+ case 0xd:
+ op = la_op_rotri_d;
+ break;
+ }
+ break;
+ case 0x1:
+ switch ((insn >> 15) & 0x1) {
+ case 0x0:
+ op = la_op_bstrins_w;
+ break;
+ case 0x1:
+ op = la_op_bstrpick_w;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x2:
+ op = la_op_bstrins_d;
+ break;
+ case 0x3:
+ op = la_op_bstrpick_d;
+ break;
+ case 0x4:
+ switch ((insn >> 15) & 0x7f) {
+ case 0x1:
+ op = la_op_fadd_s;
+ break;
+ case 0x2:
+ op = la_op_fadd_d;
+ break;
+ case 0x5:
+ op = la_op_fsub_s;
+ break;
+ case 0x6:
+ op = la_op_fsub_d;
+ break;
+ case 0x9:
+ op = la_op_fmul_s;
+ break;
+ case 0xa:
+ op = la_op_fmul_d;
+ break;
+ case 0xd:
+ op = la_op_fdiv_s;
+ break;
+ case 0xe:
+ op = la_op_fdiv_d;
+ break;
+ case 0x11:
+ op = la_op_fmax_s;
+ break;
+ case 0x12:
+ op = la_op_fmax_d;
+ break;
+ case 0x15:
+ op = la_op_fmin_s;
+ break;
+ case 0x16:
+ op = la_op_fmin_d;
+ break;
+ case 0x19:
+ op = la_op_fmaxa_s;
+ break;
+ case 0x1a:
+ op = la_op_fmaxa_d;
+ break;
+ case 0x1d:
+ op = la_op_fmina_s;
+ break;
+ case 0x1e:
+ op = la_op_fmina_d;
+ break;
+ case 0x21:
+ op = la_op_fscaleb_s;
+ break;
+ case 0x22:
+ op = la_op_fscaleb_d;
+ break;
+ case 0x25:
+ op = la_op_fcopysign_s;
+ break;
+ case 0x26:
+ op = la_op_fcopysign_d;
+ break;
+ case 0x28:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x1:
+ op = la_op_fabs_s;
+ break;
+ case 0x2:
+ op = la_op_fabs_d;
+ break;
+ case 0x5:
+ op = la_op_fneg_s;
+ break;
+ case 0x6:
+ op = la_op_fneg_d;
+ break;
+ case 0x9:
+ op = la_op_flogb_s;
+ break;
+ case 0xa:
+ op = la_op_flogb_d;
+ break;
+ case 0xd:
+ op = la_op_fclass_s;
+ break;
+ case 0xe:
+ op = la_op_fclass_d;
+ break;
+ case 0x11:
+ op = la_op_fsqrt_s;
+ break;
+ case 0x12:
+ op = la_op_fsqrt_d;
+ break;
+ case 0x15:
+ op = la_op_frecip_s;
+ break;
+ case 0x16:
+ op = la_op_frecip_d;
+ break;
+ case 0x19:
+ op = la_op_frsqrt_s;
+ break;
+ case 0x1a:
+ op = la_op_frsqrt_d;
+ break;
+ }
+ break;
+ case 0x29:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x5:
+ op = la_op_fmov_s;
+ break;
+ case 0x6:
+ op = la_op_fmov_d;
+ break;
+ case 0x9:
+ op = la_op_movgr2fr_w;
+ break;
+ case 0xa:
+ op = la_op_movgr2fr_d;
+ break;
+ case 0xb:
+ op = la_op_movgr2frh_w;
+ break;
+ case 0xd:
+ op = la_op_movfr2gr_s;
+ break;
+ case 0xe:
+ op = la_op_movfr2gr_d;
+ break;
+ case 0xf:
+ op = la_op_movfrh2gr_s;
+ break;
+ case 0x10:
+ op = la_op_movgr2fcsr;
+ break;
+ case 0x12:
+ op = la_op_movfcsr2gr;
+ break;
+ case 0x14:
+ switch ((insn >> 3) & 0x3) {
+ case 0x0:
+ op = la_op_movfr2cf;
+ break;
+ }
+ break;
+ case 0x15:
+ switch ((insn >> 8) & 0x3) {
+ case 0x0:
+ op = la_op_movcf2fr;
+ break;
+ }
+ break;
+ case 0x16:
+ switch ((insn >> 3) & 0x3) {
+ case 0x0:
+ op = la_op_movgr2cf;
+ break;
+ }
+ break;
+ case 0x17:
+ switch ((insn >> 8) & 0x3) {
+ case 0x0:
+ op = la_op_movcf2gr;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x32:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x6:
+ op = la_op_fcvt_s_d;
+ break;
+ case 0x9:
+ op = la_op_fcvt_d_s;
+ break;
+ }
+ break;
+ case 0x34:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x1:
+ op = la_op_ftintrm_w_s;
+ break;
+ case 0x2:
+ op = la_op_ftintrm_w_d;
+ break;
+ case 0x9:
+ op = la_op_ftintrm_l_s;
+ break;
+ case 0xa:
+ op = la_op_ftintrm_l_d;
+ break;
+ case 0x11:
+ op = la_op_ftintrp_w_s;
+ break;
+ case 0x12:
+ op = la_op_ftintrp_w_d;
+ break;
+ case 0x19:
+ op = la_op_ftintrp_l_s;
+ break;
+ case 0x1a:
+ op = la_op_ftintrp_l_d;
+ break;
+ }
+ break;
+ case 0x35:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x1:
+ op = la_op_ftintrz_w_s;
+ break;
+ case 0x2:
+ op = la_op_ftintrz_w_d;
+ break;
+ case 0x9:
+ op = la_op_ftintrz_l_s;
+ break;
+ case 0xa:
+ op = la_op_ftintrz_l_d;
+ break;
+ case 0x11:
+ op = la_op_ftintrne_w_s;
+ break;
+ case 0x12:
+ op = la_op_ftintrne_w_d;
+ break;
+ case 0x19:
+ op = la_op_ftintrne_l_s;
+ break;
+ case 0x1a:
+ op = la_op_ftintrne_l_d;
+ break;
+ }
+ break;
+ case 0x36:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x1:
+ op = la_op_ftint_w_s;
+ break;
+ case 0x2:
+ op = la_op_ftint_w_d;
+ break;
+ case 0x9:
+ op = la_op_ftint_l_s;
+ break;
+ case 0xa:
+ op = la_op_ftint_l_d;
+ break;
+ }
+ break;
+ case 0x3a:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x4:
+ op = la_op_ffint_s_w;
+ break;
+ case 0x6:
+ op = la_op_ffint_s_l;
+ break;
+ case 0x8:
+ op = la_op_ffint_d_w;
+ break;
+ case 0xa:
+ op = la_op_ffint_d_l;
+ break;
+ }
+ break;
+ case 0x3c:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x11:
+ op = la_op_frint_s;
+ break;
+ case 0x12:
+ op = la_op_frint_d;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x8:
+ op = la_op_slti;
+ break;
+ case 0x9:
+ op = la_op_sltui;
+ break;
+ case 0xa:
+ op = la_op_addi_w;
+ break;
+ case 0xb:
+ op = la_op_addi_d;
+ break;
+ case 0xc:
+ op = la_op_lu52i_d;
+ break;
+ case 0xd:
+ op = la_op_addi;
+ break;
+ case 0xe:
+ op = la_op_ori;
+ break;
+ case 0xf:
+ op = la_op_xori;
+ break;
+ }
+ break;
+ case 0x1:
+ switch ((insn >> 24) & 0x3) {
+ case 0x0:
+ op = la_op_csrxchg;
+ break;
+ case 0x2:
+ switch ((insn >> 22) & 0x3) {
+ case 0x0:
+ op = la_op_cacop;
+ break;
+ case 0x1:
+ switch ((insn >> 18) & 0xf) {
+ case 0x0:
+ op = la_op_lddir;
+ break;
+ case 0x1:
+ switch (insn & 0x0000001f) {
+ case 0x00000000:
+ op = la_op_ldpte;
+ break;
+ }
+ break;
+ case 0x2:
+ switch ((insn >> 15) & 0x7) {
+ case 0x0:
+ switch ((insn >> 10) & 0x1f) {
+ case 0x0:
+ op = la_op_iocsrrd_b;
+ break;
+ case 0x1:
+ op = la_op_iocsrrd_h;
+ break;
+ case 0x2:
+ op = la_op_iocsrrd_w;
+ break;
+ case 0x3:
+ op = la_op_iocsrrd_d;
+ break;
+ case 0x4:
+ op = la_op_iocsrwr_b;
+ break;
+ case 0x5:
+ op = la_op_iocsrwr_h;
+ break;
+ case 0x6:
+ op = la_op_iocsrwr_w;
+ break;
+ case 0x7:
+ op = la_op_iocsrwr_d;
+ break;
+ case 0x8:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbclr;
+ break;
+ }
+ break;
+ case 0x9:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbflush;
+ break;
+ }
+ break;
+ case 0xa:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbsrch;
+ break;
+ }
+ break;
+ case 0xb:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbrd;
+ break;
+ }
+ break;
+ case 0xc:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbwr;
+ break;
+ }
+ break;
+ case 0xd:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_tlbfill;
+ break;
+ }
+ break;
+ case 0xe:
+ switch (insn & 0x000003ff) {
+ case 0x00000000:
+ op = la_op_ertn;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x1:
+ op = la_op_idle;
+ break;
+ case 0x3:
+ op = la_op_invtlb;
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x2:
+ switch ((insn >> 20) & 0x3f) {
+ case 0x1:
+ op = la_op_fmadd_s;
+ break;
+ case 0x2:
+ op = la_op_fmadd_d;
+ break;
+ case 0x5:
+ op = la_op_fmsub_s;
+ break;
+ case 0x6:
+ op = la_op_fmsub_d;
+ break;
+ case 0x9:
+ op = la_op_fnmadd_s;
+ break;
+ case 0xa:
+ op = la_op_fnmadd_d;
+ break;
+ case 0xd:
+ op = la_op_fnmsub_s;
+ break;
+ case 0xe:
+ op = la_op_fnmsub_d;
+ break;
+ }
+ break;
+ case 0x3:
+ switch ((insn >> 20) & 0x3f) {
+ case 0x1:
+ switch ((insn >> 3) & 0x3) {
+ case 0x0:
+ op = la_op_fcmp_cond_s;
+ break;
+ }
+ break;
+ case 0x2:
+ switch ((insn >> 3) & 0x3) {
+ case 0x0:
+ op = la_op_fcmp_cond_d;
+ break;
+ }
+ break;
+ case 0x10:
+ switch ((insn >> 18) & 0x3) {
+ case 0x0:
+ op = la_op_fsel;
+ break;
+ }
+ break;
+ }
+ break;
+ case 0x4:
+ op = la_op_addu16i_d;
+ break;
+ case 0x5:
+ switch ((insn >> 25) & 0x1) {
+ case 0x0:
+ op = la_op_lu12i_w;
+ break;
+ case 0x1:
+ op = la_op_lu32i_d;
+ break;
+ }
+ break;
+ case 0x6:
+ switch ((insn >> 25) & 0x1) {
+ case 0x0:
+ op = la_op_pcaddi;
+ break;
+ case 0x1:
+ op = la_op_pcalau12i;
+ break;
+ }
+ break;
+ case 0x7:
+ switch ((insn >> 25) & 0x1) {
+ case 0x0:
+ op = la_op_pcaddu12i;
+ break;
+ case 0x1:
+ op = la_op_pcaddu18i;
+ break;
+ }
+ break;
+ case 0x8:
+ switch ((insn >> 24) & 0x3) {
+ case 0x0:
+ op = la_op_ll_w;
+ break;
+ case 0x1:
+ op = la_op_sc_w;
+ break;
+ case 0x2:
+ op = la_op_ll_d;
+ break;
+ case 0x3:
+ op = la_op_sc_d;
+ break;
+ }
+ break;
+ case 0x9:
+ switch ((insn >> 24) & 0x3) {
+ case 0x0:
+ op = la_op_ldptr_w;
+ break;
+ case 0x1:
+ op = la_op_stptr_w;
+ break;
+ case 0x2:
+ op = la_op_ldptr_d;
+ break;
+ case 0x3:
+ op = la_op_stptr_d;
+ break;
+ }
+ break;
+ case 0xa:
+ switch ((insn >> 22) & 0xf) {
+ case 0x0:
+ op = la_op_ld_b;
+ break;
+ case 0x1:
+ op = la_op_ld_h;
+ break;
+ case 0x2:
+ op = la_op_ld_w;
+ break;
+ case 0x3:
+ op = la_op_ld_d;
+ break;
+ case 0x4:
+ op = la_op_st_b;
+ break;
+ case 0x5:
+ op = la_op_st_h;
+ break;
+ case 0x6:
+ op = la_op_st_w;
+ break;
+ case 0x7:
+ op = la_op_st_d;
+ break;
+ case 0x8:
+ op = la_op_ld_bu;
+ break;
+ case 0x9:
+ op = la_op_ld_hu;
+ break;
+ case 0xa:
+ op = la_op_ld_wu;
+ break;
+ case 0xb:
+ op = la_op_preld;
+ break;
+ case 0xc:
+ op = la_op_fld_s;
+ break;
+ case 0xd:
+ op = la_op_fst_s;
+ break;
+ case 0xe:
+ op = la_op_fld_d;
+ break;
+ case 0xf:
+ op = la_op_fst_d;
+ break;
+ }
+ break;
+ case 0xb:
+ switch ((insn >> 22) & 0xf) {
+ case 0x8:
+ op = la_op_ldl_w;
+ break;
+ case 0x9:
+ op = la_op_ldr_w;
+ break;
+ case 0xa:
+ op = la_op_ldl_d;
+ break;
+ case 0xb:
+ op = la_op_ldr_d;
+ break;
+ case 0xe:
+ op = la_op_stl_d;
+ break;
+ case 0xf:
+ op = la_op_str_d;
+ break;
+ }
+ break;
+ case 0xe:
+ switch ((insn >> 15) & 0x7ff) {
+ case 0x0:
+ op = la_op_ldx_b;
+ break;
+ case 0x8:
+ op = la_op_ldx_h;
+ break;
+ case 0x10:
+ op = la_op_ldx_w;
+ break;
+ case 0x18:
+ op = la_op_ldx_d;
+ break;
+ case 0x20:
+ op = la_op_stx_b;
+ break;
+ case 0x28:
+ op = la_op_stx_h;
+ break;
+ case 0x30:
+ op = la_op_stx_w;
+ break;
+ case 0x38:
+ op = la_op_stx_d;
+ break;
+ case 0x40:
+ op = la_op_ldx_bu;
+ break;
+ case 0x48:
+ op = la_op_ldx_hu;
+ break;
+ case 0x50:
+ op = la_op_ldx_wu;
+ break;
+ case 0x60:
+ op = la_op_fldx_s;
+ break;
+ case 0x68:
+ op = la_op_fldx_d;
+ break;
+ case 0x70:
+ op = la_op_fstx_s;
+ break;
+ case 0x78:
+ op = la_op_fstx_d;
+ break;
+ case 0xc0:
+ op = la_op_amswap_w;
+ break;
+ case 0xc1:
+ op = la_op_amswap_d;
+ break;
+ case 0xc2:
+ op = la_op_amadd_w;
+ break;
+ case 0xc3:
+ op = la_op_amadd_d;
+ break;
+ case 0xc4:
+ op = la_op_amand_w;
+ break;
+ case 0xc5:
+ op = la_op_amand_d;
+ break;
+ case 0xc6:
+ op = la_op_amor_w;
+ break;
+ case 0xc7:
+ op = la_op_amor_d;
+ break;
+ case 0xc8:
+ op = la_op_amxor_w;
+ break;
+ case 0xc9:
+ op = la_op_amxor_d;
+ break;
+ case 0xca:
+ op = la_op_ammax_w;
+ break;
+ case 0xcb:
+ op = la_op_ammax_d;
+ break;
+ case 0xcc:
+ op = la_op_ammin_w;
+ break;
+ case 0xcd:
+ op = la_op_ammin_d;
+ break;
+ case 0xce:
+ op = la_op_ammax_wu;
+ break;
+ case 0xcf:
+ op = la_op_ammax_du;
+ break;
+ case 0xd0:
+ op = la_op_ammin_wu;
+ break;
+ case 0xd1:
+ op = la_op_ammin_du;
+ break;
+ case 0xd2:
+ op = la_op_amswap_db_w;
+ break;
+ case 0xd3:
+ op = la_op_amswap_db_d;
+ break;
+ case 0xd4:
+ op = la_op_amadd_db_w;
+ break;
+ case 0xd5:
+ op = la_op_amadd_db_d;
+ break;
+ case 0xd6:
+ op = la_op_amand_db_w;
+ break;
+ case 0xd7:
+ op = la_op_amand_db_d;
+ break;
+ case 0xd8:
+ op = la_op_amor_db_w;
+ break;
+ case 0xd9:
+ op = la_op_amor_db_d;
+ break;
+ case 0xda:
+ op = la_op_amxor_db_w;
+ break;
+ case 0xdb:
+ op = la_op_amxor_db_d;
+ break;
+ case 0xdc:
+ op = la_op_ammax_db_w;
+ break;
+ case 0xdd:
+ op = la_op_ammax_db_d;
+ break;
+ case 0xde:
+ op = la_op_ammin_db_w;
+ break;
+ case 0xdf:
+ op = la_op_ammin_db_d;
+ break;
+ case 0xe0:
+ op = la_op_ammax_db_wu;
+ break;
+ case 0xe1:
+ op = la_op_ammax_db_du;
+ break;
+ case 0xe2:
+ op = la_op_ammin_db_wu;
+ break;
+ case 0xe3:
+ op = la_op_ammin_db_du;
+ break;
+ case 0xe4:
+ op = la_op_dbar;
+ break;
+ case 0xe5:
+ op = la_op_ibar;
+ break;
+ case 0xe8:
+ op = la_op_fldgt_s;
+ break;
+ case 0xe9:
+ op = la_op_fldgt_d;
+ break;
+ case 0xea:
+ op = la_op_fldle_s;
+ break;
+ case 0xeb:
+ op = la_op_fldle_d;
+ break;
+ case 0xec:
+ op = la_op_fstgt_s;
+ break;
+ case 0xed:
+ op = la_op_fstgt_d;
+ break;
+ case 0xee:
+ op = ls_op_fstle_s;
+ break;
+ case 0xef:
+ op = la_op_fstle_d;
+ break;
+ case 0xf0:
+ op = la_op_ldgt_b;
+ break;
+ case 0xf1:
+ op = la_op_ldgt_h;
+ break;
+ case 0xf2:
+ op = la_op_ldgt_w;
+ break;
+ case 0xf3:
+ op = la_op_ldgt_d;
+ break;
+ case 0xf4:
+ op = la_op_ldle_b;
+ break;
+ case 0xf5:
+ op = la_op_ldle_h;
+ break;
+ case 0xf6:
+ op = la_op_ldle_w;
+ break;
+ case 0xf7:
+ op = la_op_ldle_d;
+ break;
+ case 0xf8:
+ op = la_op_stgt_b;
+ break;
+ case 0xf9:
+ op = la_op_stgt_h;
+ break;
+ case 0xfa:
+ op = la_op_stgt_w;
+ break;
+ case 0xfb:
+ op = la_op_stgt_d;
+ break;
+ case 0xfc:
+ op = la_op_stle_b;
+ break;
+ case 0xfd:
+ op = la_op_stle_h;
+ break;
+ case 0xfe:
+ op = la_op_stle_w;
+ break;
+ case 0xff:
+ op = la_op_stle_d;
+ break;
+ }
+ break;
+ case 0x10:
+ op = la_op_beqz;
+ break;
+ case 0x11:
+ op = la_op_bnez;
+ break;
+ case 0x12:
+ switch ((insn >> 8) & 0x3) {
+ case 0x0:
+ op = la_op_bceqz;
+ break;
+ case 0x1:
+ op = la_op_bcnez;
+ break;
+ }
+ break;
+ case 0x13:
+ op = la_op_jirl;
+ break;
+ case 0x14:
+ op = la_op_b;
+ break;
+ case 0x15:
+ op = la_op_bl;
+ break;
+ case 0x16:
+ op = la_op_beq;
+ break;
+ case 0x17:
+ op = la_op_bne;
+ break;
+ case 0x18:
+ op = la_op_blt;
+ break;
+ case 0x19:
+ op = la_op_bge;
+ break;
+ case 0x1a:
+ op = la_op_bltu;
+ break;
+ case 0x1b:
+ op = la_op_bgeu;
+ break;
+ default:
+ op = la_op_illegal;
+ break;
+ }
+ dec->op = op;
+}
+
+/* operand extractors */
+
/*
 * Immediate-field bit widths; decode_insn_operands stores one of these
 * in la_decode.bit alongside the extracted immediate.
 */
#define IM_5 5
#define IM_8 8
#define IM_12 12
#define IM_14 14
#define IM_15 15
#define IM_16 16
#define IM_20 20
#define IM_21 21
#define IM_26 26
+
/* rd-style register index: instruction bits [4:0]. */
static uint32_t operand_r1(uint32_t insn)
{
    const uint32_t mask = 0x1f;

    return insn & mask;
}
+
/* rj-style register index: instruction bits [9:5]. */
static uint32_t operand_r2(uint32_t insn)
{
    return (insn & (0x1fu << 5)) >> 5;
}
+
/* rk-style register index: instruction bits [14:10]. */
static uint32_t operand_r3(uint32_t insn)
{
    uint32_t shifted = insn >> 10;

    return shifted & 0x1f;
}
+
/* ra-style register index: instruction bits [19:15]. */
static uint32_t operand_r4(uint32_t insn)
{
    return (insn & (0x1fu << 15)) >> 15;
}
+
/* 6-bit unsigned immediate: instruction bits [15:10]. */
static uint32_t operand_u6(uint32_t insn)
{
    uint32_t shifted = insn >> 10;

    return shifted & 0x3f;
}
+
/* First 5-bit bit-index field (bits [14:10]) of the 2r_2bw codec. */
static uint32_t operand_bw1(uint32_t insn)
{
    const uint32_t mask = 0x1f;

    return (insn >> 10) & mask;
}
+
/* Second 5-bit bit-index field (bits [20:16]) of the 2r_2bw codec. */
static uint32_t operand_bw2(uint32_t insn)
{
    return (insn & (0x1fu << 16)) >> 16;
}
+
/* First 6-bit bit-index field (bits [15:10]) of the 2r_2bd codec. */
static uint32_t operand_bd1(uint32_t insn)
{
    uint32_t shifted = insn >> 10;

    return shifted & 0x3f;
}
+
/* Second 6-bit bit-index field (bits [21:16]) of the 2r_2bd codec. */
static uint32_t operand_bd2(uint32_t insn)
{
    return (insn & (0x3fu << 16)) >> 16;
}
+
/* sa2: 2-bit shift amount in bits [16:15] (e.g. alsl.[wd]). */
static uint32_t operand_sa2(uint32_t insn)
{
    const uint32_t mask = 0x3;

    return (insn >> 15) & mask;
}
+
/*
 * sa3: 3-bit shift amount in bits [17:15] (bytepick.d).
 *
 * The previous mask 0x3 dropped the top bit of the field; sa3 is three
 * bits wide per the LoongArch base ISA encoding, so mask with 0x7.
 */
static uint32_t operand_sa3(uint32_t insn)
{
    return (insn >> 15) & 0x7;
}
+
/*
 * si20: 20-bit signed immediate in bits [24:5].
 *
 * Sign extension must treat 1 << 19 (only the sign bit set) as negative
 * too, so the comparison is >=, not > (the old test missed that value).
 */
static int32_t operand_im20(uint32_t insn)
{
    int32_t imm = (int32_t)((insn >> 5) & 0xfffff);
    return imm >= (1 << 19) ? imm - (1 << 20) : imm;
}
+
/*
 * si16: 16-bit signed immediate in bits [25:10].
 * >= so that the exact value 1 << 15 (sign bit only) is negated too.
 */
static int32_t operand_im16(uint32_t insn)
{
    int32_t imm = (int32_t)((insn >> 10) & 0xffff);
    return imm >= (1 << 15) ? imm - (1 << 16) : imm;
}
+
/*
 * si14: 14-bit signed immediate in bits [23:10].
 * >= so that the exact value 1 << 13 (sign bit only) is negated too.
 */
static int32_t operand_im14(uint32_t insn)
{
    int32_t imm = (int32_t)((insn >> 10) & 0x3fff);
    return imm >= (1 << 13) ? imm - (1 << 14) : imm;
}
+
/*
 * si12: 12-bit signed immediate in bits [21:10].
 * >= so that the exact value 1 << 11 (sign bit only) is negated too.
 */
static int32_t operand_im12(uint32_t insn)
{
    int32_t imm = (int32_t)((insn >> 10) & 0xfff);
    return imm >= (1 << 11) ? imm - (1 << 12) : imm;
}
+
/*
 * si8: 8-bit signed immediate in bits [17:10].
 * >= so that the exact value 1 << 7 (sign bit only) is negated too.
 */
static int32_t operand_im8(uint32_t insn)
{
    int32_t imm = (int32_t)((insn >> 10) & 0xff);
    return imm >= (1 << 7) ? imm - (1 << 8) : imm;
}
+
/* sd: 2-bit field in bits [1:0] (printed via loongarch_cr_normal_name). */
static uint32_t operand_sd(uint32_t insn)
{
    const uint32_t mask = 0x3;

    return insn & mask;
}
+
/* sj: 2-bit field in bits [6:5] (printed via loongarch_cr_normal_name). */
static uint32_t operand_sj(uint32_t insn)
{
    return (insn & (0x3u << 5)) >> 5;
}
+
/* cd: 3-bit condition-flag register index in bits [2:0]. */
static uint32_t operand_cd(uint32_t insn)
{
    const uint32_t mask = 0x7;

    return insn & mask;
}
+
/* cj: 3-bit condition-flag register index in bits [7:5]. */
static uint32_t operand_cj(uint32_t insn)
{
    return (insn & (0x7u << 5)) >> 5;
}
+
/* code: 15-bit unsigned immediate in bits [14:0] (e.g. hvcl). */
static uint32_t operand_code(uint32_t insn)
{
    const uint32_t mask = 0x7fff;

    return insn & mask;
}
+
/*
 * 15-bit signed hint operand in bits [14:0] (dbar/ibar/idle).
 * >= so that the exact value 1 << 14 (sign bit only) is negated too.
 */
static int32_t operand_whint(uint32_t insn)
{
    int32_t imm = (int32_t)(insn & 0x7fff);
    return imm >= (1 << 14) ? imm - (1 << 15) : imm;
}
+
/*
 * 5-bit signed invtlb operation code in bits [4:0].
 * >= so that the exact value 1 << 4 (sign bit only) is negated too.
 */
static int32_t operand_invop(uint32_t insn)
{
    int32_t imm = (int32_t)(insn & 0x1f);
    return imm >= (1 << 4) ? imm - (1 << 5) : imm;
}
+
/*
 * 21-bit signed branch offset: high 5 bits in instruction bits [4:0],
 * low 16 bits in bits [25:10].
 * >= so that the exact value 1 << 20 (sign bit only) is negated too.
 */
static int32_t operand_ofs21(uint32_t insn)
{
    int32_t imm = (((int32_t)insn & 0x1f) << 16) |
        ((insn >> 10) & 0xffff);
    return imm >= (1 << 20) ? imm - (1 << 21) : imm;
}
+
/*
 * 26-bit signed branch offset: high 10 bits in instruction bits [9:0],
 * low 16 bits in bits [25:10].
 * >= so that the exact value 1 << 25 (sign bit only) is negated too.
 */
static int32_t operand_ofs26(uint32_t insn)
{
    int32_t imm = (((int32_t)insn & 0x3ff) << 16) |
        ((insn >> 10) & 0xffff);
    return imm >= (1 << 25) ? imm - (1 << 26) : imm;
}
+
/* fcmp condition code: 5-bit field in bits [19:15]. */
static uint32_t operand_fcond(uint32_t insn)
{
    uint32_t shifted = insn >> 15;

    return shifted & 0x1f;
}
+
/* fsel ca field: 3-bit field in bits [17:15]. */
static uint32_t operand_sel(uint32_t insn)
{
    return (insn & (0x7u << 15)) >> 15;
}
+
+/* decode operands */
+
/*
 * decode_insn_operands: extract the operand fields selected by the
 * opcode's codec into dec->r1..r4, dec->imm/dec->bit and dec->code.
 *
 * By the conventions used in format_insn, r1..r4 carry the rd/rj/rk/ra
 * style register or sub-field indexes, and imm/bit carry a sign-extended
 * immediate together with its width in bits.
 */
static void decode_insn_operands(la_decode *dec)
{
    uint32_t insn = dec->insn;
    dec->codec = opcode_data[dec->op].codec;
    switch (dec->codec) {
    case la_codec_illegal:
    case la_codec_empty:
        /* No operands to extract. */
        break;
    case la_codec_2r:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        break;
    case la_codec_2r_u5:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        break;
    case la_codec_2r_u6:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_u6(insn);
        break;
    case la_codec_2r_2bw:
        /* Two registers plus two 5-bit bit-index fields (bstr*.w). */
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_bw1(insn);
        dec->r4 = operand_bw2(insn);
        break;
    case la_codec_2r_2bd:
        /* Two registers plus two 6-bit bit-index fields (bstr*.d). */
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_bd1(insn);
        dec->r4 = operand_bd2(insn);
        break;
    case la_codec_3r:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        break;
    case la_codec_3r_rd0:
        /* rd is hard-wired to 0 for this codec. */
        dec->r1 = 0;
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        break;
    case la_codec_3r_sa2:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        dec->r4 = operand_sa2(insn);
        break;
    case la_codec_3r_sa3:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        dec->r4 = operand_sa3(insn);
        break;
    case la_codec_4r:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        dec->r4 = operand_r4(insn);
        break;
    case la_codec_r_im20:
        dec->r1 = operand_r1(insn);
        dec->imm = operand_im20(insn);
        dec->bit = IM_20;
        break;
    case la_codec_2r_im16:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->imm = operand_im16(insn);
        dec->bit = IM_16;
        break;
    case la_codec_2r_im14:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->imm = operand_im14(insn);
        dec->bit = IM_14;
        break;
    case la_codec_im5_r_im12:
        /* Low 5 bits are a small immediate sub-opcode, not a register. */
        dec->imm2 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->imm = operand_im12(insn);
        dec->bit = IM_12;
        break;
    case la_codec_2r_im12:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->imm = operand_im12(insn);
        dec->bit = IM_12;
        break;
    case la_codec_2r_im8:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->imm = operand_im8(insn);
        dec->bit = IM_8;
        break;
    case la_codec_r_sd:
        dec->r1 = operand_sd(insn);
        dec->r2 = operand_r2(insn);
        break;
    case la_codec_r_sj:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_sj(insn);
        break;
    case la_codec_r_cd:
        dec->r1 = operand_cd(insn);
        dec->r2 = operand_r2(insn);
        break;
    case la_codec_r_cj:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_cj(insn);
        break;
    case la_codec_r_seq:
        dec->r1 = 0;
        dec->r2 = operand_r1(insn);
        dec->imm = operand_im8(insn);
        dec->bit = IM_8;
        break;
    case la_codec_code:
        dec->code = operand_code(insn);
        break;
    case la_codec_whint:
        dec->imm = operand_whint(insn);
        dec->bit = IM_15;
        break;
    case la_codec_invtlb:
        dec->imm = operand_invop(insn);
        dec->bit = IM_5;
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        break;
    case la_codec_r_ofs21:
        dec->imm = operand_ofs21(insn);
        dec->bit = IM_21;
        dec->r2 = operand_r2(insn);
        break;
    case la_codec_cj_ofs21:
        dec->imm = operand_ofs21(insn);
        dec->bit = IM_21;
        dec->r2 = operand_cj(insn);
        break;
    case la_codec_ofs26:
        dec->imm = operand_ofs26(insn);
        dec->bit = IM_26;
        break;
    case la_codec_cond:
        /* fcmp: cd, fj, fk plus the 5-bit condition in r4. */
        dec->r1 = operand_cd(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        dec->r4 = operand_fcond(insn);
        break;
    case la_codec_sel:
        dec->r1 = operand_r1(insn);
        dec->r2 = operand_r2(insn);
        dec->r3 = operand_r3(insn);
        dec->r4 = operand_sel(insn);
        break;
    }
}
+
+/* format instruction */
+
/*
 * Append s2 to s1 without letting s1 grow beyond n bytes total,
 * including the terminating NUL.  Truncates silently when full.
 *
 * The previous guard "n - l1 - 1 > 0" underflowed (size_t arithmetic)
 * whenever the buffer was already full, and strncat(s1, s2, n - l1)
 * could write one byte past the end, since strncat appends a NUL in
 * addition to the byte count.  Bound the copy to the space left.
 */
static void append(char *s1, const char *s2, size_t n)
{
    size_t l1 = strlen(s1);

    if (l1 + 1 < n) {
        strncat(s1, s2, n - l1 - 1);
    }
}
+
+/*
+ * Render the decoded instruction DEC into BUF (at most BUFLEN bytes,
+ * already NUL-initialised by the caller) by walking the opcode's
+ * format string one character at a time: each character selects a
+ * literal, an operand field, or padding.  TAB is the column at which
+ * the operand list starts (selected by 't').
+ */
+static void format_insn(char *buf, size_t buflen, size_t tab, la_decode *dec)
+{
+    char tmp[16];
+    const char *fmt;
+
+    fmt = opcode_data[dec->op].format;
+    while (*fmt) {
+        switch (*fmt) {
+        case 'n': /* name */
+            append(buf, opcode_data[dec->op].name, buflen);
+            break;
+        case 's': /* literal 's' */
+            append(buf, "s", buflen);
+            break;
+        case 'd': /* literal 'd' */
+            append(buf, "d", buflen);
+            break;
+        case 'e': /* illegal: dump the raw instruction word in hex */
+            snprintf(tmp, sizeof(tmp), "%x", dec->insn);
+            append(buf, tmp, buflen);
+            break;
+        case 't': /* pad with spaces up to column TAB */
+            while (strlen(buf) < tab) {
+                append(buf, " ", buflen);
+            }
+            break;
+        case '(':
+            append(buf, "(", buflen);
+            break;
+        case ',':
+            append(buf, ",", buflen);
+            break;
+        case '.':
+            append(buf, ".", buflen);
+            break;
+        case ')':
+            append(buf, ")", buflen);
+            break;
+        case '0': /* rd */
+            append(buf, loongarch_r_normal_name[dec->r1], buflen);
+            break;
+        case '1': /* rj */
+            append(buf, loongarch_r_normal_name[dec->r2], buflen);
+            break;
+        case '2': /* rk */
+            append(buf, loongarch_r_normal_name[dec->r3], buflen);
+            break;
+        case '3': /* fd */
+            append(buf, loongarch_f_normal_name[dec->r1], buflen);
+            break;
+        case '4': /* fj */
+            append(buf, loongarch_f_normal_name[dec->r2], buflen);
+            break;
+        case '5': /* fk */
+            append(buf, loongarch_f_normal_name[dec->r3], buflen);
+            break;
+        case '6': /* fa */
+            append(buf, loongarch_f_normal_name[dec->r4], buflen);
+            break;
+        case 'A': /* sd */
+            append(buf, loongarch_cr_normal_name[dec->r1], buflen);
+            break;
+        case 'B': /* sj */
+            append(buf, loongarch_cr_normal_name[dec->r2], buflen);
+            break;
+        case 'C': /* r3 as raw hex */
+            snprintf(tmp, sizeof(tmp), "%x", dec->r3);
+            append(buf, tmp, buflen);
+            break;
+        case 'D': /* r4 as raw hex */
+            snprintf(tmp, sizeof(tmp), "%x", dec->r4);
+            append(buf, tmp, buflen);
+            break;
+        case 'E': /* r1 as raw hex */
+            snprintf(tmp, sizeof(tmp), "%x", dec->r1);
+            append(buf, tmp, buflen);
+            break;
+        case 'F': /* fcsrd */
+            append(buf, loongarch_r_normal_name[dec->r1], buflen);
+            break;
+        case 'G': /* fcsrs */
+            append(buf, loongarch_r_normal_name[dec->r2], buflen);
+            break;
+        case 'H': /* cd */
+            append(buf, loongarch_c_normal_name[dec->r1], buflen);
+            break;
+        case 'I': /* cj */
+            append(buf, loongarch_c_normal_name[dec->r2], buflen);
+            break;
+        case 'J': /* code */
+            snprintf(tmp, sizeof(tmp), "0x%x", dec->code);
+            append(buf, tmp, buflen);
+            break;
+        case 'K': /* cond: FP-compare condition mnemonic held in r4 */
+            switch (dec->r4) {
+            case 0x0:
+                append(buf, "caf", buflen);
+                break;
+            case 0x1:
+                append(buf, "saf", buflen);
+                break;
+            case 0x2:
+                append(buf, "clt", buflen);
+                break;
+            case 0x3:
+                append(buf, "slt", buflen);
+                break;
+            case 0x4:
+                append(buf, "ceq", buflen);
+                break;
+            case 0x5:
+                append(buf, "seq", buflen);
+                break;
+            case 0x6:
+                append(buf, "cle", buflen);
+                break;
+            case 0x7:
+                append(buf, "sle", buflen);
+                break;
+            case 0x8:
+                append(buf, "cun", buflen);
+                break;
+            case 0x9:
+                append(buf, "sun", buflen);
+                break;
+            case 0xA:
+                append(buf, "cult", buflen);
+                break;
+            case 0xB:
+                append(buf, "sult", buflen);
+                break;
+            case 0xC:
+                append(buf, "cueq", buflen);
+                break;
+            case 0xD:
+                append(buf, "sueq", buflen);
+                break;
+            case 0xE:
+                append(buf, "cule", buflen);
+                break;
+            case 0xF:
+                append(buf, "sule", buflen);
+                break;
+            case 0x10:
+                append(buf, "cne", buflen);
+                break;
+            case 0x11:
+                append(buf, "sne", buflen);
+                break;
+            case 0x14:
+                append(buf, "cor", buflen);
+                break;
+            case 0x15:
+                append(buf, "sor", buflen);
+                break;
+            case 0x18:
+                append(buf, "cune", buflen);
+                break;
+            case 0x19:
+                append(buf, "sune", buflen);
+                break;
+            }
+            break;
+        case 'L': /* ca */
+            append(buf, loongarch_c_normal_name[dec->r4], buflen);
+            break;
+        case 'M': /* cop */
+            snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm2) & 0x1f);
+            append(buf, tmp, buflen);
+            break;
+        case 'i': /* signed immediate, decimal */
+            snprintf(tmp, sizeof(tmp), "%d", dec->imm);
+            append(buf, tmp, buflen);
+            break;
+        case 'o': /* offset: stored in instruction words, scaled to bytes */
+            snprintf(tmp, sizeof(tmp), "%d", (dec->imm) << 2);
+            append(buf, tmp, buflen);
+            break;
+        case 'x': /* immediate in hex, masked to the field width in dec->bit */
+            switch (dec->bit) {
+            case IM_5:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x1f);
+                append(buf, tmp, buflen);
+                break;
+            case IM_8:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_12:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xfff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_14:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x3fff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_15:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x7fff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_16:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xffff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_20:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xfffff);
+                append(buf, tmp, buflen);
+                break;
+            default:
+                snprintf(tmp, sizeof(tmp), "0x%x", dec->imm);
+                append(buf, tmp, buflen);
+                break;
+            }
+            break;
+        case 'X': /* byte offset (imm << 2) in hex, masked to field width */
+            switch (dec->bit) {
+            case IM_16:
+                snprintf(tmp, sizeof(tmp), "0x%x",
+                         ((dec->imm) << 2) & 0xffff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_21:
+                snprintf(tmp, sizeof(tmp), "0x%x",
+                         ((dec->imm) << 2) & 0x1fffff);
+                append(buf, tmp, buflen);
+                break;
+            case IM_26:
+                snprintf(tmp, sizeof(tmp), "0x%x",
+                         ((dec->imm) << 2) & 0x3ffffff);
+                append(buf, tmp, buflen);
+                break;
+            default:
+                snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) << 2);
+                append(buf, tmp, buflen);
+                break;
+            }
+            break;
+        case 'p': /* annotate the absolute branch target: pc + (imm << 2) */
+            snprintf(tmp, sizeof(tmp), " # 0x%"PRIx32"",
+                     dec->pc + ((dec->imm) << 2));
+            append(buf, tmp, buflen);
+            break;
+        default: /* unrecognised format characters are silently ignored */
+            break;
+        }
+        fmt++;
+    }
+}
+
+
+/* disassemble instruction */
+
+/* Decode and format one instruction word fetched from PC into BUF. */
+static void
+disasm_insn(char *buf, size_t buflen, bfd_vma pc, unsigned long int insn)
+{
+    la_decode ctx = { .pc = pc, .insn = insn };
+
+    decode_insn_opcode(&ctx);
+    decode_insn_operands(&ctx);
+    format_insn(buf, buflen, 16, &ctx);
+}
+
+/*
+ * opcodes-style disassembler entry point: read one 32-bit
+ * little-endian instruction at MEMADDR, print "<raw-hex>\t<text>"
+ * through INFO's callbacks, and return the number of bytes consumed
+ * (INSNLEN), or -1 after reporting a memory error.
+ */
+int
+print_insn_loongarch(bfd_vma memaddr, struct disassemble_info *info)
+{
+    char buf[128] = { 0 };
+    bfd_byte buffer[INSNLEN];
+    unsigned long insn;
+    int status;
+
+    status = (*info->read_memory_func)(memaddr, buffer, INSNLEN, info);
+    if (status != 0) {
+        (*info->memory_error_func)(status, memaddr, info);
+        return -1;
+    }
+    insn = (uint32_t) bfd_getl32(buffer);
+    /*
+     * "insn" is unsigned long; passing it directly to a PRIx64
+     * conversion (as the original did) is undefined behaviour on
+     * hosts where unsigned long is 32 bits.  Cast to match the format.
+     */
+    (*info->fprintf_func)(info->stream, "%08" PRIx64 " ", (uint64_t) insn);
+    disasm_insn(buf, sizeof(buf), memaddr, insn);
+    (*info->fprintf_func)(info->stream, "\t%s", buf);
+    return INSNLEN;
+}
diff --git a/disas/meson.build b/disas/meson.build
index 449f99e1de63b3bab10da894b4d4e681226b6618..06a69d9d726699414b5ddae566c12e2a468ec547 100644
--- a/disas/meson.build
+++ b/disas/meson.build
@@ -12,6 +12,7 @@ common_ss.add(when: 'CONFIG_I386_DIS', if_true: files('i386.c'))
common_ss.add(when: 'CONFIG_M68K_DIS', if_true: files('m68k.c'))
common_ss.add(when: 'CONFIG_MICROBLAZE_DIS', if_true: files('microblaze.c'))
common_ss.add(when: 'CONFIG_MIPS_DIS', if_true: files('mips.c'))
+common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: files('loongarch.c'))
common_ss.add(when: 'CONFIG_NANOMIPS_DIS', if_true: files('nanomips.cpp'))
common_ss.add(when: 'CONFIG_NIOS2_DIS', if_true: files('nios2.c'))
common_ss.add(when: 'CONFIG_PPC_DIS', if_true: files('ppc.c'))
diff --git a/gdb-xml/loongarch-base32.xml b/gdb-xml/loongarch-base32.xml
new file mode 100644
index 0000000000000000000000000000000000000000..04891e023f641b0001cec312deca5ddde2e5d62b
--- /dev/null
+++ b/gdb-xml/loongarch-base32.xml
@@ -0,0 +1,43 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/gdb-xml/loongarch-base64.xml b/gdb-xml/loongarch-base64.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6308fb6ecb88901d17e8e553e86d4cfd7ca79120
--- /dev/null
+++ b/gdb-xml/loongarch-base64.xml
@@ -0,0 +1,43 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/gdb-xml/loongarch-fpu32.xml b/gdb-xml/loongarch-fpu32.xml
new file mode 100644
index 0000000000000000000000000000000000000000..a5b4d80e6c14b5ddafc1832fe8728c4340f1d944
--- /dev/null
+++ b/gdb-xml/loongarch-fpu32.xml
@@ -0,0 +1,52 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/gdb-xml/loongarch-fpu64.xml b/gdb-xml/loongarch-fpu64.xml
new file mode 100644
index 0000000000000000000000000000000000000000..74ab55a015014dd79a4eadf098862efb1a8694a0
--- /dev/null
+++ b/gdb-xml/loongarch-fpu64.xml
@@ -0,0 +1,57 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig
index 622b0b50b7582888911375ec1f94d771f099a0c6..2f2fb33a7b36b365fadbeaa278faa143d05f667f 100644
--- a/hw/acpi/Kconfig
+++ b/hw/acpi/Kconfig
@@ -15,6 +15,14 @@ config ACPI_X86_ICH
bool
select ACPI_X86
+config ACPI_LOONGARCH
+ bool
+ select ACPI
+ select ACPI_CPU_HOTPLUG
+ select ACPI_MEMORY_HOTPLUG
+ select ACPI_PIIX4
+ select ACPI_PCIHP
+
config ACPI_CPU_HOTPLUG
bool
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
index b20903ea303f27130fb3d570278ddf1322ae5a2c..cd73fab65ba21ae0fc6c408c4ac5e419e76a719e 100644
--- a/hw/acpi/cpu.c
+++ b/hw/acpi/cpu.c
@@ -371,14 +371,25 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));
crs = aml_resource_template();
+/*
+ * FIXME(review): "__loongarch__" is a *host compiler* predefine, so
+ * these #ifdefs select the guest's CPU-hotplug AML layout based on the
+ * architecture QEMU was *built on*, not on the machine being emulated.
+ * A runtime flag on the machine (or a target-specific build of this
+ * file) would be the correct switch — confirm the intent.
+ */
+#ifdef __loongarch__
+    aml_append(crs, aml_memory32_fixed(io_base,
+               ACPI_CPU_HOTPLUG_REG_LEN, AML_READ_WRITE));
+#else
     aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
                            ACPI_CPU_HOTPLUG_REG_LEN));
+#endif
     aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));
 
     /* declare CPU hotplug MMIO region with related access fields */
+#ifdef __loongarch__
+    aml_append(cpu_ctrl_dev,
+        aml_operation_region("PRST", AML_SYSTEM_MEMORY, aml_int(io_base),
+                             ACPI_CPU_HOTPLUG_REG_LEN));
+#else
     aml_append(cpu_ctrl_dev,
         aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
                              ACPI_CPU_HOTPLUG_REG_LEN));
+#endif
field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
AML_WRITE_AS_ZEROS);
diff --git a/hw/acpi/larch_7a.c b/hw/acpi/larch_7a.c
new file mode 100644
index 0000000000000000000000000000000000000000..35d4a752664de13ae0d9ea72ea3a066a15d8a3e7
--- /dev/null
+++ b/hw/acpi/larch_7a.c
@@ -0,0 +1,600 @@
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "sysemu/reset.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ls7a.h"
+#include "hw/nvram/fw_cfg.h"
+#include "qemu/config-file.h"
+#include "qapi/opts-visitor.h"
+#include "qapi/qapi-events-run-state.h"
+#include "qapi/error.h"
+#include "hw/loongarch/ls7a.h"
+#include "hw/mem/pc-dimm.h"
+#include "hw/mem/nvdimm.h"
+#include "migration/vmstate.h"
+
+/* ACPIREGS callback: recompute the SCI line level after a PM1/GPE change. */
+static void ls7a_pm_update_sci_fn(ACPIREGS *regs)
+{
+    LS7APCIPMRegs *pm = container_of(regs, LS7APCIPMRegs, acpi_regs);
+    acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static uint64_t ls7a_gpe_readb(void *opaque, hwaddr addr, unsigned width)
+{
+ LS7APCIPMRegs *pm = opaque;
+ return acpi_gpe_ioport_readb(&pm->acpi_regs, addr);
+}
+
+static void ls7a_gpe_writeb(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ LS7APCIPMRegs *pm = opaque;
+ acpi_gpe_ioport_writeb(&pm->acpi_regs, addr, val);
+ acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static const MemoryRegionOps ls7a_gpe_ops = {
+ .read = ls7a_gpe_readb,
+ .write = ls7a_gpe_writeb,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 1,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+#define VMSTATE_GPE_ARRAY(_field, _state) \
+ { \
+ .name = (stringify(_field)), \
+ .version_id = 0, \
+ .num = ACPI_GPE0_LEN, \
+ .info = &vmstate_info_uint8, \
+ .size = sizeof(uint8_t), \
+ .flags = VMS_ARRAY | VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, uint8_t), \
+ }
+
+static uint64_t ls7a_reset_readw(void *opaque, hwaddr addr, unsigned width)
+{
+ return 0;
+}
+
+/* Reset register: any write with bit 0 set requests a guest reset. */
+static void ls7a_reset_writew(void *opaque, hwaddr addr, uint64_t val,
+                              unsigned width)
+{
+    if (!(val & 1)) {
+        return;
+    }
+    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+}
+
+static const MemoryRegionOps ls7a_reset_ops = {
+ .read = ls7a_reset_readw,
+ .write = ls7a_reset_writew,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static bool vmstate_test_use_memhp(void *opaque)
+{
+ LS7APCIPMRegs *s = opaque;
+ return s->acpi_memory_hotplug.is_enabled;
+}
+
+static const VMStateDescription vmstate_memhp_state = {
+ .name = "ls7a_pm/memhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = vmstate_test_use_memhp,
+ .fields = (VMStateField[]) {
+ VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cpuhp_state = {
+ .name = "ls7a_pm/cpuhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU_HOTPLUG(cpuhp_state, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ls7a_pm = {
+ .name = "ls7a_pm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(acpi_regs.pm1.evt.sts, LS7APCIPMRegs),
+ VMSTATE_UINT16(acpi_regs.pm1.evt.en, LS7APCIPMRegs),
+ VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, LS7APCIPMRegs),
+ VMSTATE_TIMER_PTR(acpi_regs.tmr.timer, LS7APCIPMRegs),
+ VMSTATE_INT64(acpi_regs.tmr.overflow_time, LS7APCIPMRegs),
+ VMSTATE_GPE_ARRAY(acpi_regs.gpe.sts, LS7APCIPMRegs),
+ VMSTATE_GPE_ARRAY(acpi_regs.gpe.en, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_memhp_state,
+ &vmstate_cpuhp_state,
+ NULL
+ }
+};
+
+/* Current ACPI PM timer tick count derived from the virtual clock. */
+static inline int64_t acpi_pm_tmr_get_clock(void)
+{
+    return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY,
+                    NANOSECONDS_PER_SECOND);
+}
+
+/* TMR_VAL register read: the tick counter truncated to 24 bits. */
+static uint32_t acpi_pm_tmr_get(ACPIREGS *ar)
+{
+    uint32_t d = acpi_pm_tmr_get_clock();
+    return d & 0xffffff;
+}
+
+static void acpi_pm_tmr_timer(void *opaque)
+{
+ ACPIREGS *ar = opaque;
+ qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL);
+ ar->tmr.update_sci(ar);
+}
+
+static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width)
+{
+ return acpi_pm_tmr_get(opaque);
+}
+
+static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ /* nothing */
+}
+
+static const MemoryRegionOps acpi_pm_tmr_ops = {
+ .read = acpi_pm_tmr_read,
+ .write = acpi_pm_tmr_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ls7a_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
+ MemoryRegion *parent, uint64_t offset)
+{
+ ar->tmr.update_sci = update_sci;
+ ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar);
+ memory_region_init_io(&ar->tmr.io, memory_region_owner(parent),
+ &acpi_pm_tmr_ops, ar, "acpi-tmr", 4);
+ memory_region_add_subregion(parent, offset, &ar->tmr.io);
+}
+
+static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
+{
+ uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar);
+ if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) {
+ /* if TMRSTS is reset, then compute the new overflow time */
+ acpi_pm_tmr_calc_overflow_time(ar);
+ }
+ ar->pm1.evt.sts &= ~val;
+}
+
+static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ switch (addr) {
+ case 0:
+ return acpi_pm1_evt_get_sts(ar);
+ case 4:
+ return ar->pm1.evt.en;
+ default:
+ return 0;
+ }
+}
+
+static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val)
+{
+ ar->pm1.evt.en = val;
+ qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC,
+ val & ACPI_BITMASK_RT_CLOCK_ENABLE);
+ qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER,
+ val & ACPI_BITMASK_TIMER_ENABLE);
+}
+
+static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ switch (addr) {
+ case 0:
+ acpi_pm1_evt_write_sts(ar, val);
+ ar->pm1.evt.update_sci(ar);
+ break;
+ case 4:
+ acpi_pm1_evt_write_en(ar, val);
+ ar->pm1.evt.update_sci(ar);
+ break;
+ }
+}
+
+static const MemoryRegionOps acpi_pm_evt_ops = {
+ .read = acpi_pm_evt_read,
+ .write = acpi_pm_evt_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ls7a_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
+ MemoryRegion *parent, uint64_t offset)
+{
+ ar->pm1.evt.update_sci = update_sci;
+ memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent),
+ &acpi_pm_evt_ops, ar, "acpi-evt", 8);
+ memory_region_add_subregion(parent, offset, &ar->pm1.evt.io);
+}
+
+static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ return ar->pm1.cnt.cnt;
+}
+
+/*
+ * ACPI PM1aCNT write handler.  SLP_EN is write-only and never stored;
+ * when it is set, bits 12:10 (SLP_TYP) select the requested sleep state.
+ */
+static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val)
+{
+    ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
+    if (val & ACPI_BITMASK_SLEEP_ENABLE) {
+        /* change suspend type */
+        uint16_t sus_typ = (val >> 10) & 7;  /* SLP_TYP field */
+        switch (sus_typ) {
+        /* s3,s4 not support */
+        case 5:
+        case 6:
+            warn_report("acpi s3,s4 state not support");
+            break;
+        /* s5: soft off */
+        case 7:
+            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+            break;
+        default:
+            break;
+        }
+    }
+}
+
+static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ acpi_pm1_cnt_write(opaque, val);
+}
+
+static const MemoryRegionOps acpi_pm_cnt_ops = {
+ .read = acpi_pm_cnt_read,
+ .write = acpi_pm_cnt_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void acpi_notify_wakeup(Notifier *notifier, void *data)
+{
+ ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup);
+ WakeupReason *reason = data;
+
+ switch (*reason) {
+ case QEMU_WAKEUP_REASON_RTC:
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS);
+ break;
+ case QEMU_WAKEUP_REASON_PMTIMER:
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS);
+ break;
+ case QEMU_WAKEUP_REASON_OTHER:
+ /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
+ * Pretend that resume was caused by power button */
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Initialise the PM1 control block at PARENT+OFFSET, register the
+ * wakeup notifier, and publish the supported sleep states to firmware
+ * via the "etc/system-states" fw_cfg file: one byte per state S0..S5,
+ * bit 7 = state enabled, low bits = SLP_TYP value (same convention as
+ * the x86 ACPI code — the 128/129 literals below encode that).
+ */
+static void ls7a_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent,
+        bool disable_s3, bool disable_s4, uint8_t s4_val, uint64_t offset)
+{
+    FWCfgState *fw_cfg;
+
+    ar->pm1.cnt.s4_val = s4_val;
+    ar->wakeup.notify = acpi_notify_wakeup;
+    qemu_register_wakeup_notifier(&ar->wakeup);
+    memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent),
+                          &acpi_pm_cnt_ops, ar, "acpi-cnt", 4);
+    memory_region_add_subregion(parent, offset, &ar->pm1.cnt.io);
+
+    fw_cfg = fw_cfg_find();
+    if (fw_cfg) {
+        uint8_t suspend[6] = {128, 0, 0, 129, 128, 128};
+        suspend[3] = 1 | ((!disable_s3) << 7);       /* S3 entry */
+        suspend[4] = s4_val | ((!disable_s4) << 7);  /* S4 entry */
+        fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6);
+    }
+}
+
+static void ls7a_pm_reset(void *opaque)
+{
+ LS7APCIPMRegs *pm = opaque;
+
+ acpi_pm1_evt_reset(&pm->acpi_regs);
+ acpi_pm1_cnt_reset(&pm->acpi_regs);
+ acpi_pm_tmr_reset(&pm->acpi_regs);
+ acpi_gpe_reset(&pm->acpi_regs);
+
+ acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static void pm_powerdown_req(Notifier *n, void *opaque)
+{
+ LS7APCIPMRegs *pm = container_of(n, LS7APCIPMRegs, powerdown_notifier);
+
+ acpi_pm1_evt_power_down(&pm->acpi_regs);
+}
+
+/*
+ * Map and wire up the LS7A ACPI PM register block at ACPI_IO_BASE:
+ * PM timer, PM1 event and control blocks, GPE0, the reset register,
+ * and the CPU/memory hotplug regions; hook up reset and powerdown
+ * plumbing.  PIC is the board's interrupt line array used to resolve
+ * the SCI.
+ */
+void ls7a_pm_init(LS7APCIPMRegs *pm, qemu_irq *pic)
+{
+    unsigned long base, gpe_len, acpi_aci_irq;
+
+    /* ls7a board acpi hardware info, including
+     * acpi system io base address
+     * acpi gpe length
+     * acpi sci irq number
+     */
+    base = ACPI_IO_BASE;
+    gpe_len = ACPI_GPE0_LEN;
+    acpi_aci_irq = ACPI_SCI_IRQ;
+
+    /* NOTE(review): pic[] appears to be indexed from GSI 64 — confirm
+     * against the board interrupt-controller wiring. */
+    pm->irq = pic[acpi_aci_irq - 64];
+    memory_region_init(&pm->iomem, NULL, "ls7a_pm", ACPI_IO_SIZE);
+    memory_region_add_subregion(get_system_memory(), base, &pm->iomem);
+
+    cpu_hotplug_hw_init(get_system_memory(), NULL,
+                        &pm->cpuhp_state, CPU_HOTPLUG_BASE);
+
+    ls7a_pm_tmr_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem,
+                     LS7A_PM_TMR_BLK);
+    ls7a_pm1_evt_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem,
+                      LS7A_PM_EVT_BLK);
+    ls7a_pm1_cnt_init(&pm->acpi_regs, &pm->iomem, false, false, 2,
+                      LS7A_PM_CNT_BLK);
+
+    acpi_gpe_init(&pm->acpi_regs, gpe_len);
+    memory_region_init_io(&pm->iomem_gpe, NULL, &ls7a_gpe_ops, pm,
+                          "acpi-gpe0", gpe_len);
+    memory_region_add_subregion(&pm->iomem, LS7A_GPE0_STS_REG, &pm->iomem_gpe);
+
+    memory_region_init_io(&pm->iomem_reset, NULL, &ls7a_reset_ops, pm,
+                          "acpi-reset", 4);
+    memory_region_add_subregion(&pm->iomem, LS7A_GPE0_RESET_REG,
+                                &pm->iomem_reset);
+
+    qemu_register_reset(ls7a_pm_reset, pm);
+
+    pm->powerdown_notifier.notify = pm_powerdown_req;
+    qemu_register_powerdown_notifier(&pm->powerdown_notifier);
+
+    if (pm->acpi_memory_hotplug.is_enabled) {
+        acpi_memory_hotplug_init(get_system_memory(), NULL,
+                                 &pm->acpi_memory_hotplug,
+                                 MEMORY_HOTPLUG_BASE);
+    }
+}
+
+
+static void ls7a_pm_get_gpe0_blk(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = ACPI_IO_BASE + LS7A_GPE0_STS_REG;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static bool ls7a_pm_get_memory_hotplug_support(Object *obj, Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(obj);
+
+ return ls7a->pm.acpi_memory_hotplug.is_enabled;
+}
+
+static void ls7a_pm_set_memory_hotplug_support(Object *obj, bool value,
+ Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(obj);
+
+ ls7a->pm.acpi_memory_hotplug.is_enabled = value;
+}
+
+static void ls7a_pm_get_disable_s3(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->disable_s3;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_disable_s3(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->disable_s3 = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void ls7a_pm_get_disable_s4(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->disable_s4;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_disable_s4(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->disable_s4 = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void ls7a_pm_get_s4_val(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->s4_val;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_s4_val(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->s4_val = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Export the standard ACPI PM QOM properties on OBJ and set the
+ * LS7A defaults (memory hotplug enabled, S3/S4 enabled, S4 SLP_TYP 2).
+ */
+void ls7a_pm_add_properties(Object *obj, LS7APCIPMRegs *pm, Error **errp)
+{
+    static const uint32_t gpe0_len = ACPI_GPE0_LEN;
+    pm->acpi_memory_hotplug.is_enabled = true;
+    pm->disable_s3 = 0;
+    pm->disable_s4 = 0;
+    pm->s4_val = 2;
+
+    object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
+                                   &pm->pm_io_base, OBJ_PROP_FLAG_READ);
+    /*
+     * The getter (ls7a_pm_get_gpe0_blk) visits a uint64, so advertise
+     * the property as "uint64"; the original declared "uint32", which
+     * disagreed with what the getter actually produces.
+     */
+    object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint64",
+                        ls7a_pm_get_gpe0_blk,
+                        NULL, NULL, pm);
+    object_property_add_uint32_ptr(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
+                                   &gpe0_len, OBJ_PROP_FLAG_READ);
+    object_property_add_bool(obj, "memory-hotplug-support",
+                             ls7a_pm_get_memory_hotplug_support,
+                             ls7a_pm_set_memory_hotplug_support);
+    object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8",
+                        ls7a_pm_get_disable_s3,
+                        ls7a_pm_set_disable_s3,
+                        NULL, pm);
+    object_property_add(obj, ACPI_PM_PROP_S4_DISABLED, "uint8",
+                        ls7a_pm_get_disable_s4,
+                        ls7a_pm_set_disable_s4,
+                        NULL, pm);
+    object_property_add(obj, ACPI_PM_PROP_S4_VAL, "uint8",
+                        ls7a_pm_get_s4_val,
+                        ls7a_pm_set_s4_val,
+                        NULL, pm);
+}
+
+/*
+ * ACPI hotplug "plug" callback: route DIMM/NVDIMM plug events to the
+ * memory-hotplug helpers (when enabled) and CPU plug events to the
+ * CPU-hotplug helper; any other device type is rejected with an error.
+ */
+void ls7a_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+                            Error **errp)
+{
+    LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+    if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+        object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+        if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+            nvdimm_acpi_plug_cb(hotplug_dev, dev);
+        } else {
+            acpi_memory_plug_cb(hotplug_dev, &ls7a->pm.acpi_memory_hotplug,
+                                dev, errp);
+        }
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+        acpi_cpu_plug_cb(hotplug_dev, &ls7a->pm.cpuhp_state, dev, errp);
+    } else {
+        error_setg(errp, "acpi: device plug request for not supported device"
+                   " type: %s", object_get_typename(OBJECT(dev)));
+    }
+}
+
+void ls7a_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+ if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+ object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ acpi_memory_unplug_request_cb(hotplug_dev,
+ &ls7a->pm.acpi_memory_hotplug, dev,
+ errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_request_cb(hotplug_dev, &ls7a->pm.cpuhp_state,
+ dev, errp);
+ } else {
+ error_setg(errp, "acpi: device unplug request for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+}
+
+void ls7a_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+ if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+ object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ acpi_memory_unplug_cb(&ls7a->pm.acpi_memory_hotplug, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_cb(&ls7a->pm.cpuhp_state, dev, errp);
+ } else {
+ error_setg(errp, "acpi: device unplug for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+}
+
+void ls7a_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev));
+
+ acpi_memory_ospm_status(&ls7a->pm.acpi_memory_hotplug, list);
+ acpi_cpu_ospm_status(&ls7a->pm.cpuhp_state, list);
+}
+
+void ls7a_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev));
+
+ acpi_send_gpe_event(&ls7a->pm.acpi_regs, ls7a->pm.irq, ev);
+}
+
+
+
diff --git a/hw/acpi/ls7a.c b/hw/acpi/ls7a.c
new file mode 100644
index 0000000000000000000000000000000000000000..2de50ccb9c07e2503cd87854f91a29e41f6cd604
--- /dev/null
+++ b/hw/acpi/ls7a.c
@@ -0,0 +1,598 @@
+#include "qemu/osdep.h"
+#include "sysemu/sysemu.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ls7a.h"
+#include "hw/nvram/fw_cfg.h"
+#include "qemu/config-file.h"
+#include "qapi/opts-visitor.h"
+#include "qapi/qapi-events-run-state.h"
+#include "qapi/error.h"
+#include "hw/mips/ls7a.h"
+#include "hw/mem/pc-dimm.h"
+#include "hw/mem/nvdimm.h"
+
+static void ls7a_pm_update_sci_fn(ACPIREGS *regs)
+{
+ LS7APCIPMRegs *pm = container_of(regs, LS7APCIPMRegs, acpi_regs);
+ acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static uint64_t ls7a_gpe_readb(void *opaque, hwaddr addr, unsigned width)
+{
+ LS7APCIPMRegs *pm = opaque;
+ return acpi_gpe_ioport_readb(&pm->acpi_regs, addr);
+}
+
+static void ls7a_gpe_writeb(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ LS7APCIPMRegs *pm = opaque;
+ acpi_gpe_ioport_writeb(&pm->acpi_regs, addr, val);
+ acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static const MemoryRegionOps ls7a_gpe_ops = {
+ .read = ls7a_gpe_readb,
+ .write = ls7a_gpe_writeb,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 1,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+#define VMSTATE_GPE_ARRAY(_field, _state) \
+ { \
+ .name = (stringify(_field)), \
+ .version_id = 0, \
+ .num = ACPI_GPE0_LEN, \
+ .info = &vmstate_info_uint8, \
+ .size = sizeof(uint8_t), \
+ .flags = VMS_ARRAY | VMS_POINTER, \
+ .offset = vmstate_offset_pointer(_state, _field, uint8_t), \
+ }
+
+static uint64_t ls7a_reset_readw(void *opaque, hwaddr addr, unsigned width)
+{
+ return 0;
+}
+
+static void ls7a_reset_writew(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ if (val & 1) {
+ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
+ return;
+ }
+}
+
+static const MemoryRegionOps ls7a_reset_ops = {
+ .read = ls7a_reset_readw,
+ .write = ls7a_reset_writew,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static bool vmstate_test_use_memhp(void *opaque)
+{
+ LS7APCIPMRegs *s = opaque;
+ return s->acpi_memory_hotplug.is_enabled;
+}
+
+static const VMStateDescription vmstate_memhp_state = {
+ .name = "ls7a_pm/memhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = vmstate_test_use_memhp,
+ .fields = (VMStateField[]) {
+ VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cpuhp_state = {
+ .name = "ls7a_pm/cpuhp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU_HOTPLUG(cpuhp_state, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ls7a_pm = {
+ .name = "ls7a_pm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(acpi_regs.pm1.evt.sts, LS7APCIPMRegs),
+ VMSTATE_UINT16(acpi_regs.pm1.evt.en, LS7APCIPMRegs),
+ VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, LS7APCIPMRegs),
+ VMSTATE_TIMER_PTR(acpi_regs.tmr.timer, LS7APCIPMRegs),
+ VMSTATE_INT64(acpi_regs.tmr.overflow_time, LS7APCIPMRegs),
+ VMSTATE_GPE_ARRAY(acpi_regs.gpe.sts, LS7APCIPMRegs),
+ VMSTATE_GPE_ARRAY(acpi_regs.gpe.en, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_memhp_state,
+ &vmstate_cpuhp_state,
+ NULL
+ }
+};
+
+static inline int64_t acpi_pm_tmr_get_clock(void)
+{
+ return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY,
+ NANOSECONDS_PER_SECOND);
+}
+
+static uint32_t acpi_pm_tmr_get(ACPIREGS *ar)
+{
+ uint32_t d = acpi_pm_tmr_get_clock();
+ return d & 0xffffff;
+}
+
+static void acpi_pm_tmr_timer(void *opaque)
+{
+ ACPIREGS *ar = opaque;
+ qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL);
+ ar->tmr.update_sci(ar);
+}
+
+static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width)
+{
+ return acpi_pm_tmr_get(opaque);
+}
+
+static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ /* nothing */
+}
+
+static const MemoryRegionOps acpi_pm_tmr_ops = {
+ .read = acpi_pm_tmr_read,
+ .write = acpi_pm_tmr_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ls7a_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
+ MemoryRegion *parent, uint64_t offset)
+{
+ ar->tmr.update_sci = update_sci;
+ ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar);
+ memory_region_init_io(&ar->tmr.io, memory_region_owner(parent),
+ &acpi_pm_tmr_ops, ar, "acpi-tmr", 4);
+ memory_region_add_subregion(parent, offset, &ar->tmr.io);
+}
+
+static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val)
+{
+ uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar);
+ if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) {
+ /* if TMRSTS is reset, then compute the new overflow time */
+ acpi_pm_tmr_calc_overflow_time(ar);
+ }
+ ar->pm1.evt.sts &= ~val;
+}
+
+static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ switch (addr) {
+ case 0:
+ return acpi_pm1_evt_get_sts(ar);
+ case 4:
+ return ar->pm1.evt.en;
+ default:
+ return 0;
+ }
+}
+
+static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val)
+{
+ ar->pm1.evt.en = val;
+ qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC,
+ val & ACPI_BITMASK_RT_CLOCK_ENABLE);
+ qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER,
+ val & ACPI_BITMASK_TIMER_ENABLE);
+}
+
+static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ switch (addr) {
+ case 0:
+ acpi_pm1_evt_write_sts(ar, val);
+ ar->pm1.evt.update_sci(ar);
+ break;
+ case 4:
+ acpi_pm1_evt_write_en(ar, val);
+ ar->pm1.evt.update_sci(ar);
+ break;
+ }
+}
+
+static const MemoryRegionOps acpi_pm_evt_ops = {
+ .read = acpi_pm_evt_read,
+ .write = acpi_pm_evt_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void ls7a_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci,
+ MemoryRegion *parent, uint64_t offset)
+{
+ ar->pm1.evt.update_sci = update_sci;
+ memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent),
+ &acpi_pm_evt_ops, ar, "acpi-evt", 8);
+ memory_region_add_subregion(parent, offset, &ar->pm1.evt.io);
+}
+
+static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width)
+{
+ ACPIREGS *ar = opaque;
+ return ar->pm1.cnt.cnt;
+}
+
+/* ACPI PM1aCNT */
+static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val)
+{
+ ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE);
+
+ if (val & ACPI_BITMASK_SLEEP_ENABLE) {
+ /* change suspend type */
+ uint16_t sus_typ = (val >> 10) & 7;
+ switch (sus_typ) {
+ case 0: /* soft power off */
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ break;
+ case 1:
+ qemu_system_suspend_request();
+ break;
+ default:
+ if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */
+ qapi_event_send_suspend_disk();
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+ break;
+ }
+ }
+}
+
+static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val,
+ unsigned width)
+{
+ acpi_pm1_cnt_write(opaque, val);
+}
+
+static const MemoryRegionOps acpi_pm_cnt_ops = {
+ .read = acpi_pm_cnt_read,
+ .write = acpi_pm_cnt_write,
+ .valid.min_access_size = 4,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static void acpi_notify_wakeup(Notifier *notifier, void *data)
+{
+ ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup);
+ WakeupReason *reason = data;
+
+ switch (*reason) {
+ case QEMU_WAKEUP_REASON_RTC:
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS);
+ break;
+ case QEMU_WAKEUP_REASON_PMTIMER:
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS);
+ break;
+ case QEMU_WAKEUP_REASON_OTHER:
+ /* ACPI_BITMASK_WAKE_STATUS should be set on resume.
+ * Pretend that resume was caused by power button */
+ ar->pm1.evt.sts |=
+ (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ls7a_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent,
+ bool disable_s3, bool disable_s4, uint8_t s4_val, uint64_t offset)
+{
+ FWCfgState *fw_cfg;
+
+ ar->pm1.cnt.s4_val = s4_val;
+ ar->wakeup.notify = acpi_notify_wakeup;
+ qemu_register_wakeup_notifier(&ar->wakeup);
+ memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent),
+ &acpi_pm_cnt_ops, ar, "acpi-cnt", 4);
+ memory_region_add_subregion(parent, offset, &ar->pm1.cnt.io);
+
+ fw_cfg = fw_cfg_find();
+ if (fw_cfg) {
+ uint8_t suspend[6] = {128, 0, 0, 129, 128, 128};
+ suspend[3] = 1 | ((!disable_s3) << 7);
+ suspend[4] = s4_val | ((!disable_s4) << 7);
+ fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6);
+ }
+}
+
+static void ls7a_pm_reset(void *opaque)
+{
+ LS7APCIPMRegs *pm = opaque;
+
+ acpi_pm1_evt_reset(&pm->acpi_regs);
+ acpi_pm1_cnt_reset(&pm->acpi_regs);
+ acpi_pm_tmr_reset(&pm->acpi_regs);
+ acpi_gpe_reset(&pm->acpi_regs);
+
+ acpi_update_sci(&pm->acpi_regs, pm->irq);
+}
+
+static void pm_powerdown_req(Notifier *n, void *opaque)
+{
+ LS7APCIPMRegs *pm = container_of(n, LS7APCIPMRegs, powerdown_notifier);
+
+ acpi_pm1_evt_power_down(&pm->acpi_regs);
+}
+
+void ls7a_pm_init(LS7APCIPMRegs *pm, qemu_irq *pic)
+{
+ unsigned long base, gpe_len, acpi_aci_irq;
+
+ /* ls7a board acpi hardware info, including
+ * acpi system io base address
+ * acpi gpe length
+ * acpi sci irq number
+ */
+ base = ACPI_IO_BASE;
+ gpe_len = ACPI_GPE0_LEN;
+ acpi_aci_irq = ACPI_SCI_IRQ;
+
+ pm->irq = pic[acpi_aci_irq - 64];
+ memory_region_init(&pm->iomem, NULL, "ls7a_pm", ACPI_IO_SIZE);
+ memory_region_add_subregion(get_system_memory(), base, &pm->iomem);
+
+ cpu_hotplug_hw_init(get_system_memory(), NULL, &pm->cpuhp_state, CPU_HOTPLUG_BASE);
+
+ ls7a_pm_tmr_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem, LS7A_PM_TMR_BLK);
+ ls7a_pm1_evt_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem, LS7A_PM_EVT_BLK);
+ ls7a_pm1_cnt_init(&pm->acpi_regs, &pm->iomem, false, false, 2, LS7A_PM_CNT_BLK);
+
+ acpi_gpe_init(&pm->acpi_regs, gpe_len);
+ memory_region_init_io(&pm->iomem_gpe, NULL, &ls7a_gpe_ops, pm,
+ "acpi-gpe0", gpe_len);
+ memory_region_add_subregion(&pm->iomem, LS7A_GPE0_STS_REG, &pm->iomem_gpe);
+
+ memory_region_init_io(&pm->iomem_reset, NULL, &ls7a_reset_ops, pm,
+ "acpi-reset", 4);
+ memory_region_add_subregion(&pm->iomem, LS7A_GPE0_RESET_REG, &pm->iomem_reset);
+
+ qemu_register_reset(ls7a_pm_reset, pm);
+
+ pm->powerdown_notifier.notify = pm_powerdown_req;
+ qemu_register_powerdown_notifier(&pm->powerdown_notifier);
+
+ if (pm->acpi_memory_hotplug.is_enabled) {
+ acpi_memory_hotplug_init(get_system_memory(), NULL,
+ &pm->acpi_memory_hotplug, MEMORY_HOTPLUG_BASE);
+ }
+}
+
+
+static void ls7a_pm_get_gpe0_blk(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = ACPI_IO_BASE + LS7A_GPE0_STS_REG;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static bool ls7a_pm_get_memory_hotplug_support(Object *obj, Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(obj);
+
+ return ls7a->pm.acpi_memory_hotplug.is_enabled;
+}
+
+static void ls7a_pm_set_memory_hotplug_support(Object *obj, bool value,
+ Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(obj);
+
+ ls7a->pm.acpi_memory_hotplug.is_enabled = value;
+}
+
+static void ls7a_pm_get_disable_s3(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->disable_s3;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_disable_s3(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->disable_s3 = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void ls7a_pm_get_disable_s4(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->disable_s4;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_disable_s4(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->disable_s4 = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+static void ls7a_pm_get_s4_val(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ uint8_t value = pm->s4_val;
+
+ visit_type_uint8(v, name, &value, errp);
+}
+
+static void ls7a_pm_set_s4_val(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LS7APCIPMRegs *pm = opaque;
+ Error *local_err = NULL;
+ uint8_t value;
+
+ visit_type_uint8(v, name, &value, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ pm->s4_val = value;
+out:
+ error_propagate(errp, local_err);
+}
+
+void ls7a_pm_add_properties(Object *obj, LS7APCIPMRegs *pm, Error **errp)
+{
+ static const uint32_t gpe0_len = ACPI_GPE0_LEN;
+ pm->acpi_memory_hotplug.is_enabled = true;
+ pm->disable_s3 = 0;
+ pm->disable_s4 = 0;
+ pm->s4_val = 2;
+
+ object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
+ &pm->pm_io_base, OBJ_PROP_FLAG_READ);
+ object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32",
+ ls7a_pm_get_gpe0_blk,
+ NULL, NULL, pm);
+ object_property_add_uint32_ptr(obj, ACPI_PM_PROP_GPE0_BLK_LEN,
+ &gpe0_len, OBJ_PROP_FLAG_READ);
+ object_property_add_bool(obj, "memory-hotplug-support",
+ ls7a_pm_get_memory_hotplug_support,
+ ls7a_pm_set_memory_hotplug_support);
+ object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8",
+ ls7a_pm_get_disable_s3,
+ ls7a_pm_set_disable_s3,
+ NULL, pm);
+ object_property_add(obj, ACPI_PM_PROP_S4_DISABLED, "uint8",
+ ls7a_pm_get_disable_s4,
+ ls7a_pm_set_disable_s4,
+ NULL, pm);
+ object_property_add(obj, ACPI_PM_PROP_S4_VAL, "uint8",
+ ls7a_pm_get_s4_val,
+ ls7a_pm_set_s4_val,
+ NULL, pm);
+}
+
+void ls7a_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+ if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+ object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) {
+ nvdimm_acpi_plug_cb(hotplug_dev, dev);
+ } else {
+ acpi_memory_plug_cb(hotplug_dev, &ls7a->pm.acpi_memory_hotplug,
+ dev, errp);
+ }
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_plug_cb(hotplug_dev, &ls7a->pm.cpuhp_state, dev, errp);
+ } else {
+ error_setg(errp, "acpi: device plug request for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+}
+
+void ls7a_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+ if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+ object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ acpi_memory_unplug_request_cb(hotplug_dev,
+ &ls7a->pm.acpi_memory_hotplug, dev,
+ errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_request_cb(hotplug_dev, &ls7a->pm.cpuhp_state,
+ dev, errp);
+ } else {
+ error_setg(errp, "acpi: device unplug request for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+}
+
+void ls7a_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev));
+
+ if (ls7a->pm.acpi_memory_hotplug.is_enabled &&
+ object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ acpi_memory_unplug_cb(&ls7a->pm.acpi_memory_hotplug, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
+ acpi_cpu_unplug_cb(&ls7a->pm.cpuhp_state, dev, errp);
+ } else {
+ error_setg(errp, "acpi: device unplug for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+}
+
+void ls7a_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev));
+
+ acpi_memory_ospm_status(&ls7a->pm.acpi_memory_hotplug, list);
+ acpi_cpu_ospm_status(&ls7a->pm.cpuhp_state, list);
+}
+
+void ls7a_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
+{
+ LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev));
+
+ acpi_send_gpe_event(&ls7a->pm.acpi_regs, ls7a->pm.irq, ev);
+}
+
+
+
diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build
index adf6347bc425801b02efda69ee40d6ab1bdf8895..5fe4cfa4f13ded963bdcd518bdbcb6e2d356d97d 100644
--- a/hw/acpi/meson.build
+++ b/hw/acpi/meson.build
@@ -6,6 +6,7 @@ acpi_ss.add(files(
'core.c',
'utils.c',
))
+acpi_ss.add(when: 'CONFIG_ACPI_LOONGARCH', if_true: files('larch_7a.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c', 'cpu_hotplug.c'))
acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-stub.c'))
acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c'))
diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..3fe2677fda7e689fe97c0a9cb82aebcb01eb1e3e
--- /dev/null
+++ b/hw/loongarch/Kconfig
@@ -0,0 +1,17 @@
+config LS7A_APIC
+ bool
+
+config LS7A_RTC
+ bool
+
+config LOONGSON3A
+ bool
+
+config MEM_HOTPLUG
+ bool
+
+config ACPI_LOONGARCH
+ bool
+
+config E1000E_PCI
+ bool
diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c
new file mode 100644
index 0000000000000000000000000000000000000000..682e016be28c10f59162616684945d47184d1553
--- /dev/null
+++ b/hw/loongarch/acpi-build.c
@@ -0,0 +1,827 @@
+/* Support for generating ACPI tables and passing them to Guests
+ *
+ * Copyright (C) 2008-2010 Kevin O'Connor
+ * Copyright (C) 2006 Fabrice Bellard
+ * Copyright (C) 2013 Red Hat Inc
+ *
+ * Author: Michael S. Tsirkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qnum.h"
+#include "acpi-build.h"
+#include "qemu-common.h"
+#include "qemu/bitmap.h"
+#include "qemu/error-report.h"
+#include "hw/pci/pci.h"
+#include "hw/boards.h"
+#include "hw/core/cpu.h"
+#include "target/loongarch64/cpu.h"
+#include "hw/misc/pvpanic.h"
+#include "hw/timer/hpet.h"
+#include "hw/acpi/acpi-defs.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/acpi/bios-linker-loader.h"
+#include "hw/loader.h"
+#include "hw/isa/isa.h"
+#include "hw/block/fdc.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "sysemu/tpm.h"
+#include "hw/acpi/tpm.h"
+#include "hw/acpi/vmgenid.h"
+#include "sysemu/tpm_backend.h"
+#include "hw/rtc/mc146818rtc_regs.h"
+#include "sysemu/numa.h"
+#include "sysemu/runstate.h"
+#include "sysemu/reset.h"
+#include "migration/vmstate.h"
+#include "hw/mem/memory-device.h"
+#include "hw/acpi/utils.h"
+#include "hw/acpi/pci.h"
+/* Supported chipsets: */
+#include "hw/acpi/aml-build.h"
+#include "hw/loongarch/larch.h"
+#include "hw/loongarch/ls7a.h"
+#include "hw/platform-bus.h"
+
+#include "hw/acpi/ipmi.h"
+#include "hw/acpi/ls7a.h"
+
+/* These are used to size the ACPI tables for -M pc-i440fx-1.7 and
+ * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows
+ * a little bit, there should be plenty of free space since the DSDT
+ * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1.
+ */
+#define ACPI_BUILD_ALIGN_SIZE 0x1000
+
+#define ACPI_BUILD_TABLE_SIZE 0x20000
+
+/* #define DEBUG_ACPI_BUILD */
+#ifdef DEBUG_ACPI_BUILD
+#define ACPI_BUILD_DPRINTF(fmt, ...) \
+ do {printf("ACPI_BUILD: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define ACPI_BUILD_DPRINTF(fmt, ...)
+#endif
+
+/* Default IOAPIC ID */
+#define ACPI_BUILD_IOAPIC_ID 0x0
+
+/* PCI fw r3.0 MCFG table. */
+/* Subtable */
+
+typedef struct AcpiMiscInfo {
+ bool is_piix4;
+ bool has_hpet;
+ TPMVersion tpm_version;
+ const unsigned char *dsdt_code;
+ unsigned dsdt_size;
+ uint16_t pvpanic_port;
+ uint16_t applesmc_io_base;
+} AcpiMiscInfo;
+
+typedef struct AcpiBuildPciBusHotplugState {
+ GArray *device_table;
+ GArray *notify_table;
+ struct AcpiBuildPciBusHotplugState *parent;
+ bool pcihp_bridge_en;
+} AcpiBuildPciBusHotplugState;
+
+static void init_common_fadt_data(AcpiFadtData *data)
+{
+ AmlAddressSpace as = AML_AS_SYSTEM_MEMORY;
+ uint64_t base = LS7A_ACPI_REG_BASE;
+ AcpiFadtData fadt = {
+ .rev = 3,
+ .flags =
+ (1 << ACPI_FADT_F_WBINVD) |
+ (1 << ACPI_FADT_F_PROC_C1) |
+ (1 << ACPI_FADT_F_SLP_BUTTON) |
+ (1 << ACPI_FADT_F_TMR_VAL_EXT) |
+ (1 << ACPI_FADT_F_RESET_REG_SUP) ,
+ .plvl2_lat = 0xfff /* C2 state not supported */,
+ .plvl3_lat = 0xfff /* C3 state not supported */,
+ .smi_cmd = 0x00,
+ .sci_int = ACPI_SCI_IRQ,
+ .acpi_enable_cmd = 0x00,
+ .acpi_disable_cmd = 0x00,
+ .pm1a_evt = { .space_id = as, .bit_width = 8 * 8,
+ .address = base + LS7A_PM_EVT_BLK },
+ .pm1a_cnt = { .space_id = as, .bit_width = 4 * 8,
+ .address = base + LS7A_PM_CNT_BLK },
+ .pm_tmr = { .space_id = as, .bit_width = 4 * 8,
+ .address = base + LS7A_PM_TMR_BLK },
+ .gpe0_blk = { .space_id = as, .bit_width = 8 * 8,
+ .address = base + LS7A_GPE0_STS_REG},
+ .reset_reg = { .space_id = as, .bit_width = 4 * 8,
+ .address = base + LS7A_GPE0_RESET_REG},
+ .reset_val = 0x1,
+ };
+ *data = fadt;
+}
+
+static void acpi_align_size(GArray *blob, unsigned align)
+{
+ /* Align size to multiple of given size. This reduces the chance
+ * we need to change size in the future (breaking cross version migration).
+ */
+ g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align));
+}
+
+/* FACS */
+static void
+build_facs(GArray *table_data)
+{
+ const char *sig = "FACS";
+ const uint8_t reserved[40] = {};
+
+ g_array_append_vals(table_data, sig, 4); /* Signature */
+ build_append_int_noprefix(table_data, 64, 4); /* Length */
+ build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */
+ build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */
+ build_append_int_noprefix(table_data, 0, 4); /* Global Lock */
+ build_append_int_noprefix(table_data, 0, 4); /* Flags */
+ g_array_append_vals(table_data, reserved, 40); /* Reserved */
+}
+
+void ls7a_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
+ const CPUArchIdList *apic_ids, GArray *entry, bool force_enabled)
+{
+ uint32_t apic_id = apic_ids->cpus[uid].arch_id;
+    /* Flags - Local APIC Flags */
+ uint32_t flags = apic_ids->cpus[uid].cpu != NULL || force_enabled ?
+ 1 /* Enabled */ : 0;
+
+ /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */
+ build_append_int_noprefix(entry, 0, 1); /* Type */
+ build_append_int_noprefix(entry, 8, 1); /* Length */
+ build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */
+ build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */
+ build_append_int_noprefix(entry, flags, 4); /* Flags */
+}
+static void build_ioapic(GArray *entry, uint8_t id, uint32_t addr, uint32_t irq)
+{
+ /* Rev 1.0b, 5.2.8.2 IO APIC */
+ build_append_int_noprefix(entry, 1, 1); /* Type */
+ build_append_int_noprefix(entry, 12, 1); /* Length */
+ build_append_int_noprefix(entry, id, 1); /* IO APIC ID */
+ build_append_int_noprefix(entry, 0, 1); /* Reserved */
+ build_append_int_noprefix(entry, addr, 4); /* IO APIC Address */
+ build_append_int_noprefix(entry, irq, 4); /* System Vector Base */
+}
+
+static void
+build_madt(GArray *table_data, BIOSLinker *linker, LoongarchMachineState *lsms)
+{
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+ MachineClass *mc = MACHINE_GET_CLASS(lsms);
+ const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(lsms));
+ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(lsms->acpi_dev);
+ AcpiDeviceIf *adev = ACPI_DEVICE_IF(lsms->acpi_dev);
+ int i;
+ AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = lsms->oem_id,
+ .oem_table_id = lsms->oem_table_id};
+
+ acpi_table_begin(&table, table_data);
+
+ /* Local APIC Address */
+ build_append_int_noprefix(table_data, 0, 4);
+ build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */
+
+
+ for (i = 0; i < apic_ids->len; i++) {
+ adevc->madt_cpu(adev, i, apic_ids, table_data, false);
+ }
+
+
+ build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID, lsmc->ls7a_ioapic_reg_base, LOONGARCH_PCH_IRQ_BASE);
+
+    /* NOTE(review): Type 3 is NMI Source (Rev 1.0b, 5.2.8.3.2), not Local APIC NMI - verify */
+ build_append_int_noprefix(table_data, 3, 1); /* Type */
+ build_append_int_noprefix(table_data, 6, 1); /* Length */
+ /* ACPI Processor ID */
+ build_append_int_noprefix(table_data, 0xFF /* all processors */, 1);
+ build_append_int_noprefix(table_data, 0, 2); /* Flags */
+ /* Local APIC INTI# */
+ build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1);
+
+ /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */
+ build_append_int_noprefix(table_data, 4, 1); /* Type */
+ build_append_int_noprefix(table_data, 6, 1); /* Length */
+ /* ACPI Processor ID */
+ build_append_int_noprefix(table_data, 0xFF /* all processors */, 1);
+ build_append_int_noprefix(table_data, 0, 2); /* Flags */
+ /* Local APIC INTI# */
+ build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1);
+
+
+ acpi_table_end(linker, &table);
+}
+
+static void
+build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine)
+{
+ uint64_t i, mem_len, mem_base;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine);
+ int nb_numa_nodes = machine->numa_state->num_nodes;
+ NodeInfo *numa_info = machine->numa_state->nodes;
+ AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = lsms->oem_id,
+ .oem_table_id = lsms->oem_table_id};
+
+ acpi_table_begin(&table, table_data);
+ build_append_int_noprefix(table_data, 1, 4); /* Reserved */
+ build_append_int_noprefix(table_data, 0, 8); /* Reserved */
+
+ for (i = 0; i < apic_ids->len; ++i) {
+ /* 5.2.15.1 Processor Local APIC/SAPIC Affinity Structure */
+ build_append_int_noprefix(table_data, 0, 1); /* Type */
+ build_append_int_noprefix(table_data, 16, 1); /* Length */
+ /* Proximity Domain [7:0] */
+ build_append_int_noprefix(table_data, apic_ids->cpus[i].props.node_id, 1);
+ build_append_int_noprefix(table_data, apic_ids->cpus[i].arch_id, 1); /* APIC ID */
+ /* Flags, Table 5-36 */
+ build_append_int_noprefix(table_data, 1, 4);
+ build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */
+ /* Proximity Domain [31:8] */
+ build_append_int_noprefix(table_data, 0, 3);
+ build_append_int_noprefix(table_data, 0, 4); /* Reserved */
+ }
+
+ /* node0 */
+ mem_base = (uint64_t)0;
+ mem_len = 0x10000000;
+ build_srat_memory(table_data, mem_base, mem_len,
+ 0, MEM_AFFINITY_ENABLED);
+ mem_base = 0x90000000;
+ if (!nb_numa_nodes) {
+ mem_len = machine->ram_size - 0x10000000;
+ } else {
+ mem_len = numa_info[0].node_mem - 0x10000000;
+ }
+
+ build_srat_memory(table_data, mem_base, mem_len,
+ 0, MEM_AFFINITY_ENABLED);
+ mem_base += mem_len;
+
+ /* node1 ~ nodemax */
+ for (i = 1; i < nb_numa_nodes; ++i) {
+ mem_len = numa_info[i].node_mem;
+ build_srat_memory(table_data, mem_base, mem_len,
+ i, MEM_AFFINITY_ENABLED);
+ mem_base += mem_len;
+ }
+
+ if (lsms->hotplug_memory_size) {
+ build_srat_memory(table_data, machine->device_memory->base,
+ lsms->hotplug_memory_size, 0,
+ MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED);
+ }
+
+ acpi_table_end(linker, &table);
+
+}
+
+typedef
+struct AcpiBuildState {
+ /* Copy of table in RAM (for patching). */
+ MemoryRegion *table_mr;
+ /* Is table patched? */
+ uint8_t patched;
+ void *rsdp;
+ MemoryRegion *rsdp_mr;
+ MemoryRegion *linker_mr;
+} AcpiBuildState;
+
+static void build_ls7a_pci0_int(Aml *table)
+{
+ Aml *sb_scope = aml_scope("_SB");
+ Aml *pci0_scope = aml_scope("PCI0");
+ Aml *prt_pkg = aml_varpackage(128);
+ int slot, pin;
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ for (pin = 0; pin < PCI_NUM_PINS; pin++) {
+ Aml *pkg = aml_package(4);
+ aml_append(pkg, aml_int((slot << 16) | 0xFFFF));
+ aml_append(pkg, aml_int(pin));
+ aml_append(pkg, aml_int(0));
+ aml_append(pkg, aml_int(LOONGARCH_PCH_IRQ_BASE + 16 + (slot * 4 + pin) % 16));
+ aml_append(prt_pkg, pkg);
+ }
+ }
+ aml_append(pci0_scope,
+ aml_name_decl("_PRT", prt_pkg));
+
+ aml_append(sb_scope, pci0_scope);
+
+ aml_append(table, sb_scope);
+}
+
+static void build_dbg_aml(Aml *table)
+{
+ Aml *field;
+ Aml *method;
+ Aml *while_ctx;
+ Aml *scope = aml_scope("\\");
+ Aml *buf = aml_local(0);
+ Aml *len = aml_local(1);
+ Aml *idx = aml_local(2);
+
+ aml_append(scope,
+ aml_operation_region("DBG", AML_SYSTEM_IO, aml_int(0x0402), 0x01));
+ field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE);
+ aml_append(field, aml_named_field("DBGB", 8));
+ aml_append(scope, field);
+
+ method = aml_method("DBUG", 1, AML_NOTSERIALIZED);
+
+ aml_append(method, aml_to_hexstring(aml_arg(0), buf));
+ aml_append(method, aml_to_buffer(buf, buf));
+ aml_append(method, aml_subtract(aml_sizeof(buf), aml_int(1), len));
+ aml_append(method, aml_store(aml_int(0), idx));
+
+ while_ctx = aml_while(aml_lless(idx, len));
+ aml_append(while_ctx,
+ aml_store(aml_derefof(aml_index(buf, idx)), aml_name("DBGB")));
+ aml_append(while_ctx, aml_increment(idx));
+ aml_append(method, while_ctx);
+
+ aml_append(method, aml_store(aml_int(0x0A), aml_name("DBGB")));
+ aml_append(scope, method);
+
+ aml_append(table, scope);
+}
+
+static Aml *build_ls7a_osc_method(void)
+{
+ Aml *if_ctx;
+ Aml *if_ctx2;
+ Aml *else_ctx;
+ Aml *method;
+ Aml *a_cwd1 = aml_name("CDW1");
+ Aml *a_ctrl = aml_local(0);
+
+ method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
+ aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));
+
+ if_ctx = aml_if(aml_equal(
+ aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766")));
+ aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
+ aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
+
+ aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl));
+
+ /*
+ * Always allow native PME, AER (no dependencies)
+ * Allow SHPC (PCI bridges can have SHPC controller)
+ */
+ aml_append(if_ctx, aml_and(a_ctrl, aml_int(0x1F), a_ctrl));
+
+ if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1))));
+ /* Unknown revision */
+ aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1));
+ aml_append(if_ctx, if_ctx2);
+
+ if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl)));
+ /* Capabilities bits were masked */
+ aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1));
+ aml_append(if_ctx, if_ctx2);
+
+ /* Update DWORD3 in the buffer */
+ aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3")));
+ aml_append(method, if_ctx);
+
+ else_ctx = aml_else();
+ /* Unrecognized UUID */
+ aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1));
+ aml_append(method, else_ctx);
+
+ aml_append(method, aml_return(aml_arg(3)));
+ return method;
+}
+
+static void build_ls7a_rtc_device_aml(Aml *table)
+{
+ Aml *dev;
+ Aml *crs;
+ uint32_t rtc_irq = LS7A_RTC_IRQ;
+
+ Aml *scope = aml_scope("_SB");
+ dev = aml_device("RTC");
+ aml_append(dev, aml_name_decl("_HID", aml_string("LOON0001")));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE,
+ 0, LS7A_RTC_REG_BASE,
+ LS7A_RTC_REG_BASE + LS7A_RTC_LEN - 1, 0, LS7A_RTC_LEN));
+ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
+ AML_EXCLUSIVE, &rtc_irq, 1));
+
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+ aml_append(table, scope);
+}
+
+static void build_ls7a_uart_device_aml(Aml *table)
+{
+ Aml *dev;
+ Aml *crs;
+ Aml *pkg0, *pkg1, *pkg2;
+ uint32_t uart_irq = LS7A_UART_IRQ;
+
+ Aml *scope = aml_scope("_SB");
+ dev = aml_device("COMA");
+ aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
+ aml_append(dev, aml_name_decl("_CCA", aml_int(1)));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_NON_CACHEABLE, AML_READ_WRITE,
+ 0, LS7A_UART_BASE, LS7A_UART_BASE + LS7A_UART_LEN - 1, 0, 0x8));
+ aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH,
+ AML_EXCLUSIVE, &uart_irq, 1));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+    pkg0 = aml_package(0x2);
+    aml_append(pkg0, aml_string("clock-frequency"));
+    aml_append(pkg0, aml_int(0x01F78A40));
+ pkg1 = aml_package(0x1);
+ aml_append(pkg1, pkg0);
+ pkg2 = aml_package(0x2);
+ aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301"));
+ aml_append(pkg2, pkg1);
+
+ aml_append(dev, aml_name_decl("_DSD", pkg2));
+
+ aml_append(scope, dev);
+ aml_append(table, scope);
+}
+#ifdef CONFIG_TPM
+static void acpi_dsdt_add_tpm(Aml *scope, LoongarchMachineState *vms)
+{
+ PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev);
+ hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS;
+ SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find());
+ MemoryRegion *sbdev_mr;
+ hwaddr tpm_base;
+
+ if (!sbdev) {
+ return;
+ }
+
+ tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
+ assert(tpm_base != -1);
+
+ tpm_base += pbus_base;
+
+ sbdev_mr = sysbus_mmio_get_region(sbdev, 0);
+
+ Aml *dev = aml_device("TPM0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101")));
+ aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device")));
+ aml_append(dev, aml_name_decl("_UID", aml_int(0)));
+
+ Aml *crs = aml_resource_template();
+ aml_append(crs,
+ aml_memory32_fixed(tpm_base,
+ (uint32_t)memory_region_size(sbdev_mr),
+ AML_READ_WRITE));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+}
+#endif
+static void
+build_dsdt(GArray *table_data, BIOSLinker *linker, MachineState *machine)
+{
+ Aml *dsdt, *sb_scope, *scope, *dev, *crs, *pkg;
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ uint32_t nr_mem = machine->ram_slots;
+ uint64_t base = LS7A_ACPI_REG_BASE;
+ int root_bus_limit = PCIE_MMCFG_BUS(LS_PCIECFG_SIZE - 1);
+ AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = lsms->oem_id,
+ .oem_table_id = lsms->oem_table_id};
+
+ acpi_table_begin(&table, table_data);
+ dsdt = init_aml_allocator();
+
+ build_dbg_aml(dsdt);
+
+ sb_scope = aml_scope("_SB");
+ dev = aml_device("PCI0");
+ aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
+ aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
+ aml_append(dev, aml_name_decl("_ADR", aml_int(0)));
+ aml_append(dev, aml_name_decl("_BBN", aml_int(0)));
+ aml_append(dev, aml_name_decl("_UID", aml_int(1)));
+ aml_append(dev, build_ls7a_osc_method());
+ aml_append(sb_scope, dev);
+
+#ifdef CONFIG_TPM
+ acpi_dsdt_add_tpm(sb_scope, lsms);
+#endif
+ aml_append(dsdt, sb_scope);
+
+ build_ls7a_pci0_int(dsdt);
+ build_ls7a_rtc_device_aml(dsdt);
+ build_ls7a_uart_device_aml(dsdt);
+
+ if (lsms->acpi_dev) {
+ CPUHotplugFeatures opts = {
+ .acpi_1_compatible = true, .has_legacy_cphp = false
+ };
+ build_cpus_aml(dsdt, machine, opts, CPU_HOTPLUG_BASE,
+ "\\_SB.PCI0", "\\_GPE._E02");
+
+ build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0",
+ "\\_GPE._E03", AML_SYSTEM_MEMORY,
+ MEMORY_HOTPLUG_BASE);
+ }
+
+ scope = aml_scope("_GPE");
+ {
+ aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006")));
+ }
+ aml_append(dsdt, scope);
+
+ scope = aml_scope("\\_SB.PCI0");
+ /* build PCI0._CRS */
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_word_bus_number(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE,
+ 0x0000, 0x0, root_bus_limit,
+ 0x0000, root_bus_limit + 1));
+ aml_append(crs,
+ aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_POS_DECODE, AML_ENTIRE_RANGE,
+ 0x0000, 0x4000, 0xFFFF, 0x0000, 0xC000));
+ aml_append(crs,
+ aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_CACHEABLE, AML_READ_WRITE,
+ 0, 0x40000000, 0x7FFFFFFF, 0, 0x40000000));
+ aml_append(scope, aml_name_decl("_CRS", crs));
+
+ /* reserve GPE0 block resources */
+ dev = aml_device("GPE0");
+ aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
+ aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources")));
+ /* device present, functioning, decoding, not shown in UI */
+ aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
+ crs = aml_resource_template();
+ aml_append(crs,
+ aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED,
+ AML_CACHEABLE, AML_READ_WRITE,
+ 0, base + LS7A_GPE0_STS_REG,
+ base + LS7A_GPE0_STS_REG + 0x3, 0, 0x4));
+ aml_append(dev, aml_name_decl("_CRS", crs));
+ aml_append(scope, dev);
+ aml_append(dsdt, scope);
+
+ scope = aml_scope("\\");
+ pkg = aml_package(4);
+ aml_append(pkg, aml_int(7)); /* PM1a_CNT.SLP_TYP */
+ aml_append(pkg, aml_int(7)); /* PM1b_CNT.SLP_TYP not impl. */
+ aml_append(pkg, aml_int(0)); /* reserved */
+ aml_append(pkg, aml_int(0)); /* reserved */
+ aml_append(scope, aml_name_decl("_S5", pkg));
+ aml_append(dsdt, scope);
+
+ /* copy AML table into ACPI tables blob and patch header there */
+ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
+ acpi_table_end(linker, &table);
+ free_aml_allocator();
+}
+
+
+static
+void acpi_build(AcpiBuildTables *tables, MachineState *machine)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ GArray *table_offsets;
+ AcpiFadtData fadt_data;
+ unsigned facs, rsdt, fadt, dsdt;
+ uint8_t *u;
+ size_t aml_len = 0;
+ GArray *tables_blob = tables->table_data;
+
+ init_common_fadt_data(&fadt_data);
+
+ table_offsets = g_array_new(false, true /* clear */,
+ sizeof(uint32_t));
+ ACPI_BUILD_DPRINTF("init ACPI tables\n");
+
+ bios_linker_loader_alloc(tables->linker,
+ ACPI_BUILD_TABLE_FILE, tables_blob,
+ 64 /* Ensure FACS is aligned */,
+ false /* high memory */);
+
+ /*
+ * FACS is pointed to by FADT.
+ * We place it first since it's the only table that has alignment
+ * requirements.
+ */
+ facs = tables_blob->len;
+ build_facs(tables_blob);
+
+ /* DSDT is pointed to by FADT */
+ dsdt = tables_blob->len;
+ build_dsdt(tables_blob, tables->linker, MACHINE(qdev_get_machine()));
+
+ /* Count the size of the DSDT and SSDT, we will need it for legacy
+ * sizing of ACPI tables.
+ */
+ aml_len += tables_blob->len - dsdt;
+
+ /* ACPI tables pointed to by RSDT */
+ fadt = tables_blob->len;
+ acpi_add_table(table_offsets, tables_blob);
+ fadt_data.facs_tbl_offset = &facs;
+ fadt_data.dsdt_tbl_offset = &dsdt;
+ fadt_data.xdsdt_tbl_offset = &dsdt;
+ build_fadt(tables_blob, tables->linker, &fadt_data,
+ "LOONGS", "TP-R00");
+ aml_len += tables_blob->len - fadt;
+
+ acpi_add_table(table_offsets, tables_blob);
+ build_madt(tables_blob, tables->linker, lsms);
+
+ acpi_add_table(table_offsets, tables_blob);
+ build_srat(tables_blob, tables->linker, machine);
+ if (machine->numa_state->have_numa_distance) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_slit(tables_blob, tables->linker, machine, lsms->oem_id,
+ lsms->oem_table_id);
+ }
+
+ if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) {
+ acpi_add_table(table_offsets, tables_blob);
+ build_tpm2(tables_blob, tables->linker, tables->tcpalog,
+ lsms->oem_id, lsms->oem_table_id);
+ }
+
+ /* Build mcfg */
+ acpi_add_table(table_offsets, tables_blob);
+ {
+ AcpiMcfgInfo mcfg = {
+ .base = LS_PCIECFG_BASE,
+ .size = LS_PCIECFG_SIZE,
+ };
+ build_mcfg(tables_blob, tables->linker, &mcfg, lsms->oem_id,
+ lsms->oem_table_id);
+ }
+
+ /* Add tables supplied by user (if any) */
+ for (u = acpi_table_first(); u; u = acpi_table_next(u)) {
+ unsigned len = acpi_table_len(u);
+
+ acpi_add_table(table_offsets, tables_blob);
+ g_array_append_vals(tables_blob, u, len);
+ }
+
+ /* RSDT is pointed to by RSDP */
+ rsdt = tables_blob->len;
+ build_rsdt(tables_blob, tables->linker, table_offsets,
+ "LOONGS", "TP-R00");
+
+ /* RSDP is in FSEG memory, so allocate it separately */
+ {
+ AcpiRsdpData rsdp_data = {
+ .revision = 0,
+ .oem_id = lsms->oem_id,
+ .xsdt_tbl_offset = NULL,
+ .rsdt_tbl_offset = &rsdt,
+ };
+ build_rsdp(tables->rsdp, tables->linker, &rsdp_data);
+ }
+ acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE);
+
+ /* Cleanup memory that's no longer used. */
+ g_array_free(table_offsets, true);
+}
+
+static void acpi_ram_update(MemoryRegion *mr, GArray *data)
+{
+ uint32_t size = acpi_data_len(data);
+
+ /* Make sure RAM size is correct -
+ in case it got changed e.g. by migration */
+ memory_region_ram_resize(mr, size, &error_abort);
+
+ memcpy(memory_region_get_ram_ptr(mr), data->data, size);
+ memory_region_set_dirty(mr, 0, size);
+}
+
+static void acpi_build_update(void *build_opaque)
+{
+ AcpiBuildState *build_state = build_opaque;
+ AcpiBuildTables tables;
+
+ /* No state to update or already patched? Nothing to do. */
+ if (!build_state || build_state->patched) {
+ return;
+ }
+ build_state->patched = 1;
+
+ acpi_build_tables_init(&tables);
+
+ acpi_build(&tables, MACHINE(qdev_get_machine()));
+
+ acpi_ram_update(build_state->table_mr, tables.table_data);
+
+ if (build_state->rsdp) {
+ memcpy(build_state->rsdp, tables.rsdp->data,
+ acpi_data_len(tables.rsdp));
+ } else {
+ acpi_ram_update(build_state->rsdp_mr, tables.rsdp);
+ }
+
+ acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob);
+ acpi_build_tables_cleanup(&tables, true);
+}
+
+static void acpi_build_reset(void *build_opaque)
+{
+ AcpiBuildState *build_state = build_opaque;
+ build_state->patched = 0;
+}
+
+static const VMStateDescription vmstate_acpi_build = {
+ .name = "acpi_build",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(patched, AcpiBuildState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+void loongarch_acpi_setup(void)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine());
+ AcpiBuildTables tables;
+ AcpiBuildState *build_state;
+
+ if (!lsms->fw_cfg) {
+ ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n");
+ return;
+ }
+
+ if (!lsms->acpi_build_enabled) {
+ ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n");
+ return;
+ }
+
+ if (!loongarch_is_acpi_enabled(lsms)) {
+ ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n");
+ return;
+ }
+
+ build_state = g_malloc0(sizeof *build_state);
+
+ acpi_build_tables_init(&tables);
+ acpi_build(&tables, MACHINE(lsms));
+
+ /* Now expose it all to Guest */
+ build_state->table_mr = acpi_add_rom_blob(acpi_build_update, build_state,
+ tables.table_data,
+ ACPI_BUILD_TABLE_FILE);
+ assert(build_state->table_mr != NULL);
+
+ build_state->linker_mr =
+ acpi_add_rom_blob(acpi_build_update, build_state, tables.linker->cmd_blob,
+ "etc/table-loader");
+
+ fw_cfg_add_file(lsms->fw_cfg, ACPI_BUILD_TPMLOG_FILE,
+ tables.tcpalog->data, acpi_data_len(tables.tcpalog));
+
+ build_state->rsdp = NULL;
+ build_state->rsdp_mr = acpi_add_rom_blob(acpi_build_update, build_state, tables.rsdp,
+ ACPI_BUILD_RSDP_FILE);
+
+ qemu_register_reset(acpi_build_reset, build_state);
+ acpi_build_reset(build_state);
+ vmstate_register(NULL, 0, &vmstate_acpi_build, build_state);
+
+ /* Cleanup tables but don't free the memory: we track it
+ * in build_state.
+ */
+ acpi_build_tables_cleanup(&tables, false);
+}
diff --git a/hw/loongarch/acpi-build.h b/hw/loongarch/acpi-build.h
new file mode 100644
index 0000000000000000000000000000000000000000..a914268bbe43f8121ce32a4288262b93bcedde9f
--- /dev/null
+++ b/hw/loongarch/acpi-build.h
@@ -0,0 +1,16 @@
+
+#ifndef HW_LARCH_ACPI_BUILD_H
+#define HW_LARCH_ACPI_BUILD_H
+
+#define EFI_ACPI_OEM_ID "LARCH"
+#define EFI_ACPI_OEM_TABLE_ID "LARCH" /* OEM table id 8 bytes long */
+#define EFI_ACPI_OEM_REVISION 0x00000002
+#define EFI_ACPI_CREATOR_ID "LINUX"
+#define EFI_ACPI_CREATOR_REVISION 0x01000013
+
+#define ACPI_COMPATIBLE_1_0 0
+#define ACPI_COMPATIBLE_2_0 1
+
+void loongarch_acpi_setup(void);
+
+#endif
diff --git a/hw/loongarch/apic.c b/hw/loongarch/apic.c
new file mode 100644
index 0000000000000000000000000000000000000000..67994d201f30be46f81eede3b2bd17cb4a204b89
--- /dev/null
+++ b/hw/loongarch/apic.c
@@ -0,0 +1,675 @@
+/*
+ * Loongarch 3A5000 interrupt controller emulation
+ *
+ * Copyright (C) 2020 Lu Zeng
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/boards.h"
+#include "hw/irq.h"
+#include "hw/loongarch/cpudevs.h"
+#include "hw/sysbus.h"
+#include "qemu/host-utils.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "target/loongarch64/cpu.h"
+#include "exec/address-spaces.h"
+#include "hw/loongarch/larch.h"
+#include "migration/vmstate.h"
+
+#define DEBUG_APIC 0
+
+#define DPRINTF(fmt, ...) \
+do { \
+ if (DEBUG_APIC) { \
+ fprintf(stderr, "APIC: " fmt , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+#define APIC_OFFSET 0x400
+#define APIC_BASE (0x1f010000ULL)
+#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET)
+#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET)
+#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET)
+#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET)
+#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET)
+#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET)
+#define EXTIOI_ISR_START (0x700 - APIC_OFFSET)
+#define EXTIOI_ISR_END (0x720 - APIC_OFFSET)
+#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET)
+#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET)
+#define EXTIOI_COREISR_START (0x10000)
+#define EXTIOI_COREISR_END (EXTIOI_COREISR_START + 0x10000)
+
+static int ext_irq_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+ apicState *apic = opaque;
+ struct loongarch_kvm_irqchip *chip;
+ struct kvm_loongarch_ls3a_extirq_state *kstate;
+ int ret, length, i, vcpuid;
+#endif
+ if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) {
+ return 0;
+ }
+#ifdef CONFIG_KVM
+ length = sizeof(struct loongarch_kvm_irqchip) +
+ sizeof(struct kvm_loongarch_ls3a_extirq_state);
+ chip = g_malloc0(length);
+ memset(chip, 0, length);
+ chip->chip_id = KVM_IRQCHIP_LS3A_EXTIRQ;
+ chip->len = length;
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
+ if (ret < 0) {
+        fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(-ret));
+ abort();
+ }
+
+ kstate = (struct kvm_loongarch_ls3a_extirq_state *)chip->data;
+ for (i = 0; i < EXTIOI_IRQS_BITMAP_SIZE; i++) {
+ apic->ext_en[i] = kstate->ext_en_r.reg_u8[i];
+ apic->ext_bounce[i] = kstate->bounce_r.reg_u8[i];
+ apic->ext_isr[i] = kstate->ext_isr_r.reg_u8[i];
+ for (vcpuid = 0; vcpuid < MAX_CORES; vcpuid++) {
+ apic->ext_coreisr[vcpuid][i] = kstate->ext_core_isr_r.reg_u8[vcpuid][i];
+ }
+ }
+ for (i = 0; i < EXTIOI_IRQS_IPMAP_SIZE; i++) {
+ apic->ext_ipmap[i] = kstate->ip_map_r.reg_u8[i];
+ }
+ for (i = 0; i < EXTIOI_IRQS; i++) {
+        apic->ext_coremap[i] = kstate->core_map_r.reg_u8[i];
+ }
+ for (i = 0; i < 16; i++) {
+ apic->ext_nodetype[i] = kstate->node_type_r.reg_u16[i];
+ }
+ g_free(chip);
+#endif
+ return 0;
+}
+
+static int ext_irq_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+ apicState *apic = opaque;
+ struct loongarch_kvm_irqchip *chip;
+ struct kvm_loongarch_ls3a_extirq_state *kstate;
+ int ret, length, i, vcpuid;
+#endif
+ if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) {
+ return 0;
+ }
+#ifdef CONFIG_KVM
+ length = sizeof(struct loongarch_kvm_irqchip) +
+ sizeof(struct kvm_loongarch_ls3a_extirq_state);
+ chip = g_malloc0(length);
+
+ chip->chip_id = KVM_IRQCHIP_LS3A_EXTIRQ;
+ chip->len = length;
+
+ kstate = (struct kvm_loongarch_ls3a_extirq_state *)chip->data;
+ for (i = 0; i < EXTIOI_IRQS_BITMAP_SIZE; i++) {
+ kstate->ext_en_r.reg_u8[i] = apic->ext_en[i];
+ kstate->bounce_r.reg_u8[i] = apic->ext_bounce[i];
+ kstate->ext_isr_r.reg_u8[i] = apic->ext_isr[i];
+ for (vcpuid = 0; vcpuid < MAX_CORES; vcpuid++) {
+ kstate->ext_core_isr_r.reg_u8[vcpuid][i] = apic->ext_coreisr[vcpuid][i];
+ }
+ }
+ for (i = 0; i < EXTIOI_IRQS_IPMAP_SIZE; i++) {
+ kstate->ip_map_r.reg_u8[i] = apic->ext_ipmap[i];
+ }
+ for (i = 0; i < EXTIOI_IRQS; i++) {
+ kstate->core_map_r.reg_u8[i] = apic->ext_coremap[i];
+ }
+ for (i = 0; i < 16; i++) {
+ kstate->node_type_r.reg_u16[i] = apic->ext_nodetype[i];
+ }
+
+ ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+ if (ret < 0) {
+        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret));
+ abort();
+ }
+ g_free(chip);
+#endif
+ return 0;
+}
+typedef struct nodeApicState {
+ unsigned long addr;
+ int nodeid;
+ apicState *apic;
+} nodeApicState;
+
+static void ioapic_update_irq(void *opaque, int irq, int level)
+{
+ apicState *s = opaque;
+ uint8_t ipnum, cpu, cpu_ipnum;
+ unsigned long found1, found2;
+ uint8_t reg_count, reg_bit;
+
+ reg_count = irq / 32;
+ reg_bit = irq % 32;
+
+ ipnum = s->ext_sw_ipmap[irq];
+ cpu = s->ext_sw_coremap[irq];
+ cpu_ipnum = cpu * LS3A_INTC_IP + ipnum;
+ if (level == 1) {
+ if (test_bit(reg_bit, ((void *)s->ext_en + 0x4 * reg_count)) == false) {
+ return;
+ }
+
+ if (test_bit(reg_bit, ((void *)s->ext_isr + 0x4 * reg_count)) == false) {
+ return;
+ }
+ bitmap_set(((void *)s->ext_coreisr[cpu] + 0x4 * reg_count), reg_bit, 1);
+ found1 = find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count),
+ EXTIOI_IRQS, 0);
+ bitmap_set(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), reg_bit, 1);
+ if (found1 >= EXTIOI_IRQS) {
+ qemu_set_irq(s->parent_irq[cpu][ipnum], level);
+ }
+ } else {
+ bitmap_clear(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1);
+ bitmap_clear(((void *)s->ext_coreisr[cpu] + 0x4 * reg_count), reg_bit, 1);
+ found1 = find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count),
+ EXTIOI_IRQS, 0);
+ found1 += reg_count * 32;
+ bitmap_clear(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), reg_bit, 1);
+ found2 = find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count),
+ EXTIOI_IRQS, 0);
+ if ((found1 < EXTIOI_IRQS) && (found2 >= EXTIOI_IRQS)) {
+ qemu_set_irq(s->parent_irq[cpu][ipnum], level);
+ }
+ }
+}
+
+static void ioapic_setirq(void *opaque, int irq, int level)
+{
+ apicState *s = opaque;
+ uint8_t reg_count, reg_bit;
+
+ reg_count = irq / 32;
+ reg_bit = irq % 32;
+
+ if (level) {
+ bitmap_set(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1);
+ } else {
+ bitmap_clear(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1);
+ }
+
+ ioapic_update_irq(s, irq, level);
+}
+
+static uint32_t apic_readb(void *opaque, hwaddr addr)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint8_t ret;
+ int cpu;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ ret = 0;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ ret = *(uint8_t *)((void *)state->ext_en + off);
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ ret = *(uint8_t *)((void *)state->ext_bounce + off);
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ ret = *(uint8_t *)((void *)state->ext_isr + off);
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ ret = *(uint8_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f));
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ off -= EXTIOI_IPMAP_START;
+ ret = *(uint8_t *)((void *)state->ext_ipmap + off);
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ off -= EXTIOI_COREMAP_START;
+ ret = *(uint8_t *)((void *)state->ext_coremap + off);
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ ret = *(uint8_t *)((void *)state->ext_nodetype + off);
+ }
+
+ DPRINTF("readb reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, ret);
+ return ret;
+}
+
+static uint32_t apic_readw(void *opaque, hwaddr addr)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint16_t ret;
+ int cpu;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ ret = 0;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ ret = *(uint16_t *)((void *)state->ext_en + off);
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ ret = *(uint16_t *)((void *)state->ext_bounce + off);
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ ret = *(uint16_t *)((void *)state->ext_isr + off);
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ ret = *(uint16_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f));
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ off -= EXTIOI_IPMAP_START;
+ ret = *(uint16_t *)((void *)state->ext_ipmap + off);
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ off -= EXTIOI_COREMAP_START;
+ ret = *(uint16_t *)((void *)state->ext_coremap + off);
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ ret = *(uint16_t *)((void *)state->ext_nodetype + off);
+ }
+
+ DPRINTF("readw reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, ret);
+ return ret;
+}
+
+static uint32_t apic_readl(void *opaque, hwaddr addr)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint32_t ret;
+ int cpu;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ ret = 0;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ ret = *(uint32_t *)((void *)state->ext_en + off);
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ ret = *(uint32_t *)((void *)state->ext_bounce + off);
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ ret = *(uint32_t *)((void *)state->ext_isr + off);
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ ret = *(uint32_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f));
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ off -= EXTIOI_IPMAP_START;
+ ret = *(uint32_t *)((void *)state->ext_ipmap + off);
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ off -= EXTIOI_COREMAP_START;
+ ret = *(uint32_t *)((void *)state->ext_coremap + off);
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ ret = *(uint32_t *)((void *)state->ext_nodetype + off);
+ }
+
+ DPRINTF("readl reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, ret);
+ return ret;
+
+}
+
+static void apic_writeb(void *opaque, hwaddr addr, uint32_t val)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint8_t old;
+ int cpu, i, ipnum, level, mask;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ old = *(uint8_t *)((void *)state->ext_en + off);
+ if (old != val) {
+ *(uint8_t *)((void *)state->ext_en + off) = val;
+ old = old ^ val;
+ mask = 0x1;
+ for (i = 0; i < 8; i++) {
+ if (old & mask) {
+ level = !!(val & (0x1 << i));
+ ioapic_update_irq(state, i + off * 8, level);
+ }
+ mask = mask << 1;
+ }
+ }
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ *(uint8_t *)((void *)state->ext_bounce + off) = val;
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ old = *(uint8_t *)((void *)state->ext_isr + off);
+ *(uint8_t *)((void *)state->ext_isr + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 8; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ off = off & 0x1f;
+ old = *(uint8_t *)((void *)state->ext_coreisr[cpu] + off);
+ *(uint8_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 8; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ off -= EXTIOI_IPMAP_START;
+ val = val & 0xf;
+ *(uint8_t *)((void *)state->ext_ipmap + off) = val;
+ ipnum = 0;
+ for (i = 0; i < 4; i++) {
+ if (val & (0x1 << i)) {
+ ipnum = i;
+ break;
+ }
+ }
+ if (val) {
+ for (i = 0; i < 32; i++) {
+ cpu = off * 32 + i;
+ state->ext_sw_ipmap[cpu] = ipnum;
+ }
+ }
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ off -= EXTIOI_COREMAP_START;
+ val = val & 0xff;
+ *(uint8_t *)((void *)state->ext_coremap + off) = val;
+ state->ext_sw_coremap[off] = val;
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ *(uint8_t *)((void *)state->ext_nodetype + off) = val;
+ }
+
+ DPRINTF("writeb reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val);
+}
+
+static void apic_writew(void *opaque, hwaddr addr, uint32_t val)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint16_t old;
+ int cpu, i, level, mask;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ old = *(uint16_t *)((void *)state->ext_en + off);
+ if (old != val) {
+ *(uint16_t *)((void *)state->ext_en + off) = val;
+ old = old ^ val;
+ mask = 0x1;
+ for (i = 0; i < 16; i++) {
+ if (old & mask) {
+ level = !!(val & (0x1 << i));
+ ioapic_update_irq(state, i + off * 8, level);
+ }
+ mask = mask << 1;
+ }
+ }
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ *(uint16_t *)((void *)state->ext_bounce + off) = val;
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ old = *(uint16_t *)((void *)state->ext_isr + off);
+ *(uint16_t *)((void *)state->ext_isr + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 16; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ off = off & 0x1f;
+ old = *(uint16_t *)((void *)state->ext_coreisr[cpu] + off);
+ *(uint16_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 16; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ apic_writeb(opaque, addr, val & 0xff);
+ apic_writeb(opaque, addr + 1, (val >> 8) & 0xff);
+
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ apic_writeb(opaque, addr, val & 0xff);
+ apic_writeb(opaque, addr + 1, (val >> 8) & 0xff);
+
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ *(uint16_t *)((void *)state->ext_nodetype + off) = val;
+ }
+
+ DPRINTF("writew reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val);
+}
+
+static void apic_writel(void *opaque, hwaddr addr, uint32_t val)
+{
+ nodeApicState *node;
+ apicState *state;
+ unsigned long off;
+ uint32_t old;
+ int cpu, i, level, mask;
+
+ node = (nodeApicState *)opaque;
+ state = node->apic;
+ off = addr & 0xfffff;
+ if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) {
+ off -= EXTIOI_ENABLE_START;
+ old = *(uint32_t *)((void *)state->ext_en + off);
+ if (old != val) {
+ *(uint32_t *)((void *)state->ext_en + off) = val;
+ old = old ^ val;
+ mask = 0x1;
+ for (i = 0; i < 32; i++) {
+ if (old & mask) {
+ level = !!(val & (0x1 << i));
+ ioapic_update_irq(state, i + off * 8, level);
+ }
+ mask = mask << 1;
+ }
+ }
+ } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) {
+ off -= EXTIOI_BOUNCE_START;
+ *(uint32_t *)((void *)state->ext_bounce + off) = val;
+ } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) {
+ off -= EXTIOI_ISR_START;
+ old = *(uint32_t *)((void *)state->ext_isr + off);
+ *(uint32_t *)((void *)state->ext_isr + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 32; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) {
+ off -= EXTIOI_COREISR_START;
+ cpu = (off >> 8) & 0xff;
+ off = off & 0x1f;
+ old = *(uint32_t *)((void *)state->ext_coreisr[cpu] + off);
+ *(uint32_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val;
+ mask = 0x1;
+ for (i = 0; i < 32; i++) {
+ if ((old & mask) && (val & mask)) {
+ ioapic_update_irq(state, i + off * 8, 0);
+ }
+ mask = mask << 1;
+ }
+ } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) {
+ apic_writeb(opaque, addr, val & 0xff);
+ apic_writeb(opaque, addr + 1, (val >> 8) & 0xff);
+ apic_writeb(opaque, addr + 2, (val >> 16) & 0xff);
+ apic_writeb(opaque, addr + 3, (val >> 24) & 0xff);
+
+ } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) {
+ apic_writeb(opaque, addr, val & 0xff);
+ apic_writeb(opaque, addr + 1, (val >> 8) & 0xff);
+ apic_writeb(opaque, addr + 2, (val >> 16) & 0xff);
+ apic_writeb(opaque, addr + 3, (val >> 24) & 0xff);
+
+ } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) {
+ off -= EXTIOI_NODETYPE_START;
+ *(uint32_t *)((void *)state->ext_nodetype + off) = val;
+ }
+
+ DPRINTF("writel reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val);
+}
+
+static uint64_t apic_readfn(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ switch (size) {
+ case 1:
+ return apic_readb(opaque, addr);
+ case 2:
+ return apic_readw(opaque, addr);
+ case 4:
+ return apic_readl(opaque, addr);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void apic_writefn(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ switch (size) {
+ case 1:
+ apic_writeb(opaque, addr, value);
+ break;
+ case 2:
+ apic_writew(opaque, addr, value);
+ break;
+ case 4:
+ apic_writel(opaque, addr, value);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static const VMStateDescription vmstate_apic = {
+ .name = "apic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = ext_irq_pre_save,
+ .post_load = ext_irq_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(ext_en, apicState, EXTIOI_IRQS_BITMAP_SIZE),
+ VMSTATE_UINT8_ARRAY(ext_bounce, apicState, EXTIOI_IRQS_BITMAP_SIZE),
+ VMSTATE_UINT8_ARRAY(ext_isr, apicState, EXTIOI_IRQS_BITMAP_SIZE),
+ VMSTATE_UINT8_2DARRAY(ext_coreisr, apicState, MAX_CORES,
+ EXTIOI_IRQS_BITMAP_SIZE),
+ VMSTATE_UINT8_ARRAY(ext_ipmap, apicState, EXTIOI_IRQS_IPMAP_SIZE),
+ VMSTATE_UINT8_ARRAY(ext_coremap, apicState, EXTIOI_IRQS),
+ VMSTATE_UINT16_ARRAY(ext_nodetype, apicState, 16),
+ VMSTATE_UINT64(ext_control, apicState),
+ VMSTATE_UINT8_ARRAY(ext_sw_ipmap, apicState, EXTIOI_IRQS),
+ VMSTATE_UINT8_ARRAY(ext_sw_coremap, apicState, EXTIOI_IRQS),
+ VMSTATE_UINT8_2DARRAY(ext_ipisr, apicState, MAX_CORES * LS3A_INTC_IP,
+ EXTIOI_IRQS_BITMAP_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const MemoryRegionOps apic_ops = {
+ .read = apic_readfn,
+ .write = apic_writefn,
+ .impl.min_access_size = 1,
+ .impl.max_access_size = 4,
+ .valid.min_access_size = 1,
+ .valid.max_access_size = 4,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+int cpu_init_apic(LoongarchMachineState *ms, CPULOONGARCHState *env, int cpu)
+{
+ apicState *apic;
+ nodeApicState *node;
+ MemoryRegion *iomem;
+ unsigned long base;
+ int pin;
+ char str[32];
+
+ if (ms->apic == NULL) {
+ apic = g_malloc0(sizeof(apicState));
+ vmstate_register(NULL, 0, &vmstate_apic, apic);
+ apic->irq = qemu_allocate_irqs(ioapic_setirq, apic, EXTIOI_IRQS);
+
+ for (pin = 0; pin < LS3A_INTC_IP; pin++) {
+ /* cpu_pin[9:2] <= intc_pin[7:0] */
+ apic->parent_irq[cpu][pin] = env->irq[pin + 2];
+ }
+ ms->apic = apic;
+
+ if (cpu == 0) {
+ base = APIC_BASE;
+ node = g_malloc0(sizeof(nodeApicState));
+ node->apic = ms->apic;
+ node->addr = base;
+
+ iomem = g_new(MemoryRegion, 1);
+            snprintf(str, sizeof(str), "apic%d", cpu);
+ /* extioi addr 0x1f010000~0x1f02ffff */
+ memory_region_init_io(iomem, NULL, &apic_ops, node, str, 0x20000);
+ memory_region_add_subregion(get_system_memory(), base, iomem);
+ }
+
+ } else {
+ if (cpu != 0) {
+ for (pin = 0; pin < LS3A_INTC_IP; pin++) {
+ ms->apic->parent_irq[cpu][pin] = env->irq[pin + 2];
+ }
+ }
+ }
+ return 0;
+}
+
diff --git a/hw/loongarch/ioapic.c b/hw/loongarch/ioapic.c
new file mode 100644
index 0000000000000000000000000000000000000000..60abff85543bbaba9afe72021e1df6dedbe15ba6
--- /dev/null
+++ b/hw/loongarch/ioapic.c
@@ -0,0 +1,422 @@
+/*
+ * LS7A1000 Northbridge IOAPIC support
+ *
+ * Copyright (c) 2019 Loongarch Technology
+ * Authors:
+ * Zhu Chen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+#include "sysemu/kvm.h"
+#include "linux/kvm.h"
+#include "migration/vmstate.h"
+
+#define DEBUG_LS7A_APIC 0
+
+#define DPRINTF(fmt, ...) \
+do { \
+ if (DEBUG_LS7A_APIC) { \
+ fprintf(stderr, "IOAPIC: " fmt , ## __VA_ARGS__); \
+ } \
+} while (0)
+
+#define TYPE_LS7A_APIC "ioapic"
+#define LS7A_APIC(obj) OBJECT_CHECK(LS7AApicState, (obj), TYPE_LS7A_APIC)
+
+#define LS7A_IOAPIC_ROUTE_ENTRY_OFFSET 0x100
+#define LS7A_IOAPIC_INT_ID_OFFSET 0x00
+#define LS7A_INT_ID_VAL 0x7000000UL
+#define LS7A_INT_ID_VER 0x1f0001UL
+#define LS7A_IOAPIC_INT_MASK_OFFSET 0x20
+#define LS7A_IOAPIC_INT_EDGE_OFFSET 0x60
+#define LS7A_IOAPIC_INT_CLEAR_OFFSET 0x80
+#define LS7A_IOAPIC_INT_STATUS_OFFSET 0x3a0
+#define LS7A_IOAPIC_INT_POL_OFFSET 0x3e0
+#define LS7A_IOAPIC_HTMSI_EN_OFFSET 0x40
+#define LS7A_IOAPIC_HTMSI_VEC_OFFSET 0x200
+#define LS7A_AUTO_CTRL0_OFFSET 0xc0
+#define LS7A_AUTO_CTRL1_OFFSET 0xe0
+
+typedef struct LS7AApicState {
+    SysBusDevice parent_obj;
+    /* 256 HT MSI outputs plus the legacy INT line at index 256 */
+    qemu_irq parent_irq[257];
+    uint64_t int_id;
+    uint64_t int_mask;     /* 0x020 interrupt mask register */
+    uint64_t htmsi_en;     /* 0x040 1=msi */
+    uint64_t intedge;      /* 0x060 edge=1 level=0 */
+    uint64_t intclr;       /* 0x080 write 1 clears an edge int, 0 is unused */
+    uint64_t auto_crtl0;   /* 0x0c0 */
+    uint64_t auto_crtl1;   /* 0x0e0 */
+    uint8_t route_entry[64];  /* 0x100 - 0x140 */
+    uint8_t htmsi_vector[64]; /* 0x200 - 0x240 */
+    uint64_t intisr_chip0; /* 0x300 */
+    uint64_t intisr_chip1; /* 0x320 */
+    uint64_t last_intirr;  /* edge detection */
+    uint64_t intirr;       /* 0x380 interrupt request register */
+    uint64_t intisr;       /* 0x3a0 interrupt service register */
+    uint64_t int_polarity; /* 0x3e0 interrupt level polarity selection
+                              register, 0 for high level trigger */
+    MemoryRegion iomem;
+} LS7AApicState;
+
+/*
+ * Recompute the controller outputs from intirr/int_mask/htmsi_en.
+ *
+ * Pending lines that are unmasked and NOT routed to HT MSI drive the
+ * legacy output parent_irq[256] as a level.  Lines with their htmsi_en
+ * bit set are delivered through the per-line htmsi_vector[] mapping,
+ * with intisr tracking which of them are currently in service.
+ */
+static void update_irq(LS7AApicState *s)
+{
+    int i;
+    if ((s->intirr & (~s->int_mask)) & (~s->htmsi_en)) {
+        DPRINTF("7a update irqline up\n");
+        s->intisr = (s->intirr & (~s->int_mask) & (~s->htmsi_en));
+        qemu_set_irq(s->parent_irq[256], 1);
+    } else {
+        DPRINTF("7a update irqline down\n");
+        /* keep only the MSI-owned in-service bits */
+        s->intisr &= (~s->htmsi_en);
+        qemu_set_irq(s->parent_irq[256], 0);
+    }
+    if (s->htmsi_en) {
+        for (i = 0; i < 64; i++) {
+            /* newly pending MSI line: mark in-service and raise it */
+            if ((((~s->intisr) & s->intirr) & s->htmsi_en) & (1ULL << i)) {
+                s->intisr |= 1ULL << i;
+                qemu_set_irq(s->parent_irq[s->htmsi_vector[i]], 1);
+            } else if (((~(s->intisr | s->intirr)) & s->htmsi_en) &
+                       (1ULL << i)) {
+                qemu_set_irq(s->parent_irq[s->htmsi_vector[i]], 0);
+            }
+        }
+    }
+}
+
+/* GPIO input handler: latch level-triggered lines into intirr. */
+static void irq_handler(void *opaque, int irq, int level)
+{
+    LS7AApicState *s = opaque;
+    uint64_t bit;
+
+    assert(irq < 64);
+    bit = 1ULL << irq;
+    DPRINTF("------ %s irq %d %d\n", __func__, irq, level);
+
+    if (!(s->intedge & bit)) {
+        /* level triggered */
+        if (level) {
+            s->intirr |= bit;
+        } else {
+            s->intirr &= ~bit;
+        }
+    } else {
+        /* edge triggered */
+        /*TODO*/
+    }
+    update_irq(s);
+}
+
+/*
+ * MMIO read for the 4 KiB LS7A ioapic register window.  8-byte reads
+ * return whole registers; 1-byte reads index the per-line
+ * htmsi_vector[] / route_entry[] tables.  Other sizes read as 0.
+ */
+static uint64_t ls7a_apic_reg_read(void *opaque, hwaddr addr, unsigned size)
+{
+    LS7AApicState *a = opaque;
+    uint64_t val = 0;
+    uint64_t offset;
+    int64_t offset_tmp;
+    offset = addr & 0xfff;
+    if (8 == size) {
+        switch (offset) {
+        case LS7A_IOAPIC_INT_ID_OFFSET:
+            /* version word stacked on top of the ID word */
+            val = LS7A_INT_ID_VER;
+            val = (val << 32) + LS7A_INT_ID_VAL;
+            break;
+        case LS7A_IOAPIC_INT_MASK_OFFSET:
+            val = a->int_mask;
+            break;
+        case LS7A_IOAPIC_INT_STATUS_OFFSET:
+            /* in-service bits with masked lines hidden */
+            val = a->intisr & (~a->int_mask);
+            break;
+        case LS7A_IOAPIC_INT_EDGE_OFFSET:
+            val = a->intedge;
+            break;
+        case LS7A_IOAPIC_INT_POL_OFFSET:
+            val = a->int_polarity;
+            break;
+        case LS7A_IOAPIC_HTMSI_EN_OFFSET:
+            val = a->htmsi_en;
+            break;
+        case LS7A_AUTO_CTRL0_OFFSET:
+        case LS7A_AUTO_CTRL1_OFFSET:
+            /* auto-control registers read as 0 */
+            break;
+        default:
+            break;
+        }
+    } else if (1 == size) {
+        if (offset >= LS7A_IOAPIC_HTMSI_VEC_OFFSET) {
+            offset_tmp = offset - LS7A_IOAPIC_HTMSI_VEC_OFFSET;
+            if (offset_tmp >= 0 && offset_tmp < 64) {
+                val = a->htmsi_vector[offset_tmp];
+            }
+        } else if (offset >= LS7A_IOAPIC_ROUTE_ENTRY_OFFSET) {
+            offset_tmp = offset - LS7A_IOAPIC_ROUTE_ENTRY_OFFSET;
+            if (offset_tmp >= 0 && offset_tmp < 64) {
+                val = a->route_entry[offset_tmp];
+                DPRINTF("addr %lx val %lx\n", addr, val);
+            }
+        }
+    }
+    DPRINTF(TARGET_FMT_plx" val %lx\n", addr, val);
+    return val;
+}
+
+/*
+ * MMIO write for the LS7A ioapic window.  8-byte writes update whole
+ * registers (mask and clear re-evaluate the output lines); 1-byte
+ * writes update the per-line htmsi_vector[] / route_entry[] tables.
+ */
+static void ls7a_apic_reg_write(void *opaque, hwaddr addr, uint64_t data, unsigned size)
+{
+    LS7AApicState *a = opaque;
+    int64_t offset_tmp;
+    uint64_t offset;
+    offset = addr & 0xfff;
+    DPRINTF(TARGET_FMT_plx" size %d val %lx\n", addr, size, data);
+    if (8 == size) {
+        switch (offset) {
+        case LS7A_IOAPIC_INT_MASK_OFFSET:
+            a->int_mask = data;
+            update_irq(a);
+            break;
+        case LS7A_IOAPIC_INT_STATUS_OFFSET:
+            a->intisr = data;
+            break;
+        case LS7A_IOAPIC_INT_EDGE_OFFSET:
+            a->intedge = data;
+            break;
+        case LS7A_IOAPIC_INT_CLEAR_OFFSET:
+            /* write-1-to-clear of the in-service bits */
+            a->intisr &= (~data);
+            update_irq(a);
+            break;
+        case LS7A_IOAPIC_INT_POL_OFFSET:
+            a->int_polarity = data;
+            break;
+        case LS7A_IOAPIC_HTMSI_EN_OFFSET:
+            a->htmsi_en = data;
+            break;
+        case LS7A_AUTO_CTRL0_OFFSET:
+        case LS7A_AUTO_CTRL1_OFFSET:
+            /* auto-control writes are ignored */
+            break;
+        default:
+            break;
+        }
+    } else if (1 == size) {
+        if (offset >= LS7A_IOAPIC_HTMSI_VEC_OFFSET) {
+            offset_tmp = offset - LS7A_IOAPIC_HTMSI_VEC_OFFSET;
+            if (offset_tmp >= 0 && offset_tmp < 64) {
+                a->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff);
+            }
+        } else if (offset >= LS7A_IOAPIC_ROUTE_ENTRY_OFFSET) {
+            offset_tmp = offset - LS7A_IOAPIC_ROUTE_ENTRY_OFFSET;
+            if (offset_tmp >= 0 && offset_tmp < 64) {
+                a->route_entry[offset_tmp] = (uint8_t)(data & 0xff);
+            }
+        }
+    }
+}
+
+/* 1-8 byte accesses accepted; the handlers only act on sizes 1 and 8. */
+static const MemoryRegionOps ls7a_apic_ops = {
+    .read = ls7a_apic_reg_read,
+    .write = ls7a_apic_reg_write,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/*
+ * Pull the in-kernel LS7A ioapic state into LS7AApicState before the
+ * device is migrated.  No-op without the in-kernel irqchip.
+ */
+static int kvm_ls7a_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+    LS7AApicState *s = opaque;
+    struct loongarch_kvm_irqchip *chip;
+    struct ls7a_ioapic_state *state;
+    int ret, i, length;
+
+    if (!kvm_enabled() || !kvm_irqchip_in_kernel()) {
+        return 0;
+    }
+
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct ls7a_ioapic_state);
+    /* g_malloc0() already zeroes the buffer, no extra memset needed */
+    chip = g_malloc0(length);
+    chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC;
+    chip->len = length;
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
+    if (ret < 0) {
+        /* kvm_vm_ioctl() returns -errno on failure */
+        fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(-ret));
+        abort();
+    }
+    state = (struct ls7a_ioapic_state *)chip->data;
+    s->int_id = state->int_id;
+    s->int_mask = state->int_mask;
+    s->htmsi_en = state->htmsi_en;
+    s->intedge = state->intedge;
+    s->intclr = state->intclr;
+    s->auto_crtl0 = state->auto_crtl0;
+    s->auto_crtl1 = state->auto_crtl1;
+    for (i = 0; i < 64; i++) {
+        s->route_entry[i] = state->route_entry[i];
+        s->htmsi_vector[i] = state->htmsi_vector[i];
+    }
+    s->intisr_chip0 = state->intisr_chip0;
+    s->intisr_chip1 = state->intisr_chip1;
+    s->intirr = state->intirr;
+    s->intisr = state->intisr;
+    s->int_polarity = state->int_polarity;
+    g_free(chip);
+#endif
+    return 0;
+}
+
+/*
+ * Push the LS7AApicState back into the in-kernel irqchip after an
+ * incoming migration (also called from reset).  No-op without the
+ * in-kernel irqchip.
+ */
+static int kvm_ls7a_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+    LS7AApicState *s = opaque;
+    struct loongarch_kvm_irqchip *chip;
+    struct ls7a_ioapic_state *state;
+    int ret, i, length;
+
+    if (!kvm_enabled() || !kvm_irqchip_in_kernel()) {
+        return 0;
+    }
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct ls7a_ioapic_state);
+    /* g_malloc0() already zeroes the buffer, no extra memset needed */
+    chip = g_malloc0(length);
+    chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC;
+    chip->len = length;
+
+    state = (struct ls7a_ioapic_state *)chip->data;
+    state->int_id = s->int_id;
+    state->int_mask = s->int_mask;
+    state->htmsi_en = s->htmsi_en;
+    state->intedge = s->intedge;
+    state->intclr = s->intclr;
+    state->auto_crtl0 = s->auto_crtl0;
+    state->auto_crtl1 = s->auto_crtl1;
+    for (i = 0; i < 64; i++) {
+        state->route_entry[i] = s->route_entry[i];
+        state->htmsi_vector[i] = s->htmsi_vector[i];
+    }
+    state->intisr_chip0 = s->intisr_chip0;
+    state->intisr_chip1 = s->intisr_chip1;
+    state->last_intirr = 0;
+    state->intirr = s->intirr;
+    state->intisr = s->intisr;
+    state->int_polarity = s->int_polarity;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+    if (ret < 0) {
+        /* this is the SET path; report the right ioctl and errno */
+        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret));
+        abort();
+    }
+    g_free(chip);
+#endif
+    return 0;
+}
+
+/*
+ * Device reset: mask all lines, clear pending/in-service state, restore
+ * route entries to their default, then push the state into KVM via the
+ * post_load helper so the in-kernel irqchip matches.
+ */
+static void ls7a_apic_reset(DeviceState *d)
+{
+    LS7AApicState *s = LS7A_APIC(d);
+    int i;
+
+    /* (LS7A_INT_ID_VER << 32) | LS7A_INT_ID_VAL */
+    s->int_id = 0x001f000107000000;
+    s->int_mask = 0xffffffffffffffff; /* everything masked */
+    s->htmsi_en = 0x0;
+    s->intedge = 0x0;
+    s->intclr = 0x0;
+    s->auto_crtl0 = 0x0;
+    s->auto_crtl1 = 0x0;
+    for (i = 0; i < 64; i++) {
+        s->route_entry[i] = 0x1;
+        s->htmsi_vector[i] = 0x0;
+    }
+    s->intisr_chip0 = 0x0;
+    s->intisr_chip1 = 0x0;
+    s->intirr = 0x0;
+    s->intisr = 0x0;
+    s->int_polarity = 0x0;
+    kvm_ls7a_post_load(s, 0);
+}
+
+/* Instance init: 4 KiB MMIO window, 257 irq outputs, 64 GPIO inputs. */
+static void ls7a_apic_init(Object *obj)
+{
+    DeviceState *dev = DEVICE(obj);
+    LS7AApicState *s = LS7A_APIC(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    int i;
+
+    memory_region_init_io(&s->iomem, obj, &ls7a_apic_ops, s,
+                          TYPE_LS7A_APIC, 0x1000);
+    sysbus_init_mmio(sbd, &s->iomem);
+    for (i = 0; i < 257; i++) {
+        sysbus_init_irq(sbd, &s->parent_irq[i]);
+    }
+    qdev_init_gpio_in(dev, irq_handler, 64);
+}
+
+/*
+ * Migration state; pre_save/post_load keep the in-kernel irqchip in
+ * sync.  NOTE(review): int_id is filled in by pre_save but not listed
+ * below, so it is never migrated — harmless if constant; confirm.
+ */
+static const VMStateDescription vmstate_ls7a_apic = {
+    .name = TYPE_LS7A_APIC,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = kvm_ls7a_pre_save,
+    .post_load = kvm_ls7a_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(int_mask, LS7AApicState),
+        VMSTATE_UINT64(htmsi_en, LS7AApicState),
+        VMSTATE_UINT64(intedge, LS7AApicState),
+        VMSTATE_UINT64(intclr, LS7AApicState),
+        VMSTATE_UINT64(auto_crtl0, LS7AApicState),
+        VMSTATE_UINT64(auto_crtl1, LS7AApicState),
+        VMSTATE_UINT8_ARRAY(route_entry, LS7AApicState, 64),
+        VMSTATE_UINT8_ARRAY(htmsi_vector, LS7AApicState, 64),
+        VMSTATE_UINT64(intisr_chip0, LS7AApicState),
+        VMSTATE_UINT64(intisr_chip1, LS7AApicState),
+        VMSTATE_UINT64(last_intirr, LS7AApicState),
+        VMSTATE_UINT64(intirr, LS7AApicState),
+        VMSTATE_UINT64(intisr, LS7AApicState),
+        VMSTATE_UINT64(int_polarity, LS7AApicState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* QOM class init: hook up reset and the migration description. */
+static void ls7a_apic_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = ls7a_apic_reset;
+    dc->vmsd = &vmstate_ls7a_apic;
+}
+
+static const TypeInfo ls7a_apic_info = {
+    .name = TYPE_LS7A_APIC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(LS7AApicState),
+    .instance_init = ls7a_apic_init,
+    .class_init = ls7a_apic_class_init,
+};
+
+static void ls7a_apic_register_types(void)
+{
+    type_register_static(&ls7a_apic_info);
+}
+
+type_init(ls7a_apic_register_types)
diff --git a/hw/loongarch/iocsr.c b/hw/loongarch/iocsr.c
new file mode 100644
index 0000000000000000000000000000000000000000..13d356d806d86837b643765e56953e7c77f48db7
--- /dev/null
+++ b/hw/loongarch/iocsr.c
@@ -0,0 +1,237 @@
+/*
+ * LOONGARCH IOCSR support
+ *
+ * Copyright (c) 2021 Loongarch Technology
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "qemu/log.h"
+#include "sysemu/kvm.h"
+#include "linux/kvm.h"
+#include "migration/vmstate.h"
+#include "hw/boards.h"
+#include "hw/loongarch/larch.h"
+
+#define BIT_ULL(nr) (1ULL << (nr))
+#define LOONGARCH_IOCSR_FEATURES 0x8
+#define IOCSRF_TEMP BIT_ULL(0)
+#define IOCSRF_NODECNT BIT_ULL(1)
+#define IOCSRF_MSI BIT_ULL(2)
+#define IOCSRF_EXTIOI BIT_ULL(3)
+#define IOCSRF_CSRIPI BIT_ULL(4)
+#define IOCSRF_FREQCSR BIT_ULL(5)
+#define IOCSRF_FREQSCALE BIT_ULL(6)
+#define IOCSRF_DVFSV1 BIT_ULL(7)
+#define IOCSRF_GMOD BIT_ULL(9)
+#define IOCSRF_VM BIT_ULL(11)
+#define LOONGARCH_IOCSR_VENDOR 0x10
+#define LOONGARCH_IOCSR_CPUNAME 0x20
+#define LOONGARCH_IOCSR_NODECNT 0x408
+#define LOONGARCH_IOCSR_MISC_FUNC 0x420
+#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21)
+#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48)
+
+/* Indices into iocsr_val[] / iocsr_array[] for the migratable IOCSRs. */
+enum {
+    IOCSR_FEATURES,
+    IOCSR_VENDOR,
+    IOCSR_CPUNAME,
+    IOCSR_NODECNT,
+    IOCSR_MISC_FUNC,
+    IOCSR_MAX
+};
+
+#ifdef CONFIG_KVM
+/* index (enum above) -> IOCSR register address used by the KVM ioctls */
+static uint32_t iocsr_array[IOCSR_MAX] = {
+    [IOCSR_FEATURES] = LOONGARCH_IOCSR_FEATURES,
+    [IOCSR_VENDOR] = LOONGARCH_IOCSR_VENDOR,
+    [IOCSR_CPUNAME] = LOONGARCH_IOCSR_CPUNAME,
+    [IOCSR_NODECNT] = LOONGARCH_IOCSR_NODECNT,
+    [IOCSR_MISC_FUNC] = LOONGARCH_IOCSR_MISC_FUNC,
+};
+#endif
+
+
+#define TYPE_IOCSR "iocsr"
+#define IOCSR(obj) OBJECT_CHECK(IOCSRState, (obj), TYPE_IOCSR)
+
+/* Migratable IOCSR register values, indexed by the enum above. */
+typedef struct IOCSRState {
+    SysBusDevice parent_obj;
+    uint64_t iocsr_val[IOCSR_MAX];
+} IOCSRState;
+
+/*
+ * Default register values; the vendor and cpuname entries are patched
+ * in iocsr_instance_init() from the machine class cpu_name.
+ * NOTE(review): file-scope but not static — confirm no other file
+ * really needs this symbol.
+ */
+IOCSRState iocsr_init = {
+    .iocsr_val = {
+        IOCSRF_NODECNT | IOCSRF_MSI | IOCSRF_EXTIOI
+            | IOCSRF_CSRIPI | IOCSRF_GMOD | IOCSRF_VM,
+        0x6e6f73676e6f6f4c, /* "Loongson" (little-endian ASCII) */
+        0x303030354133, /* "3A5000" */
+        0x4,
+        0x0,
+    }
+};
+
+/*
+ * Read the IOCSR registers back from KVM before migration.  On an
+ * ioctl failure the previous value in iocsr_val[] is kept (the
+ * original stored the uninitialized entry.data) and a warning is
+ * printed.
+ */
+static int kvm_iocsr_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+    IOCSRState *s = opaque;
+    struct kvm_iocsr_entry entry;
+    int i, ret;
+
+    if (!kvm_enabled()) {
+        return 0;
+    }
+
+    for (i = 0; i < IOCSR_MAX; i++) {
+        entry.addr = iocsr_array[i];
+        entry.data = 0;
+        ret = kvm_vm_ioctl(kvm_state, KVM_LOONGARCH_GET_IOCSR, &entry);
+        if (ret < 0) {
+            /* kvm_vm_ioctl() returns -errno on failure */
+            fprintf(stderr, "KVM_LOONGARCH_GET_IOCSR 0x%x failed: %s\n",
+                    iocsr_array[i], strerror(-ret));
+            continue;
+        }
+        s->iocsr_val[i] = entry.data;
+    }
+#endif
+    return 0;
+}
+
+/*
+ * Write the migrated IOCSR values into KVM (also used from reset).
+ * Failures are reported instead of being silently ignored.
+ */
+static int kvm_iocsr_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+    IOCSRState *s = opaque;
+    struct kvm_iocsr_entry entry;
+    int i, ret;
+
+    if (!kvm_enabled()) {
+        return 0;
+    }
+
+    for (i = 0; i < IOCSR_MAX; i++) {
+        entry.addr = iocsr_array[i];
+        entry.data = s->iocsr_val[i];
+        ret = kvm_vm_ioctl(kvm_state, KVM_LOONGARCH_SET_IOCSR, &entry);
+        if (ret < 0) {
+            /* kvm_vm_ioctl() returns -errno on failure */
+            fprintf(stderr, "KVM_LOONGARCH_SET_IOCSR 0x%x failed: %s\n",
+                    iocsr_array[i], strerror(-ret));
+        }
+    }
+#endif
+    return 0;
+}
+
+/* Device reset: restore the defaults and push them into KVM. */
+static void iocsr_reset(DeviceState *d)
+{
+    IOCSRState *s = IOCSR(d);
+    int idx;
+
+    for (idx = 0; idx < IOCSR_MAX; idx++) {
+        s->iocsr_val[idx] = iocsr_init.iocsr_val[idx];
+    }
+    kvm_iocsr_post_load(s, 0);
+}
+/*
+ * Split a cpu name of the form "<vendor>-<model>" (e.g.
+ * "Loongson-3A5000") into two at-most-8-byte ASCII fields.  Nothing is
+ * written unless a '-' separator is present (same as the original
+ * byte-copy loops, but via memcpy and without the dead len<=0 check).
+ */
+static void init_vendor_cpuname(uint64_t *vendor,
+                                uint64_t *cpu_name, char *cpuname)
+{
+    size_t vendor_len, name_len;
+    char *sep;
+
+    /* guard: the original crashed in strstr() on a NULL cpu_name */
+    if (cpuname == NULL) {
+        return;
+    }
+
+    sep = strstr(cpuname, "-");
+    if (sep == NULL) {
+        return;
+    }
+
+    *vendor = 0;
+    *cpu_name = 0;
+
+    vendor_len = MIN((size_t)(sep - cpuname), sizeof(uint64_t));
+    memcpy(vendor, cpuname, vendor_len);
+
+    name_len = MIN(strlen(sep + 1), sizeof(uint64_t));
+    memcpy(cpu_name, sep + 1, name_len);
+}
+
+/*
+ * Instance init: seed iocsr_val[] from iocsr_init after patching the
+ * vendor/cpuname defaults from the machine class cpu_name.  Skipped
+ * for the "none" machine, where no Loongarch machine state exists.
+ */
+static void iocsr_instance_init(Object *obj)
+{
+    IOCSRState *s = IOCSR(obj);
+    int i;
+    LoongarchMachineState *lsms;
+    LoongarchMachineClass *lsmc;
+    Object *machine = qdev_get_machine();
+    ObjectClass *mc = object_get_class(machine);
+
+    /* the Loongarch machine state must exist before we can use it */
+    if (!strcmp(MACHINE_CLASS(mc)->name, "none")) {
+        return;
+    }
+
+    lsms = LoongarchMACHINE(machine);
+    lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+
+    init_vendor_cpuname((uint64_t *)&iocsr_init.iocsr_val[IOCSR_VENDOR],
+                        (uint64_t *)&iocsr_init.iocsr_val[IOCSR_CPUNAME],
+                        lsmc->cpu_name);
+
+    for (i = 0; i < IOCSR_MAX; i++) {
+        s->iocsr_val[i] = iocsr_init.iocsr_val[i];
+    }
+}
+
+/* Migration description; the hooks mirror iocsr_val[] into/out of KVM. */
+static const VMStateDescription vmstate_iocsr = {
+    .name = TYPE_IOCSR,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = kvm_iocsr_pre_save,
+    .post_load = kvm_iocsr_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(iocsr_val, IOCSRState, IOCSR_MAX),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* QOM class init: hook up reset and the migration description. */
+static void iocsr_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = iocsr_reset;
+    dc->vmsd = &vmstate_iocsr;
+
+}
+
+static const TypeInfo iocsr_info = {
+    .name = TYPE_IOCSR,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IOCSRState),
+    .instance_init = iocsr_instance_init,
+    .class_init = iocsr_class_init,
+};
+
+static void iocsr_register_types(void)
+{
+    type_register_static(&iocsr_info);
+}
+
+type_init(iocsr_register_types)
diff --git a/hw/loongarch/ipi.c b/hw/loongarch/ipi.c
new file mode 100644
index 0000000000000000000000000000000000000000..59186f1deff4df80444ec899e393098fe7b4fe1d
--- /dev/null
+++ b/hw/loongarch/ipi.c
@@ -0,0 +1,267 @@
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/loongarch/cpudevs.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "hw/core/cpu.h"
+#include "qemu/log.h"
+#include "hw/loongarch/bios.h"
+#include "elf.h"
+#include "linux/kvm.h"
+#include "hw/loongarch/larch.h"
+#include "hw/loongarch/ls7a.h"
+#include "migration/vmstate.h"
+
+/*
+ * Copy the in-kernel per-core IPI state into gipiState before
+ * migration.  No-op unless the in-kernel irqchip is active.
+ */
+static int gipi_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+    gipiState *state = opaque;
+    struct loongarch_gipiState *kstate;
+    struct loongarch_kvm_irqchip *chip;
+    int ret, i, j, length;
+
+    if (!kvm_enabled() || !kvm_irqchip_in_kernel()) {
+        return 0;
+    }
+
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct loongarch_gipiState);
+    /* g_malloc0() already zeroes the buffer, no extra memset needed */
+    chip = g_malloc0(length);
+    chip->chip_id = KVM_IRQCHIP_LS3A_GIPI;
+    chip->len = length;
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
+    if (ret < 0) {
+        /* kvm_vm_ioctl() returns -errno on failure */
+        fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(-ret));
+        abort();
+    }
+
+    kstate = (struct loongarch_gipiState *)chip->data;
+
+    for (i = 0; i < MAX_GIPI_CORE_NUM; i++) {
+        state->core[i].status = kstate->core[i].status;
+        state->core[i].en = kstate->core[i].en;
+        state->core[i].set = kstate->core[i].set;
+        state->core[i].clear = kstate->core[i].clear;
+        for (j = 0; j < MAX_GIPI_MBX_NUM; j++) {
+            state->core[i].buf[j] = kstate->core[i].buf[j];
+        }
+    }
+    g_free(chip);
+#endif
+
+    return 0;
+}
+
+/*
+ * Push the migrated per-core IPI state back into the in-kernel
+ * irqchip.  No-op unless the in-kernel irqchip is active.
+ */
+static int gipi_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+    gipiState *state = opaque;
+    struct loongarch_gipiState *kstate;
+    struct loongarch_kvm_irqchip *chip;
+    int ret, i, j, length;
+
+    if (!kvm_enabled() || !kvm_irqchip_in_kernel()) {
+        return 0;
+    }
+
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct loongarch_gipiState);
+    /* g_malloc0() already zeroes the buffer, no extra memset needed */
+    chip = g_malloc0(length);
+    chip->chip_id = KVM_IRQCHIP_LS3A_GIPI;
+    chip->len = length;
+    kstate = (struct loongarch_gipiState *)chip->data;
+
+    for (i = 0; i < MAX_GIPI_CORE_NUM; i++) {
+        kstate->core[i].status = state->core[i].status;
+        kstate->core[i].en = state->core[i].en;
+        kstate->core[i].set = state->core[i].set;
+        kstate->core[i].clear = state->core[i].clear;
+        for (j = 0; j < MAX_GIPI_MBX_NUM; j++) {
+            kstate->core[i].buf[j] = state->core[i].buf[j];
+        }
+    }
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+    if (ret < 0) {
+        /* this is the SET path; report the right ioctl and errno */
+        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(-ret));
+        abort();
+    }
+    g_free(chip);
+#endif
+
+    return 0;
+}
+
+/* One core's IPI registers plus its mailbox buffers. */
+static const VMStateDescription vmstate_gipi_core = {
+    .name = "gipi-single",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32(status, gipi_core),
+        VMSTATE_UINT32(en, gipi_core),
+        VMSTATE_UINT32(set, gipi_core),
+        VMSTATE_UINT32(clear, gipi_core),
+        VMSTATE_UINT64_ARRAY(buf, gipi_core, MAX_GIPI_MBX_NUM),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+/* Whole-controller state; hooks sync with the in-kernel irqchip. */
+static const VMStateDescription vmstate_gipi = {
+    .name = "gipi",
+    .pre_save = gipi_pre_save,
+    .post_load = gipi_post_load,
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]) {
+        VMSTATE_STRUCT_ARRAY(core, gipiState, MAX_GIPI_CORE_NUM, 0,
+                             vmstate_gipi_core, gipi_core),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+
+/*
+ * MMIO write for one core's IPI block (opaque is that core's
+ * gipi_core).  IPI_SEND/MAIL_SEND can target any core via the cpu
+ * field encoded in the value; the other offsets act on the local core.
+ */
+static void gipi_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+    gipi_core *s = opaque;
+    gipi_core *ss;
+    void *pbuf;
+    uint32_t cpu, action_data, mailaddr;
+    LoongarchMachineState *ms = LoongarchMACHINE(qdev_get_machine());
+
+    if ((size != 4) && (size != 8)) {
+        hw_error("size not 4 and not 8");
+    }
+    addr &= 0xff;
+    switch (addr) {
+    case CORE0_STATUS_OFF:
+        hw_error("CORE0_STATUS_OFF Can't be write\n");
+        break;
+    case CORE0_EN_OFF:
+        s->en = val;
+        break;
+    case CORE0_IPI_SEND:
+        /* val[25:16] = target cpu, val[4:0] = ipi bit to raise */
+        cpu = (val >> 16) & 0x3ff;
+        action_data = 1UL << (val & 0x1f);
+        ss = &ms->gipi->core[cpu];
+        ss->status |= action_data;
+        if (ss->status != 0) {
+            qemu_irq_raise(ss->irq);
+        }
+        break;
+    case CORE0_MAIL_SEND:
+        /* val[25:16] = target cpu, val[4:2] = 32-bit mailbox slot,
+           val[63:32] = payload */
+        cpu = (val >> 16) & 0x3ff;
+        mailaddr = (val >> 2) & 0x7;
+        ss = &ms->gipi->core[cpu];
+        pbuf = (void *)ss->buf + mailaddr * 4;
+        *(unsigned int *)pbuf = (val >> 32);
+        break;
+    case CORE0_SET_OFF:
+        hw_error("CORE0_SET_OFF Can't be write\n");
+        break;
+    case CORE0_CLEAR_OFF:
+        /* NOTE(review): ^= toggles rather than clears — a second
+           identical write re-sets the bit; confirm write-to-clear
+           semantics were intended */
+        s->status ^= val;
+        if (s->status == 0) {
+            qemu_irq_lower(s->irq);
+        }
+        break;
+    case 0x20 ... 0x3c:
+        /* direct access to the mailbox buffer */
+        pbuf = (void *)s->buf + (addr - 0x20);
+        if (size == 1) {
+            /* NOTE(review): sizes 1/2 look unreachable after the size
+               check above — confirm intent */
+            *(unsigned char *)pbuf = (unsigned char)val;
+        } else if (size == 2) {
+            *(unsigned short *)pbuf = (unsigned short)val;
+        } else if (size == 4) {
+            *(unsigned int *)pbuf = (unsigned int)val;
+        } else if (size == 8) {
+            *(unsigned long *)pbuf = (unsigned long)val;
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+/*
+ * MMIO read for one core's IPI block.  SET/CLEAR read as 0;
+ * 0x20-0x3c reads straight out of the mailbox buffer.
+ */
+static uint64_t gipi_readl(void *opaque, hwaddr addr, unsigned size)
+{
+    gipi_core *s = opaque;
+    uint64_t ret = 0;
+    void *pbuf;
+
+    addr &= 0xff;
+    if ((size != 4) && (size != 8)) {
+        hw_error("size not 4 and not 8 size:%d\n", size);
+    }
+    switch (addr) {
+    case CORE0_STATUS_OFF:
+        ret = s->status;
+        break;
+    case CORE0_EN_OFF:
+        ret = s->en;
+        break;
+    case CORE0_SET_OFF:
+        ret = 0;
+        break;
+    case CORE0_CLEAR_OFF:
+        ret = 0;
+        break;
+    case 0x20 ... 0x3c:
+        pbuf = (void *)s->buf + (addr - 0x20);
+        if (size == 1) {
+            /* NOTE(review): sizes 1/2 look unreachable after the size
+               check above — confirm intent */
+            ret = *(unsigned char *)pbuf;
+        } else if (size == 2) {
+            ret = *(unsigned short *)pbuf;
+        } else if (size == 4) {
+            ret = *(unsigned int *)pbuf;
+        } else if (size == 8) {
+            ret = *(unsigned long *)pbuf;
+        }
+        break;
+    default:
+        break;
+    }
+
+    return ret;
+}
+
+/* 4- and 8-byte accesses only; the handlers hw_error() on other sizes. */
+static const MemoryRegionOps gipi_ops = {
+    .read = gipi_readl,
+    .write = gipi_writel,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/*
+ * Map the per-cpu IPI mailbox MMIO block and record the cpu's IPI irq.
+ * The shared gipiState is allocated and registered for migration on
+ * first use.  Returns 0.
+ */
+int cpu_init_ipi(LoongarchMachineState *ms, qemu_irq parent, int cpu)
+{
+    hwaddr addr;
+    MemoryRegion *region;
+    char str[32];
+
+    if (ms->gipi == NULL) {
+        ms->gipi = g_malloc0(sizeof(gipiState));
+        vmstate_register(NULL, 0, &vmstate_gipi, ms->gipi);
+    }
+
+    ms->gipi->core[cpu].irq = parent;
+
+    addr = SMP_GIPI_MAILBOX | (cpu << 8);
+    region = g_new(MemoryRegion, 1);
+    /* snprintf: str is only 32 bytes, never overflow it */
+    snprintf(str, sizeof(str), "gipi%d", cpu);
+    memory_region_init_io(region, NULL, &gipi_ops, &ms->gipi->core[cpu],
+                          str, 0x100);
+    memory_region_add_subregion(get_system_memory(), addr, region);
+    return 0;
+}
diff --git a/hw/loongarch/larch_3a.c b/hw/loongarch/larch_3a.c
new file mode 100644
index 0000000000000000000000000000000000000000..1a4e982b742a5aecd383ff7464aba058f755b408
--- /dev/null
+++ b/hw/loongarch/larch_3a.c
@@ -0,0 +1,2057 @@
+/*
+ * QEMU loongarch 3a develop board emulation
+ *
+ * Copyright (C) 2013-2014 qiaochong
+ * Copyright (C) 2016-2017 zhangshuangshuang
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "qemu/datadir.h"
+#include "hw/hw.h"
+#include "hw/loongarch/cpudevs.h"
+#include "hw/i386/pc.h"
+#include "hw/char/serial.h"
+#include "hw/isa/isa.h"
+#include "hw/qdev-core.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
+#include "sysemu/reset.h"
+#include "migration/vmstate.h"
+#include "sysemu/cpus.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "hw/loongarch/bios.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "exec/address-spaces.h"
+#include "hw/ide.h"
+#include "hw/pci/pci_host.h"
+#include "hw/pci/msi.h"
+#include "linux/kvm.h"
+#include "sysemu/kvm.h"
+#include "sysemu/numa.h"
+#include "hw/rtc/mc146818rtc.h"
+#include "hw/irq.h"
+#include "net/net.h"
+#include "hw/platform-bus.h"
+#include "hw/timer/i8254.h"
+#include "hw/loongarch/larch.h"
+#include "hw/loongarch/ls7a.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/firmware/smbios.h"
+#include "acpi-build.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include "sysemu/block-backend.h"
+#include "hw/block/flash.h"
+#include "sysemu/device_tree.h"
+#include "qapi/visitor.h"
+#include "qapi/qapi-visit-common.h"
+#include "sysemu/tpm.h"
+#include "hw/loongarch/sysbus-fdt.h"
+
+#include <stdlib.h>
+
+#define DMA64_SUPPORTED 0x2
+#define MAX_IDE_BUS 2
+
+#define BOOTPARAM_PHYADDR 0x0ff00000ULL
+#define BOOTPARAM_ADDR (0x9000000000000000ULL + BOOTPARAM_PHYADDR)
+#define SMBIOS_PHYSICAL_ADDRESS 0x0fe00000
+#define SMBIOS_SIZE_LIMIT 0x200000
+#define RESERVED_SIZE_LIMIT 0x1100000
+#define COMMAND_LINE_SIZE 4096
+#define FW_CONF_ADDR 0x0fff0000
+
+#define PHYS_TO_VIRT(x) ((x) | 0x9000000000000000ULL)
+
+#define TARGET_REALPAGE_MASK (TARGET_PAGE_MASK << 2)
+
+#ifdef CONFIG_KVM
+#define align(x) (((x) + 63) & ~63)
+#else
+#define align(x) (((x) + 15) & ~15)
+#endif
+
+#define DEBUG_LOONGARCH3A 0
+#define FLASH_SECTOR_SIZE 4096
+
+#define DPRINTF(fmt, ...) \
+ do { if (DEBUG_LOONGARCH3A) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
+
+#define DEFINE_LS3A5K_MACHINE(suffix, name, optionfn) \
+ static void ls3a5k_init_##suffix(MachineState *machine) \
+ { \
+ ls3a5k_init(machine); \
+ } \
+ DEFINE_LOONGARCH_MACHINE(suffix, name, ls3a5k_init_##suffix, optionfn)
+
+struct efi_memory_map_loongarch {
+ uint16_t vers; /* version of efi_memory_map */
+ uint32_t nr_map; /* number of memory_maps */
+ uint32_t mem_freq; /* memory frequence */
+ struct mem_map {
+ uint32_t node_id; /* node_id which memory attached to */
+ uint32_t mem_type; /* system memory, pci memory, pci io, etc. */
+ uint64_t mem_start; /* memory map start address */
+ uint32_t mem_size; /* each memory_map size, not the total size */
+ } map[128];
+} __attribute__((packed));
+
+enum loongarch_cpu_type {
+ Loongson3 = 0x1,
+ Loongson3_comp = 0x2
+};
+
+struct GlobalProperty loongarch_compat[] = {
+ {
+ .driver = "rtl8139",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "e1000",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "virtio-net-pci",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "qxl-vga",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "VGA",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "cirrus-vga",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "virtio-vga",
+ .property = "romfile",
+ .value = "",
+ },{
+ .driver = "vmware-svga",
+ .property = "romfile",
+ .value = "",
+ },
+};
+const size_t loongarch_compat_len = G_N_ELEMENTS(loongarch_compat);
+
+/*
+ * Capability and feature descriptor structure for LOONGARCH CPU
+ */
+struct efi_cpuinfo_loongarch {
+ uint16_t vers; /* version of efi_cpuinfo_loongarch */
+ uint32_t processor_id; /* PRID, e.g. 6305, 6306 */
+ enum loongarch_cpu_type cputype; /* 3A, 3B, etc. */
+ uint32_t total_node; /* num of total numa nodes */
+ uint16_t cpu_startup_core_id; /* Core id */
+ uint16_t reserved_cores_mask;
+ uint32_t cpu_clock_freq; /* cpu_clock */
+ uint32_t nr_cpus;
+} __attribute__((packed));
+
+#define MAX_UARTS 64
+struct uart_device {
+ uint32_t iotype; /* see include/linux/serial_core.h */
+ uint32_t uartclk;
+ uint32_t int_offset;
+ uint64_t uart_base;
+} __attribute__((packed));
+
+#define MAX_SENSORS 64
+#define SENSOR_TEMPER 0x00000001
+#define SENSOR_VOLTAGE 0x00000002
+#define SENSOR_FAN 0x00000004
+struct sensor_device {
+ char name[32]; /* a formal name */
+ char label[64]; /* a flexible description */
+ uint32_t type; /* SENSOR_* */
+ uint32_t id; /* instance id of a sensor-class */
+ uint32_t fan_policy; /* see arch/loongarch/include/
+ asm/mach-loongarch/loongarch_hwmon.h */
+ uint32_t fan_percent;/* only for constant speed policy */
+ uint64_t base_addr; /* base address of device registers */
+} __attribute__((packed));
+
+struct system_loongarch {
+ uint16_t vers; /* version of system_loongarch */
+ uint32_t ccnuma_smp; /* 0: no numa; 1: has numa */
+ uint32_t sing_double_channel;/* 1:single; 2:double */
+ uint32_t nr_uarts;
+ struct uart_device uarts[MAX_UARTS];
+ uint32_t nr_sensors;
+ struct sensor_device sensors[MAX_SENSORS];
+ char has_ec;
+ char ec_name[32];
+ uint64_t ec_base_addr;
+ char has_tcm;
+ char tcm_name[32];
+ uint64_t tcm_base_addr;
+ uint64_t workarounds; /* see workarounds.h */
+} __attribute__((packed));
+
+struct irq_source_routing_table {
+ uint16_t vers;
+ uint16_t size;
+ uint16_t rtr_bus;
+ uint16_t rtr_devfn;
+ uint32_t vendor;
+ uint32_t device;
+ uint32_t PIC_type; /* conform use HT or PCI to route to CPU-PIC */
+ uint64_t ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
+ uint64_t ht_enable; /* irqs used in this PIC */
+ uint32_t node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
+ uint64_t pci_mem_start_addr;
+ uint64_t pci_mem_end_addr;
+ uint64_t pci_io_start_addr;
+ uint64_t pci_io_end_addr;
+ uint64_t pci_config_addr;
+ uint32_t dma_mask_bits;
+ uint16_t dma_noncoherent;
+} __attribute__((packed));
+
+struct interface_info {
+ uint16_t vers; /* version of the specificition */
+ uint16_t size;
+ uint8_t flag;
+ char description[64];
+} __attribute__((packed));
+
+#define MAX_RESOURCE_NUMBER 128
+struct resource_loongarch {
+ uint64_t start; /* resource start address */
+ uint64_t end; /* resource end address */
+ char name[64];
+ uint32_t flags;
+};
+
+struct archdev_data {}; /* arch specific additions */
+
+struct board_devices {
+ char name[64]; /* hold the device name */
+ uint32_t num_resources; /* number of device_resource */
+ /* for each device's resource */
+ struct resource_loongarch resource[MAX_RESOURCE_NUMBER];
+ /* arch specific additions */
+ struct archdev_data archdata;
+};
+
+struct loongarch_special_attribute {
+ uint16_t vers; /* version of this special */
+ char special_name[64]; /* special_atribute_name */
+ uint32_t loongarch_special_type; /* type of special device */
+ /* for each device's resource */
+ struct resource_loongarch resource[MAX_RESOURCE_NUMBER];
+};
+
+struct loongarch_params {
+ uint64_t memory_offset; /* efi_memory_map_loongarch struct offset */
+ uint64_t cpu_offset; /* efi_cpuinfo_loongarch struct offset */
+ uint64_t system_offset; /* system_loongarch struct offset */
+ uint64_t irq_offset; /* irq_source_routing_table struct offset */
+ uint64_t interface_offset; /* interface_info struct offset */
+ uint64_t special_offset; /* loongarch_special_attribute struct offset */
+ uint64_t boarddev_table_offset; /* board_devices offset */
+};
+
+struct smbios_tables {
+ uint16_t vers; /* version of smbios */
+ uint64_t vga_bios; /* vga_bios address */
+ struct loongarch_params lp;
+};
+
+struct efi_reset_system_t {
+ uint64_t ResetCold;
+ uint64_t ResetWarm;
+ uint64_t ResetType;
+ uint64_t Shutdown;
+ uint64_t DoSuspend; /* NULL if not support */
+};
+
+struct efi_loongarch {
+ uint64_t mps; /* MPS table */
+ uint64_t acpi; /* ACPI table (IA64 ext 0.71) */
+ uint64_t acpi20; /* ACPI table (ACPI 2.0) */
+ struct smbios_tables smbios; /* SM BIOS table */
+ uint64_t sal_systab; /* SAL system table */
+ uint64_t boot_info; /* boot info table */
+};
+
+struct boot_params {
+ struct efi_loongarch efi;
+ struct efi_reset_system_t reset_system;
+};
+
+static struct _loaderparams {
+ unsigned long ram_size;
+ const char *kernel_filename;
+ const char *kernel_cmdline;
+ const char *initrd_filename;
+ unsigned long a0, a1, a2;
+} loaderparams;
+
+static struct _firmware_config {
+ unsigned long ram_size;
+ unsigned int mem_freq;
+ unsigned int cpu_nr;
+ unsigned int cpu_clock_freq;
+} fw_config;
+
+struct la_memmap_entry {
+ uint64_t address;
+ uint64_t length;
+ uint32_t type;
+ uint32_t reserved;
+} ;
+
+static void *boot_params_buf;
+static void *boot_params_p;
+static struct la_memmap_entry *la_memmap_table;
+static unsigned la_memmap_entries;
+
+CPULOONGARCHState *cpu_states[LOONGARCH_MAX_VCPUS];
+
+struct kvm_cpucfg ls3a5k_cpucfgs = {
+ .cpucfg[LOONGARCH_CPUCFG0] = CPUCFG0_3A5000_PRID,
+ .cpucfg[LOONGARCH_CPUCFG1] = CPUCFG1_ISGR64 | CPUCFG1_PAGING |
+ CPUCFG1_IOCSR | CPUCFG1_PABITS | CPUCFG1_VABITS | CPUCFG1_UAL |
+ CPUCFG1_RI | CPUCFG1_XI | CPUCFG1_RPLV | CPUCFG1_HUGEPG |
+ CPUCFG1_IOCSRBRD,
+ .cpucfg[LOONGARCH_CPUCFG2] = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
+ CPUCFG2_FPVERS | CPUCFG2_LSX | CPUCFG2_LASX | CPUCFG2_COMPLEX |
+ CPUCFG2_CRYPTO | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_X86BT |
+ CPUCFG2_ARMBT | CPUCFG2_MIPSBT | CPUCFG2_LSPW | CPUCFG2_LAM,
+ .cpucfg[LOONGARCH_CPUCFG3] = CPUCFG3_CCDMA | CPUCFG3_SFB | CPUCFG3_UCACC |
+ CPUCFG3_LLEXC | CPUCFG3_SCDLY | CPUCFG3_LLDBAR | CPUCFG3_ITLBT |
+ CPUCFG3_ICACHET | CPUCFG3_SPW_LVL | CPUCFG3_SPW_HG_HF | CPUCFG3_RVA |
+ CPUCFG3_RVAMAX,
+ .cpucfg[LOONGARCH_CPUCFG4] = CCFREQ_100M,
+ .cpucfg[LOONGARCH_CPUCFG5] = CPUCFG5_CCMUL | CPUCFG5_CCDIV,
+ .cpucfg[LOONGARCH_CPUCFG6] = CPUCFG6_PMP | CPUCFG6_PAMVER | CPUCFG6_PMNUM |
+ CPUCFG6_PMBITS | CPUCFG6_UPM,
+ .cpucfg[LOONGARCH_CPUCFG16] = CPUCFG16_L1_IUPRE | CPUCFG16_L1_DPRE |
+ CPUCFG16_L2_IUPRE | CPUCFG16_L2_IUUNIFY | CPUCFG16_L2_IUPRIV |
+ CPUCFG16_L3_IUPRE | CPUCFG16_L3_IUUNIFY | CPUCFG16_L3_IUINCL,
+ .cpucfg[LOONGARCH_CPUCFG17] = CPUCFG17_L1I_WAYS_M | CPUCFG17_L1I_SETS_M |
+ CPUCFG17_L1I_SIZE_M,
+ .cpucfg[LOONGARCH_CPUCFG18] = CPUCFG18_L1D_WAYS_M | CPUCFG18_L1D_SETS_M |
+ CPUCFG18_L1D_SIZE_M,
+ .cpucfg[LOONGARCH_CPUCFG19] = CPUCFG19_L2_WAYS_M | CPUCFG19_L2_SETS_M |
+ CPUCFG19_L2_SIZE_M,
+ .cpucfg[LOONGARCH_CPUCFG20] = CPUCFG20_L3_WAYS_M | CPUCFG20_L3_SETS_M |
+ CPUCFG20_L3_SIZE_M,
+};
+
+/* ACPI is considered enabled unless the machine property is explicitly OFF
+ * (AUTO counts as enabled). */
+bool loongarch_is_acpi_enabled(LoongarchMachineState *vms)
+{
+ if (vms->acpi == ON_OFF_AUTO_OFF) {
+ return false;
+ }
+ return true;
+}
+/* QOM getter for the machine's "acpi" OnOffAuto property. */
+static void loongarch_get_acpi(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(obj);
+ OnOffAuto acpi = lsms->acpi;
+
+ visit_type_OnOffAuto(v, name, &acpi, errp);
+}
+
+/* QOM setter for the machine's "acpi" OnOffAuto property. */
+static void loongarch_set_acpi(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(obj);
+
+ visit_type_OnOffAuto(v, name, &lsms->acpi, errp);
+}
+/*
+ * Append one entry to the global memory-map table.
+ *
+ * Returns 0 (and warns) if an entry with the same start address already
+ * exists, otherwise grows the table and returns the new entry count.
+ * Fields are stored little-endian, ready for export to the guest.
+ * NOTE(review): the "%lx" format assumes 64-bit long; PRIx64 would be
+ * portable — confirm target hosts.
+ */
+int la_memmap_add_entry(uint64_t address, uint64_t length, uint32_t type)
+{
+ int i;
+
+ for (i = 0; i < la_memmap_entries; i++) {
+ if (la_memmap_table[i].address == address) {
+ fprintf(stderr, "%s address:0x%lx length:0x%lx already exists\n",
+ __func__, address, length);
+ return 0;
+ }
+ }
+
+ la_memmap_table = g_renew(struct la_memmap_entry, la_memmap_table,
+ la_memmap_entries + 1);
+ la_memmap_table[la_memmap_entries].address = cpu_to_le64(address);
+ la_memmap_table[la_memmap_entries].length = cpu_to_le64(length);
+ la_memmap_table[la_memmap_entries].type = cpu_to_le32(type);
+ la_memmap_entries++;
+
+ return la_memmap_entries;
+}
+
+/*
+ * Base address for hot-pluggable memory: it starts at 0x90000000 (the
+ * high-memory window) and is pushed up, aligned to
+ * LOONGARCH_HOTPLUG_MEM_ALIGN, by however much boot RAM exceeds the
+ * 256MB (0x10000000) low-memory region.
+ */
+static ram_addr_t get_hotplug_membase(ram_addr_t ram_size)
+{
+ ram_addr_t sstart;
+
+ if (ram_size <= 0x10000000) {
+ sstart = 0x90000000;
+ } else {
+ sstart = 0x90000000 + ROUND_UP((ram_size - 0x10000000),
+ LOONGARCH_HOTPLUG_MEM_ALIGN);
+ }
+ return sstart;
+}
+
+/*
+ * Fill in the firmware memory map handed to the guest (sizes in MB):
+ * entry 0 = low RAM below 256MB (minus 18MB of reserved space),
+ * entry 1 = remaining RAM at 0x90000000, entry 2 = SMBIOS window,
+ * entry 3 = reserved region at 0xee00000.
+ */
+static struct efi_memory_map_loongarch *init_memory_map(void *g_map)
+{
+ struct efi_memory_map_loongarch *emap = g_map;
+
+ emap->nr_map = 4;
+ emap->mem_freq = 266000000;
+
+ emap->map[0].node_id = 0;
+ emap->map[0].mem_type = 1;
+ emap->map[0].mem_start = 0x0;
+#ifdef CONFIG_KVM
+ emap->map[0].mem_size = (loaderparams.ram_size > 0x10000000
+ ? 256 : (loaderparams.ram_size >> 20)) - 18;
+#else
+ /*
+ * NOTE(review): relies on the "memsize"/"highmemsize" env vars set
+ * earlier by set_bootparam_uefi(); atoi(getenv(...)) dereferences
+ * NULL if they are unset — confirm call ordering.
+ */
+ emap->map[0].mem_size = atoi(getenv("memsize"));
+#endif
+
+ emap->map[1].node_id = 0;
+ emap->map[1].mem_type = 2;
+ emap->map[1].mem_start = 0x90000000;
+#ifdef CONFIG_KVM
+ emap->map[1].mem_size = (loaderparams.ram_size > 0x10000000
+ ? (loaderparams.ram_size >> 20) - 256 : 0);
+#else
+ emap->map[1].mem_size = atoi(getenv("highmemsize"));
+#endif
+
+ /* support for smbios */
+ emap->map[2].node_id = 0;
+ emap->map[2].mem_type = 10;
+ emap->map[2].mem_start = SMBIOS_PHYSICAL_ADDRESS;
+ emap->map[2].mem_size = SMBIOS_SIZE_LIMIT >> 20;
+
+ emap->map[3].node_id = 0;
+ emap->map[3].mem_type = 3;
+ emap->map[3].mem_start = 0xee00000;
+ emap->map[3].mem_size = RESERVED_SIZE_LIMIT >> 20;
+
+ return emap;
+}
+
+/*
+ * Best-effort probe of the host CPU frequency, in Hz.
+ *
+ * First tries sysfs cpuinfo_max_freq (value in kHz, hence * 1000); if
+ * that file is absent, scrapes /proc/cpuinfo for a "MHz" field or a
+ * "name ... @ N GHz" style model string (value * 1e6).  Returns 0 if
+ * /proc/cpuinfo cannot be opened.
+ *
+ * NOTE(review): read() does not NUL-terminate buf, so the atoi()/strstr()
+ * calls can scan past the bytes actually read; the "name" fallback also
+ * assumes an '@' is present — confirm on target hosts.
+ */
+static uint64_t get_host_cpu_freq(void)
+{
+ int fd = 0;
+ char buf[1024];
+ uint64_t freq = 0, size = 0;
+ char *buf_p;
+
+ fd = open("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", O_RDONLY);
+ if (fd == -1) {
+ fprintf(stderr, "/sys/devices/system/cpu/cpu0/cpufreq/ \
+ cpuinfo_max_freq not exist!\n");
+ fprintf(stderr, "Trying /proc/cpuinfo...\n");
+ } else {
+ size = read(fd, buf, 16);
+ if (size == -1) {
+ fprintf(stderr, "read err...\n");
+ }
+ close(fd);
+ freq = (uint64_t)atoi(buf);
+ return freq * 1000;
+ }
+
+ fd = open("/proc/cpuinfo", O_RDONLY);
+ if (fd == -1) {
+ fprintf(stderr, "Failed to open /proc/cpuinfo!\n");
+ return 0;
+ }
+
+ size = read(fd, buf, 1024);
+ if (size == -1) {
+ fprintf(stderr, "read err...\n");
+ }
+ close(fd);
+
+ buf_p = strstr(buf, "MHz");
+ if (buf_p) {
+ while (*buf_p != ':') {
+ buf_p++;
+ }
+ buf_p += 2;
+ } else {
+ buf_p = strstr(buf, "name");
+ while (*buf_p != '@') {
+ buf_p++;
+ }
+ buf_p += 2;
+ }
+
+ /* Copy the number out, then truncate at the first non-digit. */
+ memcpy(buf, buf_p, 12);
+ buf_p = buf;
+ while ((*buf_p >= '0') && (*buf_p <= '9')) {
+ buf_p++;
+ }
+ *buf_p = '\0';
+
+ freq = (uint64_t)atoi(buf);
+ return freq * 1000 * 1000;
+}
+
+/*
+ * Extract the host CPU model name from /proc/cpuinfo ("Name"/"name"
+ * field, truncated at the first newline, max 40 bytes).
+ *
+ * Returns a pointer into a static buffer (not thread-safe, valid until
+ * the next call), or NULL (returned as 0) on failure.
+ * NOTE(review): as in get_host_cpu_freq(), buf is not NUL-terminated
+ * after read() before strstr() scans it — confirm.
+ */
+static char *get_host_cpu_model_name(void)
+{
+ int fd = 0;
+ int size = 0;
+ static char buf[1024];
+ char *buf_p;
+
+ fd = open("/proc/cpuinfo", O_RDONLY);
+ if (fd == -1) {
+ fprintf(stderr, "Failed to open /proc/cpuinfo!\n");
+ return 0;
+ }
+
+ size = read(fd, buf, 1024);
+ if (size == -1) {
+ fprintf(stderr, "read err...\n");
+ }
+ close(fd);
+ buf_p = strstr(buf, "Name");
+ if (!buf_p) {
+ buf_p = strstr(buf, "name");
+ }
+ if (!buf_p) {
+ fprintf(stderr, "Can't find cpu name\n");
+ return 0;
+ }
+
+
+ /* Skip to the value after "name : " and keep the first line only. */
+ while (*buf_p != ':') {
+ buf_p++;
+ }
+ buf_p = buf_p + 2;
+ memcpy(buf, buf_p, 40);
+ buf_p = buf;
+ while (*buf_p != '\n') {
+ buf_p++;
+ }
+
+ *(buf_p) = '\0';
+
+ return buf;
+}
+
+/* Capture RAM size, CPU count and host CPU clock into the firmware
+ * configuration snapshot consumed later by the fw_cfg tables. */
+static void fw_conf_init(unsigned long ramsize)
+{
+ MachineState *ms = MACHINE(qdev_get_machine());
+ int smp_cpus = ms->smp.cpus;
+ fw_config.ram_size = ramsize;
+ fw_config.mem_freq = 266000000;
+ fw_config.cpu_nr = smp_cpus ;
+ fw_config.cpu_clock_freq = get_host_cpu_freq();
+}
+
+/*
+ * Fill the guest-visible CPU descriptor: PRID (0x14c010 for 3A5000-class
+ * CPUs), clock frequency (host value, or 200MHz fallback when probing
+ * fails), CPU count, boot core id 0, and a mask marking the cores above
+ * smp_cpus as reserved.
+ */
+static struct efi_cpuinfo_loongarch *init_cpu_info(void *g_cpuinfo_loongarch)
+{
+ struct efi_cpuinfo_loongarch *c = g_cpuinfo_loongarch;
+ MachineState *ms = MACHINE(qdev_get_machine());
+ int smp_cpus = ms->smp.cpus;
+ LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine());
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+
+ if (strstr(lsmc->cpu_name, "5000")) {
+ c->processor_id = 0x14c010;
+ }
+ c->cputype = Loongson3_comp;
+ c->cpu_clock_freq = get_host_cpu_freq();
+ if (!c->cpu_clock_freq) {
+ c->cpu_clock_freq = 200000000;
+ }
+ c->total_node = 1;
+ c->nr_cpus = smp_cpus;
+ c->cpu_startup_core_id = 0;
+ c->reserved_cores_mask = 0xffff & (0xffff << smp_cpus);
+
+ return c;
+}
+
+/*
+ * Fill the system descriptor: no NUMA, single memory channel.
+ * NOTE(review): the first assignment (ccnuma_smp = 1) is immediately
+ * overwritten by 0 — looks like a leftover; confirm the intended value.
+ */
+static struct system_loongarch *init_system_loongarch(void *g_sysitem)
+{
+ struct system_loongarch *s = g_sysitem;
+
+ s->ccnuma_smp = 1;
+ s->ccnuma_smp = 0;
+ s->sing_double_channel = 1;
+
+ return s;
+}
+
+/* Values for irq_source_routing_table.PIC_type. */
+enum loongarch_irq_source_enum {
+ HT, I8259, UNKNOWN
+};
+
+/*
+ * Fill the interrupt-routing descriptor: HT-routed PIC on node 0 with
+ * the fixed enable mask, PCIe memory window bounds, ISA I/O base for
+ * 3A5000-class CPUs, and non-coherent DMA.
+ */
+static struct irq_source_routing_table *init_irq_source(void *g_irq_source)
+{
+ struct irq_source_routing_table *irq_info = g_irq_source;
+ LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine());
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+
+ irq_info->PIC_type = HT;
+ irq_info->ht_int_bit = 1 << 24;
+ irq_info->ht_enable = 0x0000d17b;
+ irq_info->node_id = 0;
+
+ irq_info->pci_mem_start_addr = PCIE_MEMORY_BASE;
+ irq_info->pci_mem_end_addr = irq_info->pci_mem_start_addr + PCIE_MEMORY_SIZE - 1;
+
+ if (strstr(lsmc->cpu_name, "5000")) {
+ irq_info->pci_io_start_addr = LS3A5K_ISA_IO_BASE;
+ }
+ irq_info->dma_noncoherent = 1;
+ return irq_info;
+}
+
+/* Fill the firmware interface descriptor: version 1, flash size in KB
+ * units (512KB flash), and a fixed PMON version string. */
+static struct interface_info *init_interface_info(void *g_interface)
+{
+ struct interface_info *inter = g_interface;
+ int flashsize = 0x80000;
+
+ inter->vers = 0x0001;
+ inter->size = flashsize / 0x400;
+ inter->flag = 1;
+
+ strcpy(inter->description, "PMON_Version_v2.1");
+
+ return inter;
+}
+
+/* Fill the board descriptor: fixed demo-board name for the ls7a bridge
+ * and a resource count of 10. */
+static struct board_devices *board_devices_info(void *g_board)
+{
+ struct board_devices *bd = g_board;
+ LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine());
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+
+ if (!strcmp(lsmc->bridge_name, "ls7a")) {
+ strcpy(bd->name, "Loongarch-3A-7A-1w-V1.03-demo");
+ }
+ bd->num_resources = 10;
+
+ return bd;
+}
+
+/* Fill the "special attribute" descriptor: a date-stamp name plus one
+ * SPMODULE resource covering the 128KB VRAM window, flagged DMA64. */
+static struct loongarch_special_attribute *init_special_info(void *g_special)
+{
+ struct loongarch_special_attribute *special = g_special;
+ char update[11] = "2013-01-01";
+ int VRAM_SIZE = 0x20000;
+
+ strcpy(special->special_name, update);
+ special->resource[0].flags = 0;
+ special->resource[0].start = 0;
+ special->resource[0].end = VRAM_SIZE;
+ strcpy(special->resource[0].name, "SPMODULE");
+ special->resource[0].flags |= DMA64_SUPPORTED;
+
+ return special;
+}
+
+/*
+ * Lay out all firmware sub-tables sequentially in the boot-params buffer.
+ *
+ * Each init_* helper fills its table at the cursor 'p'; the offset stored
+ * in 'lp' is relative to lp itself (the guest adds it back to the table
+ * base address).  The cursor advances by the align()ed size of each
+ * struct, and the global boot_params_p is left pointing past the last one.
+ */
+static void init_loongarch_params(struct loongarch_params *lp)
+{
+ void *p = boot_params_p;
+
+ lp->memory_offset = (unsigned long long)init_memory_map(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct efi_memory_map_loongarch));
+
+ lp->cpu_offset = (unsigned long long)init_cpu_info(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct efi_cpuinfo_loongarch));
+
+ lp->system_offset = (unsigned long long)init_system_loongarch(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct system_loongarch));
+
+ lp->irq_offset = (unsigned long long)init_irq_source(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct irq_source_routing_table));
+
+ lp->interface_offset = (unsigned long long)init_interface_info(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct interface_info));
+
+ lp->boarddev_table_offset = (unsigned long long)board_devices_info(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct board_devices));
+
+ lp->special_offset = (unsigned long long)init_special_info(p)
+ - (unsigned long long)lp;
+ p += align(sizeof(struct loongarch_special_attribute));
+
+ boot_params_p = p;
+}
+
+/* Fill the SMBIOS header and its embedded loongarch_params tables. */
+static void init_smbios(struct smbios_tables *smbios)
+{
+ smbios->vers = 1;
+ smbios->vga_bios = 1;
+ init_loongarch_params(&(smbios->lp));
+}
+
+/* Initialize the EFI-like descriptor; only the SMBIOS part is filled. */
+static void init_efi(struct efi_loongarch *efi)
+{
+ init_smbios(&(efi->smbios));
+}
+
+/* Top-level boot-parameter initialization; always returns 0. */
+static int init_boot_param(struct boot_params *bp)
+{
+ init_efi(&(bp->efi));
+
+ return 0;
+}
+
+static unsigned int ls3a5k_aui_boot_code[] = {
+ 0x0380200d, /* ori $r13,$r0,0x8 */
+ 0x0400002d, /* csrwr $r13,0x0 */
+ 0x0401000e, /* csrrd $r14,0x40 */
+ 0x0343fdce, /* andi $r14,$r14,0xff */
+ 0x143fc02c, /* lu12i.w $r12,261889(0x1fe01) */
+ 0x1600000c, /* lu32i.d $r12,0 */
+ 0x0320018c, /* lu52i.d $r12,$r12,-1792(0x800) */
+ 0x03400dcf, /* andi $r15,$r14,0x3 */
+ 0x004121ef, /* slli.d $r15,$r15,0x8 */
+ 0x00153d8c, /* or $r12,$r12,$r15 */
+ 0x034031d0, /* andi $r16,$r14,0xc */
+ 0x0041aa10, /* slli.d $r16,$r16,0x2a */
+ 0x0015418c, /* or $r12,$r12,$r16 */
+ 0x28808184, /* ld.w $r4,$r12,32(0x20) */
+ 0x43fffc9f, /* beqz $r4,0 -4 */
+ 0x28c08184, /* ld.d $r4,$r12,32(0x20) */
+ 0x28c0a183, /* ld.d $r3,$r12,40(0x28) */
+ 0x28c0c182, /* ld.d $r2,$r12,48(0x30) */
+ 0x28c0e185, /* ld.d $r5,$r12,56(0x38) */
+ 0x4c000080, /* jirl $r0,$r4,0 */
+};
+
+/*
+ * Build the legacy PMON/UEFI boot-parameter blob at BOOTPARAM_PHYADDR.
+ *
+ * Writes argv pointers and strings (argv[0]="g", argv[1]=kernel cmdline,
+ * optionally prefixed with rd_start/rd_size for the initrd), exports the
+ * low/high memory sizes via the "memsize"/"highmemsize" env vars (read
+ * back by init_memory_map() on the TCG path), then appends the
+ * boot_params tables and installs everything as a ROM blob.  Register
+ * arguments for the kernel are recorded in loaderparams: a0 = argc (2),
+ * a1 = argv address, a2 = environment/boot-params address.
+ * Always returns 0.
+ */
+static int set_bootparam_uefi(ram_addr_t initrd_offset, long initrd_size)
+{
+ long params_size;
+ char memenv[32];
+ char highmemenv[32];
+ void *params_buf;
+ unsigned long *parg_env;
+ int ret = 0;
+
+ /* Allocate params_buf for command line. */
+ params_size = 0x100000;
+ params_buf = g_malloc0(params_size);
+
+ /*
+ * Layout of params_buf looks like this:
+ * argv[0], argv[1], 0, env[0], env[1], ...env[i], 0,
+ * argv[0]'s data, argv[1]'s data, env[0]'data, ..., env[i]'s data, 0
+ */
+ parg_env = (void *)params_buf;
+
+ /* 'ret' doubles as the running byte offset of the string area. */
+ ret = (3 + 1) * sizeof(target_ulong);
+ *parg_env++ = (BOOTPARAM_ADDR + ret);
+ ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret, "g"));
+
+ /* argv1 */
+ *parg_env++ = BOOTPARAM_ADDR + ret;
+ if (initrd_size > 0)
+ ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret,
+ "rd_start=0x%llx rd_size=%li %s",
+ PHYS_TO_VIRT((uint32_t)initrd_offset),
+ initrd_size, loaderparams.kernel_cmdline));
+ else
+ ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret, "%s",
+ loaderparams.kernel_cmdline));
+
+ /* argv2 */
+ *parg_env++ = 0;
+
+ /* env */
+ sprintf(memenv, "%lu", loaderparams.ram_size > 0x10000000
+ ? 256 : (loaderparams.ram_size >> 20));
+ sprintf(highmemenv, "%lu", loaderparams.ram_size > 0x10000000
+ ? (loaderparams.ram_size >> 20) - 256 : 0);
+
+ setenv("memsize", memenv, 1);
+ setenv("highmemsize", highmemenv, 1);
+
+ /* Round the string area up to a 32-byte boundary for the tables. */
+ ret = ((ret + 32) & ~31);
+
+ boot_params_buf = (void *)(params_buf + ret);
+ boot_params_p = boot_params_buf + align(sizeof(struct boot_params));
+ init_boot_param(boot_params_buf);
+ rom_add_blob_fixed("params", params_buf, params_size,
+ BOOTPARAM_PHYADDR);
+ loaderparams.a0 = 2;
+ loaderparams.a1 = BOOTPARAM_ADDR;
+ loaderparams.a2 = BOOTPARAM_ADDR + ret;
+
+ return 0;
+}
+
+/* ELF loader address translation: strip the virtual-address bits, keeping
+ * the low 29 bits as the physical address. */
+static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr)
+{
+ return addr & 0x1fffffffll;
+}
+
+/*
+ * Load the kernel ELF (and optional initrd) and publish their locations
+ * to the guest firmware via fw_cfg.
+ *
+ * Adds FW_CFG_KERNEL_ENTRY; if an initrd fits below highram_size it is
+ * placed just under phyAddr_initrd (page-aligned) and exported via
+ * FW_CFG_INITRD_ADDR/SIZE; the command line (with rd_start/rd_size
+ * prepended when an initrd is present) goes to FW_CFG_CMDLINE_*.
+ * Exits the process on kernel load failure or oversized initrd.
+ */
+static void fw_cfg_add_kernel_info(FWCfgState *fw_cfg,
+ uint64_t highram_size,
+ uint64_t phyAddr_initrd)
+{
+ int64_t entry, kernel_low, kernel_high;
+ long initrd_size = 0;
+ uint64_t initrd_offset = 0;
+ void *cmdline_buf;
+ int ret = 0;
+
+ ret = load_elf(loaderparams.kernel_filename, NULL, cpu_loongarch_virt_to_phys, NULL,
+ (uint64_t *)&entry, (uint64_t *)&kernel_low,
+ (uint64_t *)&kernel_high, NULL, 0, EM_LOONGARCH, 1, 0);
+
+ if(0 > ret) {
+ error_report("kernel image load error");
+ exit(1);
+ }
+
+ fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ENTRY, entry);
+
+ if (loaderparams.initrd_filename) {
+ initrd_size = get_image_size(loaderparams.initrd_filename);
+ if (0 < initrd_size) {
+ if (initrd_size > highram_size) {
+ error_report("initrd size is too big, should below %ld MB",
+ highram_size / MiB);
+ /*prevent write io memory address space*/
+ exit(1);
+ }
+ initrd_offset = (phyAddr_initrd - initrd_size) & TARGET_REALPAGE_MASK;
+ initrd_size = load_image_targphys(loaderparams.initrd_filename,
+ initrd_offset,
+ loaderparams.ram_size - initrd_offset);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_offset);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
+ } else {
+ error_report("initrd image size is error");
+ }
+ }
+
+ cmdline_buf = g_malloc0(COMMAND_LINE_SIZE);
+ if (initrd_size > 0)
+ ret = (1 + snprintf(cmdline_buf, COMMAND_LINE_SIZE,
+ "rd_start=0x%llx rd_size=%li %s",
+ PHYS_TO_VIRT(initrd_offset),
+ initrd_size, loaderparams.kernel_cmdline));
+ else
+ ret = (1 + snprintf(cmdline_buf, COMMAND_LINE_SIZE, "%s",
+ loaderparams.kernel_cmdline));
+
+ fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, ret);
+ fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, (const char *)cmdline_buf);
+
+ return ;
+}
+
+/*
+ * Legacy (non-fw_cfg) kernel load path: load the ELF, place the initrd
+ * page-aligned above 4x the kernel's end address, and build the PMON
+ * boot-parameter blob.  Returns the kernel entry point.
+ * NOTE(review): the load_elf() return value is not checked here, unlike
+ * in fw_cfg_add_kernel_info() — confirm whether failure should abort.
+ */
+static int64_t load_kernel(void)
+{
+ int64_t entry, kernel_low, kernel_high;
+ long initrd_size = 0;
+ ram_addr_t initrd_offset = 0;
+
+ load_elf(loaderparams.kernel_filename, NULL, cpu_loongarch_virt_to_phys, NULL,
+ (uint64_t *)&entry, (uint64_t *)&kernel_low,
+ (uint64_t *)&kernel_high, NULL, 0, EM_LOONGARCH, 1, 0);
+
+ if (loaderparams.initrd_filename) {
+ initrd_size = get_image_size(loaderparams.initrd_filename);
+
+ if (initrd_size > 0) {
+ initrd_offset = (kernel_high * 4 + ~TARGET_REALPAGE_MASK)
+ & TARGET_REALPAGE_MASK;
+ initrd_size = load_image_targphys(loaderparams.initrd_filename,
+ initrd_offset,
+ loaderparams.ram_size - initrd_offset);
+ }
+ }
+ set_bootparam_uefi(initrd_offset, initrd_size);
+
+ return entry;
+}
+
+/*
+ * Reset handler for the boot CPU: after the core reset, point the PC at
+ * the recorded reset vector and reload the kernel's argc/argv/env
+ * register arguments ($a0-$a2, i.e. gpr 4-6) from loaderparams.
+ */
+static void main_cpu_reset(void *opaque)
+{
+ ResetData *s = (ResetData *)opaque;
+ CPULOONGARCHState *env = &s->cpu->env;
+
+ cpu_reset(CPU(s->cpu));
+ env->active_tc.PC = s->vector;
+ env->active_tc.gpr[4] = loaderparams.a0;
+ env->active_tc.gpr[5] = loaderparams.a1;
+ env->active_tc.gpr[6] = loaderparams.a2;
+}
+
+/* Reset handler for secondary CPUs: plain core reset, no boot arguments. */
+void slave_cpu_reset(void *opaque)
+{
+ ResetData *s = (ResetData *)opaque;
+
+ cpu_reset(CPU(s->cpu));
+}
+
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24
+#define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff
+#define KVM_LOONGARCH_IRQ_VCPU_SHIFT 16
+#define KVM_LOONGARCH_IRQ_VCPU_MASK 0xff
+#define KVM_LOONGARCH_IRQ_NUM_SHIFT 0
+#define KVM_LOONGARCH_IRQ_NUM_MASK 0xffff
+
+/* irq_type field */
+#define KVM_LOONGARCH_IRQ_TYPE_CPU_IP 0
+#define KVM_LOONGARCH_IRQ_TYPE_CPU_IO 1
+#define KVM_LOONGARCH_IRQ_TYPE_HT 2
+#define KVM_LOONGARCH_IRQ_TYPE_MSI 3
+#define KVM_LOONGARCH_IRQ_TYPE_IOAPIC 4
+
+/* Trivial irq fan-out: forward 'level' to the irq-th line of the array
+ * passed as opaque. */
+static void legacy_set_irq(void *opaque, int irq, int level)
+{
+ qemu_irq *pic = opaque;
+
+ qemu_set_irq(pic[irq], level);
+}
+
+/* State of the LS3A interrupt controller complex: node counter, power
+ * management and MSI register blobs, per-CPU env pointers, the ioapic
+ * device and its irq lines, plus the in-kernel irqchip state under KVM. */
+typedef struct ls3a_intctlstate {
+ uint8_t nodecounter_reg[0x100];
+ uint8_t pm_reg[0x100];
+ uint8_t msi_reg[0x8];
+ CPULOONGARCHState **env;
+ DeviceState *apicdev;
+ qemu_irq *ioapic_irq;
+#ifdef CONFIG_KVM
+ struct loongarch_kvm_irqchip chip;
+#endif
+} ls3a_intctlstate;
+
+/* Bundle passed to register-bank accessors: controller state plus the
+ * bank's base address, access mask and backing memory. */
+typedef struct ls3a_func_args {
+ ls3a_intctlstate *state;
+ uint64_t base;
+ uint32_t mask;
+ uint8_t *mem;
+} ls3a_func_args;
+
+/* The MSI doorbell is write-only; reads return 0. */
+static uint64_t ls3a_msi_mem_read(void *opaque, hwaddr addr, unsigned size)
+{
+ return 0;
+}
+
+/*
+ * MSI doorbell write: the low byte of the written value is the vector.
+ * With an in-kernel irqchip the message is delivered via KVM_SIGNAL_MSI;
+ * otherwise the corresponding userspace apic irq line is raised.
+ * NOTE(review): the userspace path only asserts the line (level 1) and
+ * never deasserts it here — confirm the apic treats this as edge.
+ */
+static void ls3a_msi_mem_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ struct kvm_msi msi;
+ apicState *apic;
+
+ apic = (apicState *)opaque;
+ msi.address_lo = 0;
+ msi.address_hi = 0;
+ msi.data = val & 0xff;
+ msi.flags = 0;
+ memset(msi.pad, 0, sizeof(msi.pad));
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi);
+ } else {
+ qemu_set_irq(apic->irq[msi.data], 1);
+ }
+}
+
+
+/* MMIO descriptor for the MSI doorbell region. */
+static const MemoryRegionOps ls3a_msi_ops = {
+ .read = ls3a_msi_mem_read,
+ .write = ls3a_msi_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+
+/* Migration description: only the 8-byte msi_reg blob is transferred. */
+static const VMStateDescription vmstate_ls3a_msi = {
+ .name = "ls3a-msi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(msi_reg, ls3a_intctlstate, 0x8),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/*
+ * ioapic irq dispatch: with an in-kernel irqchip the irq is encoded as a
+ * KVM irq number (IOAPIC type, vcpu 0) and injected via kvm_set_irq();
+ * otherwise it is forwarded to the userspace apic's irq line.
+ */
+static void ioapic_handler(void *opaque, int irq, int level)
+{
+ apicState *apic;
+ int kvm_irq;
+
+ apic = (apicState *)opaque;
+
+ if (kvm_irqchip_in_kernel()) {
+ kvm_irq = (KVM_LOONGARCH_IRQ_TYPE_IOAPIC << KVM_LOONGARCH_IRQ_TYPE_SHIFT)
+ | (0 << KVM_LOONGARCH_IRQ_VCPU_SHIFT) | irq;
+ kvm_set_irq(kvm_state, kvm_irq, !!level);
+ } else {
+ qemu_set_irq(apic->irq[irq], level);
+ }
+}
+
+/*
+ * Instantiate the LS3A interrupt-controller complex.
+ *
+ * Maps the MSI doorbell at MSI_ADDR_LOW and registers its vmstate.  For
+ * the ls7a bridge it additionally sets up KVM GSI routing (one 1:1 route
+ * per ioapic pin when the kernel supports it), creates the "ioapic"
+ * device at the class-specified base, and returns the array of 64
+ * ioapic irq handlers.  Returns NULL for any other bridge.
+ */
+static void *ls3a_intctl_init(MachineState *machine, CPULOONGARCHState *env[])
+{
+ qemu_irq *irqhandler;
+ ls3a_intctlstate *s;
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ LoongarchMachineClass *mc = LoongarchMACHINE_GET_CLASS(lsms);
+ DeviceState *dev;
+ SysBusDevice *busdev;
+ MemoryRegion *address_space_mem = get_system_memory();
+ MemoryRegion *iomem = NULL;
+#ifdef CONFIG_KVM
+ int i;
+#endif
+
+ s = g_malloc0(sizeof(ls3a_intctlstate));
+
+ /* NOTE(review): g_malloc0() aborts rather than returning NULL, so this
+ * check is effectively dead code. */
+ if (!s) {
+ return NULL;
+ }
+
+ /*Add MSI mmio memory*/
+ iomem = g_new(MemoryRegion, 1);
+ memory_region_init_io(iomem, NULL, &ls3a_msi_ops, lsms->apic,
+ "ls3a_msi", 0x8);
+ memory_region_add_subregion(address_space_mem,
+ MSI_ADDR_LOW, iomem);
+ vmstate_register(NULL, 0, &vmstate_ls3a_msi, s);
+
+ s->env = env;
+
+ if (!strcmp(mc->bridge_name, "ls7a")) {
+ if (lsms->apic_xrupt_override) {
+ DPRINTF("irqchip in kernel %d\n", kvm_irqchip_in_kernel());
+#ifdef CONFIG_KVM
+ if (kvm_has_gsi_routing()) {
+ for (i = 0; i < 32; ++i) {
+ kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
+ }
+ kvm_gsi_routing_allowed = true;
+ }
+ kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+#endif
+ }
+
+ irqhandler = qemu_allocate_irqs(ioapic_handler, lsms->apic, 64);
+ dev = qdev_new("ioapic");
+ busdev = SYS_BUS_DEVICE(dev);
+ sysbus_realize_and_unref(busdev, &error_fatal);
+ sysbus_mmio_map(busdev, 0, mc->ls7a_ioapic_reg_base);
+ s->ioapic_irq = irqhandler;
+ s->apicdev = dev;
+ return s->ioapic_irq;
+ }
+ return NULL;
+}
+
+/* Network support */
+static void network_init(PCIBus *pci_bus)
+{
+ int i;
+
+ for (i = 0; i < nb_nics; i++) {
+ NICInfo *nd = &nd_table[i];
+
+ if (!nd->model) {
+ nd->model = g_strdup("virtio-net-pci");
+ }
+
+ pci_nic_init_nofail(nd, pci_bus, nd->model, NULL);
+ }
+}
+
+/*
+ * Tear down a hot-unplugged vCPU: unregister and free its reset state,
+ * publish the decremented CPU count to the guest via fw_cfg, and drop
+ * the CPU's vm-change-state handler.  (The local smp_cpus decrement only
+ * affects the fw_cfg value, not machine->smp.)
+ */
+void loongarch_cpu_destroy(MachineState *machine, LOONGARCHCPU *cpu)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ unsigned int id;
+ int smp_cpus = machine->smp.cpus;
+ id = cpu->id;
+ qemu_unregister_reset(slave_cpu_reset, lsms->reset_info[id]);
+ g_free(lsms->reset_info[id]);
+ lsms->reset_info[id] = NULL;
+
+ smp_cpus -= 1;
+ if (lsms->fw_cfg) {
+ fw_cfg_modify_i16(lsms->fw_cfg, FW_CFG_NB_CPUS,
+ (uint16_t)smp_cpus);
+ }
+
+ qemu_del_vm_change_state_handler(cpu->cpuStateEntry);
+}
+
+/*
+ * Finish bringing up a (hot-plugged) vCPU: record its env in cpu_states,
+ * tag CSR_TMID with the cpu id, allocate and register its reset state
+ * (secondary-CPU reset, vector = current PC), wire up its irqs, timer,
+ * IPI mailbox and apic, and publish the incremented CPU count via
+ * fw_cfg.  Returns the same cpu pointer.  (As in loongarch_cpu_destroy,
+ * the smp_cpus increment is local and only feeds the fw_cfg value.)
+ */
+LOONGARCHCPU *loongarch_cpu_create(MachineState *machine,
+ LOONGARCHCPU *cpu, Error **errp)
+{
+ CPULOONGARCHState *env;
+ unsigned int id;
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ int smp_cpus = machine->smp.cpus;
+ id = cpu->id;
+ env = &cpu->env;
+ cpu_states[id] = env;
+ env->CSR_TMID |= id;
+
+ lsms = LoongarchMACHINE(machine);
+ lsms->reset_info[id] = g_malloc0(sizeof(ResetData));
+ lsms->reset_info[id]->cpu = cpu;
+ lsms->reset_info[id]->vector = env->active_tc.PC;
+ qemu_register_reset(slave_cpu_reset, lsms->reset_info[id]);
+
+ /* Init CPU internal devices */
+ cpu_init_irq(cpu);
+ cpu_loongarch_clock_init(cpu);
+
+ smp_cpus += 1;
+ if (lsms->fw_cfg) {
+ fw_cfg_modify_i16(lsms->fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
+ }
+ cpu_init_ipi(lsms, env->irq[12], id);
+ cpu_init_apic(lsms, env, id);
+
+ return cpu;
+}
+
+/*
+ * Boot-order change callback registered with qemu_register_boot_set():
+ * stores the first boot-device letter in the fw_cfg boot-device slot.
+ * 'opaque' is the FWCfgState created in loongarch_fw_cfg_init().
+ */
+static void fw_cfg_boot_set(void *opaque, const char *boot_device,
+ Error **errp)
+{
+ fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]);
+}
+
+/*
+ * Create the fw_cfg MMIO device at FW_CFG_ADDR and populate the basic
+ * entries the guest firmware reads: cpu counts, ram size and a NUMA
+ * table (node count, per-vcpu node id, per-node memory size).
+ */
+static FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size,
+ LoongarchMachineState *lsms)
+{
+ FWCfgState *fw_cfg;
+ uint64_t *numa_fw_cfg;
+ int i;
+ const CPUArchIdList *cpus;
+ MachineClass *mc = MACHINE_GET_CLASS(lsms);
+ MachineState *ms = MACHINE(OBJECT(lsms));
+ int max_cpus = ms->smp.max_cpus;
+ int smp_cpus = ms->smp.cpus;
+ int nb_numa_nodes = ms->numa_state->num_nodes;
+ NodeInfo *numa_info = ms->numa_state->nodes;
+
+ /* Data port at FW_CFG_ADDR + 8, 8-byte wide control port, no DMA. */
+ fw_cfg = fw_cfg_init_mem_wide(FW_CFG_ADDR + 8, FW_CFG_ADDR, 8, 0, NULL);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus);
+ fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size);
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus);
+
+ /* allocate memory for the NUMA channel: one (64bit) word for the number
+ * of nodes, one word for each VCPU->node and one word for each node to
+ * hold the amount of memory.
+ */
+ numa_fw_cfg = g_new0(uint64_t, 1 + max_cpus + nb_numa_nodes);
+ numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
+ cpus = mc->possible_cpu_arch_ids(MACHINE(lsms));
+ for (i = 0; i < cpus->len; i++) {
+ unsigned int apic_id = cpus->cpus[i].arch_id;
+ assert(apic_id < max_cpus);
+ numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id);
+ }
+ for (i = 0; i < nb_numa_nodes; i++) {
+ numa_fw_cfg[max_cpus + 1 + i] =
+ cpu_to_le64(numa_info[i].node_mem);
+ }
+ /* Ownership of numa_fw_cfg passes to fw_cfg; it must stay allocated. */
+ fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg,
+ (1 + max_cpus + nb_numa_nodes) *
+ sizeof(*numa_fw_cfg));
+
+ qemu_register_boot_set(fw_cfg_boot_set, fw_cfg);
+ return fw_cfg;
+}
+
+/*
+ * Build the SMBIOS tables and expose them to the firmware through fw_cfg.
+ * No-op when fw_cfg was never created (e.g. built-in boot code path).
+ * The product string depends on the accelerator and host CPU model.
+ */
+static void loongarch_build_smbios(LoongarchMachineState *lsms)
+{
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+ MachineState *ms = MACHINE(OBJECT(lsms));
+ uint8_t *smbios_tables, *smbios_anchor;
+ size_t smbios_tables_len, smbios_anchor_len;
+ const char *product = "QEMU Virtual Machine";
+
+ if (!lsms->fw_cfg) {
+ return;
+ }
+
+ if (kvm_enabled()) {
+ if (strstr(lsmc->cpu_name, "5000")) {
+ product = "KVM";
+ }
+ } else {
+ product = "Loongarch-3A5K-7A1000-TCG";
+ }
+
+ smbios_set_defaults("Loongson", product, lsmc->cpu_name, false,
+ true, NULL, NULL, SMBIOS_ENTRY_POINT_30);
+
+ smbios_get_tables(ms, NULL, 0, &smbios_tables, &smbios_tables_len,
+ &smbios_anchor, &smbios_anchor_len, &error_fatal);
+
+ if (smbios_anchor) {
+ fw_cfg_add_file(lsms->fw_cfg, "etc/smbios/smbios-tables",
+ smbios_tables, smbios_tables_len);
+ fw_cfg_add_file(lsms->fw_cfg, "etc/smbios/smbios-anchor",
+ smbios_anchor, smbios_anchor_len);
+ }
+}
+
+/*
+ * machine-init-done notifier: finalize the FDT (platform-bus nodes),
+ * install it in guest memory as a ROM blob at LS_FDT_BASE, then build
+ * the ACPI and SMBIOS tables.
+ */
+static
+void loongarch_machine_done(Notifier *notifier, void *data)
+{
+ LoongarchMachineState *lsms = container_of(notifier,
+ LoongarchMachineState, machine_done);
+
+ platform_bus_add_all_fdt_nodes(lsms->fdt, NULL,
+ VIRT_PLATFORM_BUS_BASEADDRESS,
+ VIRT_PLATFORM_BUS_SIZE,
+ VIRT_PLATFORM_BUS_IRQ);
+
+ /* Honors -machine dumpdtb= if the user asked for a DTB dump. */
+ qemu_fdt_dumpdtb(lsms->fdt, lsms->fdt_size);
+ /* load fdt */
+ MemoryRegion *fdt_rom = g_new(MemoryRegion, 1);
+ memory_region_init_rom(fdt_rom, NULL, "fdt", LS_FDT_SIZE, &error_fatal);
+ memory_region_add_subregion(get_system_memory(), LS_FDT_BASE, fdt_rom);
+ rom_add_blob_fixed("fdt", lsms->fdt, lsms->fdt_size, LS_FDT_BASE);
+
+ loongarch_acpi_setup();
+ loongarch_build_smbios(lsms);
+}
+
+#ifdef CONFIG_TCG
+/* Chip-configuration register addresses emulated for the TCG case. */
+#define FEATURE_REG 0x1fe00008
+#define VENDOR_REG 0x1fe00010
+#define CPUNAME_REG 0x1fe00020
+#define OTHER_FUNC_REG 0x1fe00420
+#define _str(x) #x
+#define str(x) _str(x)
+/*
+ * Map a small MMIO window backed by loongarch_qemu_ops at ADDR with the
+ * stringified address as the region name.  NOTE(review): the expansion
+ * references a local variable named 'address_space_mem' at the call site
+ * (unhygienic macro); callers must have it in scope.
+ */
+#define SIMPLE_OPS(ADDR, SIZE) \
+({\
+ MemoryRegion *iomem = g_new(MemoryRegion, 1);\
+ memory_region_init_io(iomem, NULL, &loongarch_qemu_ops, \
+ (void *)ADDR, str(ADDR) , SIZE);\
+ memory_region_add_subregion_overlap(address_space_mem, ADDR, iomem, 1);\
+})
+
+/* Last value the guest wrote to chip-config register 0x1fe00180. */
+static int reg180;
+
+/*
+ * MMIO write handler for the emulated chip-config registers.  'opaque'
+ * carries the window base address; only register 0x1fe00180 is latched
+ * (into reg180); writes to every other address are silently ignored.
+ */
+static void loongarch_qemu_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ addr = ((hwaddr)(long)opaque) + addr;
+ addr = addr & 0xffffffff;
+ switch (addr) {
+ case 0x1fe00180:
+ reg180 = val;
+ break;
+ }
+}
+
+/*
+ * MMIO read handler for the emulated chip-configuration registers.
+ * 'opaque' carries the window base address; fold it with the access
+ * offset and truncate to 32 bits to recover the absolute register
+ * address.  Unknown registers read as 0.
+ */
+static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size)
+{
+    uint64_t feature = 0UL;
+    uint64_t id = 0;
+
+    addr = ((hwaddr)(long)opaque) + addr;
+    addr = addr & 0xffffffff;
+    switch (addr) {
+    case 0x1fe00180:
+        return reg180;
+    case 0x1001041c:
+        return 0xa800;
+    case FEATURE_REG:
+        feature |= 1UL << 2 | 1UL << 3 | 1UL << 4 | 1UL << 11;
+        return feature;
+    case VENDOR_REG:
+        /*
+         * Fix: the original *(uint64_t *)"Loongson-3A5000" cast performed
+         * a potentially misaligned load and violated strict aliasing.
+         * memcpy yields the same little-endian byte pattern safely.
+         */
+        memcpy(&id, "Loongson-3A5000", sizeof(id));
+        return id;
+    case CPUNAME_REG:
+        /*
+         * Fix: "3A5000" is only 7 bytes including its NUL terminator, so
+         * the original 8-byte *(uint64_t *) load read past the end of the
+         * string literal (undefined behavior).  Copy the 7 valid bytes
+         * into a zero-initialized value instead.
+         */
+        memcpy(&id, "3A5000", sizeof("3A5000"));
+        return id;
+    case 0x10013ffc:
+        return 0x80;
+    }
+    return 0;
+}
+
+/*
+ * MemoryRegionOps for the emulated chip-config windows mapped via
+ * SIMPLE_OPS(): 4- or 8-byte native-endian accesses only.
+ */
+static const MemoryRegionOps loongarch_qemu_ops = {
+ .read = loongarch_qemu_read,
+ .write = loongarch_qemu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 8,
+ },
+};
+#endif
+
+
+/*
+ * Drop every pflash device that was created in instance_init but never
+ * realized (no backing drive): remove its "pflashN" alias property and
+ * unparent the object so it is finalized.
+ */
+static void loongarch_system_flash_cleanup_unused(LoongarchMachineState *lsms)
+{
+ char *prop_name;
+ int i;
+ Object *dev_obj;
+
+ for (i = 0; i < ARRAY_SIZE(lsms->flash); i++) {
+ dev_obj = OBJECT(lsms->flash[i]);
+ if (!object_property_get_bool(dev_obj, "realized", &error_abort)) {
+ prop_name = g_strdup_printf("pflash%d", i);
+ object_property_del(OBJECT(lsms), prop_name);
+ g_free(prop_name);
+ object_unparent(dev_obj);
+ lsms->flash[i] = NULL;
+ }
+ }
+}
+
+
+/*
+ * Attach -pflash drives to the pre-created flash devices, validate the
+ * image sizes and map them: flash 0 at LS_BIOS_BASE, flash 1 overlapping
+ * at LS_BIOS_VAR_BASE.  Returns false when flash 0 has no backing drive
+ * (i.e. flash boot is not possible); exits on an invalid image size.
+ */
+static bool loongarch_system_flash_init( LoongarchMachineState *lsms)
+{
+ int i = 0;
+ int64_t size = 0;
+ PFlashCFI01 *pflash = NULL;
+ BlockBackend *pflash_blk;
+
+ for(i = 0; i < ARRAY_SIZE(lsms->flash); i++) {
+ pflash_blk = NULL;
+ pflash = NULL;
+
+ pflash = lsms->flash[i];
+ pflash_cfi01_legacy_drive(pflash,
+ drive_get(IF_PFLASH, 0, i));
+
+ pflash_blk = pflash_cfi01_get_blk(pflash);
+ /* pflash0 must exist; otherwise booting from pflash is unsupported. */
+ if(pflash_blk == NULL) {
+ if(i == 0) {
+ return false;
+ } else {
+ break;
+ }
+ }
+
+ /* The image must be a whole number of flash sectors. */
+ size = blk_getlength(pflash_blk);
+ if (size == 0 || size % FLASH_SECTOR_SIZE != 0) {
+ error_report("system firmware block device %s has invalid size "
+ "%" PRId64,
+ blk_name(pflash_blk), size);
+ error_report("its size must be a non-zero multiple of 0x%x",
+ FLASH_SECTOR_SIZE);
+ exit(1);
+ }
+ qdev_prop_set_uint32(DEVICE(pflash), "num-blocks",
+ size / FLASH_SECTOR_SIZE);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(pflash), &error_fatal);
+ if(i == 0) {
+ sysbus_mmio_map(SYS_BUS_DEVICE(pflash), 0, LS_BIOS_BASE);
+ } else {
+ sysbus_mmio_map_overlap(SYS_BUS_DEVICE(pflash), 0, LS_BIOS_VAR_BASE, 1);
+ }
+ }
+
+ return true;
+}
+/*
+ * Set up boot firmware for the ls3a5k board.
+ *
+ * Preference order: pflash drives, then a -bios image, then the built-in
+ * boot code (only for "5000" CPUs).  When a firmware path is used, fw_cfg
+ * is created and populated (including kernel/initrd info and the memory
+ * map table).  Direct-kernel parameters are stashed in 'loaderparams'
+ * for load_kernel().
+ */
+static void ls3a5k_bios_init(LoongarchMachineState *lsms,
+                             ram_addr_t ram_size,
+                             uint64_t highram_size,
+                             uint64_t phyAddr_initrd,
+                             const char *kernel_filename,
+                             const char *kernel_cmdline,
+                             const char *initrd_filename)
+{
+    MemoryRegion *bios;
+    bool fw_cfg_used = false;
+    LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+    char *filename;
+    int bios_size;
+    const char *bios_name;
+
+    bios_name = MACHINE(lsms)->firmware;
+    if (kernel_filename) {
+        loaderparams.ram_size = ram_size;
+        loaderparams.kernel_filename = kernel_filename;
+        loaderparams.kernel_cmdline = kernel_cmdline;
+        loaderparams.initrd_filename = initrd_filename;
+    }
+
+    if (loongarch_system_flash_init(lsms)) {
+        fw_cfg_used = true;
+    } else {
+        bios = g_new(MemoryRegion, 1);
+        memory_region_init_ram(bios, NULL, "loongarch.bios", LS_BIOS_SIZE,
+                               &error_fatal);
+        memory_region_set_readonly(bios, true);
+        memory_region_add_subregion(get_system_memory(), LS_BIOS_BASE, bios);
+
+        /* BIOS load */
+        if (bios_name) {
+            if (access(bios_name, R_OK) == 0) {
+                /* bios_name is a readable path; load it directly. */
+                load_image_targphys(bios_name, LS_BIOS_BASE, LS_BIOS_SIZE);
+            } else {
+                filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+                /*
+                 * Fix: qemu_find_file() returns NULL when the image cannot
+                 * be located; the original code passed that NULL straight
+                 * to load_image_targphys().
+                 */
+                if (!filename) {
+                    error_report("could not find BIOS image '%s'", bios_name);
+                    exit(1);
+                }
+                load_image_targphys(filename, LS_BIOS_BASE, LS_BIOS_SIZE);
+                g_free(filename);
+            }
+            fw_cfg_used = true;
+        } else {
+            if (strstr(lsmc->cpu_name, "5000")) {
+                /* No firmware supplied: install the built-in boot code. */
+                bios_size = sizeof(ls3a5k_aui_boot_code);
+                rom_add_blob_fixed("bios", ls3a5k_aui_boot_code, bios_size,
+                                   LS_BIOS_BASE);
+            }
+
+            if (kernel_filename) {
+                lsms->reset_info[0]->vector = load_kernel();
+            }
+        }
+    }
+
+    loongarch_system_flash_cleanup_unused(lsms);
+
+    if (fw_cfg_used) {
+        lsms->fw_cfg = loongarch_fw_cfg_init(ram_size, lsms);
+        rom_set_fw(lsms->fw_cfg);
+        fw_conf_init(ram_size);
+        rom_add_blob_fixed("fw_conf", (void *)&fw_config,
+                           sizeof(fw_config), FW_CONF_ADDR);
+
+        if (kernel_filename) {
+            fw_cfg_add_kernel_info(lsms->fw_cfg, highram_size, phyAddr_initrd);
+        }
+    }
+
+    if (lsms->fw_cfg != NULL) {
+        fw_cfg_add_file(lsms->fw_cfg, "etc/memmap",
+                        la_memmap_table,
+                        sizeof(struct la_memmap_entry) * (la_memmap_entries));
+    }
+}
+/*
+ * Allocate the machine's flattened device tree and fill in the root-node
+ * header properties.  Exits on allocation failure.
+ */
+static void create_fdt(LoongarchMachineState *lsms)
+{
+ lsms->fdt = create_device_tree(&lsms->fdt_size);
+ if (!lsms->fdt) {
+ error_report("create_device_tree() failed");
+ exit(1);
+ }
+
+ /* Header */
+ qemu_fdt_setprop_string(lsms->fdt, "/", "compatible",
+ "linux,dummy-loongson3");
+ qemu_fdt_setprop_cell(lsms->fdt, "/", "#address-cells", 0x2);
+ qemu_fdt_setprop_cell(lsms->fdt, "/", "#size-cells", 0x2);
+}
+
+/*
+ * Add /cpus nodes for each boot-time vcpu plus the /cpus/cpu-map topology
+ * (socket/core[/thread]) linking each map entry back to its cpu node by
+ * phandle.  Nodes are created in descending order so they appear in
+ * ascending order in the final FDT.
+ */
+static void fdt_add_cpu_nodes(const LoongarchMachineState *lsms)
+{
+ int num;
+ const MachineState *ms = MACHINE(lsms);
+ int smp_cpus = ms->smp.cpus;
+
+ qemu_fdt_add_subnode(lsms->fdt, "/cpus");
+ qemu_fdt_setprop_cell(lsms->fdt, "/cpus", "#address-cells", 0x1);
+ qemu_fdt_setprop_cell(lsms->fdt, "/cpus", "#size-cells", 0x0);
+
+ /* cpu nodes */
+ for (num = smp_cpus - 1; num >= 0; num--) {
+ char *nodename = g_strdup_printf("/cpus/cpu@%d", num);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num));
+
+ qemu_fdt_add_subnode(lsms->fdt, nodename);
+ qemu_fdt_setprop_string(lsms->fdt, nodename, "device_type", "cpu");
+ qemu_fdt_setprop_string(lsms->fdt, nodename, "compatible",
+ cpu->dtb_compatible);
+ qemu_fdt_setprop_cell(lsms->fdt, nodename, "reg", num);
+ qemu_fdt_setprop_cell(lsms->fdt, nodename, "phandle",
+ qemu_fdt_alloc_phandle(lsms->fdt));
+ g_free(nodename);
+ }
+
+ /*cpu map */
+ qemu_fdt_add_subnode(lsms->fdt, "/cpus/cpu-map");
+
+ for (num = smp_cpus - 1; num >= 0; num--) {
+ char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num);
+ char *map_path;
+
+ /* Emit a thread level only when SMT is configured. */
+ if (ms->smp.threads > 1) {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/core%d/thread%d",
+ num / (ms->smp.cores * ms->smp.threads),
+ (num / ms->smp.threads) % ms->smp.cores,
+ num % ms->smp.threads);
+ } else {
+ map_path = g_strdup_printf(
+ "/cpus/cpu-map/socket%d/core%d",
+ num / ms->smp.cores,
+ num % ms->smp.cores);
+ }
+ qemu_fdt_add_path(lsms->fdt, map_path);
+ qemu_fdt_setprop_phandle(lsms->fdt, map_path, "cpu", cpu_path);
+
+ g_free(map_path);
+ g_free(cpu_path);
+ }
+}
+
+/*
+ * Describe the fw_cfg MMIO device (created in loongarch_fw_cfg_init) in
+ * the FDT so the guest can discover it via "qemu,fw-cfg-mmio".
+ */
+static void fdt_add_fw_cfg_node(const LoongarchMachineState *lsms)
+{
+ char *nodename;
+ hwaddr base = FW_CFG_ADDR;
+
+ nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base);
+ qemu_fdt_add_subnode(lsms->fdt, nodename);
+ qemu_fdt_setprop_string(lsms->fdt, nodename,
+ "compatible", "qemu,fw-cfg-mmio");
+ qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "reg",
+ 2, base, 2, 0x8);
+ qemu_fdt_setprop(lsms->fdt, nodename, "dma-coherent", NULL, 0);
+ g_free(nodename);
+}
+
+/*
+ * Describe the generic-ECAM PCIe host bridge in the FDT: config space at
+ * LS_PCIECFG_BASE, plus I/O-port and 32-bit MMIO ranges.
+ */
+static void fdt_add_pcie_node(const LoongarchMachineState *lsms)
+{
+ char *nodename;
+ hwaddr base_mmio = PCIE_MEMORY_BASE;
+ hwaddr size_mmio = PCIE_MEMORY_SIZE;
+ hwaddr base_pio = LS3A5K_ISA_IO_BASE;
+ hwaddr size_pio = LS_ISA_IO_SIZE;
+ hwaddr base_pcie = LS_PCIECFG_BASE;
+ hwaddr size_pcie = LS_PCIECFG_SIZE;
+ hwaddr base = base_pcie;
+
+ nodename = g_strdup_printf("/pcie@%" PRIx64, base);
+ qemu_fdt_add_subnode(lsms->fdt, nodename);
+ qemu_fdt_setprop_string(lsms->fdt, nodename,
+ "compatible", "pci-host-ecam-generic");
+ qemu_fdt_setprop_string(lsms->fdt, nodename, "device_type", "pci");
+ qemu_fdt_setprop_cell(lsms->fdt, nodename, "#address-cells", 3);
+ qemu_fdt_setprop_cell(lsms->fdt, nodename, "#size-cells", 2);
+ qemu_fdt_setprop_cell(lsms->fdt, nodename, "linux,pci-domain", 0);
+ /* Bus range is bounded by how many buses the ECAM window can hold. */
+ qemu_fdt_setprop_cells(lsms->fdt, nodename, "bus-range", 0,
+ PCIE_MMCFG_BUS(LS_PCIECFG_SIZE - 1));
+ qemu_fdt_setprop(lsms->fdt, nodename, "dma-coherent", NULL, 0);
+ qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "reg",
+ 2, base_pcie, 2, size_pcie);
+ qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "ranges",
+ 1, FDT_PCI_RANGE_IOPORT, 2, 0,
+ 2, base_pio, 2, size_pio,
+ 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
+ 2, base_mmio, 2, size_mmio);
+ g_free(nodename);
+}
+
+/*
+ * Create the dynamic-sysbus platform bus, wire its interrupt lines into
+ * the 7A interrupt controller array 'pic' (indexed relative to
+ * LOONGARCH_PCH_IRQ_BASE) and map its MMIO window.
+ */
+static void create_platform_bus(LoongarchMachineState *s, qemu_irq *pic)
+{
+ DeviceState *dev;
+ SysBusDevice *sysbus;
+ int i;
+ MemoryRegion *sysmem = get_system_memory();
+
+ dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
+ dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
+ qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
+ qdev_prop_set_uint32(dev, "mmio_size", VIRT_PLATFORM_BUS_SIZE);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ s->platform_bus_dev = dev;
+
+ sysbus = SYS_BUS_DEVICE(dev);
+ for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) {
+ int irq = VIRT_PLATFORM_BUS_IRQ + i;
+ sysbus_connect_irq(sysbus, i, pic[irq - LOONGARCH_PCH_IRQ_BASE]);
+ }
+
+ memory_region_add_subregion(sysmem,
+ VIRT_PLATFORM_BUS_BASEADDRESS,
+ sysbus_mmio_get_region(sysbus, 0));
+}
+
+/*
+ * Board init for the ls3a5k/7A machine: validates the cpu model, creates
+ * vcpus and their internal devices, lays out RAM (low 256MB alias at 0,
+ * the rest at 0x90000000, then one region per extra NUMA node), loads
+ * firmware/kernel, sets up hotplug memory, the 7A interrupt controller,
+ * PCI, serial, network, VGA and the TCG chip-config MMIO windows.
+ */
+static void ls3a5k_init(MachineState *args)
+{
+ int i;
+ const char *cpu_model = args->cpu_type;
+ const char *kernel_filename = args->kernel_filename;
+ const char *kernel_cmdline = args->kernel_cmdline;
+ const char *initrd_filename = args->initrd_filename;
+
+ ram_addr_t ram_size = args->ram_size;
+ MemoryRegion *address_space_mem = get_system_memory();
+ ram_addr_t offset = 0;
+ MachineState *machine = args;
+ MachineClass *mc = MACHINE_GET_CLASS(machine);
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+ int smp_cpus = machine->smp.cpus;
+ int nb_numa_nodes = machine->numa_state->num_nodes;
+ NodeInfo *numa_info = machine->numa_state->nodes;
+ LOONGARCHCPU *cpu;
+ CPULOONGARCHState *env;
+ qemu_irq *ls7a_apic = NULL;
+ qemu_irq *pirq = NULL;
+ PCIBus *pci_bus = NULL;
+ char *ramName = NULL;
+ uint64_t lowram_size = 0, highram_size = 0, phyAddr = 0,
+ memmap_size = 0, highram_end_addr = 0;
+
+ CPUArchIdList *possible_cpus;
+ /* "5000"-class machines only accept the matching cpu model or -cpu host. */
+ if (strstr(lsmc->cpu_name, "5000")) {
+ if (strcmp(cpu_model, LOONGARCH_CPU_TYPE_NAME("Loongson-3A5000")) &&
+ strcmp(cpu_model, LOONGARCH_CPU_TYPE_NAME("host"))) {
+ error_report("machine type %s does not match cpu type %s",
+ lsmc->cpu_name, cpu_model);
+ exit(1);
+ }
+ if (kvm_enabled()) {
+ kvm_vm_ioctl(kvm_state, KVM_LARCH_SET_CPUCFG, ls3a5k_cpucfgs);
+ }
+ }
+
+ create_fdt(lsms);
+
+ DPRINTF("isa 0x%lx\n", lsmc->isa_io_base);
+ DPRINTF("cpu_name %s bridge_name %s\n",
+ lsmc->cpu_name, lsmc->bridge_name);
+
+ /* init CPUs */
+ mc->possible_cpu_arch_ids(machine);
+ possible_cpus = machine->possible_cpus;
+
+ for (i = 0; i < smp_cpus; i++) {
+ Object *obj = NULL;
+ Error *local_err = NULL;
+
+ obj = object_new(possible_cpus->cpus[i].type);
+
+ object_property_set_uint(obj, "id", possible_cpus->cpus[i].arch_id,
+ &local_err);
+ object_property_set_bool(obj, "realized", true, &local_err);
+
+ /* NOTE(review): obj is used after object_unref(); presumably the
+ * realized device keeps a reference via its parent — confirm. */
+ object_unref(obj);
+ error_propagate(&error_fatal, local_err);
+
+ cpu = LOONGARCH_CPU(CPU(obj));
+ if (cpu == NULL) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+
+ env = &cpu->env;
+ cpu_states[i] = env;
+ env->CSR_TMID |= i;
+
+ lsms->reset_info[i] = g_malloc0(sizeof(ResetData));
+ lsms->reset_info[i]->cpu = cpu;
+ lsms->reset_info[i]->vector = env->active_tc.PC;
+ if (i == 0) {
+ qemu_register_reset(main_cpu_reset, lsms->reset_info[i]);
+ } else {
+ qemu_register_reset(slave_cpu_reset, lsms->reset_info[i]);
+ }
+
+ /* Init CPU internal devices */
+ cpu_init_irq(cpu);
+ cpu_loongarch_clock_init(cpu);
+ cpu_init_ipi(lsms, env->irq[12], i);
+ cpu_init_apic(lsms, env, i);
+ }
+
+ lsms->hotpluged_cpu_num = 0;
+ fdt_add_cpu_nodes(lsms);
+ env = cpu_states[0];
+
+ /* node0 mem*/
+ /* First 256MB of RAM is aliased at physical address 0. */
+ phyAddr = (uint64_t)0;
+ MemoryRegion *lowmem = g_new(MemoryRegion, 1);
+ ramName = g_strdup_printf("loongarch_ls3a.node%d.lowram", 0);
+
+ lowram_size = MIN(ram_size, 256 * 0x100000);
+ memory_region_init_alias(lowmem, NULL, ramName, machine->ram, 0, lowram_size);
+ memory_region_add_subregion(address_space_mem, phyAddr, lowmem);
+
+ offset += lowram_size;
+ if (nb_numa_nodes > 0) {
+ highram_size = numa_info[0].node_mem - 256 * MiB;
+ if (numa_info[0].node_mem > GiB) {
+ memmap_size = numa_info[0].node_mem - GiB;
+ la_memmap_add_entry(0xc0000000ULL, memmap_size, SYSTEM_RAM);
+ }
+ } else {
+ highram_size = ram_size - 256 * MiB;
+ if (ram_size > GiB) {
+ memmap_size = ram_size - GiB;
+ la_memmap_add_entry(0xc0000000ULL, memmap_size, SYSTEM_RAM);
+ }
+ }
+
+ /* Remaining node-0 RAM is mapped at 0x90000000. */
+ phyAddr = (uint64_t)0x90000000;
+ MemoryRegion *highmem = g_new(MemoryRegion, 1);
+ ramName = g_strdup_printf("loongarch_ls3a.node%d.highram", 0);
+ memory_region_init_alias(highmem, NULL, ramName,
+ machine->ram, offset, highram_size);
+ memory_region_add_subregion(address_space_mem,
+ phyAddr, highmem);
+ offset += highram_size;
+ phyAddr += highram_size;
+
+ /* initrd address use high mem from high to low */
+ highram_end_addr = phyAddr;
+ /* node1~ nodemax */
+ for (i = 1; i < nb_numa_nodes; i++) {
+ MemoryRegion *nodemem = g_new(MemoryRegion, 1);
+ ramName = g_strdup_printf("loongarch_ls3a.node%d.ram", i);
+ memory_region_init_alias(nodemem, NULL, ramName,
+ machine->ram, offset, numa_info[i].node_mem);
+ memory_region_add_subregion(address_space_mem,
+ phyAddr, nodemem);
+ la_memmap_add_entry(phyAddr, numa_info[i].node_mem, SYSTEM_RAM);
+ offset += numa_info[i].node_mem;
+ phyAddr += numa_info[i].node_mem;
+ }
+
+ fdt_add_fw_cfg_node(lsms);
+ ls3a5k_bios_init(lsms, ram_size, highram_size, highram_end_addr,
+ kernel_filename, kernel_cmdline, initrd_filename);
+
+ lsms->machine_done.notify = loongarch_machine_done;
+ qemu_add_machine_init_done_notifier(&lsms->machine_done);
+ /*vmstate_register_ram_global(bios);*/
+
+ /* initialize hotplug memory address space */
+ lsms->hotplug_memory_size = 0;
+
+ /* always allocate the device memory information */
+ machine->device_memory = g_malloc0(sizeof(*machine->device_memory));
+ if (machine->ram_size < machine->maxram_size) {
+ int max_memslots;
+
+ lsms->hotplug_memory_size = machine->maxram_size - machine->ram_size;
+ /*
+ * Limit the number of hotpluggable memory slots to half the number
+ * slots that KVM supports, leaving the other half for PCI and other
+ * devices. However ensure that number of slots doesn't drop below 32.
+ */
+ max_memslots = LOONGARCH_MAX_RAM_SLOTS;
+ if (kvm_enabled()) {
+ max_memslots = kvm_get_max_memslots() / 2 ;
+ }
+
+ if (machine->ram_slots == 0)
+ machine->ram_slots = lsms->hotplug_memory_size /
+ LOONGARCH_HOTPLUG_MEM_ALIGN;
+
+ if (machine->ram_slots > max_memslots) {
+ error_report("Specified number of memory slots %"
+ PRIu64" exceeds max supported %d",
+ machine->ram_slots, max_memslots);
+ exit(1);
+ }
+
+ lsms->ram_slots = machine->ram_slots;
+
+ machine->device_memory->base = get_hotplug_membase(machine->ram_size);
+ memory_region_init(&machine->device_memory->mr, OBJECT(lsms),
+ "device-memory", lsms->hotplug_memory_size);
+ memory_region_add_subregion(get_system_memory(),
+ machine->device_memory->base,
+ &machine->device_memory->mr);
+ }
+
+ if (!strcmp(lsmc->bridge_name, "ls7a")) {
+ /*Initialize the 7A IO interrupt subsystem*/
+ DeviceState *ls7a_dev;
+ lsms->apic_xrupt_override = kvm_irqchip_in_kernel();
+ ls7a_apic = ls3a_intctl_init(machine, cpu_states);
+ if (!ls7a_apic) {
+ perror("Init 7A APIC failed\n");
+ exit(1);
+ }
+ pci_bus = ls7a_init(machine, ls7a_apic, &ls7a_dev);
+
+ object_property_add_link(OBJECT(machine),
+ LOONGARCH_MACHINE_ACPI_DEVICE_PROP,
+ TYPE_HOTPLUG_HANDLER,
+ (Object **)&lsms->acpi_dev,
+ object_property_allow_set_link,
+ OBJ_PROP_LINK_STRONG);
+ object_property_set_link(OBJECT(machine), LOONGARCH_MACHINE_ACPI_DEVICE_PROP,
+ OBJECT(ls7a_dev), &error_abort);
+
+ create_platform_bus(lsms, ls7a_apic);
+
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ kvm_direct_msi_allowed = (kvm_check_extension(kvm_state,
+ KVM_CAP_SIGNAL_MSI) > 0);
+ } else {
+ kvm_direct_msi_allowed = 0;
+ }
+ msi_nonbroken = kvm_direct_msi_allowed;
+#else
+ msi_nonbroken = true;
+#endif
+ sysbus_create_simple("ls7a_rtc", LS7A_RTC_REG_BASE,
+ ls7a_apic[LS7A_RTC_IRQ - LOONGARCH_PCH_IRQ_BASE]);
+ }
+
+ /*Initialize the CPU serial device*/
+
+ /* NOTE(review): if bridge_name is not "ls7a", ls7a_apic and pci_bus
+ * remain NULL here and the serial/network/VGA setup below would
+ * operate on NULL pointers — confirm only "ls7a" boards use this. */
+ if (serial_hd(0)) {
+ pirq = qemu_allocate_irqs(legacy_set_irq, ls7a_apic +
+ (LS7A_UART_IRQ - LOONGARCH_PCH_IRQ_BASE), 1);
+ serial_mm_init(address_space_mem, LS7A_UART_BASE, 0, pirq[0],
+ 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN);
+ }
+
+ /*network card*/
+ network_init(pci_bus);
+ /* VGA setup. Don't bother loading the bios. */
+ pci_vga_init(pci_bus);
+
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(qdev_new("iocsr")), &error_fatal);
+
+#ifdef CONFIG_TCG
+ /* Map the per-node chip-config MMIO windows (4 cpus per node). */
+ int nb_nodes = (smp_cpus - 1) / 4;
+ for (i = 0; i <= nb_nodes; i++) {
+ uint64_t off = (uint64_t)i << 44;
+ SIMPLE_OPS(((hwaddr)0x1fe00180 | off), 0x8);
+ SIMPLE_OPS(((hwaddr)0x1fe0019c | off), 0x8);
+ SIMPLE_OPS(((hwaddr)0x1fe001d0 | off), 0x8);
+ SIMPLE_OPS(((hwaddr)FEATURE_REG | off), 0x8);
+ SIMPLE_OPS(((hwaddr)VENDOR_REG | off), 0x8);
+ SIMPLE_OPS(((hwaddr)CPUNAME_REG | off), 0x8);
+ SIMPLE_OPS(((hwaddr)OTHER_FUNC_REG | off), 0x8);
+ }
+
+ SIMPLE_OPS(0x1001041c, 0x4);
+ SIMPLE_OPS(0x10002000, 0x14);
+ SIMPLE_OPS(0x10013ffc, 0x4);
+#endif
+
+ fdt_add_pcie_node(lsms);
+
+}
+
+/*
+ * Build (once) and return the list of possible vcpus.  arch_id and
+ * core_id are both the linear index; each entry represents one vcpu.
+ */
+static const CPUArchIdList *loongarch_possible_cpu_arch_ids(MachineState *ms)
+{
+ int i;
+ int max_cpus = ms->smp.max_cpus;
+
+ if (ms->possible_cpus) {
+ /*
+ * make sure that max_cpus hasn't changed since the first use, i.e.
+ * -smp hasn't been parsed after it
+ */
+ assert(ms->possible_cpus->len == max_cpus);
+ return ms->possible_cpus;
+ }
+
+ ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+ sizeof(CPUArchId) * max_cpus);
+ ms->possible_cpus->len = max_cpus;
+ for (i = 0; i < ms->possible_cpus->len; i++) {
+ ms->possible_cpus->cpus[i].type = ms->cpu_type;
+ ms->possible_cpus->cpus[i].vcpus_count = 1;
+ ms->possible_cpus->cpus[i].props.has_core_id = true;
+ ms->possible_cpus->cpus[i].props.core_id = i;
+ ms->possible_cpus->cpus[i].arch_id = i;
+ }
+ return ms->possible_cpus;
+
+}
+
+/*
+ * Create (but do not realize) a CFI01 pflash device as a child of the
+ * machine, with an alias property so -machine pflashN=... can attach a
+ * drive later.  Realization happens in loongarch_system_flash_init().
+ */
+static PFlashCFI01 *loongarch_pflash_create(LoongarchMachineState *lsms,
+ const char *name,
+ const char *alias_prop_name)
+{
+ DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01);
+
+ qdev_prop_set_uint64(dev, "sector-length", FLASH_SECTOR_SIZE);
+ qdev_prop_set_uint8(dev, "width", 1);
+ qdev_prop_set_string(dev, "name", name);
+ object_property_add_child(OBJECT(lsms), name, OBJECT(dev));
+ object_property_add_alias(OBJECT(lsms), alias_prop_name,
+ OBJECT(dev), "drive");
+ return PFLASH_CFI01(dev);
+}
+
+
+/* Pre-create the two system flash devices (pflash0 / pflash1). */
+static void loongarch_system_flash_create(LoongarchMachineState *lsms)
+{
+ lsms->flash[0] = loongarch_pflash_create(lsms, "system.flash0",
+ "pflash0");
+ lsms->flash[1] = loongarch_pflash_create(lsms, "system.flash1",
+ "pflash1");
+}
+
+/*
+ * Machine instance_init: seed the ACPI-build flag from the class default,
+ * pre-create the flash devices and set the ACPI OEM id strings (both
+ * truncated/padded to 6 characters as ACPI requires).
+ */
+static void loongarch_machine_initfn(Object *obj)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(obj);
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+ lsms->acpi_build_enabled = lsmc->has_acpi_build;
+ loongarch_system_flash_create(lsms);
+ lsms->oem_id = g_strndup(EFI_ACPI_OEM_ID, 6);
+ lsms->oem_table_id = g_strndup(EFI_ACPI_OEM_TABLE_ID, 6);
+}
+
+/*
+ * Class options for the ls3a5k + LS7A1000 machine: base addresses, the
+ * host-derived cpu model name and the "ls7a" bridge selection.  The
+ * strncpy results are explicitly NUL-terminated below.
+ */
+static void ls3a5k_ls7a_machine_options(MachineClass *m)
+{
+ char *cpu_name = get_host_cpu_model_name();
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_CLASS(m);
+ m->desc = "Loongarch3a5k LS7A1000 machine";
+ m->max_cpus = LOONGARCH_MAX_VCPUS;
+ m->alias = "loongson7a";
+ m->is_default = 1;
+ lsmc->isa_io_base = LS3A5K_ISA_IO_BASE;
+ lsmc->pciecfg_base = LS_PCIECFG_BASE;
+ lsmc->ls7a_ioapic_reg_base = LS3A5K_LS7A_IOAPIC_REG_BASE;
+ lsmc->node_shift = 44;
+ strncpy(lsmc->cpu_name, cpu_name, sizeof(lsmc->cpu_name) - 1);
+ lsmc->cpu_name[sizeof(lsmc->cpu_name) - 1] = 0;
+ strncpy(lsmc->bridge_name, "ls7a", sizeof(lsmc->bridge_name) - 1);
+ lsmc->bridge_name[sizeof(lsmc->bridge_name) - 1] = 0;
+ compat_props_add(m->compat_props, loongarch_compat, loongarch_compat_len);
+}
+
+/*
+ * Board reset hook: after the generic device reset, clear the in-kernel
+ * GIPI and 7A IOAPIC irqchip state by pushing zeroed state blobs to KVM.
+ */
+static void ls3a_board_reset(MachineState *ms)
+{
+ qemu_devices_reset();
+#ifdef CONFIG_KVM
+ struct loongarch_kvm_irqchip *chip;
+ int length;
+
+ if (!kvm_enabled()) {
+ return;
+ }
+ length = sizeof(struct loongarch_kvm_irqchip) +
+ sizeof(struct loongarch_gipiState);
+ chip = g_malloc0(length);
+ /* NOTE(review): redundant — g_malloc0() already zeroed the buffer. */
+ memset(chip, 0, length);
+ chip->chip_id = KVM_IRQCHIP_LS3A_GIPI;
+ chip->len = length;
+ kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+
+ /* Reuse the buffer for the (differently sized) IOAPIC state. */
+ length = sizeof(struct loongarch_kvm_irqchip) + sizeof(struct ls7a_ioapic_state);
+ chip = g_realloc(chip, length);
+ memset(chip, 0, length);
+ chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC;
+ chip->len = length;
+ kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+
+ g_free(chip);
+#endif
+}
+
+/* Map a linear cpu index to its topology properties (1:1 on this board). */
+static CpuInstanceProperties ls3a_cpu_index_to_props(MachineState *ms, unsigned cpu_index)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+
+ assert(cpu_index < possible_cpus->len);
+ return possible_cpus->cpus[cpu_index].props;
+}
+
+/*
+ * Default NUMA node for vcpu 'idx': distribute whole cores round-robin
+ * across nodes; with no NUMA configured everything lands on node 0.
+ */
+static int64_t ls3a_get_default_cpu_node_id(const MachineState *ms, int idx)
+{
+ int nb_numa_nodes = ms->numa_state->num_nodes;
+ int smp_cores = ms->smp.cores;
+
+ if (nb_numa_nodes == 0) {
+ nb_numa_nodes = 1;
+ }
+ return idx / smp_cores % nb_numa_nodes;
+}
+
+/*
+ * Class init for the abstract LoongArch machine: wire the hotplug
+ * callbacks (saving the parent's handler in the machine class first),
+ * set board defaults and register the "acpi" machine property.
+ */
+static void loongarch_class_init(ObjectClass *oc, void *data)
+{
+ MachineClass *mc = MACHINE_CLASS(oc);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_CLASS(oc);
+
+ /* Save the parent handler so the override below can chain to it. */
+ lsmc->get_hotplug_handler = mc->get_hotplug_handler;
+ lsmc->has_acpi_build = true;
+ mc->get_hotplug_handler = loongarch_get_hotpug_handler;
+ mc->has_hotpluggable_cpus = true;
+ mc->cpu_index_to_instance_props = ls3a_cpu_index_to_props;
+ mc->possible_cpu_arch_ids = loongarch_possible_cpu_arch_ids;
+ mc->get_default_cpu_node_id = ls3a_get_default_cpu_node_id;
+ mc->default_ram_size = 1 * GiB;
+ mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("Loongson-3A5000");
+ mc->default_ram_id = "loongarch_ls3a.ram";
+
+#ifdef CONFIG_TPM
+ machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
+#endif
+
+ mc->reset = ls3a_board_reset;
+ mc->max_cpus = LOONGARCH_MAX_VCPUS;
+ hc->pre_plug = loongarch_machine_device_pre_plug;
+ hc->plug = loongarch_machine_device_plug;
+ hc->unplug = longson_machine_device_unplug;
+ hc->unplug_request = loongarch_machine_device_unplug_request;
+
+ object_class_property_add(oc, "acpi", "OnOffAuto",
+ loongarch_get_acpi, loongarch_set_acpi,
+ NULL, NULL);
+ object_class_property_set_description(oc, "acpi",
+ "Enable ACPI");
+}
+
+/*
+ * Abstract base type for LoongArch machines; concrete boards are
+ * generated below via DEFINE_LS3A5K_MACHINE.  Implements the
+ * hotplug-handler interface for cpu/memory hotplug.
+ */
+static const TypeInfo loongarch_info = {
+ .name = TYPE_LOONGARCH_MACHINE,
+ .parent = TYPE_MACHINE,
+ .abstract = true,
+ .instance_size = sizeof(LoongarchMachineState),
+ .instance_init = loongarch_machine_initfn,
+ .class_size = sizeof(LoongarchMachineClass),
+ .class_init = loongarch_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ },
+
+};
+
+static void loongarch_machine_register_types(void)
+{
+ type_register_static(&loongarch_info);
+}
+
+type_init(loongarch_machine_register_types)
+
+/* Concrete "loongson7a_v1.0" machine built on the abstract base type. */
+DEFINE_LS3A5K_MACHINE(loongson7a_v1_0, "loongson7a_v1.0",
+ ls3a5k_ls7a_machine_options);
diff --git a/hw/loongarch/larch_hotplug.c b/hw/loongarch/larch_hotplug.c
new file mode 100644
index 0000000000000000000000000000000000000000..bb3e9826b2ddf1933ee0b1227445ca99ce59f0de
--- /dev/null
+++ b/hw/loongarch/larch_hotplug.c
@@ -0,0 +1,369 @@
+/*
+ * Hotplug emulation on Loongarch system.
+ *
+ * Copyright (c) 2018 Loongarch Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "qemu/queue.h"
+#include "qemu/units.h"
+#include "qemu/cutils.h"
+#include "qemu/bcd.h"
+#include "hw/hotplug.h"
+#include "hw/loongarch/cpudevs.h"
+#include "hw/mem/memory-device.h"
+#include "sysemu/numa.h"
+#include "sysemu/cpus.h"
+#include "hw/loongarch/larch.h"
+#include "hw/cpu/core.h"
+#include "hw/nvram/fw_cfg.h"
+#include "hw/platform-bus.h"
+
+/* find cpu slot in machine->possible_cpus by core_id */
+/*
+ * Returns the CPUArchId slot for @id, or NULL when @id is out of range.
+ * On success, *@idx (if non-NULL) receives the slot index (same as @id
+ * here, since ids map 1:1 onto possible_cpus entries).
+ */
+static CPUArchId *loongarch_find_cpu_slot(MachineState *ms, uint32_t id,
+ int *idx)
+{
+ int index = id;
+
+ if (index >= ms->possible_cpus->len) {
+ return NULL;
+ }
+ if (idx) {
+ *idx = index;
+ }
+ return &ms->possible_cpus->cpus[index];
+}
+
+/*
+ * Plug handler for PC-DIMM devices: validates the alignment of the
+ * hotplugged region, attaches the DIMM to machine memory, then notifies
+ * the ACPI device so the guest gets a hotplug event.
+ */
+static void loongarch_memory_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ Error *local_err = NULL;
+ LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev);
+ HotplugHandlerClass *hhc;
+ uint64_t size;
+
+ size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort);
+ if (size % LOONGARCH_HOTPLUG_MEM_ALIGN) {
+ error_setg(&local_err, "Hotplugged memory size must be a multiple of "
+ "%lld MB", LOONGARCH_HOTPLUG_MEM_ALIGN / MiB);
+ goto out;
+ }
+
+ pc_dimm_plug(PC_DIMM(dev), MACHINE(lsms));
+
+ /* ACPI notification failure is considered fatal here (error_abort). */
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->plug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &error_abort);
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Start an asynchronous memory unplug: forward the request to the ACPI
+ * device, which asks the guest to release the DIMM. Fails if ACPI is
+ * absent or disabled, since the guest could never be notified.
+ */
+static void loongarch_memory_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ Error *local_err = NULL;
+ HotplugHandlerClass *hhc;
+ LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev);
+
+ if (!lsms->acpi_dev || !loongarch_is_acpi_enabled(lsms)) {
+ error_setg(&local_err,
+ "memory hotplug is not enabled: missing acpi device or acpi disabled");
+ goto out;
+ }
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->unplug_request(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Finish a CPU unplug after the guest has acked: tell ACPI, destroy the
+ * vCPU, clear its possible_cpus slot and drop the hotplugged-CPU count.
+ */
+static void loongarch_cpu_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ CPUArchId *found_cpu;
+ HotplugHandlerClass *hhc;
+ Error *local_err = NULL;
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->unplug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+
+ if (local_err) {
+ goto out;
+ }
+
+ loongarch_cpu_destroy(machine, cpu);
+
+ /* Slot must exist: it was validated at pre-plug time. */
+ found_cpu = loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, NULL);
+ found_cpu->cpu = NULL;
+ object_unparent(OBJECT(dev));
+ lsms->hotpluged_cpu_num -= 1;
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Finish a memory unplug after guest ack: notify ACPI, detach the DIMM
+ * from machine memory and release the device object.
+ */
+static void loongarch_memory_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ Error *local_err = NULL;
+ HotplugHandlerClass *hhc;
+ LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev);
+
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->unplug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+
+ if (local_err) {
+ goto out;
+ }
+
+ pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev));
+ object_unparent(OBJECT(dev));
+
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Pre-plug validation for a LoongArch CPU: checks the CPU type, assigns
+ * a core id when unset (hotplugged CPUs must fill slots in order), and
+ * verifies the target possible_cpus slot is free before NUMA binding.
+ *
+ * Fix vs. original: numa_cpu_pre_plug() reports through @local_err, but
+ * the old code did "return ;" immediately afterwards, skipping the out:
+ * label and silently discarding any NUMA binding error. Fall through to
+ * the propagation instead.
+ */
+static void loongarch_cpu_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ MachineState *ms = MACHINE(OBJECT(hotplug_dev));
+ MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
+ LoongarchMachineState *lsms = LoongarchMACHINE(ms);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+ CPUArchId *cpu_slot;
+ Error *local_err = NULL;
+ int index;
+ /* Next sequential core id: boot CPUs first, then hotplugged ones. */
+ int free_index = lsms->hotpluged_cpu_num + ms->smp.cpus;
+ int max_cpus = ms->smp.max_cpus;
+
+ if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
+ error_setg(&local_err, "CPU hotplug not supported for this machine");
+ goto out;
+ }
+
+ if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
+ error_setg(&local_err, "Invalid CPU type, expected cpu type: '%s'",
+ ms->cpu_type);
+ goto out;
+ }
+
+ /* if ID is not set, set it based on core properties */
+ if (cpu->id == UNASSIGNED_CPU_ID) {
+ if ((cpu->core_id) > (max_cpus - 1)) {
+ error_setg(&local_err, "Invalid CPU core-id: %u must be in range 0:%u",
+ cpu->core_id, max_cpus - 1);
+ goto out;
+ }
+
+ if (free_index > (max_cpus - 1)) {
+ error_setg(&local_err, "The maximum number of CPUs cannot exceed %u.",
+ max_cpus);
+ goto out;
+ }
+
+ /* Hotplug must fill slots in order; reject out-of-order core ids. */
+ if (cpu->core_id != free_index) {
+ error_setg(&local_err, "Invalid CPU core-id: %u must be :%u",
+ cpu->core_id, free_index);
+ goto out;
+ }
+
+ cpu->id = cpu->core_id;
+ }
+
+ cpu_slot = loongarch_find_cpu_slot(MACHINE(hotplug_dev), cpu->id, &index);
+ if (!cpu_slot) {
+ error_setg(&local_err, "core id %d out of range", cpu->id);
+ goto out;
+ }
+
+ if (cpu_slot->cpu) {
+ error_setg(&local_err, "core %d already populated", cpu->id);
+ goto out;
+ }
+
+ /* Any NUMA binding failure is propagated through out: below. */
+ numa_cpu_pre_plug(cpu_slot, dev, &local_err);
+
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Pre-plug validation for a PC-DIMM: requires an enabled ACPI device,
+ * an alignment-conformant region size, and lets the generic DIMM code
+ * pick the address within the hotplug memory region.
+ */
+static void loongarch_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp)
+{
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ PCDIMMDevice *dimm = PC_DIMM(dev);
+ Error *local_err = NULL;
+ uint64_t size;
+
+ if (!lsms->acpi_dev || !loongarch_is_acpi_enabled(lsms)) {
+ error_setg(errp,
+ "memory hotplug is not enabled: missing acpi device or acpi disabled");
+ return;
+ }
+
+ size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (size % LOONGARCH_HOTPLUG_MEM_ALIGN) {
+ error_setg(errp, "Hotplugged memory size must be a multiple of "
+ "%lld MB", LOONGARCH_HOTPLUG_MEM_ALIGN / MiB);
+ return;
+ }
+
+ pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
+}
+
+/*
+ * Plug handler for a LoongArch CPU: create the vCPU, notify ACPI, then
+ * record the CPU in its possible_cpus slot and bump the hotplug count.
+ *
+ * Fix vs. original: loongarch_cpu_create() was called with @errp and its
+ * failure ignored — the code went on to notify ACPI and mark the slot
+ * populated even when no vCPU existed. Route it through @local_err and
+ * bail out on failure before any bookkeeping.
+ */
+static void loongarch_cpu_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ CPUArchId *found_cpu;
+ HotplugHandlerClass *hhc;
+ Error *local_err = NULL;
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+
+ if (lsms->acpi_dev) {
+ loongarch_cpu_create(machine, cpu, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->plug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
+
+ found_cpu = loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, NULL);
+ found_cpu->cpu = OBJECT(dev);
+ lsms->hotpluged_cpu_num += 1;
+out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Start an asynchronous CPU unplug: refuse without ACPI and for the boot
+ * CPU (slot 0), otherwise forward the request to the ACPI device so the
+ * guest is asked to offline the CPU.
+ */
+static void loongarch_cpu_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+ Error *local_err = NULL;
+ HotplugHandlerClass *hhc;
+ int idx = -1;
+
+ if (!lsms->acpi_dev) {
+ error_setg(&local_err, "CPU hot unplug not supported without ACPI");
+ goto out;
+ }
+
+ /* A plugged CPU always has a valid slot, hence the assert. */
+ loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, &idx);
+ assert(idx != -1);
+ if (idx == 0) {
+ error_setg(&local_err, "Boot CPU is unpluggable");
+ goto out;
+ }
+
+ hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+ hhc->unplug_request(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+
+ if (local_err) {
+ goto out;
+ }
+
+ out:
+ error_propagate(errp, local_err);
+}
+
+/*
+ * Machine-level unplug dispatcher: routes DIMMs and CPUs to their
+ * handlers, rejects anything else.
+ * NOTE(review): "longson" is a typo for "loongson", but the name is
+ * referenced by the machine class_init in this same series — renaming
+ * must be done at both sites together.
+ */
+void longson_machine_device_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ loongarch_memory_unplug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ if (!mc->has_hotpluggable_cpus) {
+ error_setg(errp, "CPU hot unplug not supported on this machine");
+ return;
+ }
+ loongarch_cpu_unplug(hotplug_dev, dev, errp);
+ } else {
+ error_setg(errp, "acpi: device unplug for not supported device"
+ " type: %s", object_get_typename(OBJECT(dev)));
+ }
+
+ return;
+}
+
+/*
+ * Machine-level unplug-request dispatcher; other device types are
+ * silently ignored (no error set).
+ */
+void loongarch_machine_device_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ loongarch_memory_unplug_request(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ loongarch_cpu_unplug_request(hotplug_dev, dev, errp);
+ }
+}
+
+/*
+ * MachineClass::get_hotplug_handler hook: the machine itself handles
+ * DIMM, CPU and dynamic sysbus devices; everything else gets the
+ * default handling (NULL).
+ * NOTE(review): "hotpug" is a typo for "hotplug"; the class_init in this
+ * series uses the same spelling, so rename both together if fixed.
+ */
+HotplugHandler *loongarch_get_hotpug_handler(MachineState *machine,
+ DeviceState *dev)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU) ||
+ object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) {
+ return HOTPLUG_HANDLER(machine);
+ }
+ return NULL;
+}
+
+/* Machine-level pre-plug dispatcher for DIMM and CPU devices. */
+void loongarch_machine_device_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ loongarch_memory_pre_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ loongarch_cpu_pre_plug(hotplug_dev, dev, errp);
+ }
+}
+
+/*
+ * Machine-level plug dispatcher: links dynamic sysbus devices onto the
+ * platform bus (when present), then routes DIMMs and CPUs to their
+ * specific plug handlers.
+ */
+void loongarch_machine_device_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev);
+
+ if (lsms->platform_bus_dev) {
+ MachineClass *mc = MACHINE_GET_CLASS(lsms);
+
+ if (device_is_dynamic_sysbus(mc, dev)) {
+ platform_bus_link_device(
+ PLATFORM_BUS_DEVICE(lsms->platform_bus_dev),
+ SYS_BUS_DEVICE(dev));
+ }
+ }
+
+ if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
+ loongarch_memory_plug(hotplug_dev, dev, errp);
+ } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) {
+ loongarch_cpu_plug(hotplug_dev, dev, errp);
+ }
+}
+
diff --git a/hw/loongarch/larch_int.c b/hw/loongarch/larch_int.c
new file mode 100644
index 0000000000000000000000000000000000000000..ca073a19cf8552b4f1973047d0b85b37408a9155
--- /dev/null
+++ b/hw/loongarch/larch_int.c
@@ -0,0 +1,91 @@
+/*
+ * QEMU LOONGARCH interrupt support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/loongarch/cpudevs.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "kvm_larch.h"
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#endif
+
+/*
+ * qemu_irq handler for per-CPU interrupt lines 0..13: mirrors the line
+ * level into CSR_ESTAT, forwards lines 2/3 (HW INT) and 12 (IPI) to KVM
+ * when enabled, and raises/lowers CPU_INTERRUPT_HARD accordingly.
+ *
+ * Fix vs. original: the irq == 2 and irq == 3 branches called the exact
+ * same function with the same arguments; merged into one condition.
+ */
+static void cpu_irq_request(void *opaque, int irq, int level)
+{
+ LOONGARCHCPU *cpu = opaque;
+ CPULOONGARCHState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ bool locked = false;
+
+ if (irq < 0 || irq > 13) {
+ return;
+ }
+
+ /* Make sure locking works even if BQL is already held by the caller */
+ if (!qemu_mutex_iothread_locked()) {
+ locked = true;
+ qemu_mutex_lock_iothread();
+ }
+
+ if (level) {
+ env->CSR_ESTAT |= 1 << irq;
+ } else {
+ env->CSR_ESTAT &= ~(1 << irq);
+ }
+
+ if (kvm_enabled()) {
+ if (irq == 2 || irq == 3) {
+ kvm_loongarch_set_interrupt(cpu, irq, level);
+ } else if (irq == 12) {
+ kvm_loongarch_set_ipi_interrupt(cpu, irq, level);
+ }
+ }
+
+ if (env->CSR_ESTAT & CSR_ESTAT_IPMASK) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+
+ if (locked) {
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+/*
+ * Allocate the N_IRQS input lines of a CPU and stash them in env->irq[].
+ * The qi array returned by qemu_allocate_irqs() is intentionally kept
+ * alive for the VM's lifetime (the irqs it holds stay in use).
+ */
+void cpu_init_irq(LOONGARCHCPU *cpu)
+{
+ CPULOONGARCHState *env = &cpu->env;
+ qemu_irq *qi;
+ int i;
+
+ qi = qemu_allocate_irqs(cpu_irq_request, loongarch_env_get_cpu(env), N_IRQS);
+ for (i = 0; i < N_IRQS; i++) {
+ env->irq[i] = qi[i];
+ }
+}
+
+
diff --git a/hw/loongarch/ls7a_nb.c b/hw/loongarch/ls7a_nb.c
new file mode 100644
index 0000000000000000000000000000000000000000..f11b855a7110f42b7d05d42c34180d78d089a23b
--- /dev/null
+++ b/hw/loongarch/ls7a_nb.c
@@ -0,0 +1,327 @@
+/*
+ * Loongarch 7A1000 north bridge support
+ *
+ * Copyright (c) 2019 Loongarch Technology
+ * Authors:
+ * Zhu Chen
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/sysbus.h"
+#include "hw/pci/pci.h"
+#include "hw/i386/pc.h"
+#include "hw/pci/pci_host.h"
+#include "hw/pci/pcie_host.h"
+#include "sysemu/sysemu.h"
+#include "exec/address-spaces.h"
+#include "qapi/error.h"
+#include "hw/loongarch/cpudevs.h"
+#include "hw/acpi/ls7a.h"
+#include "hw/i386/pc.h"
+#include "hw/isa/isa.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+#include "hw/loongarch/bios.h"
+#include "hw/loader.h"
+#include "elf.h"
+#include "exec/address-spaces.h"
+#include "exec/memory.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/pci/pci_bus.h"
+#include "linux/kvm.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "sysemu/reset.h"
+#include "migration/vmstate.h"
+#include "hw/loongarch/larch.h"
+#include "hw/loongarch/ls7a.h"
+
+#undef DEBUG_LS7A
+
+#ifdef DEBUG_LS7A
+#define DPRINTF(fmt, ...) fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)
+#else
+#define DPRINTF(fmt, ...)
+#endif
+
+/*
+ * System-reset hook for the LS7A host-bridge PCI function: restores the
+ * config-space header (vendor 0x0014, device 0x7a00, class bits) to the
+ * hardware defaults.
+ * NOTE(review): wmask = ~(-1) evaluates to 0, so every wmask store below
+ * makes the field read-only — presumably intentional (read-only IDs),
+ * but worth confirming; "~0" would mean the opposite.
+ */
+static void ls7a_reset(void *opaque)
+{
+ uint64_t wmask;
+ wmask = ~(-1);
+
+ PCIDevice *dev = opaque;
+ pci_set_word(dev->config + PCI_VENDOR_ID, 0x0014);
+ pci_set_word(dev->wmask + PCI_VENDOR_ID, wmask & 0xffff);
+ pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff);
+ pci_set_word(dev->config + PCI_DEVICE_ID, 0x7a00);
+ pci_set_word(dev->wmask + PCI_DEVICE_ID, wmask & 0xffff);
+ pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff);
+ pci_set_word(dev->config + 0x4, 0x0000);
+ pci_set_word(dev->config + PCI_STATUS, 0x0010);
+ pci_set_word(dev->wmask + PCI_STATUS, wmask & 0xffff);
+ pci_set_word(dev->cmask + PCI_STATUS, 0xffff);
+ pci_set_byte(dev->config + PCI_REVISION_ID, 0x0);
+ pci_set_byte(dev->wmask + PCI_REVISION_ID, wmask & 0xff);
+ pci_set_byte(dev->cmask + PCI_REVISION_ID, 0xff);
+ /* 0x9..0xb: class code / prog-if bytes (0x06 = bridge class). */
+ pci_set_byte(dev->config + 0x9, 0x00);
+ pci_set_byte(dev->wmask + 0x9, wmask & 0xff);
+ pci_set_byte(dev->cmask + 0x9, 0xff);
+ pci_set_byte(dev->config + 0xa, 0x00);
+ pci_set_byte(dev->wmask + 0xa, wmask & 0xff);
+ pci_set_byte(dev->cmask + 0xa, 0xff);
+ pci_set_byte(dev->config + 0xb, 0x06);
+ pci_set_byte(dev->wmask + 0xb, wmask & 0xff);
+ pci_set_byte(dev->cmask + 0xb, 0xff);
+ pci_set_byte(dev->config + 0xc, 0x00);
+ pci_set_byte(dev->wmask + 0xc, wmask & 0xff);
+ pci_set_byte(dev->cmask + 0xc, 0xff);
+ /* 0xe: header type, 0x80 marks a multi-function device. */
+ pci_set_byte(dev->config + 0xe, 0x80);
+ pci_set_byte(dev->wmask + 0xe, wmask & 0xff);
+ pci_set_byte(dev->cmask + 0xe, 0xff);
+}
+
+/* Migration state: PCI config space plus the PM register block. */
+static const VMStateDescription vmstate_ls7a_pcie = {
+ .name = "LS7A_PCIE",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, LS7APCIState),
+ VMSTATE_STRUCT(pm, LS7APCIState, 0, vmstate_ls7a_pm, LS7APCIPMRegs),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* INTx routing: pin maps 1:1 to irq number, always enabled. */
+static PCIINTxRoute ls7a_route_intx_pin_to_irq(void *opaque, int pin)
+{
+ PCIINTxRoute route;
+
+ route.irq = pin;
+ route.mode = PCI_INTX_ENABLED;
+ return route;
+}
+
+/*
+ * Map a device's INTx pin to a platform irq line: 16 lines starting at
+ * irq 16, swizzled by slot (slot*4 + pin, wrapped to 0..15).
+ */
+static int pci_ls7a_map_irq(PCIDevice *d, int irq_num)
+{
+ int irq;
+
+ irq = 16 + ((PCI_SLOT(d->devfn) * 4 + irq_num) & 0xf);
+ return irq;
+}
+
+/* Drive the mapped platform irq line; opaque is the pic qemu_irq array. */
+static void pci_ls7a_set_irq(void *opaque, int irq_num, int level)
+{
+ qemu_irq *pic = opaque;
+ DPRINTF("------ %s irq %d %d\n", __func__, irq_num, level);
+ qemu_set_irq(pic[irq_num], level);
+}
+
+/*
+static int ls7a_pciehost_initfn(SysBusDevice *dev)
+{
+ return 0;
+}*/
+
+/*
+ * Realize the host-bridge PCI function: clear the prog-if byte and
+ * register the reset hook that restores the config-space defaults.
+ */
+static void ls7a_pcie_realize(PCIDevice *dev, Error **errp)
+{
+ LS7APCIState *s = PCIE_LS7A(dev);
+ /* Ls7a North Bridge, built on FPGA, VENDOR_ID/DEVICE_ID are "undefined" */
+ pci_config_set_prog_interface(dev->config, 0x00);
+
+ /* set the default value of north bridge pci config */
+ qemu_register_reset(ls7a_reset, s);
+}
+
+/* No IOMMU translation: all PCI DMA goes straight to system memory. */
+static AddressSpace *ls7a_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
+{
+ return &address_space_memory;
+}
+
+/*
+ * Wire up the LS7A PCIe host bridge: MMIO window (aliased at
+ * PCIE_MEMORY_BASE), ISA-style I/O port region, root bus with the
+ * slot-swizzled INTx mapping, and the ECAM config window at the
+ * machine-class pciecfg_base. Returns the new root PCIBus.
+ */
+static PCIBus *pci_ls7a_init(MachineState *machine, DeviceState *dev,
+ qemu_irq *pic)
+{
+ LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+ LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+ LS7APCIEHost *pciehost = LS7A_PCIE_HOST_BRIDGE(dev);
+ PCIExpressHost *e;
+ SysBusDevice *sysbus;
+ PCIHostState *phb;
+ MemoryRegion *mmio_alias;
+
+ e = PCIE_HOST_BRIDGE(dev);
+ sysbus = SYS_BUS_DEVICE(e);
+ phb = PCI_HOST_BRIDGE(e);
+
+ sysbus_init_mmio(sysbus, &e->mmio);
+
+ memory_region_init(&pciehost->io_mmio, OBJECT(pciehost),
+ "pciehost-mmio", UINT64_MAX);
+ sysbus_init_mmio(sysbus, &pciehost->io_mmio);
+ /* Expose only the PCIE_MEMORY window of the full MMIO container. */
+ mmio_alias = g_new0(MemoryRegion, 1);
+ memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio",
+ &pciehost->io_mmio, PCIE_MEMORY_BASE,
+ PCIE_MEMORY_SIZE);
+ memory_region_add_subregion(get_system_memory(),
+ PCIE_MEMORY_BASE, mmio_alias);
+
+ memory_region_init(&pciehost->io_ioport, OBJECT(pciehost),
+ "pciehost-ioport", LS_ISA_IO_SIZE);
+ sysbus_init_mmio(sysbus, &pciehost->io_ioport);
+
+ /* mmio index 2 is the ioport region initialised just above. */
+ sysbus_mmio_map(sysbus, 2, LS3A5K_ISA_IO_BASE);
+
+
+ phb->bus = pci_register_root_bus(dev, "pcie.0", pci_ls7a_set_irq,
+ pci_ls7a_map_irq, pic,
+ &pciehost->io_mmio, &pciehost->io_ioport,
+ (1 << 3), 128, TYPE_PCIE_BUS);
+ /*update pcie config memory*/
+ pcie_host_mmcfg_update(e, true, lsmc->pciecfg_base, LS_PCIECFG_SIZE);
+
+ pci_bus_set_route_irq_fn(phb->bus, ls7a_route_intx_pin_to_irq);
+
+ return phb->bus;
+}
+
+/*
+ * Top-level LS7A north-bridge bring-up: creates the PCIe host bridge,
+ * the host-bridge PCI function at devfn 0, installs the pass-through
+ * IOMMU hook and initialises the PM block. Optionally returns the PCI
+ * function via @ls7a_dev. Returns the root PCI bus.
+ */
+PCIBus *ls7a_init(MachineState *machine, qemu_irq *pic, DeviceState **ls7a_dev)
+{
+ DeviceState *dev;
+ PCIHostState *phb;
+ LS7APCIState *pbs;
+ PCIDevice *pcid;
+ PCIBus *pci_bus;
+ PCIExpressHost *e;
+
+ /*1. init the HT PCI CFG*/
+ DPRINTF("------ %d\n", __LINE__);
+ dev = qdev_new(TYPE_LS7A_PCIE_HOST_BRIDGE);
+ e = PCIE_HOST_BRIDGE(dev);
+ phb = PCI_HOST_BRIDGE(e);
+
+ DPRINTF("------ %d\n", __LINE__);
+ pci_bus = pci_ls7a_init(machine, dev, pic);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+ phb->bus = pci_bus;
+ /* set the pcihost pointer after rs780_pcihost_initfn is called */
+ DPRINTF("------ %d\n", __LINE__);
+ pcid = pci_new(PCI_DEVFN(0, 0), TYPE_PCIE_LS7A);
+ pbs = PCIE_LS7A(pcid);
+ /* Cross-link host bridge and its PCI-facing function. */
+ pbs->pciehost = LS7A_PCIE_HOST_BRIDGE(dev);
+ pbs->pciehost->pci_dev = pbs;
+
+ if (ls7a_dev) {
+ *ls7a_dev = DEVICE(pcid);
+ }
+
+ pci_realize_and_unref(pcid, phb->bus, &error_fatal);
+
+ /* IOMMU */
+ pci_setup_iommu(phb->bus, ls7a_pci_dma_iommu, NULL);
+
+ ls7a_pm_init(&pbs->pm, pic);
+ DPRINTF("------ %d\n", __LINE__);
+ /*3. init the north bridge VGA,not do now*/
+ return pci_bus;
+}
+
+/* Checked downcast of a QOM object to LS7APCIState. */
+LS7APCIState *get_ls7a_type(Object *obj)
+{
+ LS7APCIState *pbs;
+
+ pbs = PCIE_LS7A(obj);
+ return pbs;
+}
+
+/*
+ * Class init for the LS7A host-bridge PCI function: PCI IDs, migration
+ * state, the PM-based hotplug callbacks and the ACPI device interface.
+ */
+static void ls7a_pcie_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+ HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);
+ AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass);
+
+ k->realize = ls7a_pcie_realize;
+ k->vendor_id = 0x0014;
+ k->device_id = 0x7a00;
+ k->revision = 0x00;
+ k->class_id = PCI_CLASS_BRIDGE_HOST;
+ dc->desc = "LS7A1000 PCIE Host bridge";
+ dc->vmsd = &vmstate_ls7a_pcie;
+ /*
+ * PCI-facing part of the host bridge, not usable without the
+ * host-facing part, which can't be device_add'ed, yet.
+ */
+ dc->user_creatable = false;
+ hc->plug = ls7a_pm_device_plug_cb;
+ hc->unplug_request = ls7a_pm_device_unplug_request_cb;
+ hc->unplug = ls7a_pm_device_unplug_cb;
+ adevc->ospm_status = ls7a_pm_ospm_status;
+ adevc->send_event = ls7a_send_gpe;
+ adevc->madt_cpu = ls7a_madt_cpu_entry;
+}
+
+/* Expose the PM register block's QOM properties on the device object. */
+static void ls7a_pci_add_properties(LS7APCIState *ls7a)
+{
+ ls7a_pm_add_properties(OBJECT(ls7a), &ls7a->pm, NULL);
+}
+
+/* Instance init: attach the PM properties before realize. */
+static void ls7a_pci_initfn(Object *obj)
+{
+ LS7APCIState *ls7a = get_ls7a_type(obj);
+
+ ls7a_pci_add_properties(ls7a);
+}
+
+/* QOM type for the PCI-facing host-bridge function (devfn 0). */
+static const TypeInfo ls7a_pcie_device_info = {
+ .name = TYPE_PCIE_LS7A,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(LS7APCIState),
+ .class_init = ls7a_pcie_class_init,
+ .instance_init = ls7a_pci_initfn,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { TYPE_ACPI_DEVICE_IF },
+ { INTERFACE_CONVENTIONAL_PCI_DEVICE },
+ { },
+ },
+};
+
+/* Class init for the sysbus host bridge; "pci" is its fw_cfg path name. */
+static void ls7a_pciehost_class_init(ObjectClass *klass, void *data)
+{
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ k->parent_class.fw_name = "pci";
+}
+
+/* QOM type for the host-facing (sysbus) side of the bridge. */
+static const TypeInfo ls7a_pciehost_info = {
+ .name = TYPE_LS7A_PCIE_HOST_BRIDGE,
+ .parent = TYPE_PCIE_HOST_BRIDGE,
+ .instance_size = sizeof(LS7APCIEHost),
+ .class_init = ls7a_pciehost_class_init,
+};
+
+/* Register both halves of the LS7A bridge with QOM. */
+static void ls7a_register_types(void)
+{
+ type_register_static(&ls7a_pciehost_info);
+ type_register_static(&ls7a_pcie_device_info);
+}
+
+type_init(ls7a_register_types)
diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build
new file mode 100644
index 0000000000000000000000000000000000000000..47d886ddd43689e2552c0523fef37c2f07581ce2
--- /dev/null
+++ b/hw/loongarch/meson.build
@@ -0,0 +1,16 @@
+loongarch_ss = ss.source_set()
+loongarch_ss.add(files('larch_3a.c'), fdt)
+loongarch_ss.add(files(
+ 'larch_int.c',
+ 'larch_hotplug.c',
+ 'ls7a_nb.c',
+ 'ioapic.c',
+ 'acpi-build.c',
+ 'ipi.c',
+ 'apic.c',
+ 'iocsr.c',
+ 'sysbus-fdt.c',
+))
+
+hw_arch += {'loongarch64': loongarch_ss}
+
diff --git a/hw/loongarch/sysbus-fdt.c b/hw/loongarch/sysbus-fdt.c
new file mode 100644
index 0000000000000000000000000000000000000000..f750ad6b675ba4970f878bdd8032afe0fd9f5db5
--- /dev/null
+++ b/hw/loongarch/sysbus-fdt.c
@@ -0,0 +1,183 @@
+/*
+ * Loongarch Platform Bus device tree generation helpers
+ *
+ * Copyright (c) 2014 Linaro Limited
+ *
+ * Authors:
+ * Alexander Graf <agraf@suse.de>
+ * Eric Auger <eric.auger@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <libfdt.h>
+#include "qemu/error-report.h"
+#include "sysemu/device_tree.h"
+#include "hw/platform-bus.h"
+#include "hw/display/ramfb.h"
+#include "hw/loongarch/sysbus-fdt.h"
+#include "sysemu/tpm.h"
+
+/*
+ * internal struct that contains the information to create dynamic
+ * sysbus device node
+ */
+typedef struct PlatformBusFDTData {
+ void *fdt; /* device tree handle */
+ int irq_start; /* index of the first IRQ usable by platform bus devices */
+ const char *pbus_node_name; /* name of the platform bus node */
+ PlatformBusDevice *pbus; /* bus whose devices are enumerated */
+} PlatformBusFDTData;
+
+/* struct that allows to match a device and create its FDT node */
+typedef struct BindingEntry {
+ const char *typename; /* QOM type name to match against */
+ const char *compat; /* optional "compatible" string (unused by type match) */
+ int (*add_fn)(SysBusDevice *sbdev, void *opaque); /* node creator, 0 on success */
+ bool (*match_fn)(SysBusDevice *sbdev, const struct BindingEntry *combo); /* extra filter, NULL = type match only */
+} BindingEntry;
+
+
+
+/* Binding for devices that need no DT node (e.g. ramfb): succeed, add nothing. */
+static int no_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+ return 0;
+}
+
+/* Device type based matching */
+static bool type_match(SysBusDevice *sbdev, const BindingEntry *entry)
+{
+ return !strcmp(object_get_typename(OBJECT(sbdev)), entry->typename);
+}
+
+#define TYPE_BINDING(type, add_fn) {(type), NULL, (add_fn), NULL}
+
+#ifdef CONFIG_TPM
+/*
+ * add_tpm_tis_fdt_node: Create a DT node for TPM TIS
+ *
+ * See kernel documentation:
+ * Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt
+ * Optional interrupt for command completion is not exposed
+ */
+static int add_tpm_tis_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+ PlatformBusFDTData *data = opaque;
+ PlatformBusDevice *pbus = data->pbus;
+ void *fdt = data->fdt;
+ const char *parent_node = data->pbus_node_name;
+ char *nodename;
+ uint32_t reg_attr[2];
+ uint64_t mmio_base;
+
+ mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0);
+ nodename = g_strdup_printf("%s/tpm_tis@%" PRIx64, parent_node, mmio_base);
+ qemu_fdt_add_subnode(fdt, nodename);
+
+ qemu_fdt_setprop_string(fdt, nodename, "compatible", "tcg,tpm-tis-mmio");
+
+ /* One address cell + one size cell: platform-bus offsets fit in 32
+ * bits (the bus window is < 4G), so truncating mmio_base is safe. */
+ reg_attr[0] = cpu_to_be32(mmio_base);
+ reg_attr[1] = cpu_to_be32(0x5000);
+ qemu_fdt_setprop(fdt, nodename, "reg", reg_attr, 2 * sizeof(uint32_t));
+
+ g_free(nodename);
+
+ return 0;
+}
+#endif
+
+/* list of supported dynamic sysbus bindings */
+static const BindingEntry bindings[] = {
+#ifdef CONFIG_TPM
+ TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node),
+#endif
+ TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node),
+ TYPE_BINDING("", NULL), /* last element */
+};
+
+/* Generic Code */
+
+/**
+ * add_fdt_node - add the device tree node of a dynamic sysbus device
+ *
+ * @sbdev: handle to the sysbus device
+ * @opaque: handle to the PlatformBusFDTData
+ *
+ * Checks the sysbus type belongs to the list of device types that
+ * are dynamically instantiable and if so call the node creation
+ * function.
+ */
+static void add_fdt_node(SysBusDevice *sbdev, void *opaque)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(bindings); i++) {
+ const BindingEntry *iter = &bindings[i];
+
+ if (type_match(sbdev, iter)) {
+ if (!iter->match_fn || iter->match_fn(sbdev, iter)) {
+ ret = iter->add_fn(sbdev, opaque);
+ assert(!ret);
+ return;
+ }
+ }
+ }
+ /* Unknown dynamic sysbus devices are a configuration error: abort. */
+ error_report("Device %s can not be dynamically instantiated",
+ qdev_fw_name(DEVICE(sbdev)));
+ exit(1);
+}
+
+/*
+ * Create a /platform@<addr> simple-bus node covering the platform-bus
+ * window and populate it with one child node per dynamic sysbus device.
+ * @intc, when non-NULL, becomes the node's interrupt-parent phandle.
+ */
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+ hwaddr bus_size, int irq_start)
+{
+ const char platcomp[] = "qemu,platform\0simple-bus";
+ PlatformBusDevice *pbus;
+ DeviceState *dev;
+ gchar *node;
+
+ assert(fdt);
+
+ node = g_strdup_printf("/platform@%"PRIx64, addr);
+
+ /* Create a /platform node that we can put all devices into */
+ qemu_fdt_add_subnode(fdt, node);
+ qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp));
+
+ /*
+ * Our platform bus region is less than 32bits, so 1 cell is enough for
+ * address and size
+ */
+ qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1);
+ qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, bus_size);
+ if (intc != NULL) {
+ qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc);
+ }
+ /* The machine instantiates exactly one platform-bus device. */
+ dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE);
+ pbus = PLATFORM_BUS_DEVICE(dev);
+
+ PlatformBusFDTData data = {
+ .fdt = fdt,
+ .irq_start = irq_start,
+ .pbus_node_name = node,
+ .pbus = pbus,
+ };
+
+ /* Loop through all dynamic sysbus devices and create their node */
+ foreach_dynamic_sysbus_device(add_fdt_node, &data);
+
+ g_free(node);
+}
diff --git a/hw/meson.build b/hw/meson.build
index b3366c888ef61b3093091c260d23926c0524ca6e..f224f8ad2878c12e17c3468fc8f58d7f0e633423 100644
--- a/hw/meson.build
+++ b/hw/meson.build
@@ -17,6 +17,7 @@ subdir('intc')
subdir('ipack')
subdir('ipmi')
subdir('isa')
+subdir('loongarch')
subdir('mem')
subdir('misc')
subdir('net')
diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig
index 010be7ed1f56d72069825a236ae164f04079f353..b395c72d7d31185685353a81a2701a2c97e7b847 100644
--- a/hw/timer/Kconfig
+++ b/hw/timer/Kconfig
@@ -60,3 +60,5 @@ config STELLARIS_GPTM
config AVR_TIMER16
bool
+config LS7A_RTC
+ bool
diff --git a/hw/timer/ls7a_rtc.c b/hw/timer/ls7a_rtc.c
new file mode 100644
index 0000000000000000000000000000000000000000..756f2fc9ceb48ec706c12be64207c8cfe94249c3
--- /dev/null
+++ b/hw/timer/ls7a_rtc.c
@@ -0,0 +1,325 @@
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "include/hw/register.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+
+#ifdef DEBUG_LS7A_RTC /* fixed: no space before (fmt, ...), else DPRINTF is an object-like macro and call sites break */
+#define DPRINTF(fmt, ...) \
+do { printf("ls7a_rtc: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+
+#define SYS_TOYTRIM 0x20
+#define SYS_TOYWRITE0 0x24
+#define SYS_TOYWRITE1 0x28
+#define SYS_TOYREAD0 0x2C
+#define SYS_TOYREAD1 0x30
+#define SYS_TOYMATCH0 0x34
+#define SYS_TOYMATCH1 0x38
+#define SYS_TOYMATCH2 0x3C
+#define SYS_RTCCTRL 0x40
+#define SYS_RTCTRIM 0x60
+#define SYS_RTCWRTIE0 0x64
+#define SYS_RTCREAD0 0x68
+#define SYS_RTCMATCH0 0x6C
+#define SYS_RTCMATCH1 0x70
+#define SYS_RTCMATCH2 0x74
+
+/**
+ ** shift bits and field mask
+ **/
+#define TOY_MON_MASK 0x3f
+#define TOY_DAY_MASK 0x1f
+#define TOY_HOUR_MASK 0x1f
+#define TOY_MIN_MASK 0x3f
+#define TOY_SEC_MASK 0x3f
+#define TOY_MSEC_MASK 0xf
+
+#define TOY_MON_SHIFT 26
+#define TOY_DAY_SHIFT 21
+#define TOY_HOUR_SHIFT 16
+#define TOY_MIN_SHIFT 10
+#define TOY_SEC_SHIFT 4
+#define TOY_MSEC_SHIFT 0
+
+#define TOY_MATCH_YEAR_MASK 0x3f
+#define TOY_MATCH_MON_MASK 0xf
+#define TOY_MATCH_DAY_MASK 0x1f
+#define TOY_MATCH_HOUR_MASK 0x1f
+#define TOY_MATCH_MIN_MASK 0x3f
+#define TOY_MATCH_SEC_MASK 0x3f
+
+
+#define TOY_MATCH_YEAR_SHIFT 26
+#define TOY_MATCH_MON_SHIFT 22
+#define TOY_MATCH_DAY_SHIFT 17
+#define TOY_MATCH_HOUR_SHIFT 12
+#define TOY_MATCH_MIN_SHIFT 6
+#define TOY_MATCH_SEC_SHIFT 0
+
+#define TOY_ENABLE_BIT (1U << 11)
+
+#define TYPE_LS7A_RTC "ls7a_rtc"
+#define LS7A_RTC(obj) OBJECT_CHECK(LS7A_RTCState, (obj), TYPE_LS7A_RTC)
+
+typedef struct LS7A_RTCState {
+ SysBusDevice parent_obj;
+
+ MemoryRegion iomem;
+ QEMUTimer *timer;
+ /* Needed to preserve the tick_count across migration, even if the
+ * absolute value of the rtc_clock is different on the source and
+ * destination.
+ */
+ int64_t offset;
+ int64_t data;
+ int64_t save_alarm_offset;
+ int tidx;
+ uint32_t toymatch[3];
+ uint32_t toytrim;
+ uint32_t cntrctl;
+ uint32_t rtctrim;
+ uint32_t rtccount;
+ uint32_t rtcmatch[3];
+ qemu_irq toy_irq;
+} LS7A_RTCState;
+
+enum {
+TOYEN = 1UL << 11,
+RTCEN = 1UL << 13,
+};
+
+static uint64_t ls7a_rtc_read(void *opaque, hwaddr addr,
+ unsigned size) /* MMIO read handler: TOY (time-of-year) and RTC counter/match registers */
+{
+ LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+ struct tm tm;
+ unsigned int val;
+
+ val = 0;
+
+ switch (addr) {
+ case SYS_TOYREAD0: /* packed mon/day/hour/min/sec; msec field left 0 */
+ qemu_get_timedate(&tm, s->offset);
+ val = (((tm.tm_mon + 1) & TOY_MON_MASK) << TOY_MON_SHIFT)
+ | (((tm.tm_mday) & TOY_DAY_MASK) << TOY_DAY_SHIFT)
+ | (((tm.tm_hour) & TOY_HOUR_MASK) << TOY_HOUR_SHIFT)
+ | (((tm.tm_min) & TOY_MIN_MASK) << TOY_MIN_SHIFT)
+ | (((tm.tm_sec) & TOY_SEC_MASK) << TOY_SEC_SHIFT) | 0x0;
+ break;
+ case SYS_TOYREAD1:
+ qemu_get_timedate(&tm, s->offset);
+ val = tm.tm_year; /* NOTE(review): raw tm_year (years since 1900) -- confirm guest expects this base */
+ break;
+ case SYS_TOYMATCH0:
+ val = s->toymatch[0];
+ break;
+ case SYS_TOYMATCH1:
+ val = s->toymatch[1];
+ break;
+ case SYS_TOYMATCH2:
+ val = s->toymatch[2];
+ break;
+ case SYS_RTCCTRL:
+ val = s->cntrctl;
+ break;
+ case SYS_RTCREAD0:
+ val = s->rtccount; /* NOTE(review): static value; counter does not advance -- confirm intended */
+ break;
+ case SYS_RTCMATCH0:
+ val = s->rtcmatch[0];
+ break;
+ case SYS_RTCMATCH1:
+ val = s->rtcmatch[1];
+ break;
+ case SYS_RTCMATCH2:
+ val = s->rtcmatch[2];
+ break;
+ default: /* unhandled offsets (e.g. TOYTRIM/RTCTRIM) read as 0 */
+ val = 0;
+ break;
+ }
+ return val;
+}
+
+
+static void ls7a_rtc_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size) /* MMIO write handler: TOY/RTC registers; TOYMATCH0 also (re)arms the alarm timer */
+{
+ LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+ struct tm tm;
+ int64_t alarm_offset, year_diff, expire_time;
+
+ switch (addr) {
+ case SYS_TOYWRITE0:
+ qemu_get_timedate(&tm, s->offset);
+ tm.tm_sec = (val >> TOY_SEC_SHIFT) & TOY_SEC_MASK;
+ tm.tm_min = (val >> TOY_MIN_SHIFT) & TOY_MIN_MASK;
+ tm.tm_hour = (val >> TOY_HOUR_SHIFT) & TOY_HOUR_MASK;
+ tm.tm_mday = ((val >> TOY_DAY_SHIFT) & TOY_DAY_MASK);
+ tm.tm_mon = ((val >> TOY_MON_SHIFT) & TOY_MON_MASK) - 1;
+ s->offset = qemu_timedate_diff(&tm);
+ break;
+ case SYS_TOYWRITE1:
+ qemu_get_timedate(&tm, s->offset);
+ tm.tm_year = val;
+ s->offset = qemu_timedate_diff(&tm);
+ break;
+ case SYS_TOYMATCH0:
+ s->toymatch[0] = val;
+ qemu_get_timedate(&tm, s->offset);
+ tm.tm_sec = (val >> TOY_MATCH_SEC_SHIFT) & TOY_MATCH_SEC_MASK;
+ tm.tm_min = (val >> TOY_MATCH_MIN_SHIFT) & TOY_MATCH_MIN_MASK;
+ tm.tm_hour = ((val >> TOY_MATCH_HOUR_SHIFT) & TOY_MATCH_HOUR_MASK);
+ tm.tm_mday = ((val >> TOY_MATCH_DAY_SHIFT) & TOY_MATCH_DAY_MASK);
+ tm.tm_mon = ((val >> TOY_MATCH_MON_SHIFT) & TOY_MATCH_MON_MASK) - 1;
+ year_diff = ((val >> TOY_MATCH_YEAR_SHIFT) & TOY_MATCH_YEAR_MASK);
+ year_diff = year_diff - (tm.tm_year & TOY_MATCH_YEAR_MASK);
+ tm.tm_year = tm.tm_year + year_diff;
+ alarm_offset = qemu_timedate_diff(&tm) - s->offset; /* seconds until the alarm fires */
+ if ((alarm_offset < 0) && (alarm_offset > -5)) {
+ alarm_offset = 0;
+ }
+ expire_time = qemu_clock_get_ms(rtc_clock);
+ expire_time += ((alarm_offset * 1000) + 100);
+ timer_mod(s->timer, expire_time);
+ break;
+ case SYS_TOYMATCH1:
+ s->toymatch[1] = val;
+ break;
+ case SYS_TOYMATCH2:
+ s->toymatch[2] = val;
+ break;
+ case SYS_RTCCTRL:
+ s->cntrctl = val;
+ break;
+ case SYS_RTCWRTIE0:
+ s->rtccount = val;
+ break;
+ case SYS_RTCMATCH0:
+ s->rtcmatch[0] = val;
+ break;
+ case SYS_RTCMATCH1:
+ s->rtcmatch[1] = val; /* fixed: was "val = s->rtcmatch[1]" -- a load, so guest writes were dropped */
+ break;
+ case SYS_RTCMATCH2:
+ s->rtcmatch[2] = val; /* fixed: was "val = s->rtcmatch[2]" -- a load, so guest writes were dropped */
+ break;
+ default:
+ break;
+ }
+}
+
+static const MemoryRegionOps ls7a_rtc_ops = {
+ .read = ls7a_rtc_read,
+ .write = ls7a_rtc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+
+};
+
+static void toy_timer(void *opaque)
+{
+ LS7A_RTCState *s = (LS7A_RTCState *) opaque;
+
+ if (s->cntrctl & TOY_ENABLE_BIT) {
+ qemu_irq_pulse(s->toy_irq);
+ }
+}
+
+static void ls7a_rtc_realize(DeviceState *dev, Error **errp)
+{
+ SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+ LS7A_RTCState *d = LS7A_RTC(sbd);
+ memory_region_init_io(&d->iomem, NULL, &ls7a_rtc_ops,
+ (void *)d, "ls7a_rtc", 0x100); /* 0x100-byte MMIO window */
+
+ sysbus_init_irq(sbd, &d->toy_irq);
+
+ sysbus_init_mmio(sbd, &d->iomem);
+ d->timer = timer_new_ms(rtc_clock, toy_timer, d);
+ timer_mod(d->timer, qemu_clock_get_ms(rtc_clock) + 100); /* armed at +100ms; toy_timer only pulses the IRQ when TOY_ENABLE_BIT is set */
+ d->offset = 0; /* guest TOY time starts equal to host time */
+}
+
+static int ls7a_rtc_pre_save(void *opaque)
+{
+ LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+ struct tm tm;
+ int64_t year_diff, value;
+
+ value = s->toymatch[0];
+ qemu_get_timedate(&tm, s->offset);
+ tm.tm_sec = (value >> TOY_MATCH_SEC_SHIFT) & TOY_MATCH_SEC_MASK;
+ tm.tm_min = (value >> TOY_MATCH_MIN_SHIFT) & TOY_MATCH_MIN_MASK;
+ tm.tm_hour = ((value >> TOY_MATCH_HOUR_SHIFT) & TOY_MATCH_HOUR_MASK);
+ tm.tm_mday = ((value >> TOY_MATCH_DAY_SHIFT) & TOY_MATCH_DAY_MASK);
+ tm.tm_mon = ((value >> TOY_MATCH_MON_SHIFT) & TOY_MATCH_MON_MASK) - 1;
+ year_diff = ((value >> TOY_MATCH_YEAR_SHIFT) & TOY_MATCH_YEAR_MASK);
+ year_diff = year_diff - (tm.tm_year & TOY_MATCH_YEAR_MASK);
+ tm.tm_year = tm.tm_year + year_diff;
+ s->save_alarm_offset = qemu_timedate_diff(&tm) - s->offset;
+
+ return 0;
+}
+
+
+static int ls7a_rtc_post_load(void *opaque, int version_id)
+{
+ LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+ int64_t expire_time;
+
+ expire_time = qemu_clock_get_ms(rtc_clock) + (s->save_alarm_offset * 1000);
+ timer_mod(s->timer, expire_time);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_ls7a_rtc = {
+ .name = "ls7a_rtc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .pre_save = ls7a_rtc_pre_save,
+ .post_load = ls7a_rtc_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(offset, LS7A_RTCState),
+ VMSTATE_INT64(save_alarm_offset, LS7A_RTCState),
+ VMSTATE_UINT32(toymatch[0], LS7A_RTCState), /* NOTE(review): rtccount, rtcmatch[], toymatch[1..2] are not migrated -- confirm intended */
+ VMSTATE_UINT32(cntrctl, LS7A_RTCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+
+static void ls7a_rtc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->vmsd = &vmstate_ls7a_rtc;
+ dc->realize = ls7a_rtc_realize;
+ dc->desc = "ls7a rtc";
+}
+
+static const TypeInfo ls7a_rtc_info = {
+ .name = TYPE_LS7A_RTC,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(LS7A_RTCState),
+ .class_init = ls7a_rtc_class_init,
+};
+
+static void ls7a_rtc_register_types(void)
+{
+ type_register_static(&ls7a_rtc_info);
+}
+
+type_init(ls7a_rtc_register_types)
diff --git a/hw/timer/meson.build b/hw/timer/meson.build
index 03092e2cebf4e8811084b643af6acd3bf52df7e7..e841a2f6ee883d8c0b9919a90c671817c8f2ce20 100644
--- a/hw/timer/meson.build
+++ b/hw/timer/meson.build
@@ -16,6 +16,7 @@ softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c'))
softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c'))
softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c'))
softmmu_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c'))
+softmmu_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c'))
softmmu_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c'))
softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c'))
softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c'))
diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h
index 08e1beec854f1a7260f883d902dff379e8f1bbaa..95b93f10027ab8e6e864585843d24be736e55e8f 100644
--- a/include/disas/dis-asm.h
+++ b/include/disas/dis-asm.h
@@ -461,6 +461,7 @@ int print_insn_riscv32 (bfd_vma, disassemble_info*);
int print_insn_riscv64 (bfd_vma, disassemble_info*);
int print_insn_rx(bfd_vma, disassemble_info *);
int print_insn_hexagon(bfd_vma, disassemble_info *);
+int print_insn_loongarch (bfd_vma, disassemble_info*);
#ifdef CONFIG_CAPSTONE
bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size);
diff --git a/include/elf.h b/include/elf.h
index 811bf4a1cb5cc9e271ac09c65b1554350a929c26..66030f4906cf6f54c17ab58a6f53e8043ba1ff53 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -182,6 +182,8 @@ typedef struct mips_elf_abiflags_v0 {
#define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */
+#define EM_LOONGARCH 258 /* Loongarch */
+
/*
* This is an interim value that we will use until the committee comes
* up with a final number.
diff --git a/include/hw/acpi/ls7a.h b/include/hw/acpi/ls7a.h
new file mode 100644
index 0000000000000000000000000000000000000000..4401515c7b47642ffbca67de2513d51d225bfbc3
--- /dev/null
+++ b/include/hw/acpi/ls7a.h
@@ -0,0 +1,80 @@
+/*
+ * QEMU GMCH/LS7A PCI PM Emulation
+ *
+ * Copyright (c) 2009 Isaku Yamahata
+ * VA Linux Systems Japan K.K.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>
+ */
+
+#ifndef HW_ACPI_LS7A_H
+#define HW_ACPI_LS7A_H
+
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/cpu.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "hw/acpi/acpi_dev_interface.h"
+#include "hw/acpi/tco.h"
+
+#define CPU_HOTPLUG_BASE 0x1e000000
+#define MEMORY_HOTPLUG_BASE 0x1e00000c
+
+typedef struct LS7APCIPMRegs {
+ /*
+ * In ls7a spec says that pm1_cnt register is 32bit width and
+ * that the upper 16bits are reserved and unused.
+ * PM1a_CNT_BLK = 2 in FADT so it is defined as uint16_t.
+ */
+ ACPIREGS acpi_regs;
+
+ MemoryRegion iomem;
+ MemoryRegion iomem_gpe;
+ MemoryRegion iomem_smi;
+ MemoryRegion iomem_reset;
+
+ qemu_irq irq; /* SCI */
+
+ uint32_t pm_io_base;
+ Notifier powerdown_notifier;
+
+ bool cpu_hotplug_legacy;
+ AcpiCpuHotplug gpe_cpu;
+ CPUHotplugState cpuhp_state;
+
+ MemHotplugState acpi_memory_hotplug;
+
+ uint8_t disable_s3;
+ uint8_t disable_s4;
+ uint8_t s4_val;
+} LS7APCIPMRegs;
+
+void ls7a_pm_init(LS7APCIPMRegs *ls7a, qemu_irq *sci_irq);
+
+void ls7a_pm_iospace_update(LS7APCIPMRegs *pm, uint32_t pm_io_base);
+extern const VMStateDescription vmstate_ls7a_pm;
+
+void ls7a_pm_add_properties(Object *obj, LS7APCIPMRegs *pm, Error **errp);
+
+void ls7a_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp);
+void ls7a_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void ls7a_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
+ Error **errp);
+
+void ls7a_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list);
+
+void ls7a_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev);
+#endif /* HW_ACPI_LS7A_H */
diff --git a/include/hw/loongarch/bios.h b/include/hw/loongarch/bios.h
new file mode 100644
index 0000000000000000000000000000000000000000..3677303bfab2e3630ea7676bfbdf109140882289
--- /dev/null
+++ b/include/hw/loongarch/bios.h
@@ -0,0 +1,5 @@
+#include "qemu/units.h"
+#include "cpu.h"
+
+#define BIOS_SIZE (4 * MiB)
+#define BIOS_FILENAME "loongarch_bios.bin"
diff --git a/include/hw/loongarch/cpudevs.h b/include/hw/loongarch/cpudevs.h
new file mode 100644
index 0000000000000000000000000000000000000000..c05ae7a7fc64bb26e9cf58a900de1241d4c2471c
--- /dev/null
+++ b/include/hw/loongarch/cpudevs.h
@@ -0,0 +1,53 @@
+#ifndef HW_LOONGARCH_CPUDEVS_H
+#define HW_LOONGARCH_CPUDEVS_H
+
+#include "target/loongarch64/cpu-qom.h"
+
+/* Definitions for LOONGARCH CPU internal devices. */
+#define MAX_GIPI_CORE_NUM 256
+#define MAX_GIPI_MBX_NUM 4
+
+#define LS3A_INTC_IP 8
+#define MAX_CORES 256
+#define EXTIOI_IRQS (256)
+#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8)
+/* map to ipnum per 32 irqs */
+#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32)
+
+typedef struct gipi_core {
+ uint32_t status;
+ uint32_t en;
+ uint32_t set;
+ uint32_t clear;
+ uint64_t buf[MAX_GIPI_MBX_NUM];
+ qemu_irq irq;
+} gipi_core;
+
+typedef struct gipiState {
+ gipi_core core[MAX_GIPI_CORE_NUM];
+} gipiState;
+
+typedef struct apicState {
+ /* hardware state */
+ uint8_t ext_en[EXTIOI_IRQS_BITMAP_SIZE];
+ uint8_t ext_bounce[EXTIOI_IRQS_BITMAP_SIZE];
+ uint8_t ext_isr[EXTIOI_IRQS_BITMAP_SIZE];
+ uint8_t ext_coreisr[MAX_CORES][EXTIOI_IRQS_BITMAP_SIZE];
+ uint8_t ext_ipmap[EXTIOI_IRQS_IPMAP_SIZE];
+ uint8_t ext_coremap[EXTIOI_IRQS];
+ uint16_t ext_nodetype[16];
+ uint64_t ext_control;
+
+ /* software state */
+ uint8_t ext_sw_ipmap[EXTIOI_IRQS];
+ uint8_t ext_sw_coremap[EXTIOI_IRQS];
+ uint8_t ext_ipisr[MAX_CORES * LS3A_INTC_IP][EXTIOI_IRQS_BITMAP_SIZE];
+
+ qemu_irq parent_irq[MAX_CORES][LS3A_INTC_IP];
+ qemu_irq *irq;
+} apicState;
+
+void cpu_init_irq(LOONGARCHCPU *cpu);
+void cpu_loongarch_clock_init(LOONGARCHCPU *cpu);
+
+#endif
diff --git a/include/hw/loongarch/larch.h b/include/hw/loongarch/larch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3f4fdd946b2ebe204f26be33e1b83c7df49daa46
--- /dev/null
+++ b/include/hw/loongarch/larch.h
@@ -0,0 +1,168 @@
+/*
+ * Hotplug emulation on Loongarch system.
+ *
+ * Copyright (c) 2018 Loongarch Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HW_LOONGARCH_H
+#define HW_LOONGARCH_H
+
+#include "target/loongarch64/cpu.h"
+#include "qemu-common.h"
+#include "exec/memory.h"
+#include "hw/mem/pc-dimm.h"
+#include "hw/hotplug.h"
+#include "hw/boards.h"
+#include "hw/acpi/acpi.h"
+#include "qemu/notify.h"
+#include "qemu/error-report.h"
+#include "qemu/queue.h"
+#include "hw/acpi/memory_hotplug.h"
+#include "hw/loongarch/cpudevs.h"
+#include "hw/block/flash.h"
+
+#define LOONGARCH_MAX_VCPUS 256
+#define LOONGARCH_MAX_PFLASH 2
+/* 256MB alignment for hotplug memory region */
+#define LOONGARCH_HOTPLUG_MEM_ALIGN (1ULL << 28)
+#define LOONGARCH_MAX_RAM_SLOTS 10
+
+#ifdef CONFIG_KVM
+#define LS_ISA_IO_SIZE 0x02000000
+#else
+#define LS_ISA_IO_SIZE 0x00010000
+#endif
+
+/* Memory types: */
+#define SYSTEM_RAM 1
+#define SYSTEM_RAM_RESERVED 2
+#define ACPI_TABLE 3
+#define ACPI_NVS 4
+#define SYSTEM_PMEM 5
+
+#define MAX_MEM_MAP 128
+
+typedef struct LoongarchMachineClass {
+ /*< private >*/
+ MachineClass parent_class;
+
+ /* Methods: */
+ HotplugHandler *(*get_hotplug_handler)(MachineState *machine,
+ DeviceState *dev);
+
+ bool has_acpi_build;
+
+ /* save different cpu address*/
+ uint64_t isa_io_base;
+ uint64_t ht_control_regs_base;
+ uint64_t hpet_mmio_addr;
+ uint64_t smbus_cfg_base;
+ uint64_t pciecfg_base;
+ uint64_t ls7a_ioapic_reg_base;
+ uint32_t node_shift;
+ char cpu_name[40];
+ char bridge_name[16];
+
+} LoongarchMachineClass;
+
+typedef struct ResetData {
+ LOONGARCHCPU *cpu;
+ uint64_t vector;
+} ResetData;
+
+typedef struct LoongarchMachineState {
+ /*< private >*/
+ MachineState parent_obj;
+
+ /* */
+ ram_addr_t hotplug_memory_size;
+
+ /* State for other subsystems/APIs: */
+ Notifier machine_done;
+ /* Pointers to devices and objects: */
+ HotplugHandler *acpi_dev;
+ int ram_slots;
+ ResetData *reset_info[LOONGARCH_MAX_VCPUS];
+ DeviceState *rtc;
+ gipiState *gipi;
+ apicState *apic;
+
+ FWCfgState *fw_cfg;
+ bool acpi_build_enabled;
+ bool apic_xrupt_override;
+ CPUArchIdList *possible_cpus;
+ PFlashCFI01 *flash[LOONGARCH_MAX_PFLASH];
+ void *fdt;
+ int fdt_size;
+ unsigned int hotpluged_cpu_num;
+ DeviceState *platform_bus_dev;
+ OnOffAuto acpi;
+ char *oem_id;
+ char *oem_table_id;
+} LoongarchMachineState;
+
+#define LOONGARCH_MACHINE_ACPI_DEVICE_PROP "loongarch-acpi-device"
+#define TYPE_LOONGARCH_MACHINE "loongarch-machine"
+
+#define LoongarchMACHINE(obj) \
+ OBJECT_CHECK(LoongarchMachineState, (obj), TYPE_LOONGARCH_MACHINE)
+#define LoongarchMACHINE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(LoongarchMachineClass, (obj), TYPE_LOONGARCH_MACHINE)
+#define LoongarchMACHINE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(LoongarchMachineClass, (klass), TYPE_LOONGARCH_MACHINE)
+
+#define DEFINE_LOONGARCH_MACHINE(suffix, namestr, initfn, optsfn) \
+ static void loongarch_machine_##suffix##_class_init(ObjectClass *oc, void *data) \
+ { \
+ MachineClass *mc = MACHINE_CLASS(oc); \
+ optsfn(mc); \
+ mc->init = initfn; \
+ } \
+ static const TypeInfo loongarch_machine_type_##suffix = { \
+ .name = namestr TYPE_MACHINE_SUFFIX, \
+ .parent = TYPE_LOONGARCH_MACHINE, \
+ .class_init = loongarch_machine_##suffix##_class_init, \
+ }; \
+ static void loongarch_machine_init_##suffix(void) \
+ { \
+ type_register(&loongarch_machine_type_##suffix); \
+ } \
+ type_init(loongarch_machine_init_##suffix)
+
+void loongarch_machine_device_unplug_request(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void longson_machine_device_unplug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+HotplugHandler *loongarch_get_hotpug_handler(MachineState *machine,
+ DeviceState *dev);
+void loongarch_machine_device_pre_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+void loongarch_machine_device_plug(HotplugHandler *hotplug_dev,
+ DeviceState *dev, Error **errp);
+
+LOONGARCHCPU *loongarch_cpu_create(MachineState *machine, LOONGARCHCPU *cpu,
+ Error **errp);
+void loongarch_cpu_destroy(MachineState *machine, LOONGARCHCPU *cpu);
+int cpu_init_ipi(LoongarchMachineState *ms, qemu_irq parent, int cpu);
+int cpu_init_apic(LoongarchMachineState *ms, CPULOONGARCHState *env, int cpu);
+int la_memmap_add_entry(uint64_t address, uint64_t length, uint32_t type);
+bool loongarch_is_acpi_enabled(LoongarchMachineState *vms);
+
+/* acpi-build.c */
+void ls7a_madt_cpu_entry(AcpiDeviceIf *adev, int uid,
+ const CPUArchIdList *apic_ids, GArray *entry, bool force_enabled);
+void slave_cpu_reset(void *opaque);
+#endif
diff --git a/include/hw/loongarch/ls7a.h b/include/hw/loongarch/ls7a.h
new file mode 100644
index 0000000000000000000000000000000000000000..05edee60306b0b19272b01b23f5dde044af87ec9
--- /dev/null
+++ b/include/hw/loongarch/ls7a.h
@@ -0,0 +1,150 @@
+#ifndef HW_LS7A_H
+#define HW_LS7A_H
+
+#include "hw/hw.h"
+#include "hw/isa/isa.h"
+#include "hw/sysbus.h"
+#include "hw/isa/apm.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/pcie_host.h"
+#include "hw/pci/pci_bridge.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ls7a.h"
+#include "hw/pci/pci_bus.h"
+
+/* LS7A PCH Registers (Misc, Confreg) */
+#define LS7A_PCH_REG_BASE 0x10000000UL
+#define LS3A5K_LS7A_IOAPIC_REG_BASE (LS7A_PCH_REG_BASE)
+#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000)
+#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000)
+
+#define LOONGARCH_PCH_IRQ_BASE 64
+#define LS7A_UART_IRQ (LOONGARCH_PCH_IRQ_BASE + 2)
+#define LS7A_RTC_IRQ (LOONGARCH_PCH_IRQ_BASE + 3)
+#define LS7A_SCI_IRQ (LOONGARCH_PCH_IRQ_BASE + 4)
+#define LS7A_ACPI_IO_BASE 0x800
+#define LS7A_ACPI_IO_SIZE 0x100
+#define LS7A_PM_EVT_BLK (0x0C) /* 4 bytes */
+#define LS7A_PM_CNT_BLK (0x14) /* 2 bytes */
+#define LS7A_GPE0_STS_REG (0x28) /* 4 bytes */
+#define LS7A_GPE0_ENA_REG (0x2C) /* 4 bytes */
+#define LS7A_GPE0_RESET_REG (0x30) /* 4 bytes */
+#define LS7A_PM_TMR_BLK (0x18) /* 4 bytes */
+#define LS7A_GPE0_LEN (8)
+#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100)
+#define LS7A_RTC_LEN (0x100)
+
+#define ACPI_IO_BASE (LS7A_ACPI_REG_BASE)
+#define ACPI_GPE0_LEN (LS7A_GPE0_LEN)
+#define ACPI_IO_SIZE (LS7A_ACPI_IO_SIZE)
+#define ACPI_SCI_IRQ (LS7A_SCI_IRQ)
+
+#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000
+#define VIRT_PLATFORM_BUS_SIZE 0x02000000
+#define VIRT_PLATFORM_BUS_NUM_IRQS 2
+#define VIRT_PLATFORM_BUS_IRQ (LOONGARCH_PCH_IRQ_BASE + 5)
+
+#define LS3A5K_ISA_IO_BASE 0x18000000UL
+#define LS_BIOS_BASE 0x1c000000
+#define LS_BIOS_VAR_BASE 0x1c3a0000
+#define LS_BIOS_SIZE (4 * 1024 * 1024)
+#define LS_FDT_BASE 0x1c400000
+#define LS_FDT_SIZE 0x00100000
+
+#define FW_CFG_ADDR 0x1e020000
+#define LS7A_REG_BASE 0x1FE00000
+#define LS7A_UART_BASE 0x1fe001e0
+#define LS7A_UART_LEN 0x8
+#define SMP_GIPI_MAILBOX 0x1f000000ULL
+#define CORE0_STATUS_OFF 0x000
+#define CORE0_EN_OFF 0x004
+#define CORE0_SET_OFF 0x008
+#define CORE0_CLEAR_OFF 0x00c
+#define CORE0_BUF_20 0x020
+#define CORE0_BUF_28 0x028
+#define CORE0_BUF_30 0x030
+#define CORE0_BUF_38 0x038
+#define CORE0_IPI_SEND 0x040
+#define CORE0_MAIL_SEND 0x048
+#define INT_ROUTER_REGS_BASE 0x1fe01400UL
+#define INT_ROUTER_REGS_SIZE 0x100
+#define INT_ROUTER_REGS_SYS_INT0 0x00
+#define INT_ROUTER_REGS_SYS_INT1 0x01
+#define INT_ROUTER_REGS_SYS_INT2 0x02
+#define INT_ROUTER_REGS_SYS_INT3 0x03
+#define INT_ROUTER_REGS_PCI_INT0 0x04
+#define INT_ROUTER_REGS_PCI_INT1 0x05
+#define INT_ROUTER_REGS_PCI_INT2 0x06
+#define INT_ROUTER_REGS_PCI_INT3 0x07
+#define INT_ROUTER_REGS_MATRIX_INT0 0x08
+#define INT_ROUTER_REGS_MATRIX_INT1 0x09
+#define INT_ROUTER_REGS_LPC_INT 0x0a
+#define INT_ROUTER_REGS_MC0 0x0b
+#define INT_ROUTER_REGS_MC1 0x0c
+#define INT_ROUTER_REGS_BARRIER 0x0d
+#define INT_ROUTER_REGS_THSENS_INT 0x0e
+#define INT_ROUTER_REGS_PCI_PERR 0x0f
+#define INT_ROUTER_REGS_HT0_INT0 0x10
+#define INT_ROUTER_REGS_HT0_INT1 0x11
+#define INT_ROUTER_REGS_HT0_INT2 0x12
+#define INT_ROUTER_REGS_HT0_INT3 0x13
+#define INT_ROUTER_REGS_HT0_INT4 0x14
+#define INT_ROUTER_REGS_HT0_INT5 0x15
+#define INT_ROUTER_REGS_HT0_INT6 0x16
+#define INT_ROUTER_REGS_HT0_INT7 0x17
+#define INT_ROUTER_REGS_HT1_INT0 0x18
+#define INT_ROUTER_REGS_HT1_INT1 0x19
+#define INT_ROUTER_REGS_HT1_INT2 0x1a
+#define INT_ROUTER_REGS_HT1_INT3 0x1b
+#define INT_ROUTER_REGS_HT1_INT4 0x1c
+#define INT_ROUTER_REGS_HT1_INT5 0x1d
+#define INT_ROUTER_REGS_HT1_INT6 0x1e
+#define INT_ROUTER_REGS_HT1_INT7 0x1f
+#define INT_ROUTER_REGS_ISR 0x20
+#define INT_ROUTER_REGS_EN 0x24
+#define INT_ROUTER_REGS_EN_SET 0x28
+#define INT_ROUTER_REGS_EN_CLR 0x2c
+#define INT_ROUTER_REGS_EDGE 0x38
+#define INT_ROUTER_REGS_CORE0_INTISR 0x40
+#define INT_ROUTER_REGS_CORE1_INTISR 0x48
+#define INT_ROUTER_REGS_CORE2_INTISR 0x50
+#define INT_ROUTER_REGS_CORE3_INTISR 0x58
+
+#define LS_PCIECFG_BASE 0x20000000
+#define LS_PCIECFG_SIZE 0x08000000
+#define MSI_ADDR_LOW 0x2FF00000
+#define MSI_ADDR_HI 0x0
+
+#define PCIE_MEMORY_BASE 0x40000000
+#define PCIE_MEMORY_SIZE 0x40000000
+
+typedef struct LS7APCIState LS7APCIState;
+typedef struct LS7APCIEHost {
+ PCIExpressHost parent_obj;
+ MemoryRegion io_ioport;
+ MemoryRegion io_mmio;
+ LS7APCIState *pci_dev;
+} LS7APCIEHost;
+
+struct LS7APCIState {
+ PCIDevice dev;
+
+ LS7APCIEHost *pciehost;
+
+ /* LS7A registers */
+ MemoryRegion iomem;
+ LS7APCIPMRegs pm;
+};
+
+#define TYPE_LS7A_PCIE_HOST_BRIDGE "ls7a1000-pciehost"
+#define LS7A_PCIE_HOST_BRIDGE(obj) \
+ OBJECT_CHECK(LS7APCIEHost, (obj), TYPE_LS7A_PCIE_HOST_BRIDGE)
+
+#define TYPE_PCIE_LS7A "ls7a1000_pcie"
+#define PCIE_LS7A(obj) \
+ OBJECT_CHECK(LS7APCIState, (obj), TYPE_PCIE_LS7A)
+
+PCIBus *ls7a_init(MachineState *machine, qemu_irq *irq, DeviceState **ls7a_dev);
+LS7APCIState *get_ls7a_type(Object *obj);
+
+#endif /* HW_LS7A_H */
diff --git a/include/hw/loongarch/sysbus-fdt.h b/include/hw/loongarch/sysbus-fdt.h
new file mode 100644
index 0000000000000000000000000000000000000000..340c382cdde0a39e228536868d6dd5eefc9faab9
--- /dev/null
+++ b/include/hw/loongarch/sysbus-fdt.h
@@ -0,0 +1,37 @@
+/*
+ * Dynamic sysbus device tree node generation API
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ * Alex Graf
+ * Eric Auger
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef HW_ARM_SYSBUS_FDT_H
+#define HW_ARM_SYSBUS_FDT_H
+
+#include "exec/hwaddr.h"
+
+/**
+ * platform_bus_add_all_fdt_nodes - create all the platform bus nodes
+ *
+ * builds the parent platform bus node and all the nodes of dynamic
+ * sysbus devices attached to it.
+ */
+void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr,
+ hwaddr bus_size, int irq_start);
+#endif
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 60718fc3429f987b48ed411f141559fe283fc5ad..903475bb2143e866caa7c99e356482f8554749ce 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -533,6 +533,9 @@ static inline void qemu_cleanup_generic_vfree(void *p)
Valgrind does not support alignments larger than 1 MiB,
therefore we need special code which handles running on Valgrind. */
# define QEMU_VMALLOC_ALIGN (512 * 4096)
+#elif defined(__linux__) && defined(__loongarch__)
+ /* Use 32 MiB alignment so transparent hugepages can be used by KVM. */
+# define QEMU_VMALLOC_ALIGN (qemu_real_host_page_size * qemu_real_host_page_size / 8)
#elif defined(__linux__) && defined(__s390x__)
/* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
# define QEMU_VMALLOC_ALIGN (256 * 4096)
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 70c579560adfd4e2bea6efadf2ea7f7ebc15fa38..62d1a4b92dccf59f592fd80076e310e4a7a03638 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -24,6 +24,7 @@ enum {
QEMU_ARCH_RX = (1 << 20),
QEMU_ARCH_AVR = (1 << 21),
QEMU_ARCH_HEXAGON = (1 << 22),
+ QEMU_ARCH_LOONGARCH64 = (1 << 23),
};
extern const uint32_t arch_type;
diff --git a/linux-headers/asm-loongarch64/bitsperlong.h b/linux-headers/asm-loongarch64/bitsperlong.h
new file mode 100644
index 0000000000000000000000000000000000000000..5c2c8779a6952ccbcf17fbd17495920c1b100c77
--- /dev/null
+++ b/linux-headers/asm-loongarch64/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG _LOONGARCH_SZLONG
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/linux-headers/asm-loongarch64/kvm.h b/linux-headers/asm-loongarch64/kvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..3687a358fa9a4e254c47e67e32992e047d5b339a
--- /dev/null
+++ b/linux-headers/asm-loongarch64/kvm.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2020 Loongson Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal
+ * Authors: Xing Li
+ */
+
+#ifndef __LINUX_KVM_LOONGARCH_H
+#define __LINUX_KVM_LOONGARCH_H
+
+#include <linux/types.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+#define KVM_GUESTDBG_USE_HW_BP 0x00020000
+#define KVM_DATA_HW_BREAKPOINT_NUM 8
+#define KVM_INST_HW_BREAKPOINT_NUM 8
+
+/*
+ * KVM Loongarch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+#define KVM_LARCH_VCPU_PVTIME_CTRL 2
+#define KVM_LARCH_VCPU_PVTIME_IPA 0
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+ /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+ __u64 gpr[32];
+ __u64 pc;
+};
+
+/*
+ * for KVM_GET_CPUCFG
+ */
+struct kvm_cpucfg {
+ /* out (KVM_GET_CPUCFG) */
+ __u32 cpucfg[64];
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+ __u32 fcsr;
+ __u32 vcsr;
+ __u64 fcc; /* 8x8 */
+ struct kvm_fpureg {
+ __u64 val64[4]; //support max 256 bits
+ }fpr[32];
+};
+
+/*
+ * For LOONGARCH, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers. The id field is broken down as follows:
+ *
+ * bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
+ * Register set = 4: LBT registers (see definitions below).
+ *
+ * Other sets registers may be added in the future. Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_LOONGARCH_GP (KVM_REG_LOONGARCH | 0x0000000000000000ULL)
+#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x0000000000010000ULL)
+#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x0000000000020000ULL)
+#define KVM_REG_LOONGARCH_FPU (KVM_REG_LOONGARCH | 0x0000000000030000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x0000000000040000ULL)
+
+/*
+ * KVM_REG_LOONGARCH_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_LOONGARCH_R0 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_LOONGARCH_R1 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_R2 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_R3 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_R4 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_R5 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_R6 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_LOONGARCH_R7 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_LOONGARCH_R8 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_LOONGARCH_R9 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_LOONGARCH_R10 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_LOONGARCH_R11 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_LOONGARCH_R12 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_LOONGARCH_R13 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_LOONGARCH_R14 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_LOONGARCH_R15 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_LOONGARCH_R16 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_LOONGARCH_R17 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_LOONGARCH_R18 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_LOONGARCH_R19 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_LOONGARCH_R20 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_LOONGARCH_R21 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_LOONGARCH_R22 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_LOONGARCH_R23 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_LOONGARCH_R24 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_LOONGARCH_R25 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_LOONGARCH_R26 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_LOONGARCH_R27 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_LOONGARCH_R28 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_LOONGARCH_R29 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_LOONGARCH_R30 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_LOONGARCH_R31 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_LOONGARCH_HI (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_LOONGARCH_LO (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_LOONGARCH_PC (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
+ */
+
+/*
+ * CP0_Count control
+ * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ * Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ * interrupts since COUNT_RESUME
+ * This can be used to freeze the timer to get a consistent snapshot of
+ * the CP0_Count and timer interrupt pending state, while also resuming
+ * safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_LOONGARCH_COUNT_CTL (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_LOONGARCH_COUNT_CTL_DC 0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_LOONGARCH_COUNT_RESUME (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_LOONGARCH_COUNT_HZ (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2)
+
+#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3)
+
+#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4)
+
+#define KVM_REG_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LBT_FLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6)
+
+struct kvm_iocsr_entry {
+ __u32 addr;
+ __u32 pad;
+ __u64 data;
+};
+
+struct kvm_csr_entry {
+ __u32 index;
+ __u32 reserved;
+ __u64 data;
+};
+
+/* for KVM_GET_MSRS and KVM_SET_MSRS */
+struct kvm_msrs {
+ __u32 ncsrs; /* number of msrs in entries */
+ __u32 pad;
+ struct kvm_csr_entry entries[0];
+};
+
+#define __KVM_HAVE_IRQ_LINE
+
+struct kvm_debug_exit_arch {
+ __u64 epc;
+ __u32 fwps;
+ __u32 mwps;
+ __u32 exception;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct hw_breakpoint {
+ __u64 addr;
+ __u64 mask;
+ __u32 asid;
+ __u32 ctrl;
+};
+
+struct kvm_guest_debug_arch {
+ struct hw_breakpoint data_breakpoint[KVM_DATA_HW_BREAKPOINT_NUM];
+ struct hw_breakpoint inst_breakpoint[KVM_INST_HW_BREAKPOINT_NUM];
+ int inst_bp_nums, data_bp_nums;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_loongarch_interrupt {
+ /* in */
+ __u32 cpu;
+ __u32 irq;
+};
+
+#define KVM_IRQCHIP_LS7A_IOAPIC 0x0
+#define KVM_IRQCHIP_LS3A_GIPI 0x1
+#define KVM_IRQCHIP_LS3A_HT_IRQ 0x2
+#define KVM_IRQCHIP_LS3A_ROUTE 0x3
+#define KVM_IRQCHIP_LS3A_EXTIRQ 0x4
+#define KVM_IRQCHIP_LS3A_IPMASK 0x5
+#define KVM_NR_IRQCHIPS 1
+#define KVM_IRQCHIP_NUM_PINS 64
+
+#define KVM_MAX_CORES 256
+#define KVM_EXTIOI_IRQS (256)
+#define KVM_EXTIOI_IRQS_BITMAP_SIZE (KVM_EXTIOI_IRQS / 8)
+/* map to ipnum per 32 irqs */
+#define KVM_EXTIOI_IRQS_IPMAP_SIZE (KVM_EXTIOI_IRQS / 32)
+#define KVM_EXTIOI_IRQS_PER_GROUP 32
+#define KVM_EXTIOI_IRQS_COREMAP_SIZE (KVM_EXTIOI_IRQS)
+#define KVM_EXTIOI_IRQS_NODETYPE_SIZE 16
+
+struct ls7a_ioapic_state {
+ __u64 int_id;
+ /* 0x020 interrupt mask register */
+ __u64 int_mask;
+ /* 0x040 1=msi */
+ __u64 htmsi_en;
+ /* 0x060 edge=1 level =0 */
+ __u64 intedge;
+ /* 0x080 for clean edge int,set 1 clean,set 0 is noused */
+ __u64 intclr;
+ /* 0x0c0 */
+ __u64 auto_crtl0;
+ /* 0x0e0 */
+ __u64 auto_crtl1;
+ /* 0x100 - 0x140 */
+ __u8 route_entry[64];
+ /* 0x200 - 0x240 */
+ __u8 htmsi_vector[64];
+ /* 0x300 */
+ __u64 intisr_chip0;
+ /* 0x320 */
+ __u64 intisr_chip1;
+ /* edge detection */
+ __u64 last_intirr;
+ /* 0x380 interrupt request register */
+ __u64 intirr;
+ /* 0x3a0 interrupt service register */
+ __u64 intisr;
+ /* 0x3e0 interrupt level polarity selection register,
+ * 0 for high level tirgger
+ */
+ __u64 int_polarity;
+};
+
+struct loongarch_gipi_single {
+ __u32 status;
+ __u32 en;
+ __u32 set;
+ __u32 clear;
+ __u64 buf[4];
+};
+
+struct loongarch_gipiState {
+ struct loongarch_gipi_single core[KVM_MAX_CORES];
+};
+
+struct kvm_loongarch_ls3a_extirq_state {
+ union ext_en_r {
+ uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+ } ext_en_r;
+ union bounce_r {
+ uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+ } bounce_r;
+ union ext_isr_r {
+ uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+ } ext_isr_r;
+ union ext_core_isr_r {
+ uint64_t reg_u64[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+ uint32_t reg_u32[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE];
+ } ext_core_isr_r;
+ union ip_map_r {
+ uint64_t reg_u64;
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_IPMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_IPMAP_SIZE];
+ } ip_map_r;
+ union core_map_r {
+ uint64_t reg_u64[KVM_EXTIOI_IRQS_COREMAP_SIZE / 8];
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_COREMAP_SIZE / 4];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_COREMAP_SIZE];
+ } core_map_r;
+ union node_type_r {
+ uint64_t reg_u64[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 4];
+ uint32_t reg_u32[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 2];
+ uint16_t reg_u16[KVM_EXTIOI_IRQS_NODETYPE_SIZE];
+ uint8_t reg_u8[KVM_EXTIOI_IRQS_NODETYPE_SIZE * 2];
+ } node_type_r;
+};
+
+struct loongarch_kvm_irqchip {
+ __u16 chip_id;
+ __u16 len;
+ __u16 vcpu_id;
+ __u16 reserved;
+ char data[0];
+};
+
+#endif /* __LINUX_KVM_LOONGARCH_H */
diff --git a/linux-headers/asm-loongarch64/sgidefs.h b/linux-headers/asm-loongarch64/sgidefs.h
new file mode 100644
index 0000000000000000000000000000000000000000..b809608349addf37c9005ef67ab38cb775feb584
--- /dev/null
+++ b/linux-headers/asm-loongarch64/sgidefs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+* Copyright (C) 2020 Loongson Technology Corporation Limited
+*
+* Author: Hanlu Li
+*/
+#ifndef __ASM_SGIDEFS_H
+#define __ASM_SGIDEFS_H
+
+#define _LOONGARCH_ISA_LOONGARCH32 6
+#define _LOONGARCH_ISA_LOONGARCH64 7
+
+/*
+ * Subprogram calling convention
+ */
+#define _LOONGARCH_SIM_ABILP32 1
+#define _LOONGARCH_SIM_ABILPX32 2
+#define _LOONGARCH_SIM_ABILP64 3
+
+#endif /* __ASM_SGIDEFS_H */
diff --git a/linux-headers/asm-loongarch64/unistd.h b/linux-headers/asm-loongarch64/unistd.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a6014562a4e1e62277b0935ba4c740ded13a1b1
--- /dev/null
+++ b/linux-headers/asm-loongarch64/unistd.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2020 Loongson Technologies, Inc.
+ * Authors: Jun Yi
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index bcaf66cc4d2a06a861322b0ee82c2c5b38454b98..20b90426f57cb5344d478d7ce45aa39158e5f352 100644
--- a/linux-headers/linux/kvm.h
+++ b/linux-headers/linux/kvm.h
@@ -2002,6 +2002,29 @@ struct kvm_stats_desc {
char name[];
};
+#ifdef __loongarch__
+struct kvm_loongarch_vcpu_state {
+ __u8 online_vcpus;
+ __u8 is_migrate;
+ __u32 cpu_freq;
+ __u32 count_ctl;
+ __u64 pending_exceptions;
+ __u64 pending_exceptions_clr;
+ __u64 core_ext_ioisr[4];
+};
+
+#define KVM_CAP_LOONGARCH_FPU 165
+#define KVM_CAP_LOONGARCH_LSX 166
+#define KVM_CAP_LOONGARCH_VZ 167
+#define KVM_REG_LOONGARCH 0x8000000000000000ULL
+#define KVM_LARCH_GET_VCPU_STATE _IOR(KVMIO, 0xc0, struct kvm_loongarch_vcpu_state)
+#define KVM_LARCH_SET_VCPU_STATE _IOW(KVMIO, 0xc1, struct kvm_loongarch_vcpu_state)
+#define KVM_LARCH_GET_CPUCFG _IOR(KVMIO, 0xc2, struct kvm_cpucfg)
+#define KVM_LOONGARCH_GET_IOCSR _IOR(KVMIO, 0xc3, struct kvm_iocsr_entry)
+#define KVM_LOONGARCH_SET_IOCSR _IOW(KVMIO, 0xc4, struct kvm_iocsr_entry)
+#define KVM_LARCH_SET_CPUCFG _IOR(KVMIO, 0xc5, struct kvm_cpucfg)
+#endif
+
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
#endif /* __LINUX_KVM_H */
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 767f54c76dc5844538bc071ac50dfe7c1d5b78a5..9fb632780af2397c677342077c2f6adfe46d2289 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1041,6 +1041,73 @@ static uint32_t get_elf_hwcap(void)
#endif /* TARGET_MIPS */
+#ifdef TARGET_LOONGARCH64
+
+#define ELF_START_MMAP 0x80000000
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_ARCH EM_LOONGARCH
+
+#define elf_check_arch(x) ((x) == EM_LOONGARCH)
+
+static inline void init_thread(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ regs->csr_crmd = 2 << 3;
+ regs->csr_era = infop->entry;
+ regs->regs[3] = infop->start_stack;
+}
+
+/* See linux kernel: arch/mips/include/asm/elf.h. */
+#define ELF_NREG 45
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+/* See linux kernel: arch/loongarch/include/uapi/asm/reg.h */
+enum {
+ TARGET_EF_R0 = 0,
+ TARGET_EF_R26 = TARGET_EF_R0 + 26,
+ TARGET_EF_R27 = TARGET_EF_R0 + 27,
+ TARGET_EF_CSR_ERA = TARGET_EF_R0 + 32,
+ TARGET_EF_CSR_BADV = TARGET_EF_R0 + 33,
+ TARGET_EF_CSR_CRMD = TARGET_EF_R0 + 34,
+ TARGET_EF_CSR_ESTAT = TARGET_EF_R0 + 38
+};
+
+/* See linux kernel: arch/loongarch/kernel/process.c:elf_dump_regs. */
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+ const CPULOONGARCHState *env)
+{
+ int i;
+
+ for (i = 0; i < TARGET_EF_R0; i++) {
+ (*regs)[i] = 0;
+ }
+ (*regs)[TARGET_EF_R0] = 0;
+
+ for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
+ (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
+ }
+
+ (*regs)[TARGET_EF_R26] = 0;
+ (*regs)[TARGET_EF_R27] = 0;
+ (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->active_tc.PC);
+ (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
+ (*regs)[TARGET_EF_CSR_CRMD] = tswapreg(env->CSR_CRMD);
+ (*regs)[TARGET_EF_CSR_ESTAT] = tswapreg(env->CSR_ESTAT);
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 4096
+
+#define ELF_HWCAP get_elf_hwcap()
+
+static uint32_t get_elf_hwcap(void)
+{
+ return 0;
+}
+
+#endif /* TARGET_LOONGARCH64 */
+
#ifdef TARGET_MICROBLAZE
#define ELF_START_MMAP 0x80000000
diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c
new file mode 100644
index 0000000000000000000000000000000000000000..6d4093e1d7ba737d672b06d2ceda3027a3833a25
--- /dev/null
+++ b/linux-user/loongarch64/cpu_loop.c
@@ -0,0 +1,193 @@
+/*
+ * qemu user cpu loop
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "cpu_loop-common.h"
+#include "elf.h"
+
+/* Break codes */
+enum {
+ BRK_OVERFLOW = 6,
+ BRK_DIVZERO = 7
+};
+
+static int do_break(CPULOONGARCHState *env, target_siginfo_t *info,
+ unsigned int code)
+{
+ int ret = -1;
+
+ switch (code) {
+ case BRK_OVERFLOW:
+ case BRK_DIVZERO:
+ info->si_signo = TARGET_SIGFPE;
+ info->si_errno = 0;
+ info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
+ queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
+ ret = 0;
+ break;
+ default:
+ info->si_signo = TARGET_SIGTRAP;
+ info->si_errno = 0;
+ queue_signal(env, info->si_signo, QEMU_SI_FAULT, &*info);
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+void cpu_loop(CPULOONGARCHState *env)
+{
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
+ target_siginfo_t info;
+ int trapnr;
+ abi_long ret;
+
+ for (;;) {
+ cpu_exec_start(cs);
+ trapnr = cpu_exec(cs);
+ cpu_exec_end(cs);
+ process_queued_cpu_work(cs);
+
+ switch (trapnr) {
+ case EXCP_SYSCALL:
+ env->active_tc.PC += 4;
+ ret = do_syscall(env, env->active_tc.gpr[11],
+ env->active_tc.gpr[4], env->active_tc.gpr[5],
+ env->active_tc.gpr[6], env->active_tc.gpr[7],
+ env->active_tc.gpr[8], env->active_tc.gpr[9],
+ -1, -1);
+ if (ret == -TARGET_ERESTARTSYS) {
+ env->active_tc.PC -= 4;
+ break;
+ }
+ if (ret == -TARGET_QEMU_ESIGRETURN) {
+ /* Returning from a successful sigreturn syscall.
+ Avoid clobbering register state. */
+ break;
+ }
+ env->active_tc.gpr[4] = ret;
+ break;
+ case EXCP_TLBL:
+ case EXCP_TLBS:
+ case EXCP_AdEL:
+ case EXCP_AdES:
+ info.si_signo = TARGET_SIGSEGV;
+ info.si_errno = 0;
+ /* XXX: check env->error_code */
+ info.si_code = TARGET_SEGV_MAPERR;
+ info._sifields._sigfault._addr = env->CSR_BADV;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_FPDIS:
+ case EXCP_LSXDIS:
+ case EXCP_LASXDIS:
+ case EXCP_RI:
+ info.si_signo = TARGET_SIGILL;
+ info.si_errno = 0;
+ info.si_code = 0;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_INTERRUPT:
+ /* just indicate that signals should be handled asap */
+ break;
+ case EXCP_DEBUG:
+ info.si_signo = TARGET_SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TARGET_TRAP_BRKPT;
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_FPE:
+ info.si_signo = TARGET_SIGFPE;
+ info.si_errno = 0;
+ info.si_code = TARGET_FPE_FLTUNK;
+ if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_INVALID) {
+ info.si_code = TARGET_FPE_FLTINV;
+ } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_DIV0) {
+ info.si_code = TARGET_FPE_FLTDIV;
+ } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_OVERFLOW) {
+ info.si_code = TARGET_FPE_FLTOVF;
+ } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_UNDERFLOW) {
+ info.si_code = TARGET_FPE_FLTUND;
+ } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_INEXACT) {
+ info.si_code = TARGET_FPE_FLTRES;
+ }
+ queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+ break;
+ case EXCP_BREAK:
+ {
+ abi_ulong trap_instr;
+ unsigned int code;
+
+ ret = get_user_u32(trap_instr, env->active_tc.PC);
+ if (ret != 0) {
+ goto error;
+ }
+
+ code = trap_instr & 0x7fff;
+
+ if (do_break(env, &info, code) != 0) {
+ goto error;
+ }
+ }
+ break;
+ case EXCP_TRAP:
+ {
+ abi_ulong trap_instr;
+ unsigned int code = 0;
+
+ ret = get_user_u32(trap_instr, env->active_tc.PC);
+
+ if (ret != 0) {
+ goto error;
+ }
+
+ /* The immediate versions don't provide a code. */
+ if (!(trap_instr & 0xFC000000)) {
+ code = ((trap_instr >> 6) & ((1 << 10) - 1));
+ }
+
+ if (do_break(env, &info, code) != 0) {
+ goto error;
+ }
+ }
+ break;
+ case EXCP_ATOMIC:
+ cpu_exec_step_atomic(cs);
+ break;
+ default:
+error:
+ printf("111111\n");
+ EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", trapnr);
+ abort();
+ }
+ process_pending_signals(env);
+ }
+}
+
+void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ env->active_tc.gpr[i] = regs->regs[i];
+ }
+ env->active_tc.PC = regs->csr_era & ~(target_ulong)1;
+}
diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build
new file mode 100644
index 0000000000000000000000000000000000000000..c4c0b4d70191b623f30e5615964483c44e150e65
--- /dev/null
+++ b/linux-user/loongarch64/meson.build
@@ -0,0 +1,6 @@
+syscall_nr_generators += {
+ 'loongarch64': generator(sh,
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@',
+ '', 'TARGET_SYSCALL_OFFSET' ],
+ output: '@BASENAME@_nr.h')
+}
diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c
new file mode 100644
index 0000000000000000000000000000000000000000..6fe6852758abdb87a10388d4f7b9a961ce6ddb80
--- /dev/null
+++ b/linux-user/loongarch64/signal.c
@@ -0,0 +1,212 @@
+/*
+ * Emulation of Linux signals
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "signal-common.h"
+#include "linux-user/trace.h"
+
+#define FPU_REG_WIDTH 256
+union fpureg {
+ uint32_t val32[FPU_REG_WIDTH / 32];
+ uint64_t val64[FPU_REG_WIDTH / 64];
+};
+
+struct target_sigcontext {
+ uint64_t sc_pc;
+ uint64_t sc_regs[32];
+ uint32_t sc_flags;
+
+ uint32_t sc_fcsr;
+ uint32_t sc_vcsr;
+ uint64_t sc_fcc;
+ union fpureg sc_fpregs[32] __attribute__((aligned(32)));
+
+ uint32_t sc_reserved;
+
+};
+
+struct sigframe {
+ uint32_t sf_ass[4]; /* argument save space for o32 */
+ uint32_t sf_code[2]; /* signal trampoline */
+ struct target_sigcontext sf_sc;
+ target_sigset_t sf_mask;
+};
+
+struct target_ucontext {
+ target_ulong tuc_flags;
+ target_ulong tuc_link;
+ target_stack_t tuc_stack;
+ target_ulong pad0;
+ struct target_sigcontext tuc_mcontext;
+ target_sigset_t tuc_sigmask;
+};
+
+struct target_rt_sigframe {
+ uint32_t rs_ass[4]; /* argument save space for o32 */
+ uint32_t rs_code[2]; /* signal trampoline */
+ struct target_siginfo rs_info;
+ struct target_ucontext rs_uc;
+};
+
+static inline void setup_sigcontext(CPULOONGARCHState *regs,
+ struct target_sigcontext *sc)
+{
+ int i;
+
+ __put_user(exception_resume_pc(regs), &sc->sc_pc);
+ regs->hflags &= ~LARCH_HFLAG_BMASK;
+
+ __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; ++i) {
+ __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
+ }
+
+ for (i = 0; i < 32; ++i) {
+ __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i].val64[0]);
+ }
+}
+
+static inline void
+restore_sigcontext(CPULOONGARCHState *regs, struct target_sigcontext *sc)
+{
+ int i;
+
+ __get_user(regs->CSR_ERA, &sc->sc_pc);
+
+ for (i = 1; i < 32; ++i) {
+ __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
+ }
+
+ for (i = 0; i < 32; ++i) {
+ __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i].val64[0]);
+ }
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline abi_ulong
+get_sigframe(struct target_sigaction *ka, CPULOONGARCHState *regs,
+ size_t frame_size)
+{
+ unsigned long sp;
+
+ /*
+ * FPU emulator may have its own trampoline active just
+ * above the user stack, 16-bytes before the next lowest
+ * 16 byte boundary. Try to avoid trashing it.
+ */
+ sp = target_sigsp(get_sp_from_cpustate(regs) - 32, ka);
+
+ return (sp - frame_size) & ~7;
+}
+
+void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPULOONGARCHState *env)
+{
+ struct target_rt_sigframe *frame;
+ abi_ulong frame_addr;
+ int i;
+
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ trace_user_setup_rt_frame(env, frame_addr);
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ /* ori a7, $r0, TARGET_NR_rt_sigreturn */
+ /* syscall 0 */
+ __put_user(0x0380000b + (TARGET_NR_rt_sigreturn << 10), &frame->rs_code[0]);
+ __put_user(0x002b0000, &frame->rs_code[1]);
+
+ tswap_siginfo(&frame->rs_info, info);
+
+ __put_user(0, &frame->rs_uc.tuc_flags);
+ __put_user(0, &frame->rs_uc.tuc_link);
+ target_save_altstack(&frame->rs_uc.tuc_stack, env);
+
+ setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
+
+ for (i = 0; i < TARGET_NSIG_WORDS; i++) {
+ __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
+ }
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = pointer to siginfo_t
+ * a2 = pointer to ucontext_t
+ *
+ * $25 and PC point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ env->active_tc.gpr[4] = sig;
+ env->active_tc.gpr[5] = frame_addr
+ + offsetof(struct target_rt_sigframe, rs_info);
+ env->active_tc.gpr[6] = frame_addr
+ + offsetof(struct target_rt_sigframe, rs_uc);
+ env->active_tc.gpr[3] = frame_addr;
+ env->active_tc.gpr[1] = frame_addr
+ + offsetof(struct target_rt_sigframe, rs_code);
+ /* The original kernel code sets CP0_ERA to the handler
+ * since it returns to userland using ertn
+ * we cannot do this here, and we must set PC directly */
+ env->active_tc.PC = env->active_tc.gpr[20] = ka->_sa_handler;
+ unlock_user_struct(frame, frame_addr, 1);
+ return;
+
+give_sigsegv:
+ unlock_user_struct(frame, frame_addr, 1);
+ force_sigsegv(sig);
+}
+
+long do_rt_sigreturn(CPULOONGARCHState *env)
+{
+ struct target_rt_sigframe *frame;
+ abi_ulong frame_addr;
+ sigset_t blocked;
+
+ frame_addr = env->active_tc.gpr[3];
+ trace_user_do_rt_sigreturn(env, frame_addr);
+ if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+ goto badframe;
+ }
+
+ target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
+ set_sigmask(&blocked);
+
+ restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
+
+ if (do_sigaltstack(frame_addr +
+ offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
+ 0, get_sp_from_cpustate(env)) == -EFAULT)
+ goto badframe;
+
+ env->active_tc.PC = env->CSR_ERA;
+ /* I am not sure this is right, but it seems to work
+ * maybe a problem with nested signals ? */
+ env->CSR_ERA = 0;
+ return -TARGET_QEMU_ESIGRETURN;
+
+badframe:
+ force_sig(TARGET_SIGSEGV);
+ return -TARGET_QEMU_ESIGRETURN;
+}
diff --git a/linux-user/loongarch64/sockbits.h b/linux-user/loongarch64/sockbits.h
new file mode 100644
index 0000000000000000000000000000000000000000..0e4c8f012d781261da71333ae360abe22ca8083b
--- /dev/null
+++ b/linux-user/loongarch64/sockbits.h
@@ -0,0 +1 @@
+#include "../generic/sockbits.h"
diff --git a/linux-user/loongarch64/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h
new file mode 100644
index 0000000000000000000000000000000000000000..a30aca8d8e13c65555235406bc25b6f5d0bee488
--- /dev/null
+++ b/linux-user/loongarch64/syscall_nr.h
@@ -0,0 +1,287 @@
+#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H
+#define LINUX_USER_LOONGARCH_SYSCALL_NR_H
+
+#define TARGET_NR_io_setup 0
+#define TARGET_NR_io_destroy 1
+#define TARGET_NR_io_submit 2
+#define TARGET_NR_io_cancel 3
+#define TARGET_NR_io_getevents 4
+#define TARGET_NR_setxattr 5
+#define TARGET_NR_lsetxattr 6
+#define TARGET_NR_fsetxattr 7
+#define TARGET_NR_getxattr 8
+#define TARGET_NR_lgetxattr 9
+#define TARGET_NR_fgetxattr 10
+#define TARGET_NR_listxattr 11
+#define TARGET_NR_llistxattr 12
+#define TARGET_NR_flistxattr 13
+#define TARGET_NR_removexattr 14
+#define TARGET_NR_lremovexattr 15
+#define TARGET_NR_fremovexattr 16
+#define TARGET_NR_getcwd 17
+#define TARGET_NR_lookup_dcookie 18
+#define TARGET_NR_eventfd2 19
+#define TARGET_NR_epoll_create1 20
+#define TARGET_NR_epoll_ctl 21
+#define TARGET_NR_epoll_pwait 22
+#define TARGET_NR_dup 23
+#define TARGET_NR_dup3 24
+#define TARGET_NR_fcntl 25
+#define TARGET_NR_inotify_init1 26
+#define TARGET_NR_inotify_add_watch 27
+#define TARGET_NR_inotify_rm_watch 28
+#define TARGET_NR_ioctl 29
+#define TARGET_NR_ioprio_set 30
+#define TARGET_NR_ioprio_get 31
+#define TARGET_NR_flock 32
+#define TARGET_NR_mknodat 33
+#define TARGET_NR_mkdirat 34
+#define TARGET_NR_unlinkat 35
+#define TARGET_NR_symlinkat 36
+#define TARGET_NR_linkat 37
+#define TARGET_NR_renameat 38
+#define TARGET_NR_umount2 39
+#define TARGET_NR_mount 40
+#define TARGET_NR_pivot_root 41
+#define TARGET_NR_nfsservctl 42
+#define TARGET_NR_statfs 43
+#define TARGET_NR_fstatfs 44
+#define TARGET_NR_truncate 45
+#define TARGET_NR_ftruncate 46
+#define TARGET_NR_fallocate 47
+#define TARGET_NR_faccessat 48
+#define TARGET_NR_chdir 49
+#define TARGET_NR_fchdir 50
+#define TARGET_NR_chroot 51
+#define TARGET_NR_fchmod 52
+#define TARGET_NR_fchmodat 53
+#define TARGET_NR_fchownat 54
+#define TARGET_NR_fchown 55
+#define TARGET_NR_openat 56
+#define TARGET_NR_close 57
+#define TARGET_NR_vhangup 58
+#define TARGET_NR_pipe2 59
+#define TARGET_NR_quotactl 60
+#define TARGET_NR_getdents64 61
+#define TARGET_NR_lseek 62
+#define TARGET_NR_read 63
+#define TARGET_NR_write 64
+#define TARGET_NR_readv 65
+#define TARGET_NR_writev 66
+#define TARGET_NR_pread64 67
+#define TARGET_NR_pwrite64 68
+#define TARGET_NR_preadv 69
+#define TARGET_NR_pwritev 70
+#define TARGET_NR_sendfile 71
+#define TARGET_NR_pselect6 72
+#define TARGET_NR_ppoll 73
+#define TARGET_NR_signalfd4 74
+#define TARGET_NR_vmsplice 75
+#define TARGET_NR_splice 76
+#define TARGET_NR_tee 77
+#define TARGET_NR_readlinkat 78
+#define TARGET_NR_newfstatat 79
+#define TARGET_NR_fstat 80
+#define TARGET_NR_sync 81
+#define TARGET_NR_fsync 82
+#define TARGET_NR_fdatasync 83
+#define TARGET_NR_sync_file_range 84
+#define TARGET_NR_timerfd_create 85
+#define TARGET_NR_timerfd_settime 86
+#define TARGET_NR_timerfd_gettime 87
+#define TARGET_NR_utimensat 88
+#define TARGET_NR_acct 89
+#define TARGET_NR_capget 90
+#define TARGET_NR_capset 91
+#define TARGET_NR_personality 92
+#define TARGET_NR_exit 93
+#define TARGET_NR_exit_group 94
+#define TARGET_NR_waitid 95
+#define TARGET_NR_set_tid_address 96
+#define TARGET_NR_unshare 97
+#define TARGET_NR_futex 98
+#define TARGET_NR_set_robust_list 99
+#define TARGET_NR_get_robust_list 100
+#define TARGET_NR_nanosleep 101
+#define TARGET_NR_getitimer 102
+#define TARGET_NR_setitimer 103
+#define TARGET_NR_kexec_load 104
+#define TARGET_NR_init_module 105
+#define TARGET_NR_delete_module 106
+#define TARGET_NR_timer_create 107
+#define TARGET_NR_timer_gettime 108
+#define TARGET_NR_timer_getoverrun 109
+#define TARGET_NR_timer_settime 110
+#define TARGET_NR_timer_delete 111
+#define TARGET_NR_clock_settime 112
+#define TARGET_NR_clock_gettime 113
+#define TARGET_NR_clock_getres 114
+#define TARGET_NR_clock_nanosleep 115
+#define TARGET_NR_syslog 116
+#define TARGET_NR_ptrace 117
+#define TARGET_NR_sched_setparam 118
+#define TARGET_NR_sched_setscheduler 119
+#define TARGET_NR_sched_getscheduler 120
+#define TARGET_NR_sched_getparam 121
+#define TARGET_NR_sched_setaffinity 122
+#define TARGET_NR_sched_getaffinity 123
+#define TARGET_NR_sched_yield 124
+#define TARGET_NR_sched_get_priority_max 125
+#define TARGET_NR_sched_get_priority_min 126
+#define TARGET_NR_sched_rr_get_interval 127
+#define TARGET_NR_restart_syscall 128
+#define TARGET_NR_kill 129
+#define TARGET_NR_tkill 130
+#define TARGET_NR_tgkill 131
+#define TARGET_NR_sigaltstack 132
+#define TARGET_NR_rt_sigsuspend 133
+#define TARGET_NR_rt_sigaction 134
+#define TARGET_NR_rt_sigprocmask 135
+#define TARGET_NR_rt_sigpending 136
+#define TARGET_NR_rt_sigtimedwait 137
+#define TARGET_NR_rt_sigqueueinfo 138
+#define TARGET_NR_rt_sigreturn 139
+#define TARGET_NR_setpriority 140
+#define TARGET_NR_getpriority 141
+#define TARGET_NR_reboot 142
+#define TARGET_NR_setregid 143
+#define TARGET_NR_setgid 144
+#define TARGET_NR_setreuid 145
+#define TARGET_NR_setuid 146
+#define TARGET_NR_setresuid 147
+#define TARGET_NR_getresuid 148
+#define TARGET_NR_setresgid 149
+#define TARGET_NR_getresgid 150
+#define TARGET_NR_setfsuid 151
+#define TARGET_NR_setfsgid 152
+#define TARGET_NR_times 153
+#define TARGET_NR_setpgid 154
+#define TARGET_NR_getpgid 155
+#define TARGET_NR_getsid 156
+#define TARGET_NR_setsid 157
+#define TARGET_NR_getgroups 158
+#define TARGET_NR_setgroups 159
+#define TARGET_NR_uname 160
+#define TARGET_NR_sethostname 161
+#define TARGET_NR_setdomainname 162
+#define TARGET_NR_getrlimit 163
+#define TARGET_NR_setrlimit 164
+#define TARGET_NR_getrusage 165
+#define TARGET_NR_umask 166
+#define TARGET_NR_prctl 167
+#define TARGET_NR_getcpu 168
+#define TARGET_NR_gettimeofday 169
+#define TARGET_NR_settimeofday 170
+#define TARGET_NR_adjtimex 171
+#define TARGET_NR_getpid 172
+#define TARGET_NR_getppid 173
+#define TARGET_NR_getuid 174
+#define TARGET_NR_geteuid 175
+#define TARGET_NR_getgid 176
+#define TARGET_NR_getegid 177
+#define TARGET_NR_gettid 178
+#define TARGET_NR_sysinfo 179
+#define TARGET_NR_mq_open 180
+#define TARGET_NR_mq_unlink 181
+#define TARGET_NR_mq_timedsend 182
+#define TARGET_NR_mq_timedreceive 183
+#define TARGET_NR_mq_notify 184
+#define TARGET_NR_mq_getsetattr 185
+#define TARGET_NR_msgget 186
+#define TARGET_NR_msgctl 187
+#define TARGET_NR_msgrcv 188
+#define TARGET_NR_msgsnd 189
+#define TARGET_NR_semget 190
+#define TARGET_NR_semctl 191
+#define TARGET_NR_semtimedop 192
+#define TARGET_NR_semop 193
+#define TARGET_NR_shmget 194
+#define TARGET_NR_shmctl 195
+#define TARGET_NR_shmat 196
+#define TARGET_NR_shmdt 197
+#define TARGET_NR_socket 198
+#define TARGET_NR_socketpair 199
+#define TARGET_NR_bind 200
+#define TARGET_NR_listen 201
+#define TARGET_NR_accept 202
+#define TARGET_NR_connect 203
+#define TARGET_NR_getsockname 204
+#define TARGET_NR_getpeername 205
+#define TARGET_NR_sendto 206
+#define TARGET_NR_recvfrom 207
+#define TARGET_NR_setsockopt 208
+#define TARGET_NR_getsockopt 209
+#define TARGET_NR_shutdown 210
+#define TARGET_NR_sendmsg 211
+#define TARGET_NR_recvmsg 212
+#define TARGET_NR_readahead 213
+#define TARGET_NR_brk 214
+#define TARGET_NR_munmap 215
+#define TARGET_NR_mremap 216
+#define TARGET_NR_add_key 217
+#define TARGET_NR_request_key 218
+#define TARGET_NR_keyctl 219
+#define TARGET_NR_clone 220
+#define TARGET_NR_execve 221
+#define TARGET_NR_mmap 222
+#define TARGET_NR_fadvise64 223
+#define TARGET_NR_swapon 224
+#define TARGET_NR_swapoff 225
+#define TARGET_NR_mprotect 226
+#define TARGET_NR_msync 227
+#define TARGET_NR_mlock 228
+#define TARGET_NR_munlock 229
+#define TARGET_NR_mlockall 230
+#define TARGET_NR_munlockall 231
+#define TARGET_NR_mincore 232
+#define TARGET_NR_madvise 233
+#define TARGET_NR_remap_file_pages 234
+#define TARGET_NR_mbind 235
+#define TARGET_NR_get_mempolicy 236
+#define TARGET_NR_set_mempolicy 237
+#define TARGET_NR_migrate_pages 238
+#define TARGET_NR_move_pages 239
+#define TARGET_NR_rt_tgsigqueueinfo 240
+#define TARGET_NR_perf_event_open 241
+#define TARGET_NR_accept4 242
+#define TARGET_NR_recvmmsg 243
+#define TARGET_NR_arch_specific_syscall 244
+#define TARGET_NR_wait4 260
+#define TARGET_NR_prlimit64 261
+#define TARGET_NR_fanotify_init 262
+#define TARGET_NR_fanotify_mark 263
+#define TARGET_NR_name_to_handle_at 264
+#define TARGET_NR_open_by_handle_at 265
+#define TARGET_NR_clock_adjtime 266
+#define TARGET_NR_syncfs 267
+#define TARGET_NR_setns 268
+#define TARGET_NR_sendmmsg 269
+#define TARGET_NR_process_vm_readv 270
+#define TARGET_NR_process_vm_writev 271
+#define TARGET_NR_kcmp 272
+#define TARGET_NR_finit_module 273
+#define TARGET_NR_sched_setattr 274
+#define TARGET_NR_sched_getattr 275
+#define TARGET_NR_renameat2 276
+#define TARGET_NR_seccomp 277
+#define TARGET_NR_getrandom 278
+#define TARGET_NR_memfd_create 279
+#define TARGET_NR_bpf 280
+#define TARGET_NR_execveat 281
+#define TARGET_NR_userfaultfd 282
+#define TARGET_NR_membarrier 283
+#define TARGET_NR_mlock2 284
+#define TARGET_NR_copy_file_range 285
+#define TARGET_NR_preadv2 286
+#define TARGET_NR_pwritev2 287
+#define TARGET_NR_pkey_mprotect 288
+#define TARGET_NR_pkey_alloc 289
+#define TARGET_NR_pkey_free 290
+#define TARGET_NR_statx 291
+#define TARGET_NR_io_pgetevents 292
+#define TARGET_NR_rseq 293
+#define TARGET_NR_kexec_file_load 294
+
+#define TARGET_NR_syscalls (TARGET_NR_kexec_file_load + 1)
+
+#endif
diff --git a/linux-user/loongarch64/target_cpu.h b/linux-user/loongarch64/target_cpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..0f6845737f82aec705b7716ac5574a7b647d312a
--- /dev/null
+++ b/linux-user/loongarch64/target_cpu.h
@@ -0,0 +1,45 @@
+/*
+ * LoongArch specific CPU ABI and functions for linux-user
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef LOONGARCH_TARGET_CPU_H
+#define LOONGARCH_TARGET_CPU_H
+
+static inline void cpu_clone_regs_child(CPULOONGARCHState *env, target_ulong newsp,
+ unsigned flags)
+{
+ if (newsp) {
+ env->active_tc.gpr[3] = newsp;
+ }
+ env->active_tc.gpr[7] = 0;
+ env->active_tc.gpr[4] = 0;
+}
+
+static inline void cpu_clone_regs_parent(CPULOONGARCHState *env, unsigned flags)
+{
+}
+
+static inline void cpu_set_tls(CPULOONGARCHState *env, target_ulong newtls)
+{
+ env->active_tc.gpr[2] = newtls;
+}
+
+static inline abi_ulong get_sp_from_cpustate(CPULOONGARCHState *state)
+{
+ return state->active_tc.gpr[3];
+}
+#endif
diff --git a/linux-user/loongarch64/target_elf.h b/linux-user/loongarch64/target_elf.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c153d12c4199a35bb627cfc2630bba8633af99d
--- /dev/null
+++ b/linux-user/loongarch64/target_elf.h
@@ -0,0 +1,14 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef LOONGARCH_TARGET_ELF_H
+#define LOONGARCH_TARGET_ELF_H
+static inline const char *cpu_get_model(uint32_t eflags)
+{
+ return "Loongson-3A5000";
+}
+#endif
diff --git a/linux-user/loongarch64/target_fcntl.h b/linux-user/loongarch64/target_fcntl.h
new file mode 100644
index 0000000000000000000000000000000000000000..a3d7b460620dcd614b52c9d4d716c5c6f52c1d05
--- /dev/null
+++ b/linux-user/loongarch64/target_fcntl.h
@@ -0,0 +1,13 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef LOONGARCH_TARGET_FCNTL_H
+#define LOONGARCH_TARGET_FCNTL_H
+
+#include "../generic/fcntl.h"
+
+#endif /* LOONGARCH_TARGET_FCNTL_H */
diff --git a/linux-user/loongarch64/target_signal.h b/linux-user/loongarch64/target_signal.h
new file mode 100644
index 0000000000000000000000000000000000000000..e418c8e8f538943851f2b49d97437b32985c8efc
--- /dev/null
+++ b/linux-user/loongarch64/target_signal.h
@@ -0,0 +1,23 @@
+#ifndef LOONGARCH_TARGET_SIGNAL_H
+#define LOONGARCH_TARGET_SIGNAL_H
+
+/* this struct defines a stack used during syscall handling */
+
+typedef struct target_sigaltstack {
+ abi_long ss_sp;
+ abi_int ss_flags;
+ abi_ulong ss_size;
+} target_stack_t;
+
+/*
+ * sigaltstack controls
+ */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_SIGSTKSZ 8192
+
+#include "../generic/signal.h"
+
+#endif /* LOONGARCH_TARGET_SIGNAL_H */
diff --git a/linux-user/loongarch64/target_structs.h b/linux-user/loongarch64/target_structs.h
new file mode 100644
index 0000000000000000000000000000000000000000..280acd09712ddc061cf5d3913aeeca95c01423c1
--- /dev/null
+++ b/linux-user/loongarch64/target_structs.h
@@ -0,0 +1,62 @@
+/*
+ * LOONGARCH specific structures for linux-user
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef LOONGARCH_TARGET_STRUCTS_H
+#define LOONGARCH_TARGET_STRUCTS_H
+
+struct target_ipc_perm {
+ abi_int __key; /* Key. */
+ abi_uint uid; /* Owner's user ID. */
+ abi_uint gid; /* Owner's group ID. */
+ abi_uint cuid; /* Creator's user ID. */
+ abi_uint cgid; /* Creator's group ID. */
+ abi_uint mode; /* Read/write permission. */
+ abi_ushort __seq; /* Sequence number. */
+ abi_ushort __pad1;
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+struct target_shmid_ds {
+ struct target_ipc_perm shm_perm; /* operation permission struct */
+ abi_long shm_segsz; /* size of segment in bytes */
+ abi_ulong shm_atime; /* time of last shmat() */
+ abi_ulong shm_dtime; /* time of last shmdt() */
+ abi_ulong shm_ctime; /* time of last change by shmctl() */
+ abi_int shm_cpid; /* pid of creator */
+ abi_int shm_lpid; /* pid of last shmop */
+ abi_ulong shm_nattch; /* number of current attaches */
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+#define TARGET_SEMID64_DS
+
+/*
+ * The semid64_ds structure for the LoongArch architecture.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ */
+struct target_semid64_ds {
+ struct target_ipc_perm sem_perm;
+ abi_ulong sem_otime;
+ abi_ulong sem_ctime;
+ abi_ulong sem_nsems;
+ abi_ulong __unused1;
+ abi_ulong __unused2;
+};
+
+#endif
diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h
new file mode 100644
index 0000000000000000000000000000000000000000..cb77f07080d5c2074bf1e027cd186c9c85b0f1d0
--- /dev/null
+++ b/linux-user/loongarch64/target_syscall.h
@@ -0,0 +1,44 @@
+#ifndef LOONGARCH_TARGET_SYSCALL_H
+#define LOONGARCH_TARGET_SYSCALL_H
+
+/* this struct defines the way the registers are stored on the
+ stack during a system call. */
+
+struct target_pt_regs {
+ /* Saved main processor registers. */
+ target_ulong regs[32];
+
+ /* Saved special registers. */
+
+ target_ulong csr_crmd;
+ target_ulong csr_prmd;
+ target_ulong csr_euen;
+ target_ulong csr_ecfg;
+ target_ulong csr_estat;
+ target_ulong csr_era;
+ target_ulong csr_badvaddr;
+ target_ulong orig_a0;
+ target_ulong __last[0];
+};
+
+#define UNAME_MACHINE "loongarch"
+#define UNAME_MINIMUM_RELEASE "2.6.32"
+
+#define TARGET_CLONE_BACKWARDS
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_MLOCKALL_MCL_CURRENT 1
+#define TARGET_MLOCKALL_MCL_FUTURE 2
+
+#define TARGET_FORCE_SHMLBA
+
+static inline abi_ulong target_shmlba(CPULOONGARCHState *env)
+{
+ return 0x40000;
+}
+
+#define TARGET_PR_SET_FP_MODE 45
+#define TARGET_PR_GET_FP_MODE 46
+#define TARGET_PR_FP_MODE_FR (1 << 0)
+#define TARGET_PR_FP_MODE_FRE (1 << 1)
+
+#endif /* LOONGARCH_TARGET_SYSCALL_H */
diff --git a/linux-user/loongarch64/termbits.h b/linux-user/loongarch64/termbits.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c613a19739956be2cecabb36e33a214369c29e1
--- /dev/null
+++ b/linux-user/loongarch64/termbits.h
@@ -0,0 +1,224 @@
+#ifndef LINUX_USER_LOONGARCH_TERMBITS_H
+#define LINUX_USER_LOONGARCH_TERMBITS_H
+
+#define TARGET_NCCS 19
+
+struct target_termios {
+ unsigned int c_iflag; /* input mode flags */
+ unsigned int c_oflag; /* output mode flags */
+ unsigned int c_cflag; /* control mode flags */
+ unsigned int c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[TARGET_NCCS]; /* control characters */
+};
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA B19200
+#define TARGET_EXTB B38400
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
+#define TARGET_CBAUDEX 0010000
+#define TARGET_B57600 0010001
+#define TARGET_B115200 0010002
+#define TARGET_B230400 0010003
+#define TARGET_B460800 0010004
+#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
+
+/* c_cc character offsets */
+#define TARGET_VINTR 0
+#define TARGET_VQUIT 1
+#define TARGET_VERASE 2
+#define TARGET_VKILL 3
+#define TARGET_VEOF 4
+#define TARGET_VTIME 5
+#define TARGET_VMIN 6
+#define TARGET_VSWTC 7
+#define TARGET_VSTART 8
+#define TARGET_VSTOP 9
+#define TARGET_VSUSP 10
+#define TARGET_VEOL 11
+#define TARGET_VREPRINT 12
+#define TARGET_VDISCARD 13
+#define TARGET_VWERASE 14
+#define TARGET_VLNEXT 15
+#define TARGET_VEOL2 16
+
+/* ioctls */
+
+#define TARGET_TCGETS 0x5401
+#define TARGET_TCSETS 0x5402
+#define TARGET_TCSETSW 0x5403
+#define TARGET_TCSETSF 0x5404
+#define TARGET_TCGETA 0x5405
+#define TARGET_TCSETA 0x5406
+#define TARGET_TCSETAW 0x5407
+#define TARGET_TCSETAF 0x5408
+#define TARGET_TCSBRK 0x5409
+#define TARGET_TCXONC 0x540A
+#define TARGET_TCFLSH 0x540B
+
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+#define TARGET_TIOCGPGRP 0x540F
+#define TARGET_TIOCSPGRP 0x5410
+#define TARGET_TIOCOUTQ 0x5411
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCGWINSZ 0x5413
+#define TARGET_TIOCSWINSZ 0x5414
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_FIONREAD 0x541B
+#define TARGET_TIOCINQ TARGET_FIONREAD
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+#define TARGET_FIONBIO 0x5421
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int)
+ /* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int)
+ /* Lock/unlock Pty */
+#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41)
+ /* Safely open the slave */
+
+#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. */
+#define TARGET_FIOCLEX 0x5451
+#define TARGET_FIOASYNC 0x5452
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TARGET_TIOCMIWAIT 0x545C
+ /* wait for a change on serial input line(s) */
+#define TARGET_TIOCGICOUNT 0x545D
+ /* read serial port inline interrupt counts */
+#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
+
+/* Used for packet mode */
+#define TARGET_TIOCPKT_DATA 0
+#define TARGET_TIOCPKT_FLUSHREAD 1
+#define TARGET_TIOCPKT_FLUSHWRITE 2
+#define TARGET_TIOCPKT_STOP 4
+#define TARGET_TIOCPKT_START 8
+#define TARGET_TIOCPKT_NOSTOP 16
+#define TARGET_TIOCPKT_DOSTOP 32
+
+#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif
diff --git a/linux-user/meson.build b/linux-user/meson.build
index bf62c13e378957087adc1d44e585bb14e83dd369..195f9e83ac0d094a8eb2d57a74a1a36f34fcef62 100644
--- a/linux-user/meson.build
+++ b/linux-user/meson.build
@@ -39,3 +39,4 @@ subdir('sh4')
subdir('sparc')
subdir('x86_64')
subdir('xtensa')
+subdir('loongarch64')
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 5c713fa8ab26f8566cb820777772b09fa830c22e..66ddb25d1c88ce7a22d04cf8d41341b3f59d51bb 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -61,7 +61,7 @@ struct image_info {
/* For target-specific processing of NT_GNU_PROPERTY_TYPE_0. */
uint32_t note_flags;
-#ifdef TARGET_MIPS
+#if defined(TARGET_MIPS) || defined(TARGET_LOONGARCH64)
int fp_abi;
int interp_fp_abi;
#endif
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index f1cfcc81048695222abc0a0f546d258f92e8f328..729131ecd0ea1f7cfb1ea9cf16c17f740f063e1e 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -1614,6 +1614,9 @@ static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
#elif defined(TARGET_MIPS)
((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
return host_pipe[0];
+#elif defined(TARGET_LOONGARCH64)
+ ((CPULOONGARCHState *)cpu_env)->active_tc.gpr[5] = host_pipe[1];
+ return host_pipe[0];
#elif defined(TARGET_SH4)
((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
return host_pipe[0];
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index 0b139759377b6b3a1d09173fd549f28d22f6736e..7e2915d53e49bae5ca239a368badf40e054f015c 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -74,7 +74,7 @@
|| defined(TARGET_M68K) || defined(TARGET_CRIS) \
|| defined(TARGET_S390X) || defined(TARGET_OPENRISC) \
|| defined(TARGET_NIOS2) || defined(TARGET_RISCV) \
- || defined(TARGET_XTENSA)
+ || defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64)
#define TARGET_IOC_SIZEBITS 14
#define TARGET_IOC_DIRBITS 2
@@ -450,7 +450,7 @@ struct target_dirent64 {
#define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */
#define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */
-#ifdef TARGET_MIPS
+#if defined(TARGET_MIPS) || defined(TARGET_LOONGARCH64)
#define TARGET_NSIG 128
#else
#define TARGET_NSIG 64
@@ -2133,7 +2133,7 @@ struct target_stat64 {
abi_ulong __unused5;
};
-#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV)
+#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV) || defined(TARGET_LOONGARCH64)
/* These are the asm-generic versions of the stat and stat64 structures */
@@ -2161,7 +2161,7 @@ struct target_stat {
unsigned int __unused5;
};
-#if !defined(TARGET_RISCV64)
+#if !(defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64))
#define TARGET_HAS_STRUCT_STAT64
struct target_stat64 {
uint64_t st_dev;
@@ -2331,6 +2331,7 @@ struct target_statfs64 {
};
#elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \
defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \
+ defined(TARGET_LOONGARCH64) || \
defined(TARGET_RISCV)) && !defined(TARGET_ABI32)
struct target_statfs {
abi_long f_type;
diff --git a/meson.build b/meson.build
index 96de1a6ef948542aa93bd03242005907643e6c47..02fbc1cbbda726b73f0976db3cd6a6cfd354ae94 100644
--- a/meson.build
+++ b/meson.build
@@ -56,7 +56,7 @@ python = import('python').find_installation()
supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux']
supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64',
- 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64']
+ 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64', 'loongarch64']
cpu = host_machine.cpu_family()
@@ -77,6 +77,8 @@ elif cpu in ['ppc', 'ppc64']
kvm_targets = ['ppc-softmmu', 'ppc64-softmmu']
elif cpu in ['mips', 'mips64']
kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu']
+elif cpu == 'loongarch64'
+ kvm_targets = ['loongarch64-softmmu']
else
kvm_targets = []
endif
@@ -359,6 +361,8 @@ if not get_option('tcg').disabled()
tcg_arch = 'i386'
elif config_host['ARCH'] == 'ppc64'
tcg_arch = 'ppc'
+ elif config_host['ARCH'] == 'loongarch64'
+ tcg_arch = 'loongarch64'
endif
add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch,
language: ['c', 'cpp', 'objc'])
@@ -1814,6 +1818,7 @@ disassemblers = {
'sh4' : ['CONFIG_SH4_DIS'],
'sparc' : ['CONFIG_SPARC_DIS'],
'xtensa' : ['CONFIG_XTENSA_DIS'],
+ 'loongarch64' : ['CONFIG_LOONGARCH_DIS'],
}
if link_language == 'cpp'
disassemblers += {
diff --git a/pc-bios/loongarch_bios.bin b/pc-bios/loongarch_bios.bin
new file mode 100644
index 0000000000000000000000000000000000000000..d6330c6f0532effe458726efa18222166a444839
Binary files /dev/null and b/pc-bios/loongarch_bios.bin differ
diff --git a/pc-bios/loongarch_vars.bin b/pc-bios/loongarch_vars.bin
new file mode 100644
index 0000000000000000000000000000000000000000..65bdb77af90b92dc268c0c5c70c054dee71599f4
Binary files /dev/null and b/pc-bios/loongarch_vars.bin differ
diff --git a/pc-bios/meson.build b/pc-bios/meson.build
index b40ff3f2bd395f6ef79f49ba4ac85a566ac7307d..60009bd89e84ecb8c7d66d70fb7078bb2038f3c8 100644
--- a/pc-bios/meson.build
+++ b/pc-bios/meson.build
@@ -83,6 +83,8 @@ blobs = files(
'opensbi-riscv32-generic-fw_dynamic.elf',
'opensbi-riscv64-generic-fw_dynamic.elf',
'npcm7xx_bootrom.bin',
+ 'loongarch_bios.bin',
+ 'loongarch_vars.bin',
)
if get_option('install_blobs')
diff --git a/qapi/machine-target.json b/qapi/machine-target.json
index f5ec4bc172b0dc48f73d8452945404cec3d77ae5..682dc86b427c02ddba0387cb84529d0ffc8a324d 100644
--- a/qapi/machine-target.json
+++ b/qapi/machine-target.json
@@ -324,7 +324,8 @@
'TARGET_ARM',
'TARGET_I386',
'TARGET_S390X',
- 'TARGET_MIPS' ] } }
+ 'TARGET_MIPS',
+ 'TARGET_LOONGARCH64' ] } }
##
# @query-cpu-definitions:
@@ -340,4 +341,5 @@
'TARGET_ARM',
'TARGET_I386',
'TARGET_S390X',
- 'TARGET_MIPS' ] } }
+ 'TARGET_MIPS',
+ 'TARGET_LOONGARCH64' ] } }
diff --git a/qapi/machine.json b/qapi/machine.json
index 067e3f53787928d38566e1e26e1c94ec1481ebf0..d1871c31cb5f9fa4eb1468a1381d293e0780c83b 100644
--- a/qapi/machine.json
+++ b/qapi/machine.json
@@ -34,7 +34,7 @@
'mips64el', 'mipsel', 'nios2', 'or1k', 'ppc',
'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4',
'sh4eb', 'sparc', 'sparc64', 'tricore',
- 'x86_64', 'xtensa', 'xtensaeb' ] }
+ 'x86_64', 'xtensa', 'xtensaeb', 'loongarch64' ] }
##
# @CpuS390State:
diff --git a/qapi/misc-target.json b/qapi/misc-target.json
index 5aa2b95b7d4aa09a4b11a9bbabafa2b30ce4f2ae..96a0fc6d6e5bd644ebf3dc1fb2577e19ea6d1646 100644
--- a/qapi/misc-target.json
+++ b/qapi/misc-target.json
@@ -33,6 +33,7 @@
'TARGET_PPC64',
'TARGET_S390X',
'TARGET_SH4',
+ 'TARGET_LOONGARCH64',
'TARGET_SPARC' ] } }
##
diff --git a/qemu-options.hx b/qemu-options.hx
index ae2c6dbbfc005c526026604d988612157dd5a7fd..bff7692c8f8582643020722ffb7c5d9895226483 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -2480,7 +2480,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios,
" specify SMBIOS type 17 fields\n"
"-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n"
" specify SMBIOS type 41 fields\n",
- QEMU_ARCH_I386 | QEMU_ARCH_ARM)
+ QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH64)
SRST
``-smbios file=binary``
Load SMBIOS entry from binary file.
diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c
index 01f3834db575a6932c8e292068d4ab71125abe45..ece96121d3e09554e443a17ebf9a245b34f157b6 100644
--- a/softmmu/qdev-monitor.c
+++ b/softmmu/qdev-monitor.c
@@ -60,7 +60,8 @@ typedef struct QDevAlias
QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \
QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \
QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \
- QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA)
+ QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \
+ QEMU_ARCH_LOONGARCH64)
#define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X)
#define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K)
diff --git a/target/Kconfig b/target/Kconfig
index ae7f24fc66b00dea4318be4793fbf315dde02b90..50b46d0487fa10d2f6d3d1f830e864a18994a1ec 100644
--- a/target/Kconfig
+++ b/target/Kconfig
@@ -4,6 +4,7 @@ source avr/Kconfig
source cris/Kconfig
source hppa/Kconfig
source i386/Kconfig
+source loongarch64/Kconfig
source m68k/Kconfig
source microblaze/Kconfig
source mips/Kconfig
diff --git a/target/loongarch64/Kconfig b/target/loongarch64/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..46b26b1a85715e779672bea93152a3c62c170fe2
--- /dev/null
+++ b/target/loongarch64/Kconfig
@@ -0,0 +1,2 @@
+config LOONGARCH64
+ bool
diff --git a/target/loongarch64/arch_dump.c b/target/loongarch64/arch_dump.c
new file mode 100644
index 0000000000000000000000000000000000000000..14e9a8ce312fe571d50de505aa061904f383d920
--- /dev/null
+++ b/target/loongarch64/arch_dump.c
@@ -0,0 +1,175 @@
+/* Support for writing ELF notes for ARM architectures
+ *
+ * Copyright (C) 2015 Red Hat Inc.
+ *
+ * Author: Andrew Jones
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+#include "internal.h"
+
+/* struct user_pt_regs from arch/loongarch/include/uapi/asm/ptrace.h */
+struct loongarch_user_regs {
+ uint64_t gpr[32];
+ uint64_t lo;
+ uint64_t hi;
+ uint64_t csr_era;
+ uint64_t csr_badvaddr;
+ uint64_t csr_crmd;
+ uint64_t csr_ecfg;
+ uint64_t pad[7];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_user_regs) != 360);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct loongarch_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct loongarch_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_elf_prstatus) != 480);
+
+/* FP register state for the LoongArch ELF core-dump note.
+ *
+ * 32 64-bit FP registers followed by the 32-bit FIR and FCSR
+ * control registers. The QEMU_BUILD_BUG_ON below pins the packed
+ * size to 264 bytes so this reproduction stays in sync with the
+ * kernel's elf_fpregset_t layout used for NT_PRFPREG notes.
+ */
+
+struct loongarch_fpu_struct {
+ uint64_t fpr[32];
+ unsigned int fir;
+ unsigned int fcsr;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_fpu_struct) != 264);
+
+struct loongarch_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ union {
+ struct loongarch_elf_prstatus prstatus;
+ struct loongarch_fpu_struct fpu;
+ };
+} QEMU_PACKED;
+
+#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus)
+#define LOONGARCH_PRSTATUS_NOTE_SIZE (LOONGARCH_NOTE_HEADER_SIZE + \
+ sizeof(struct loongarch_elf_prstatus))
+#define LOONGARCH_PRFPREG_NOTE_SIZE (LOONGARCH_NOTE_HEADER_SIZE + \
+ sizeof(struct loongarch_fpu_struct))
+
+static void loongarch_note_init(struct loongarch_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f,
+ CPULOONGARCHState *env, int cpuid,
+ DumpState *s)
+{
+ struct loongarch_note note;
+ int ret, i;
+
+ loongarch_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu));
+
+ note.fpu.fcsr = cpu_to_dump32(s, env->active_fpu.fcsr0);
+
+ for (i = 0; i < 32; ++i) {
+ note.fpu.fpr[i] = cpu_to_dump64(s, env->active_fpu.fpr[i].fd);
+ }
+
+ ret = f(&note, LOONGARCH_PRFPREG_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, DumpState *opaque)
+{
+ struct loongarch_note note;
+ CPULOONGARCHState *env = &LOONGARCH_CPU(cs)->env;
+ DumpState *s = opaque;
+ int ret, i;
+
+ loongarch_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+ for (i = 0; i < 32; ++i) {
+ note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->active_tc.gpr[i]);
+ }
+ note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA);
+ note.prstatus.pr_reg.csr_badvaddr = cpu_to_dump64(s, env->CSR_BADV);
+ note.prstatus.pr_reg.csr_crmd = cpu_to_dump64(s, env->CSR_CRMD);
+ note.prstatus.pr_reg.csr_ecfg = cpu_to_dump64(s, env->CSR_ECFG);
+
+ ret = f(&note, LOONGARCH_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = loongarch_write_elf64_fprpreg(f, env, cpuid, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return ret;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ info->d_machine = EM_LOONGARCH;
+ info->d_endian = ELFDATA2LSB;
+ info->d_class = ELFCLASS64;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ size_t note_size = 0;
+
+ if (class == ELFCLASS64) {
+ note_size = LOONGARCH_PRSTATUS_NOTE_SIZE + LOONGARCH_PRFPREG_NOTE_SIZE;
+ }
+
+ return note_size * nr_cpus;
+}
+
diff --git a/target/loongarch64/cpu-csr.h b/target/loongarch64/cpu-csr.h
new file mode 100644
index 0000000000000000000000000000000000000000..e549bb46b6e6198fc0b5d759617bc3a3bf8e37bb
--- /dev/null
+++ b/target/loongarch64/cpu-csr.h
@@ -0,0 +1,869 @@
+#ifndef _CPU_CSR_H_
+#define _CPU_CSR_H_
+
+/* basic CSR register */
+#define LOONGARCH_CSR_CRMD 0x0 /* 32 current mode info */
+#define CSR_CRMD_DACM_SHIFT 7
+#define CSR_CRMD_DACM_WIDTH 2
+#define CSR_CRMD_DACM (0x3UL << CSR_CRMD_DACM_SHIFT)
+#define CSR_CRMD_DACF_SHIFT 5
+#define CSR_CRMD_DACF_WIDTH 2
+#define CSR_CRMD_DACF (0x3UL << CSR_CRMD_DACF_SHIFT)
+#define CSR_CRMD_PG_SHIFT 4
+#define CSR_CRMD_PG (0x1UL << CSR_CRMD_PG_SHIFT)
+#define CSR_CRMD_DA_SHIFT 3
+#define CSR_CRMD_DA (0x1UL << CSR_CRMD_DA_SHIFT)
+#define CSR_CRMD_IE_SHIFT 2
+#define CSR_CRMD_IE (0x1UL << CSR_CRMD_IE_SHIFT)
+#define CSR_CRMD_PLV_SHIFT 0
+#define CSR_CRMD_PLV_WIDTH 2
+#define CSR_CRMD_PLV (0x3UL << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_USER 3
+#define PLV_KERN 0
+#define PLV_MASK 0x3
+
+#define LOONGARCH_CSR_PRMD 0x1 /* 32 prev-exception mode info */
+#define CSR_PRMD_PIE_SHIFT 2
+#define CSR_PRMD_PIE (0x1UL << CSR_PRMD_PIE_SHIFT)
+#define CSR_PRMD_PPLV_SHIFT 0
+#define CSR_PRMD_PPLV_WIDTH 2
+#define CSR_PRMD_PPLV (0x3UL << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN 0x2 /* 32 coprocessor enable */
+#define CSR_EUEN_LBTEN_SHIFT 3
+#define CSR_EUEN_LBTEN (0x1UL << CSR_EUEN_LBTEN_SHIFT)
+#define CSR_EUEN_LASXEN_SHIFT 2
+#define CSR_EUEN_LASXEN (0x1UL << CSR_EUEN_LASXEN_SHIFT)
+#define CSR_EUEN_LSXEN_SHIFT 1
+#define CSR_EUEN_LSXEN (0x1UL << CSR_EUEN_LSXEN_SHIFT)
+#define CSR_EUEN_FPEN_SHIFT 0
+#define CSR_EUEN_FPEN (0x1UL << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC 0x3 /* 32 misc config */
+
+#define LOONGARCH_CSR_ECFG 0x4 /* 32 exception config */
+#define CSR_ECFG_VS_SHIFT 16
+#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS (0x7UL << CSR_ECFG_VS_SHIFT)
+#define CSR_ECFG_IM_SHIFT 0
+#define CSR_ECFG_IM_WIDTH 13
+#define CSR_ECFG_IM (0x1fffUL << CSR_ECFG_IM_SHIFT)
+
+#define CSR_ECFG_IPMASK 0x00001fff
+
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_ESUBCODE_SHIFT 22
+#define CSR_ESTAT_ESUBCODE_WIDTH 9
+#define CSR_ESTAT_ESUBCODE (0x1ffULL << CSR_ESTAT_ESUBCODE_SHIFT)
+#define CSR_ESTAT_EXC_SH 16
+#define CSR_ESTAT_EXC_WIDTH 5
+#define CSR_ESTAT_EXC (0x1fULL << CSR_ESTAT_EXC_SH)
+#define CSR_ESTAT_IS_SHIFT 0
+#define CSR_ESTAT_IS_WIDTH 15
+#define CSR_ESTAT_IS (0x7fffULL << CSR_ESTAT_IS_SHIFT)
+
+#define CSR_ESTAT_IPMASK 0x00001fff
+
+#define EXCODE_IP 64
+#define EXCCODE_RSV 0
+#define EXCCODE_TLBL 1
+#define EXCCODE_TLBS 2
+#define EXCCODE_TLBI 3
+#define EXCCODE_MOD 4
+#define EXCCODE_TLBRI 5
+#define EXCCODE_TLBXI 6
+#define EXCCODE_TLBPE 7
+#define EXCCODE_ADE 8
+#define EXCCODE_UNALIGN 9
+#define EXCCODE_OOB 10
+#define EXCCODE_SYS 11
+#define EXCCODE_BP 12
+#define EXCCODE_RI 13
+#define EXCCODE_IPE 14
+#define EXCCODE_FPDIS 15
+#define EXCCODE_LSXDIS 16
+#define EXCCODE_LASXDIS 17
+#define EXCCODE_FPE 18
+#define EXCCODE_WATCH 19
+#define EXCCODE_BTDIS 20
+#define EXCCODE_BTE 21
+#define EXCCODE_PSI 22
+#define EXCCODE_HYP 23
+#define EXCCODE_FC 24
+#define EXCCODE_SE 25
+
+#define LOONGARCH_CSR_ERA 0x6 /* 64 error PC */
+
+#define LOONGARCH_CSR_BADV 0x7 /* 64 bad virtual address */
+
+#define LOONGARCH_CSR_BADI 0x8 /* 32 bad instruction */
+
+#define LOONGARCH_CSR_EEPN 0xc /* 64 exception enter base address */
+#define LOONGARCH_EEPN_CPUID (0x3ffULL << 0)
+
+#define CU_FPE 1
+#define CU_LSXE (1 << 1)
+#define CU_LASXE (1 << 2)
+#define CU_LBTE (1 << 3)
+
+/* TLB related CSR register : start with TLB if no pagewalk */
+/* 32 TLB Index, EHINV, PageSize, is_gtlb */
+#define LOONGARCH_CSR_TLBIDX 0x10
+#define CSR_TLBIDX_EHINV_SHIFT 31
+#define CSR_TLBIDX_EHINV (0x1ULL << CSR_TLBIDX_EHINV_SHIFT)
+#define CSR_TLBIDX_PS_SHIFT 24
+#define CSR_TLBIDX_PS_WIDTH 6
+#define CSR_TLBIDX_PS (0x3fULL << CSR_TLBIDX_PS_SHIFT)
+#define CSR_TLBIDX_IDX_SHIFT 0
+#define CSR_TLBIDX_IDX_WIDTH 12
+#define CSR_TLBIDX_IDX (0xfffULL << CSR_TLBIDX_IDX_SHIFT)
+#define CSR_TLBIDX_SIZEM 0x3f000000
+#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT
+#define CSR_TLBIDX_IDXM 0xfff
+
+#define LOONGARCH_CSR_TLBEHI 0x11 /* 64 TLB EntryHi without ASID */
+
+#define LOONGARCH_CSR_TLBELO0 0x12 /* 64 TLB EntryLo0 */
+#define CSR_TLBLO0_RPLV_SHIFT 63
+#define CSR_TLBLO0_RPLV (0x1ULL << CSR_TLBLO0_RPLV_SHIFT)
+#define CSR_TLBLO0_XI_SHIFT 62
+#define CSR_TLBLO0_XI (0x1ULL << CSR_TLBLO0_XI_SHIFT)
+#define CSR_TLBLO0_RI_SHIFT 61
+#define CSR_TLBLO0_RI (0x1ULL << CSR_TLBLO0_RI_SHIFT)
+#define CSR_TLBLO0_PPN_SHIFT 12
+#define CSR_TLBLO0_PPN_WIDTH 36 /* ignore lower 12bits */
+#define CSR_TLBLO0_PPN (0xfffffffffULL << CSR_TLBLO0_PPN_SHIFT)
+#define CSR_TLBLO0_GLOBAL_SHIFT 6
+#define CSR_TLBLO0_GLOBAL (0x1ULL << CSR_TLBLO0_GLOBAL_SHIFT)
+#define CSR_TLBLO0_CCA_SHIFT 4
+#define CSR_TLBLO0_CCA_WIDTH 2
+#define CSR_TLBLO0_CCA (0x3ULL << CSR_TLBLO0_CCA_SHIFT)
+#define CSR_TLBLO0_PLV_SHIFT 2
+#define CSR_TLBLO0_PLV_WIDTH 2
+#define CSR_TLBLO0_PLV (0x3ULL << CSR_TLBLO0_PLV_SHIFT)
+#define CSR_TLBLO0_WE_SHIFT 1
+#define CSR_TLBLO0_WE (0x1ULL << CSR_TLBLO0_WE_SHIFT)
+#define CSR_TLBLO0_V_SHIFT 0
+#define CSR_TLBLO0_V (0x1ULL << CSR_TLBLO0_V_SHIFT)
+
+#define LOONGARCH_CSR_TLBELO1 0x13 /* 64 TLB EntryLo1 */
+#define CSR_TLBLO1_RPLV_SHIFT 63
+#define CSR_TLBLO1_RPLV (0x1ULL << CSR_TLBLO1_RPLV_SHIFT)
+#define CSR_TLBLO1_XI_SHIFT 62
+#define CSR_TLBLO1_XI (0x1ULL << CSR_TLBLO1_XI_SHIFT)
+#define CSR_TLBLO1_RI_SHIFT 61
+#define CSR_TLBLO1_RI (0x1ULL << CSR_TLBLO1_RI_SHIFT)
+#define CSR_TLBLO1_PPN_SHIFT 12
+#define CSR_TLBLO1_PPN_WIDTH 36 /* ignore lower 12bits */
+#define CSR_TLBLO1_PPN (0xfffffffffULL << CSR_TLBLO1_PPN_SHIFT)
+#define CSR_TLBLO1_GLOBAL_SHIFT 6
+#define CSR_TLBLO1_GLOBAL (0x1ULL << CSR_TLBLO1_GLOBAL_SHIFT)
+#define CSR_TLBLO1_CCA_SHIFT 4
+#define CSR_TLBLO1_CCA_WIDTH 2
+#define CSR_TLBLO1_CCA (0x3ULL << CSR_TLBLO1_CCA_SHIFT)
+#define CSR_TLBLO1_PLV_SHIFT 2
+#define CSR_TLBLO1_PLV_WIDTH 2
+#define CSR_TLBLO1_PLV (0x3ULL << CSR_TLBLO1_PLV_SHIFT)
+#define CSR_TLBLO1_WE_SHIFT 1
+#define CSR_TLBLO1_WE (0x1ULL << CSR_TLBLO1_WE_SHIFT)
+#define CSR_TLBLO1_V_SHIFT 0
+#define CSR_TLBLO1_V (0x1ULL << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_ENTRYLO_RI (1ULL << 61)
+#define LOONGARCH_ENTRYLO_XI (1ULL << 62)
+
+#define LOONGARCH_CSR_TLBWIRED 0x14 /* 32 TLB wired */
+#define LOONGARCH_CSR_GTLBC 0x15 /* guest-related TLB */
+#define CSR_GTLBC_RID_SHIFT 16
+#define CSR_GTLBC_RID_WIDTH 8
+#define CSR_GTLBC_RID (0xffULL << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TOTI_SHIFT 13
+#define CSR_GTLBC_TOTI (0x1ULL << CSR_GTLBC_TOTI_SHIFT)
+#define CSR_GTLBC_USERID_SHIFT 12
+#define CSR_GTLBC_USERID (0x1ULL << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_GMTLBSZ_SHIFT 0
+#define CSR_GTLBC_GMTLBSZ_WIDTH 6
+#define CSR_GTLBC_GMTLBSZ (0x3fULL << CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP 0x16 /* guest-related TLB */
+#define CSR_TRGP_RID_SHIFT 16
+#define CSR_TRGP_RID_WIDTH 8
+#define CSR_TRGP_RID (0xffULL << CSR_TRGP_RID_SHIFT)
+#define CSR_TRGP_GTLB_SHIFT 0
+#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID 0x18 /* 64 ASID */
+#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */
+#define CSR_ASID_BIT_WIDTH 8
+#define CSR_ASID_BIT (0xffULL << CSR_ASID_BIT_SHIFT)
+#define CSR_ASID_ASID_SHIFT 0
+#define CSR_ASID_ASID_WIDTH 10
+#define CSR_ASID_ASID (0x3ffULL << CSR_ASID_ASID_SHIFT)
+
+/* 64 page table base address when badv[47] = 0 */
+#define LOONGARCH_CSR_PGDL 0x19
+/* 64 page table base address when badv[47] = 1 */
+#define LOONGARCH_CSR_PGDH 0x1a
+
+#define LOONGARCH_CSR_PGD 0x1b /* 64 page table base */
+
+#define LOONGARCH_CSR_PWCTL0 0x1c /* 64 PWCtl0 */
+#define CSR_PWCTL0_PTEW_SHIFT 30
+#define CSR_PWCTL0_PTEW_WIDTH 2
+#define CSR_PWCTL0_PTEW (0x3ULL << CSR_PWCTL0_PTEW_SHIFT)
+#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25
+#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR1WIDTH (0x1fULL << CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR1BASE_SHIFT 20
+#define CSR_PWCTL0_DIR1BASE_WIDTH 5
+#define CSR_PWCTL0_DIR1BASE (0x1fULL << CSR_PWCTL0_DIR1BASE_SHIFT)
+#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15
+#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR0WIDTH (0x1fULL << CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR0BASE_SHIFT 10
+#define CSR_PWCTL0_DIR0BASE_WIDTH 5
+#define CSR_PWCTL0_DIR0BASE (0x1fULL << CSR_PWCTL0_DIR0BASE_SHIFT)
+#define CSR_PWCTL0_PTWIDTH_SHIFT 5
+#define CSR_PWCTL0_PTWIDTH_WIDTH 5
+#define CSR_PWCTL0_PTWIDTH (0x1fULL << CSR_PWCTL0_PTWIDTH_SHIFT)
+#define CSR_PWCTL0_PTBASE_SHIFT 0
+#define CSR_PWCTL0_PTBASE_WIDTH 5
+#define CSR_PWCTL0_PTBASE (0x1fULL << CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1 0x1d /* 64 PWCtl1 */
+#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18
+#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR3WIDTH (0x1fULL << CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR3BASE_SHIFT 12
+#define CSR_PWCTL1_DIR3BASE_WIDTH 5
+#define CSR_PWCTL1_DIR3BASE (0x1fULL << CSR_PWCTL1_DIR3BASE_SHIFT)
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6
+#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR2WIDTH (0x1fULL << CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR2BASE_SHIFT 0
+#define CSR_PWCTL1_DIR2BASE_WIDTH 5
+#define CSR_PWCTL1_DIR2BASE (0x1fULL << CSR_PWCTL1_DIR2BASE_SHIFT)
+
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e /* 64 */
+#define CSR_STLBPGSIZE_PS_WIDTH 6
+#define CSR_STLBPGSIZE_PS (0x3f)
+
+#define LOONGARCH_CSR_RVACFG 0x1f
+#define CSR_RVACFG_RDVA_WIDTH 4
+#define CSR_RVACFG_RDVA (0xf)
+
+/* read only CSR register : start with CPU */
+#define LOONGARCH_CSR_CPUID 0x20 /* 32 CPU core number */
+#define CSR_CPUID_CID_WIDTH 9
+#define CSR_CPUID_CID (0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1 0x21 /* 32 CPU info */
+#define CSR_CONF1_VSMAX_SHIFT 12
+#define CSR_CONF1_VSMAX_WIDTH 3
+#define CSR_CONF1_VSMAX (7ULL << CSR_CONF1_VSMAX_SHIFT)
+/* stable timer bits - 1, 0x2f = 47*/
+#define CSR_CONF1_TMRBITS_SHIFT 4
+#define CSR_CONF1_TMRBITS_WIDTH 8
+#define CSR_CONF1_TMRBITS (0xffULL << CSR_CONF1_TMRBITS_SHIFT)
+#define CSR_CONF1_KSNUM_SHIFT 0
+#define CSR_CONF1_KSNUM_WIDTH 4
+#define CSR_CONF1_KSNUM (0x8)
+
+#define LOONGARCH_CSR_PRCFG2 0x22
+#define CSR_CONF2_PGMASK_SUPP 0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3 0x23
+#define CSR_CONF3_STLBIDX_SHIFT 20
+#define CSR_CONF3_STLBIDX_WIDTH 6
+#define CSR_CONF3_STLBIDX (0x3fULL << CSR_CONF3_STLBIDX_SHIFT)
+#define CSR_STLB_SETS 256
+#define CSR_CONF3_STLBWAYS_SHIFT 12
+#define CSR_CONF3_STLBWAYS_WIDTH 8
+#define CSR_CONF3_STLBWAYS (0xffULL << CSR_CONF3_STLBWAYS_SHIFT)
+#define CSR_STLBWAYS_SIZE 8
+#define CSR_CONF3_MTLBSIZE_SHIFT 4
+#define CSR_CONF3_MTLBSIZE_WIDTH 8
+#define CSR_CONF3_MTLBSIZE (0xffULL << CSR_CONF3_MTLBSIZE_SHIFT)
+/* mean VTLB 64 index */
+#define CSR_MTLB_SIZE 64
+#define CSR_CONF3_TLBORG_SHIFT 0
+#define CSR_CONF3_TLBORG_WIDTH 4
+#define CSR_CONF3_TLBORG (0xfULL << CSR_CONF3_TLBORG_SHIFT)
+/* mean use MTLB+STLB */
+#define TLB_ORG 2
+
+/* Kscratch : start with KS */
+#define LOONGARCH_CSR_KS0 0x30 /* 64 */
+#define LOONGARCH_CSR_KS1 0x31 /* 64 */
+#define LOONGARCH_CSR_KS2 0x32 /* 64 */
+#define LOONGARCH_CSR_KS3 0x33 /* 64 */
+#define LOONGARCH_CSR_KS4 0x34 /* 64 */
+#define LOONGARCH_CSR_KS5 0x35 /* 64 */
+#define LOONGARCH_CSR_KS6 0x36 /* 64 */
+#define LOONGARCH_CSR_KS7 0x37 /* 64 */
+#define LOONGARCH_CSR_KS8 0x38 /* 64 */
+
+/* timer : start with TM */
+#define LOONGARCH_CSR_TMID 0x40 /* 32 timer ID */
+
+#define LOONGARCH_CSR_TCFG 0x41 /* 64 timer config */
+#define CSR_TCFG_VAL_SHIFT 2
+#define CSR_TCFG_VAL_WIDTH 48
+#define CSR_TCFG_VAL (0x3fffffffffffULL << CSR_TCFG_VAL_SHIFT)
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (0x1ULL << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (0x1)
+
+#define LOONGARCH_CSR_TVAL 0x42 /* 64 timer ticks remain */
+
+#define LOONGARCH_CSR_CNTC 0x43 /* 64 timer offset */
+
+#define LOONGARCH_CSR_TINTCLR 0x44 /* 64 timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* guest : start with GST */
+#define LOONGARCH_CSR_GSTAT 0x50 /* 32 basic guest info */
+#define CSR_GSTAT_GID_SHIFT 16
+#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID (0xffULL << CSR_GSTAT_GID_SHIFT)
+#define CSR_GSTAT_GIDBIT_SHIFT 4
+#define CSR_GSTAT_GIDBIT_WIDTH 6
+#define CSR_GSTAT_GIDBIT (0x3fULL << CSR_GSTAT_GIDBIT_SHIFT)
+#define CSR_GSTAT_PVM_SHIFT 1
+#define CSR_GSTAT_PVM (0x1ULL << CSR_GSTAT_PVM_SHIFT)
+#define CSR_GSTAT_VM_SHIFT 0
+#define CSR_GSTAT_VM (0x1ULL << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG 0x51 /* 32 guest config */
+#define CSR_GCFG_GPERF_SHIFT 24
+#define CSR_GCFG_GPERF_WIDTH 3
+#define CSR_GCFG_GPERF (0x7ULL << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GCI_SHIFT 20
+#define CSR_GCFG_GCI_WIDTH 2
+#define CSR_GCFG_GCI (0x3ULL << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_ALL (0x0ULL << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_HIT (0x1ULL << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCI_SECURE (0x2ULL << CSR_GCFG_GCI_SHIFT)
+#define CSR_GCFG_GCIP_SHIFT 16
+#define CSR_GCFG_GCIP (0xfULL << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_ALL (0x1ULL << CSR_GCFG_GCIP_SHIFT)
+#define CSR_GCFG_GCIP_HIT (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 1))
+#define CSR_GCFG_GCIP_SECURE (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 2))
+#define CSR_GCFG_TORU_SHIFT 15
+#define CSR_GCFG_TORU (0x1ULL << CSR_GCFG_TORU_SHIFT)
+#define CSR_GCFG_TORUP_SHIFT 14
+#define CSR_GCFG_TORUP (0x1ULL << CSR_GCFG_TORUP_SHIFT)
+#define CSR_GCFG_TOP_SHIFT 13
+#define CSR_GCFG_TOP (0x1ULL << CSR_GCFG_TOP_SHIFT)
+#define CSR_GCFG_TOPP_SHIFT 12
+#define CSR_GCFG_TOPP (0x1ULL << CSR_GCFG_TOPP_SHIFT)
+#define CSR_GCFG_TOE_SHIFT 11
+#define CSR_GCFG_TOE (0x1ULL << CSR_GCFG_TOE_SHIFT)
+#define CSR_GCFG_TOEP_SHIFT 10
+#define CSR_GCFG_TOEP (0x1ULL << CSR_GCFG_TOEP_SHIFT)
+#define CSR_GCFG_TIT_SHIFT 9
+#define CSR_GCFG_TIT (0x1ULL << CSR_GCFG_TIT_SHIFT)
+#define CSR_GCFG_TITP_SHIFT 8
+#define CSR_GCFG_TITP (0x1ULL << CSR_GCFG_TITP_SHIFT)
+#define CSR_GCFG_SIT_SHIFT 7
+#define CSR_GCFG_SIT (0x1ULL << CSR_GCFG_SIT_SHIFT)
+#define CSR_GCFG_SITP_SHIFT 6
+#define CSR_GCFG_SITP (0x1ULL << CSR_GCFG_SITP_SHIFT)
+#define CSR_GCFG_CACTRL_SHIFT 4
+#define CSR_GCFG_CACTRL_WIDTH 2
+#define CSR_GCFG_CACTRL (0x3ULL << CSR_GCFG_CACTRL_SHIFT)
+#define CSR_GCFG_CACTRL_GUEST (0x0ULL << CSR_GCFG_CACTRL_SHIFT)
+#define CSR_GCFG_CACTRL_ROOT (0x1ULL << CSR_GCFG_CACTRL_SHIFT)
+#define CSR_GCFG_CACTRL_NEST (0x2ULL << CSR_GCFG_CACTRL_SHIFT)
+#define CSR_GCFG_CCCP_WIDTH 4
+#define CSR_GCFG_CCCP (0xf)
+#define CSR_GCFG_CCCP_GUEST (0x1ULL << 0)
+#define CSR_GCFG_CCCP_ROOT (0x1ULL << 1)
+#define CSR_GCFG_CCCP_NEST (0x1ULL << 2)
+
+#define LOONGARCH_CSR_GINTC 0x52 /* 64 guest exception control */
+#define CSR_GINTC_HC_SHIFT 16
+#define CSR_GINTC_HC_WIDTH 8
+#define CSR_GINTC_HC (0xffULL << CSR_GINTC_HC_SHIFT)
+#define CSR_GINTC_PIP_SHIFT 8
+#define CSR_GINTC_PIP_WIDTH 8
+#define CSR_GINTC_PIP (0xffULL << CSR_GINTC_PIP_SHIFT)
+#define CSR_GINTC_VIP_SHIFT 0
+#define CSR_GINTC_VIP_WIDTH 8
+#define CSR_GINTC_VIP (0xff)
+
+#define LOONGARCH_CSR_GCNTC 0x53 /* 64 guest timer offset */
+
+/* LLBCTL */
+#define LOONGARCH_CSR_LLBCTL 0x60 /* 32 csr number to be changed */
+#define CSR_LLBCTL_ROLLB_SHIFT 0
+#define CSR_LLBCTL_ROLLB (1ULL << CSR_LLBCTL_ROLLB_SHIFT)
+#define CSR_LLBCTL_WCLLB_SHIFT 1
+#define CSR_LLBCTL_WCLLB (1ULL << CSR_LLBCTL_WCLLB_SHIFT)
+#define CSR_LLBCTL_KLO_SHIFT 2
+#define CSR_LLBCTL_KLO (1ULL << CSR_LLBCTL_KLO_SHIFT)
+
+/* implement dependent */
+#define LOONGARCH_CSR_IMPCTL1 0x80 /* 32 loongarch config */
+#define CSR_MISPEC_SHIFT 20
+#define CSR_MISPEC_WIDTH 8
+#define CSR_MISPEC (0xffULL << CSR_MISPEC_SHIFT)
+#define CSR_SSEN_SHIFT 18
+#define CSR_SSEN (1ULL << CSR_SSEN_SHIFT)
+#define CSR_SCRAND_SHIFT 17
+#define CSR_SCRAND (1ULL << CSR_SCRAND_SHIFT)
+#define CSR_LLEXCL_SHIFT 16
+#define CSR_LLEXCL (1ULL << CSR_LLEXCL_SHIFT)
+#define CSR_DISVC_SHIFT 15
+#define CSR_DISVC (1ULL << CSR_DISVC_SHIFT)
+#define CSR_VCLRU_SHIFT 14
+#define CSR_VCLRU (1ULL << CSR_VCLRU_SHIFT)
+#define CSR_DCLRU_SHIFT 13
+#define CSR_DCLRU (1ULL << CSR_DCLRU_SHIFT)
+#define CSR_FASTLDQ_SHIFT 12
+#define CSR_FASTLDQ (1ULL << CSR_FASTLDQ_SHIFT)
+#define CSR_USERCAC_SHIFT 11
+#define CSR_USERCAC (1ULL << CSR_USERCAC_SHIFT)
+#define CSR_ANTI_MISPEC_SHIFT 10
+#define CSR_ANTI_MISPEC (1ULL << CSR_ANTI_MISPEC_SHIFT)
+#define CSR_ANTI_FLUSHSFB_SHIFT 9
+#define CSR_ANTI_FLUSHSFB (1ULL << CSR_ANTI_FLUSHSFB_SHIFT)
+#define CSR_STFILL_SHIFT 8
+#define CSR_STFILL (1ULL << CSR_STFILL_SHIFT)
+#define CSR_LIFEP_SHIFT 7
+#define CSR_LIFEP (1ULL << CSR_LIFEP_SHIFT)
+#define CSR_LLSYNC_SHIFT 6
+#define CSR_LLSYNC (1ULL << CSR_LLSYNC_SHIFT)
+#define CSR_BRBTDIS_SHIFT 5
+#define CSR_BRBTDIS (1ULL << CSR_BRBTDIS_SHIFT)
+#define CSR_RASDIS_SHIFT 4
+#define CSR_RASDIS (1ULL << CSR_RASDIS_SHIFT)
+#define CSR_STPRE_SHIFT 2
+#define CSR_STPRE_WIDTH 2
+#define CSR_STPRE (3ULL << CSR_STPRE_SHIFT)
+#define CSR_INSTPRE_SHIFT 1
+#define CSR_INSTPRE (1ULL << CSR_INSTPRE_SHIFT)
+#define CSR_DATAPRE_SHIFT 0
+#define CSR_DATAPRE (1ULL << CSR_DATAPRE_SHIFT)
+
+#define LOONGARCH_CSR_IMPCTL2 0x81 /* 32 Flush */
+#define CSR_IMPCTL2_MTLB_SHIFT 0
+#define CSR_IMPCTL2_MTLB (1ULL << CSR_IMPCTL2_MTLB_SHIFT)
+#define CSR_IMPCTL2_STLB_SHIFT 1
+#define CSR_IMPCTL2_STLB (1ULL << CSR_IMPCTL2_STLB_SHIFT)
+#define CSR_IMPCTL2_DTLB_SHIFT 2
+#define CSR_IMPCTL2_DTLB (1ULL << CSR_IMPCTL2_DTLB_SHIFT)
+#define CSR_IMPCTL2_ITLB_SHIFT 3
+#define CSR_IMPCTL2_ITLB (1ULL << CSR_IMPCTL2_ITLB_SHIFT)
+#define CSR_IMPCTL2_BTAC_SHIFT 4
+#define CSR_IMPCTL2_BTAC (1ULL << CSR_IMPCTL2_BTAC_SHIFT)
+
+#define LOONGARCH_FLUSH_VTLB 1
+#define LOONGARCH_FLUSH_FTLB (1 << 1)
+#define LOONGARCH_FLUSH_DTLB (1 << 2)
+#define LOONGARCH_FLUSH_ITLB (1 << 3)
+#define LOONGARCH_FLUSH_BTAC (1 << 4)
+
+#define LOONGARCH_CSR_GNMI 0x82
+
+/* TLB Refill Only */
+#define LOONGARCH_CSR_TLBRENT 0x88 /* 64 TLB refill exception address */
+#define LOONGARCH_CSR_TLBRBADV 0x89 /* 64 TLB refill badvaddr */
+#define LOONGARCH_CSR_TLBRERA 0x8a /* 64 TLB refill ERA */
+#define LOONGARCH_CSR_TLBRSAVE 0x8b /* 64 KScratch for TLB refill */
+#define LOONGARCH_CSR_TLBRELO0 0x8c /* 64 TLB refill entrylo0 */
+#define LOONGARCH_CSR_TLBRELO1 0x8d /* 64 TLB refill entrylo1 */
+#define LOONGARCH_CSR_TLBREHI 0x8e /* 64 TLB refill entryhi */
+#define LOONGARCH_CSR_TLBRPRMD 0x8f /* 64 TLB refill mode info */
+
+/* error related */
+#define LOONGARCH_CSR_ERRCTL 0x90 /* 32 ERRCTL */
+#define LOONGARCH_CSR_ERRINFO 0x91
+#define LOONGARCH_CSR_ERRINFO1 0x92
+#define LOONGARCH_CSR_ERRENT 0x93 /* 64 error exception base */
+#define LOONGARCH_CSR_ERRERA 0x94 /* 64 error exception PC */
+#define LOONGARCH_CSR_ERRSAVE 0x95 /* 64 KScratch for error exception */
+
+#define LOONGARCH_CSR_CTAG 0x98 /* 64 TagLo + TagHi */
+
+/* direct map windows */
+#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */
+#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */
+#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
+#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
+#define CSR_DMW_PLV0 0x1
+#define CSR_DMW_PLV1 0x2
+#define CSR_DMW_PLV2 0x4
+#define CSR_DMW_PLV3 0x8
+#define CSR_DMW_BASE_SH 48
+#define dmwin_va2pa(va) \
+ (va & (((unsigned long)1 << CSR_DMW_BASE_SH) - 1))
+
+/* performance counter */
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */
+#define CSR_PERFCTRL_PLV0 (1ULL << 16)
+#define CSR_PERFCTRL_PLV1 (1ULL << 17)
+#define CSR_PERFCTRL_PLV2 (1ULL << 18)
+#define CSR_PERFCTRL_PLV3 (1ULL << 19)
+#define CSR_PERFCTRL_IE (1ULL << 20)
+#define CSR_PERFCTRL_EVENT 0x3ff
+
+/* debug */
+#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */
+#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */
+
+#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */
+#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */
+#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */
+#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */
+
+#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */
+#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */
+#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */
+#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */
+
+#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */
+#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */
+#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */
+#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */
+
+#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */
+#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */
+#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */
+#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */
+
+#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
+#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
+
+#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */
+#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */
+#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */
+#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */
+#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */
+#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */
+#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */
+#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */
+
+#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */
+#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */
+#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */
+#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */
+
+#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */
+#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */
+#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */
+#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */
+
+#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */
+#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */
+#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */
+#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */
+
+#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */
+#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */
+#define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */
+#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */
+
+#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */
+#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */
+#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */
+#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */
+
+#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */
+#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */
+#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */
+#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
+
+#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
+#define CSR_DEBUG_DM 0
+#define CSR_DEBUG_DMVER 1
+#define CSR_DEBUG_DINT 8
+#define CSR_DEBUG_DBP 9
+#define CSR_DEBUG_DIB 10
+#define CSR_DEBUG_DDB 11
+
+#define LOONGARCH_CSR_DERA 0x501 /* debug era */
+#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
+
+#define LOONGARCH_CSR_PRID 0xc0 /* 32 LOONGARCH CP0 PRID */
+
+#define LOONGARCH_CPUCFG0 0x0
+#define CPUCFG0_3A5000_PRID 0x0014c010
+
+#define LOONGARCH_CPUCFG1 0x1
+#define CPUCFG1_ISGR32 BIT(0)
+#define CPUCFG1_ISGR64 BIT(1)
+#define CPUCFG1_PAGING BIT(2)
+#define CPUCFG1_IOCSR BIT(3)
+#define CPUCFG1_PABITS (47 << 4)
+#define CPUCFG1_VABITS (47 << 12)
+#define CPUCFG1_UAL BIT(20)
+#define CPUCFG1_RI BIT(21)
+#define CPUCFG1_XI BIT(22)
+#define CPUCFG1_RPLV BIT(23)
+#define CPUCFG1_HUGEPG BIT(24)
+#define CPUCFG1_IOCSRBRD BIT(25)
+#define CPUCFG1_MSGINT BIT(26)
+
+#define LOONGARCH_CPUCFG2 0x2
+#define CPUCFG2_FP BIT(0)
+#define CPUCFG2_FPSP BIT(1)
+#define CPUCFG2_FPDP BIT(2)
+#define CPUCFG2_FPVERS (0 << 3)
+#define CPUCFG2_LSX BIT(6)
+#define CPUCFG2_LASX BIT(7)
+#define CPUCFG2_COMPLEX BIT(8)
+#define CPUCFG2_CRYPTO BIT(9)
+#define CPUCFG2_LVZP BIT(10)
+#define CPUCFG2_LVZVER (0 << 11)
+#define CPUCFG2_LLFTP BIT(14)
+#define CPUCFG2_LLFTPREV (1 << 15)
+#define CPUCFG2_X86BT BIT(18)
+#define CPUCFG2_ARMBT BIT(19)
+#define CPUCFG2_MIPSBT BIT(20)
+#define CPUCFG2_LSPW BIT(21)
+#define CPUCFG2_LAM BIT(22)
+
+#define LOONGARCH_CPUCFG3 0x3
+#define CPUCFG3_CCDMA BIT(0)
+#define CPUCFG3_SFB BIT(1)
+#define CPUCFG3_UCACC BIT(2)
+#define CPUCFG3_LLEXC BIT(3)
+#define CPUCFG3_SCDLY BIT(4)
+#define CPUCFG3_LLDBAR BIT(5)
+#define CPUCFG3_ITLBT BIT(6)
+#define CPUCFG3_ICACHET BIT(7)
+#define CPUCFG3_SPW_LVL (4 << 8)
+#define CPUCFG3_SPW_HG_HF BIT(11)
+#define CPUCFG3_RVA BIT(12)
+#define CPUCFG3_RVAMAX (7 << 13)
+
+#define LOONGARCH_CPUCFG4 0x4
+#define CCFREQ_100M 100000000 /* 100M */
+
+#define LOONGARCH_CPUCFG5 0x5
+#define CPUCFG5_CCMUL 1
+#define CPUCFG5_CCDIV (1 << 16)
+
+#define LOONGARCH_CPUCFG6 0x6
+#define CPUCFG6_PMP BIT(0)
+#define CPUCFG6_PAMVER (1 << 1)
+#define CPUCFG6_PMNUM (3 << 4)
+#define CPUCFG6_PMBITS (63 << 8)
+#define CPUCFG6_UPM BIT(14)
+
+#define LOONGARCH_CPUCFG16 0x10
+#define CPUCFG16_L1_IUPRE BIT(0)
+#define CPUCFG16_L1_UNIFY BIT(1)
+#define CPUCFG16_L1_DPRE BIT(2)
+#define CPUCFG16_L2_IUPRE BIT(3)
+#define CPUCFG16_L2_IUUNIFY BIT(4)
+#define CPUCFG16_L2_IUPRIV BIT(5)
+#define CPUCFG16_L2_IUINCL BIT(6)
+#define CPUCFG16_L2_DPRE BIT(7)
+#define CPUCFG16_L2_DPRIV BIT(8)
+#define CPUCFG16_L2_DINCL BIT(9)
+#define CPUCFG16_L3_IUPRE BIT(10)
+#define CPUCFG16_L3_IUUNIFY BIT(11)
+#define CPUCFG16_L3_IUPRIV BIT(12)
+#define CPUCFG16_L3_IUINCL BIT(13)
+#define CPUCFG16_L3_DPRE BIT(14)
+#define CPUCFG16_L3_DPRIV BIT(15)
+#define CPUCFG16_L3_DINCL BIT(16)
+
+#define LOONGARCH_CPUCFG17 0x11
+#define CPUCFG17_L1I_WAYS_M (3 << 0)
+#define CPUCFG17_L1I_SETS_M (8 << 16)
+#define CPUCFG17_L1I_SIZE_M (6 << 24)
+
+#define LOONGARCH_CPUCFG18 0x12
+#define CPUCFG18_L1D_WAYS_M (3 << 0)
+#define CPUCFG18_L1D_SETS_M (8 << 16)
+#define CPUCFG18_L1D_SIZE_M (6 << 24)
+
+#define LOONGARCH_CPUCFG19 0x13
+#define CPUCFG19_L2_WAYS_M (0xf << 0)
+#define CPUCFG19_L2_SETS_M (8 << 16)
+#define CPUCFG19_L2_SIZE_M (6 << 24)
+
+#define LOONGARCH_CPUCFG20 0x14
+#define CPUCFG20_L3_WAYS_M (0xf << 0)
+#define CPUCFG20_L3_SETS_M (0xe << 16)
+#define CPUCFG20_L3_SIZE_M (0x6 << 24)
+
+#define LOONGARCH_PAGE_HUGE 0x40
+#define LOONGARCH_HUGE_GLOBAL 0x1000
+#define LOONGARCH_HUGE_GLOBAL_SH 12
+
+/* All CSR register
+ *
+ * default value in target/loongarch/cpu.c
+ * reset function in target/loongarch/translate.c:cpu_state_reset()
+ *
+ * This macro will be used only twice.
+ * > In target/loongarch/cpu.h:CPULOONGARCHState
+ * > In target/loongarch/internal.h:loongarch_def_t
+ *
+ * helper_function to rd/wr:
+ * > declare in target/loongarch/helper.h
+ * > realize in target/loongarch/op_helper.c
+ *
+ * during translate:
+ * > gen_csr_rdl()
+ * > gen_csr_wrl()
+ * > gen_csr_rdq()
+ * > gen_csr_wrq()
+ */
+#define CPU_LOONGARCH_CSR \
+ uint64_t CSR_CRMD; \
+ uint64_t CSR_PRMD; \
+ uint64_t CSR_EUEN; \
+ uint64_t CSR_MISC; \
+ uint64_t CSR_ECFG; \
+ uint64_t CSR_ESTAT; \
+ uint64_t CSR_ERA; \
+ uint64_t CSR_BADV; \
+ uint64_t CSR_BADI; \
+ uint64_t CSR_EEPN; \
+ uint64_t CSR_TLBIDX; \
+ uint64_t CSR_TLBEHI; \
+ uint64_t CSR_TLBELO0; \
+ uint64_t CSR_TLBELO1; \
+ uint64_t CSR_TLBWIRED; \
+ uint64_t CSR_GTLBC; \
+ uint64_t CSR_TRGP; \
+ uint64_t CSR_ASID; \
+ uint64_t CSR_PGDL; \
+ uint64_t CSR_PGDH; \
+ uint64_t CSR_PGD; \
+ uint64_t CSR_PWCTL0; \
+ uint64_t CSR_PWCTL1; \
+ uint64_t CSR_STLBPGSIZE; \
+ uint64_t CSR_RVACFG; \
+ uint64_t CSR_CPUID; \
+ uint64_t CSR_PRCFG1; \
+ uint64_t CSR_PRCFG2; \
+ uint64_t CSR_PRCFG3; \
+ uint64_t CSR_KS0; \
+ uint64_t CSR_KS1; \
+ uint64_t CSR_KS2; \
+ uint64_t CSR_KS3; \
+ uint64_t CSR_KS4; \
+ uint64_t CSR_KS5; \
+ uint64_t CSR_KS6; \
+ uint64_t CSR_KS7; \
+ uint64_t CSR_KS8; \
+ uint64_t CSR_TMID; \
+ uint64_t CSR_TCFG; \
+ uint64_t CSR_TVAL; \
+ uint64_t CSR_CNTC; \
+ uint64_t CSR_TINTCLR; \
+ uint64_t CSR_GSTAT; \
+ uint64_t CSR_GCFG; \
+ uint64_t CSR_GINTC; \
+ uint64_t CSR_GCNTC; \
+ uint64_t CSR_LLBCTL; \
+ uint64_t CSR_IMPCTL1; \
+ uint64_t CSR_IMPCTL2; \
+ uint64_t CSR_GNMI; \
+ uint64_t CSR_TLBRENT; \
+ uint64_t CSR_TLBRBADV; \
+ uint64_t CSR_TLBRERA; \
+ uint64_t CSR_TLBRSAVE; \
+ uint64_t CSR_TLBRELO0; \
+ uint64_t CSR_TLBRELO1; \
+ uint64_t CSR_TLBREHI; \
+ uint64_t CSR_TLBRPRMD; \
+ uint64_t CSR_ERRCTL; \
+ uint64_t CSR_ERRINFO; \
+ uint64_t CSR_ERRINFO1; \
+ uint64_t CSR_ERRENT; \
+ uint64_t CSR_ERRERA; \
+ uint64_t CSR_ERRSAVE; \
+ uint64_t CSR_CTAG; \
+ uint64_t CSR_DMWIN0; \
+ uint64_t CSR_DMWIN1; \
+ uint64_t CSR_DMWIN2; \
+ uint64_t CSR_DMWIN3; \
+ uint64_t CSR_PERFCTRL0; \
+ uint64_t CSR_PERFCNTR0; \
+ uint64_t CSR_PERFCTRL1; \
+ uint64_t CSR_PERFCNTR1; \
+ uint64_t CSR_PERFCTRL2; \
+ uint64_t CSR_PERFCNTR2; \
+ uint64_t CSR_PERFCTRL3; \
+ uint64_t CSR_PERFCNTR3; \
+ uint64_t CSR_MWPC; \
+ uint64_t CSR_MWPS; \
+ uint64_t CSR_DB0ADDR; \
+ uint64_t CSR_DB0MASK; \
+ uint64_t CSR_DB0CTL; \
+ uint64_t CSR_DB0ASID; \
+ uint64_t CSR_DB1ADDR; \
+ uint64_t CSR_DB1MASK; \
+ uint64_t CSR_DB1CTL; \
+ uint64_t CSR_DB1ASID; \
+ uint64_t CSR_DB2ADDR; \
+ uint64_t CSR_DB2MASK; \
+ uint64_t CSR_DB2CTL; \
+ uint64_t CSR_DB2ASID; \
+ uint64_t CSR_DB3ADDR; \
+ uint64_t CSR_DB3MASK; \
+ uint64_t CSR_DB3CTL; \
+ uint64_t CSR_DB3ASID; \
+ uint64_t CSR_FWPC; \
+ uint64_t CSR_FWPS; \
+ uint64_t CSR_IB0ADDR; \
+ uint64_t CSR_IB0MASK; \
+ uint64_t CSR_IB0CTL; \
+ uint64_t CSR_IB0ASID; \
+ uint64_t CSR_IB1ADDR; \
+ uint64_t CSR_IB1MASK; \
+ uint64_t CSR_IB1CTL; \
+ uint64_t CSR_IB1ASID; \
+ uint64_t CSR_IB2ADDR; \
+ uint64_t CSR_IB2MASK; \
+ uint64_t CSR_IB2CTL; \
+ uint64_t CSR_IB2ASID; \
+ uint64_t CSR_IB3ADDR; \
+ uint64_t CSR_IB3MASK; \
+ uint64_t CSR_IB3CTL; \
+ uint64_t CSR_IB3ASID; \
+ uint64_t CSR_IB4ADDR; \
+ uint64_t CSR_IB4MASK; \
+ uint64_t CSR_IB4CTL; \
+ uint64_t CSR_IB4ASID; \
+ uint64_t CSR_IB5ADDR; \
+ uint64_t CSR_IB5MASK; \
+ uint64_t CSR_IB5CTL; \
+ uint64_t CSR_IB5ASID; \
+ uint64_t CSR_IB6ADDR; \
+ uint64_t CSR_IB6MASK; \
+ uint64_t CSR_IB6CTL; \
+ uint64_t CSR_IB6ASID; \
+ uint64_t CSR_IB7ADDR; \
+ uint64_t CSR_IB7MASK; \
+ uint64_t CSR_IB7CTL; \
+ uint64_t CSR_IB7ASID; \
+ uint64_t CSR_DEBUG; \
+ uint64_t CSR_DERA; \
+ uint64_t CSR_DESAVE; \
+
+#define LOONGARCH_CSR_32(_R, _S) \
+ (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
+
+#define LOONGARCH_CSR_64(_R, _S) \
+ (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
+
+#define KVM_IOC_CSRID(id) LOONGARCH_CSR_64(id, 0)
+
+#endif
diff --git a/target/loongarch64/cpu-param.h b/target/loongarch64/cpu-param.h
new file mode 100644
index 0000000000000000000000000000000000000000..24ca458af0cbe50874e197cab3d0c19abb123b3d
--- /dev/null
+++ b/target/loongarch64/cpu-param.h
@@ -0,0 +1,30 @@
+#ifndef CPU_PARAM_H
+#define CPU_PARAM_H
+
+/* If we want to use host float regs... */
+/* #define USE_HOST_FLOAT_REGS */
+
+/* Real pages are variable size... */
+#define TARGET_PAGE_BITS 14
+
+#define LOONGARCH_TLB_MAX 2112
+
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+
+/*
+ * bit definitions for insn_flags (ISAs/ASEs flags)
+ * ------------------------------------------------
+ */
+#define ISA_LARCH32 0x00000001ULL
+#define ISA_LARCH64 0x00000002ULL
+#define INSN_LOONGARCH 0x00010000ULL
+
+#define CPU_LARCH32 (ISA_LARCH32)
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
+
+#define NB_MMU_MODES 4
+
+#endif /* CPU_PARAM_H */
+
diff --git a/target/loongarch64/cpu-qom.h b/target/loongarch64/cpu-qom.h
new file mode 100644
index 0000000000000000000000000000000000000000..ee9c1de57103eec69088412e96645205cea815af
--- /dev/null
+++ b/target/loongarch64/cpu-qom.h
@@ -0,0 +1,54 @@
+/*
+ * QEMU LOONGARCH CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <https://www.gnu.org/licenses/>.
+ *
+ */
+#ifndef QEMU_LOONGARCH_CPU_QOM_H
+#define QEMU_LOONGARCH_CPU_QOM_H
+
+#include "hw/core/cpu.h"
+
+#define TYPE_LOONGARCH_CPU "loongarch-cpu"
+
+#define LOONGARCH_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(LOONGARCHCPUClass, (klass), TYPE_LOONGARCH_CPU)
+#define LOONGARCH_CPU(obj) \
+ OBJECT_CHECK(LOONGARCHCPU, (obj), TYPE_LOONGARCH_CPU)
+#define LOONGARCH_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(LOONGARCHCPUClass, (obj), TYPE_LOONGARCH_CPU)
+
+/**
+ * LOONGARCHCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A LOONGARCH CPU model.
+ */
+typedef struct LOONGARCHCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ DeviceReset parent_reset;
+ const struct loongarch_def_t *cpu_def;
+} LOONGARCHCPUClass;
+
+typedef struct LOONGARCHCPU LOONGARCHCPU;
+
+#endif
diff --git a/target/loongarch64/cpu.c b/target/loongarch64/cpu.c
new file mode 100644
index 0000000000000000000000000000000000000000..a4535d34a6ace1f65c82e8a3d1b77b651e5d08d4
--- /dev/null
+++ b/target/loongarch64/cpu.c
@@ -0,0 +1,576 @@
+/*
+ * QEMU LOONGARCH CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <https://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "cpu.h"
+#include "internal.h"
+#include "kvm_larch.h"
+#include "qemu-common.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/kvm.h"
+#include "exec/exec-all.h"
+#include "sysemu/arch_init.h"
+#include "cpu-csr.h"
+#include "qemu/qemu-print.h"
+#include "qapi/qapi-commands-machine-target.h"
+#ifdef CONFIG_TCG
+#include "hw/core/tcg-cpu-ops.h"
+#endif /* CONFIG_TCG */
+
+#define LOONGARCH_CONFIG1 \
+((0x8 << CSR_CONF1_KSNUM_SHIFT) | (0x2f << CSR_CONF1_TMRBITS_SHIFT) | \
+ (0x7 << CSR_CONF1_VSMAX_SHIFT))
+
+#define LOONGARCH_CONFIG3 \
+((0x2 << CSR_CONF3_TLBORG_SHIFT) | (0x3f << CSR_CONF3_MTLBSIZE_SHIFT) | \
+ (0x7 << CSR_CONF3_STLBWAYS_SHIFT) | (0x8 << CSR_CONF3_STLBIDX_SHIFT))
+
+/*****************************************************************************/
+/* LOONGARCH CPU definitions */
+const loongarch_def_t loongarch_defs[] = {
+ {
+ .name = "Loongson-3A5000",
+
+ /* for LoongISA CSR */
+ .CSR_PRCFG1 = LOONGARCH_CONFIG1,
+ .CSR_PRCFG2 = 0x3ffff000,
+ .CSR_PRCFG3 = LOONGARCH_CONFIG3,
+ .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) |
+ (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) |
+ (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT) ,
+ .CSR_ECFG = 0x7 << 16,
+ .CSR_STLBPGSIZE = 0xe,
+ .CSR_RVACFG = 0x0,
+ .CSR_ASID = 0xa0000,
+ .FCSR0 = 0x0,
+ .FCSR0_rw_bitmask = 0x1f1f03df,
+ .PABITS = 48,
+ .insn_flags = CPU_LARCH64 | INSN_LOONGARCH,
+ .mmu_type = MMU_TYPE_LS3A5K,
+ },
+ {
+ .name = "host",
+
+ /* for LoongISA CSR */
+ .CSR_PRCFG1 = LOONGARCH_CONFIG1,
+ .CSR_PRCFG2 = 0x3ffff000,
+ .CSR_PRCFG3 = LOONGARCH_CONFIG3,
+ .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) |
+ (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) |
+ (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT) ,
+ .CSR_ECFG = 0x7 << 16,
+ .CSR_STLBPGSIZE = 0xe,
+ .CSR_RVACFG = 0x0,
+ .FCSR0 = 0x0,
+ .FCSR0_rw_bitmask = 0x1f1f03df,
+ .PABITS = 48,
+ .insn_flags = CPU_LARCH64 | INSN_LOONGARCH,
+ .mmu_type = MMU_TYPE_LS3A5K,
+ },
+};
+const int loongarch_defs_number = ARRAY_SIZE(loongarch_defs);
+
+void loongarch_cpu_list(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) {
+ qemu_printf("LOONGARCH '%s'\n",
+ loongarch_defs[i].name);
+ }
+}
+
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ const loongarch_def_t *def;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) {
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+
+ def = &loongarch_defs[i];
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strdup(def->name);
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = cpu_list;
+ cpu_list = entry;
+ }
+
+ return cpu_list;
+}
+
+static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+
+ env->active_tc.PC = value & ~(target_ulong)1;
+}
+
+static bool loongarch_cpu_has_work(CPUState *cs)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+ bool has_work = false;
+
+ /* It is implementation dependent if non-enabled
+ interrupts wake-up the CPU, however most of the implementations only
+ check for interrupts that can be taken. */
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_loongarch_hw_interrupts_pending(env)) {
+ has_work = true;
+ }
+
+ return has_work;
+}
+
+const char * const regnames[] = {
+ "r0", "ra", "tp", "sp", "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7", "t8", "x0", "fp", "s0",
+ "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8",
+};
+
+const char * const fregnames[] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+};
+
+static void fpu_dump_state(CPULOONGARCHState *env, FILE *f,
+ fprintf_function fpu_fprintf, int flags)
+{
+ int i;
+ int is_fpu64 = 1;
+
+#define printfpr(fp) \
+ do { \
+ if (is_fpu64) \
+ fpu_fprintf(f, "w:%08x d:%016" PRIx64 \
+ " fd:%13g fs:%13g psu: %13g\n", \
+ (fp)->w[FP_ENDIAN_IDX], (fp)->d, \
+ (double)(fp)->fd, \
+ (double)(fp)->fs[FP_ENDIAN_IDX], \
+ (double)(fp)->fs[!FP_ENDIAN_IDX]); \
+ else { \
+ fpr_t tmp; \
+ tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \
+ tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \
+ fpu_fprintf(f, "w:%08x d:%016" PRIx64 \
+ " fd:%13g fs:%13g psu:%13g\n", \
+ tmp.w[FP_ENDIAN_IDX], tmp.d, \
+ (double)tmp.fd, \
+ (double)tmp.fs[FP_ENDIAN_IDX], \
+ (double)tmp.fs[!FP_ENDIAN_IDX]); \
+ } \
+ } while (0)
+
+
+ fpu_fprintf(f, "FCSR0 0x%08x SR.FR %d fp_status 0x%02x\n",
+ env->active_fpu.fcsr0, is_fpu64,
+ get_float_exception_flags(&env->active_fpu.fp_status));
+ for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
+ fpu_fprintf(f, "%3s: ", fregnames[i]);
+ printfpr(&env->active_fpu.fpr[i]);
+ }
+
+#undef printfpr
+}
+
+void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int i;
+
+    qemu_fprintf(f, "pc:\t " TARGET_FMT_lx "\n", env->active_tc.PC);
+    for (i = 0; i < 32; i++) {
+        if ((i & 3) == 0) {
+            qemu_fprintf(f, "GPR%02d:", i);
+        }
+        qemu_fprintf(f, " %s " TARGET_FMT_lx, regnames[i],
+                     env->active_tc.gpr[i]);
+        if ((i & 3) == 3) {
+            qemu_fprintf(f, "\n");
+        }
+    }
+    qemu_fprintf(f, "EUEN            0x%" PRIx64 "\n", env->CSR_EUEN);
+    qemu_fprintf(f, "ESTAT           0x%" PRIx64 "\n", env->CSR_ESTAT);
+    qemu_fprintf(f, "ERA             0x%" PRIx64 "\n", env->CSR_ERA);
+    qemu_fprintf(f, "CRMD            0x%" PRIx64 "\n", env->CSR_CRMD);
+    qemu_fprintf(f, "PRMD            0x%" PRIx64 "\n", env->CSR_PRMD);
+    qemu_fprintf(f, "BadVAddr        0x%" PRIx64 "\n", env->CSR_BADV);
+    qemu_fprintf(f, "TLB refill ERA  0x%" PRIx64 "\n", env->CSR_TLBRERA);
+    qemu_fprintf(f, "TLB refill BadV 0x%" PRIx64 "\n", env->CSR_TLBRBADV);
+    qemu_fprintf(f, "EEPN            0x%" PRIx64 "\n", env->CSR_EEPN);
+    qemu_fprintf(f, "BadInstr        0x%" PRIx64 "\n", env->CSR_BADI);
+    qemu_fprintf(f, "PRCFG1    0x%" PRIx64 "\nPRCFG2     0x%" PRIx64 "\nPRCFG3     0x%" PRIx64 "\n",
+                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
+    if ((flags & CPU_DUMP_FPU) && (env->hflags & LARCH_HFLAG_FPU)) {
+        fpu_dump_state(env, f, qemu_fprintf, flags);
+    }
+}
+
+void cpu_state_reset(CPULOONGARCHState *env)
+{
+ LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /* Reset registers to their default values */
+ env->CSR_PRCFG1 = env->cpu_model->CSR_PRCFG1;
+ env->CSR_PRCFG2 = env->cpu_model->CSR_PRCFG2;
+ env->CSR_PRCFG3 = env->cpu_model->CSR_PRCFG3;
+ env->CSR_CRMD = env->cpu_model->CSR_CRMD;
+ env->CSR_ECFG = env->cpu_model->CSR_ECFG;
+ env->CSR_STLBPGSIZE = env->cpu_model->CSR_STLBPGSIZE;
+ env->CSR_RVACFG = env->cpu_model->CSR_RVACFG;
+ env->CSR_ASID = env->cpu_model->CSR_ASID;
+
+ env->current_tc = 0;
+ env->active_fpu.fcsr0_rw_bitmask = env->cpu_model->FCSR0_rw_bitmask;
+ env->active_fpu.fcsr0 = env->cpu_model->FCSR0;
+ env->insn_flags = env->cpu_model->insn_flags;
+
+#if !defined(CONFIG_USER_ONLY)
+ env->CSR_ERA = env->active_tc.PC;
+ env->active_tc.PC = env->exception_base;
+#ifdef CONFIG_TCG
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+#endif
+ env->CSR_TLBWIRED = 0;
+ env->CSR_TMID = cs->cpu_index;
+ env->CSR_CPUID = (cs->cpu_index & 0x1ff);
+ env->CSR_EEPN |= (uint64_t)0x80000000;
+ env->CSR_TLBRENT |= (uint64_t)0x80000000;
+#endif
+
+ /* Count register increments in debug mode, EJTAG version 1 */
+ env->CSR_DEBUG = (1 << CSR_DEBUG_DINT) | (0x1 << CSR_DEBUG_DMVER);
+
+ compute_hflags(env);
+ restore_fp_status(env);
+ cs->exception_index = EXCP_NONE;
+}
+
+/* CPUClass::reset() */
+static void loongarch_cpu_reset(DeviceState *dev)
+{
+ CPUState *s = CPU(dev);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(s);
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(cpu);
+ CPULOONGARCHState *env = &cpu->env;
+
+ mcc->parent_reset(dev);
+
+ memset(env, 0, offsetof(CPULOONGARCHState, end_reset_fields));
+
+ cpu_state_reset(env);
+
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_loongarch_reset_vcpu(cpu);
+ }
+#endif
+}
+
+static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ info->print_insn = print_insn_loongarch;
+}
+
+static void fpu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
+{
+ memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu));
+}
+
+void cpu_loongarch_realize_env(CPULOONGARCHState *env)
+{
+ env->exception_base = 0x1C000000;
+
+#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+ mmu_init(env, env->cpu_model);
+#endif
+#endif
+ fpu_init(env, env->cpu_model);
+}
+
+static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_loongarch_realize_env(&cpu->env);
+
+ loongarch_cpu_register_gdb_regs_for_features(cs);
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ mcc->parent_realize(dev, errp);
+ cpu->hotplugged = 1;
+}
+
+static void loongarch_cpu_unrealizefn(DeviceState *dev)
+{
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev);
+
+#ifndef CONFIG_USER_ONLY
+ cpu_remove_sync(CPU(dev));
+#endif
+
+ mcc->parent_unrealize(dev);
+}
+static void loongarch_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(obj);
+ CPULOONGARCHState *env = &cpu->env;
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(obj);
+ cpu_set_cpustate_pointers(cpu);
+ cs->env_ptr = env;
+ env->cpu_model = mcc->cpu_def;
+ cs->halted = 1;
+ cpu->dtb_compatible = "loongarch,Loongson-3A5000";
+}
+
+static char *loongarch_cpu_type_name(const char *cpu_model)
+{
+ return g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
+}
+
+static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ typename = loongarch_cpu_type_name(cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+
+ return cpu->id;
+}
+
+static Property loongarch_cpu_properties[] = {
+ DEFINE_PROP_INT32("core-id", LOONGARCHCPU, core_id, -1),
+ DEFINE_PROP_INT32("id", LOONGARCHCPU, id, UNASSIGNED_CPU_ID),
+ DEFINE_PROP_INT32("node-id", LOONGARCHCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
+
+ DEFINE_PROP_END_OF_LIST()
+};
+
+#ifdef CONFIG_TCG
+static void loongarch_cpu_synchronize_from_tb(CPUState *cs,const TranslationBlock *tb)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+
+ env->active_tc.PC = tb->pc;
+ env->hflags &= ~LARCH_HFLAG_BMASK;
+ env->hflags |= tb->flags & LARCH_HFLAG_BMASK;
+}
+
+static const struct TCGCPUOps loongarch_tcg_ops = {
+ .initialize = loongarch_tcg_init,
+ .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
+
+ .tlb_fill = loongarch_cpu_tlb_fill,
+ .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
+ .do_interrupt = loongarch_cpu_do_interrupt,
+
+#ifndef CONFIG_USER_ONLY
+ .do_unaligned_access = loongarch_cpu_do_unaligned_access,
+#endif /* !CONFIG_USER_ONLY */
+};
+#endif /* CONFIG_TCG */
+
+
+#if !defined(CONFIG_USER_ONLY)
+static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ int rw, int access_type, int mmu_idx)
+{
+ int user_mode = mmu_idx == LARCH_HFLAG_UM;
+ int kernel_mode = !user_mode;
+ unsigned plv, base_c, base_v, tmp;
+
+ /* effective address (modified for KVM T&E kernel segments) */
+ target_ulong address = real_address;
+
+ /* Check PG */
+ if (!(env->CSR_CRMD & CSR_CRMD_PG)) {
+ /* DA mode */
+ *physical = address & 0xffffffffffffUL;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+ }
+
+ plv = kernel_mode | (user_mode << 3);
+ base_v = address >> CSR_DMW_BASE_SH;
+ /* Check direct map window 0 */
+ base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH;
+ if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) {
+ *physical = dmwin_va2pa(address);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+ }
+ /* Check direct map window 1 */
+ base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH;
+ if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) {
+ *physical = dmwin_va2pa(address);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+ }
+ /* Check valid extension */
+ tmp = address >> 47;
+ if (!(tmp == 0 || tmp == 0x1ffff)) {
+ return TLBRET_BADADDR;
+ }
+ /* mapped address */
+ return env->tlb->map_address(env, physical, prot, real_address, rw,
+ access_type);
+}
+
+hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+ hwaddr phys_addr;
+ int prot;
+
+ if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
+ cpu_mmu_index(env, false)) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
+
+
+#ifndef CONFIG_USER_ONLY
+#include "hw/core/sysemu-cpu-ops.h"
+
+static const struct SysemuCPUOps loongarch_sysemu_ops = {
+ .write_elf64_note = loongarch_cpu_write_elf64_note,
+ .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
+ .legacy_vmsd = &vmstate_loongarch_cpu,
+};
+#endif
+
+
+static void loongarch_cpu_class_init(ObjectClass *c, void *data)
+{
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ device_class_set_props(dc, loongarch_cpu_properties);
+ device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
+ &mcc->parent_realize);
+
+ device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn,
+ &mcc->parent_unrealize);
+
+ device_class_set_parent_reset(dc, loongarch_cpu_reset, &mcc->parent_reset);
+ cc->get_arch_id = loongarch_cpu_get_arch_id;
+
+ cc->class_by_name = loongarch_cpu_class_by_name;
+ cc->has_work = loongarch_cpu_has_work;
+ cc->dump_state = loongarch_cpu_dump_state;
+ cc->set_pc = loongarch_cpu_set_pc;
+ cc->gdb_read_register = loongarch_cpu_gdb_read_register;
+ cc->gdb_write_register = loongarch_cpu_gdb_write_register;
+ cc->disas_set_info = loongarch_cpu_disas_set_info;
+#ifndef CONFIG_USER_ONLY
+ cc->sysemu_ops = &loongarch_sysemu_ops;
+#endif /* !CONFIG_USER_ONLY */
+
+ cc->gdb_num_core_regs = 33;
+ cc->gdb_core_xml_file = "loongarch-base64.xml";
+ cc->gdb_stop_before_watchpoint = true;
+
+ dc->user_creatable = true;
+#ifdef CONFIG_TCG
+ cc->tcg_ops = &loongarch_tcg_ops;
+#endif /* CONFIG_TCG */
+}
+
+static const TypeInfo loongarch_cpu_type_info = {
+ .name = TYPE_LOONGARCH_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(LOONGARCHCPU),
+ .instance_init = loongarch_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(LOONGARCHCPUClass),
+ .class_init = loongarch_cpu_class_init,
+};
+
+static void loongarch_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(oc);
+ mcc->cpu_def = data;
+}
+
+static void loongarch_register_cpudef_type(const struct loongarch_def_t *def)
+{
+ char *typename = loongarch_cpu_type_name(def->name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_LOONGARCH_CPU,
+ .class_init = loongarch_cpu_cpudef_class_init,
+ .class_data = (void *)def,
+ };
+
+ type_register(&ti);
+ g_free(typename);
+}
+
+static void loongarch_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&loongarch_cpu_type_info);
+ for (i = 0; i < loongarch_defs_number; i++) {
+ loongarch_register_cpudef_type(&loongarch_defs[i]);
+ }
+}
+
+type_init(loongarch_cpu_register_types)
diff --git a/target/loongarch64/cpu.h b/target/loongarch64/cpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab88658e47ec5779b673190e63a081885a189aad
--- /dev/null
+++ b/target/loongarch64/cpu.h
@@ -0,0 +1,336 @@
+#ifndef LOONGARCH_CPU_H
+#define LOONGARCH_CPU_H
+
+
+#define CPUArchState struct CPULOONGARCHState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "larch-defs.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+#include "sysemu/sysemu.h"
+#include "cpu-csr.h"
+
+#define TCG_GUEST_DEFAULT_MO (0)
+
+struct CPULOONGARCHState;
+typedef LOONGARCHCPU ArchCPU;
+typedef struct CPULOONGARCHTLBContext CPULOONGARCHTLBContext;
+
+#define LASX_REG_WIDTH (256)
+typedef union lasx_reg_t lasx_reg_t;
+union lasx_reg_t {
+ int64_t val64[LASX_REG_WIDTH / 64];
+};
+
+typedef union fpr_t fpr_t;
+union fpr_t {
+ float64 fd; /* ieee double precision */
+ float32 fs[2];/* ieee single precision */
+ uint64_t d; /* binary double fixed-point */
+ uint32_t w[2]; /* binary single fixed-point */
+/* FPU/LASX register mapping is not tested on big-endian hosts. */
+ lasx_reg_t lasx; /* vector data */
+};
+/* define FP_ENDIAN_IDX to access the same location
+ * in the fpr_t union regardless of the host endianness
+ */
+#if defined(HOST_WORDS_BIGENDIAN)
+# define FP_ENDIAN_IDX 1
+#else
+# define FP_ENDIAN_IDX 0
+#endif
+
+typedef struct CPULOONGARCHFPUContext {
+ /* Floating point registers */
+ fpr_t fpr[32];
+ float_status fp_status;
+
+ bool cf[8];
+ /* fcsr0
+ * 31:29 |28:24 |23:21 |20:16 |15:10 |9:8 |7 |6 |5 |4:0
+ * Cause Flags RM DAE TM Enables
+ */
+ uint32_t fcsr0;
+ uint32_t fcsr0_rw_bitmask;
+ uint32_t vcsr16;
+ uint64_t ftop;
+} CPULOONGARCHFPUContext;
+
+/* fp control and status register definition */
+#define FCSR0_M1 0xdf /* DAE, TM and Enables */
+#define FCSR0_M2 0x1f1f0000 /* Cause and Flags */
+#define FCSR0_M3 0x300 /* Round Mode */
+#define FCSR0_RM 8 /* Round Mode bit num on fcsr0 */
+#define GET_FP_CAUSE(reg) (((reg) >> 24) & 0x1f)
+#define GET_FP_ENABLE(reg) (((reg) >> 0) & 0x1f)
+#define GET_FP_FLAGS(reg) (((reg) >> 16) & 0x1f)
+#define SET_FP_CAUSE(reg, v) do { (reg) = ((reg) & ~(0x1f << 24)) | \
+ ((v & 0x1f) << 24); \
+ } while (0)
+#define SET_FP_ENABLE(reg, v) do { (reg) = ((reg) & ~(0x1f << 0)) | \
+ ((v & 0x1f) << 0); \
+ } while (0)
+#define SET_FP_FLAGS(reg, v) do { (reg) = ((reg) & ~(0x1f << 16)) | \
+ ((v & 0x1f) << 16); \
+ } while (0)
+#define UPDATE_FP_FLAGS(reg, v) do { (reg) |= ((v & 0x1f) << 16); } while (0)
+#define FP_INEXACT 1
+#define FP_UNDERFLOW 2
+#define FP_OVERFLOW 4
+#define FP_DIV0 8
+#define FP_INVALID 16
+
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+typedef struct loongarch_def_t loongarch_def_t;
+
+#define LOONGARCH_FPU_MAX 1
+#define LOONGARCH_KSCRATCH_NUM 8
+
+typedef struct TCState TCState;
+struct TCState {
+ target_ulong gpr[32];
+ target_ulong PC;
+};
+
+#define N_IRQS 14
+#define IRQ_TIMER 11
+#define IRQ_IPI 12
+#define IRQ_UART 2
+
+typedef struct CPULOONGARCHState CPULOONGARCHState;
+struct CPULOONGARCHState {
+ TCState active_tc;
+ CPULOONGARCHFPUContext active_fpu;
+
+ uint32_t current_tc;
+ uint64_t scr[4];
+ uint32_t PABITS;
+
+ /* LoongISA CSR register */
+ CPU_LOONGARCH_CSR
+ uint64_t lladdr;
+ target_ulong llval;
+ uint64_t llval_wp;
+ uint32_t llnewval_wp;
+
+ CPULOONGARCHFPUContext fpus[LOONGARCH_FPU_MAX];
+ /* QEMU */
+ int error_code;
+#define EXCP_TLB_NOMATCH 0x1
+#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
+ uint32_t hflags; /* CPU State */
+ /* TMASK defines different execution modes */
+#define LARCH_HFLAG_TMASK 0x5F5807FF
+ /*
+ * The KSU flags must be the lowest bits in hflags. The flag order
+ * must be the same as defined for CP0 Status. This allows to use
+ * the bits as the value of mmu_idx.
+ */
+#define LARCH_HFLAG_KSU 0x00003 /* kernel/user mode mask */
+#define LARCH_HFLAG_UM 0x00003 /* user mode flag */
+#define LARCH_HFLAG_KM 0x00000 /* kernel mode flag */
+#define LARCH_HFLAG_64 0x00008 /* 64-bit instructions enabled */
+#define LARCH_HFLAG_FPU 0x00020 /* FPU enabled */
+#define LARCH_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
+ /* If translation is interrupted between the branch instruction and
+ * the delay slot, record what type of branch it is so that we can
+ * resume translation properly. It might be possible to reduce
+ * this from three bits to two. */
+#define LARCH_HFLAG_BMASK 0x03800
+#define LARCH_HFLAG_B 0x00800 /* Unconditional branch */
+#define LARCH_HFLAG_BC 0x01000 /* Conditional branch */
+#define LARCH_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
+#define LARCH_HFLAG_LSX 0x1000000
+#define LARCH_HFLAG_LASX 0x2000000
+#define LARCH_HFLAG_LBT 0x40000000
+ target_ulong btarget; /* Jump / branch target */
+ target_ulong bcond; /* Branch condition (if needed) */
+
+ uint64_t insn_flags; /* Supported instruction set */
+ int cpu_cfg[64];
+
+ /* Fields up to this point are cleared by a CPU reset */
+ struct {} end_reset_fields;
+
+ /* Fields from here on are preserved across CPU reset. */
+#if !defined(CONFIG_USER_ONLY)
+ CPULOONGARCHTLBContext *tlb;
+#endif
+
+ const loongarch_def_t *cpu_model;
+ void *irq[N_IRQS];
+ QEMUTimer *timer; /* Internal timer */
+ MemoryRegion *itc_tag; /* ITC Configuration Tags */
+ target_ulong exception_base; /* ExceptionBase input to the core */
+ struct {
+ uint64_t guest_addr;
+ } st;
+ struct {
+ /* scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+ /* loongarch eflag */
+ unsigned long eflag;
+ } lbt;
+};
+
+
+/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
+ * that ID hasn't been set yet
+ */
+#define UNASSIGNED_CPU_ID 0xFFFFFFFF
+
+/**
+ * LOONGARCHCPU:
+ * @env: #CPULOONGARCHState
+ *
+ * A LOONGARCH CPU.
+ */
+struct LOONGARCHCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+ CPUNegativeOffsetState neg;
+ CPULOONGARCHState env;
+ int32_t id;
+ int hotplugged;
+ uint8_t online_vcpus;
+ uint8_t is_migrate;
+ uint64_t counter_value;
+ uint32_t cpu_freq;
+ uint32_t count_ctl;
+ uint64_t pending_exceptions;
+ uint64_t pending_exceptions_clr;
+ uint64_t core_ext_ioisr[4];
+ VMChangeStateEntry *cpuStateEntry;
+ int32_t node_id; /* NUMA node this CPU belongs to */
+ int32_t core_id;
+ struct kvm_msrs *kvm_csr_buf;
+ /* 'compatible' string for this CPU for Linux device trees */
+ const char *dtb_compatible;
+};
+
+static inline LOONGARCHCPU *loongarch_env_get_cpu(CPULOONGARCHState *env)
+{
+ return container_of(env, LOONGARCHCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(loongarch_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(LOONGARCHCPU, env)
+
+void loongarch_cpu_list(void);
+
+#define cpu_signal_handler cpu_loongarch_signal_handler
+#define cpu_list loongarch_cpu_list
+
+/* MMU modes definitions. We carefully match the indices with our
+ hflags layout. */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _super
+#define MMU_MODE2_SUFFIX _user
+#define MMU_MODE3_SUFFIX _error
+#define MMU_USER_IDX 3
+
+static inline int hflags_mmu_index(uint32_t hflags)
+{
+ return hflags & LARCH_HFLAG_KSU;
+}
+
+static inline int cpu_mmu_index(CPULOONGARCHState *env, bool ifetch)
+{
+ return hflags_mmu_index(env->hflags);
+}
+
+#include "exec/cpu-all.h"
+
+/* Memory access type :
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_USER = 0x00,
+ ACCESS_SUPER = 0x01,
+ /* 1 bit to indicate direction */
+ ACCESS_STORE = 0x02,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+};
+
+/* Exceptions */
+enum {
+ EXCP_NONE = -1,
+ EXCP_RESET = 0,
+ EXCP_SRESET,
+ EXCP_DINT,
+ EXCP_NMI,
+ EXCP_EXT_INTERRUPT, /* 7 */
+ EXCP_AdEL,
+ EXCP_AdES,
+ EXCP_TLBF,
+ EXCP_IBE,
+ EXCP_SYSCALL,
+ EXCP_BREAK,
+ EXCP_FPDIS,
+ EXCP_LSXDIS,
+ EXCP_LASXDIS,
+ EXCP_RI,
+ EXCP_OVERFLOW,
+ EXCP_TRAP,
+ EXCP_FPE,
+ EXCP_LTLBL,
+ EXCP_TLBL,
+ EXCP_TLBS,
+ EXCP_DBE,
+ EXCP_TLBXI,
+ EXCP_TLBRI,
+ EXCP_TLBPE,
+ EXCP_BTDIS,
+
+ EXCP_LAST = EXCP_BTDIS,
+};
+
+/*
+ * This is an internally generated WAKE request line.
+ * It is driven by the CPU itself. Raised when the MT
+ * block wants to wake a VPE from an inactive state and
+ * cleared when VPE goes from active to inactive.
+ */
+#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
+
+int cpu_loongarch_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU
+#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX
+#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU
+
+/* helper.c */
+target_ulong exception_resume_pc(CPULOONGARCHState *env);
+
+/* gdbstub.c */
+void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs);
+void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def);
+
+static inline void cpu_get_tb_cpu_state(CPULOONGARCHState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->active_tc.PC;
+ *cs_base = 0;
+ *flags = env->hflags & (LARCH_HFLAG_TMASK | LARCH_HFLAG_BMASK);
+}
+
+static inline bool cpu_refill_state(CPULOONGARCHState *env)
+{
+ return env->CSR_TLBRERA & 0x1;
+}
+
+extern const char * const regnames[];
+extern const char * const fregnames[];
+#endif /* LOONGARCH_CPU_H */
diff --git a/target/loongarch64/csr_helper.c b/target/loongarch64/csr_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..182e59e925b586ab7ac19b02e7462a7786a412ad
--- /dev/null
+++ b/target/loongarch64/csr_helper.c
@@ -0,0 +1,704 @@
+/*
+ * loongarch tlb emulation helpers for qemu.
+ *
+ * Copyright (c) 2020 - 2021
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "sysemu/kvm.h"
+#include "hw/irq.h"
+#include "cpu-csr.h"
+#include "instmap.h"
+
+#ifndef CONFIG_USER_ONLY
+target_ulong helper_csr_rdq(CPULOONGARCHState *env, uint64_t csr)
+{
+ int64_t v;
+
+#define CASE_CSR_RDQ(csr) \
+ case LOONGARCH_CSR_ ## csr: \
+ { \
+ v = env->CSR_ ## csr; \
+ break; \
+ }; \
+
+ switch (csr) {
+ CASE_CSR_RDQ(CRMD)
+ CASE_CSR_RDQ(PRMD)
+ CASE_CSR_RDQ(EUEN)
+ CASE_CSR_RDQ(MISC)
+ CASE_CSR_RDQ(ECFG)
+ CASE_CSR_RDQ(ESTAT)
+ CASE_CSR_RDQ(ERA)
+ CASE_CSR_RDQ(BADV)
+ CASE_CSR_RDQ(BADI)
+ CASE_CSR_RDQ(EEPN)
+ CASE_CSR_RDQ(TLBIDX)
+ CASE_CSR_RDQ(TLBEHI)
+ CASE_CSR_RDQ(TLBELO0)
+ CASE_CSR_RDQ(TLBELO1)
+ CASE_CSR_RDQ(TLBWIRED)
+ CASE_CSR_RDQ(GTLBC)
+ CASE_CSR_RDQ(TRGP)
+ CASE_CSR_RDQ(ASID)
+ CASE_CSR_RDQ(PGDL)
+ CASE_CSR_RDQ(PGDH)
+ CASE_CSR_RDQ(PGD)
+ CASE_CSR_RDQ(PWCTL0)
+ CASE_CSR_RDQ(PWCTL1)
+ CASE_CSR_RDQ(STLBPGSIZE)
+ CASE_CSR_RDQ(RVACFG)
+ CASE_CSR_RDQ(CPUID)
+ CASE_CSR_RDQ(PRCFG1)
+ CASE_CSR_RDQ(PRCFG2)
+ CASE_CSR_RDQ(PRCFG3)
+ CASE_CSR_RDQ(KS0)
+ CASE_CSR_RDQ(KS1)
+ CASE_CSR_RDQ(KS2)
+ CASE_CSR_RDQ(KS3)
+ CASE_CSR_RDQ(KS4)
+ CASE_CSR_RDQ(KS5)
+ CASE_CSR_RDQ(KS6)
+ CASE_CSR_RDQ(KS7)
+ CASE_CSR_RDQ(KS8)
+ CASE_CSR_RDQ(TMID)
+ CASE_CSR_RDQ(TCFG)
+ case LOONGARCH_CSR_TVAL:
+ v = cpu_loongarch_get_stable_timer_ticks(env);
+ break;
+ CASE_CSR_RDQ(CNTC)
+ CASE_CSR_RDQ(TINTCLR)
+ CASE_CSR_RDQ(GSTAT)
+ CASE_CSR_RDQ(GCFG)
+ CASE_CSR_RDQ(GINTC)
+ CASE_CSR_RDQ(GCNTC)
+ CASE_CSR_RDQ(LLBCTL)
+ CASE_CSR_RDQ(IMPCTL1)
+ CASE_CSR_RDQ(IMPCTL2)
+ CASE_CSR_RDQ(GNMI)
+ CASE_CSR_RDQ(TLBRENT)
+ CASE_CSR_RDQ(TLBRBADV)
+ CASE_CSR_RDQ(TLBRERA)
+ CASE_CSR_RDQ(TLBRSAVE)
+ CASE_CSR_RDQ(TLBRELO0)
+ CASE_CSR_RDQ(TLBRELO1)
+ CASE_CSR_RDQ(TLBREHI)
+ CASE_CSR_RDQ(TLBRPRMD)
+ CASE_CSR_RDQ(ERRCTL)
+ CASE_CSR_RDQ(ERRINFO)
+ CASE_CSR_RDQ(ERRINFO1)
+ CASE_CSR_RDQ(ERRENT)
+ CASE_CSR_RDQ(ERRERA)
+ CASE_CSR_RDQ(ERRSAVE)
+ CASE_CSR_RDQ(CTAG)
+ CASE_CSR_RDQ(DMWIN0)
+ CASE_CSR_RDQ(DMWIN1)
+ CASE_CSR_RDQ(DMWIN2)
+ CASE_CSR_RDQ(DMWIN3)
+ CASE_CSR_RDQ(PERFCTRL0)
+ CASE_CSR_RDQ(PERFCNTR0)
+ CASE_CSR_RDQ(PERFCTRL1)
+ CASE_CSR_RDQ(PERFCNTR1)
+ CASE_CSR_RDQ(PERFCTRL2)
+ CASE_CSR_RDQ(PERFCNTR2)
+ CASE_CSR_RDQ(PERFCTRL3)
+ CASE_CSR_RDQ(PERFCNTR3)
+ /* debug */
+ CASE_CSR_RDQ(MWPC)
+ CASE_CSR_RDQ(MWPS)
+ CASE_CSR_RDQ(DB0ADDR)
+ CASE_CSR_RDQ(DB0MASK)
+ CASE_CSR_RDQ(DB0CTL)
+ CASE_CSR_RDQ(DB0ASID)
+ CASE_CSR_RDQ(DB1ADDR)
+ CASE_CSR_RDQ(DB1MASK)
+ CASE_CSR_RDQ(DB1CTL)
+ CASE_CSR_RDQ(DB1ASID)
+ CASE_CSR_RDQ(DB2ADDR)
+ CASE_CSR_RDQ(DB2MASK)
+ CASE_CSR_RDQ(DB2CTL)
+ CASE_CSR_RDQ(DB2ASID)
+ CASE_CSR_RDQ(DB3ADDR)
+ CASE_CSR_RDQ(DB3MASK)
+ CASE_CSR_RDQ(DB3CTL)
+ CASE_CSR_RDQ(DB3ASID)
+ CASE_CSR_RDQ(FWPC)
+ CASE_CSR_RDQ(FWPS)
+ CASE_CSR_RDQ(IB0ADDR)
+ CASE_CSR_RDQ(IB0MASK)
+ CASE_CSR_RDQ(IB0CTL)
+ CASE_CSR_RDQ(IB0ASID)
+ CASE_CSR_RDQ(IB1ADDR)
+ CASE_CSR_RDQ(IB1MASK)
+ CASE_CSR_RDQ(IB1CTL)
+ CASE_CSR_RDQ(IB1ASID)
+ CASE_CSR_RDQ(IB2ADDR)
+ CASE_CSR_RDQ(IB2MASK)
+ CASE_CSR_RDQ(IB2CTL)
+ CASE_CSR_RDQ(IB2ASID)
+ CASE_CSR_RDQ(IB3ADDR)
+ CASE_CSR_RDQ(IB3MASK)
+ CASE_CSR_RDQ(IB3CTL)
+ CASE_CSR_RDQ(IB3ASID)
+ CASE_CSR_RDQ(IB4ADDR)
+ CASE_CSR_RDQ(IB4MASK)
+ CASE_CSR_RDQ(IB4CTL)
+ CASE_CSR_RDQ(IB4ASID)
+ CASE_CSR_RDQ(IB5ADDR)
+ CASE_CSR_RDQ(IB5MASK)
+ CASE_CSR_RDQ(IB5CTL)
+ CASE_CSR_RDQ(IB5ASID)
+ CASE_CSR_RDQ(IB6ADDR)
+ CASE_CSR_RDQ(IB6MASK)
+ CASE_CSR_RDQ(IB6CTL)
+ CASE_CSR_RDQ(IB6ASID)
+ CASE_CSR_RDQ(IB7ADDR)
+ CASE_CSR_RDQ(IB7MASK)
+ CASE_CSR_RDQ(IB7CTL)
+ CASE_CSR_RDQ(IB7ASID)
+ CASE_CSR_RDQ(DEBUG)
+ CASE_CSR_RDQ(DERA)
+ CASE_CSR_RDQ(DESAVE)
+ default :
+ assert(0);
+ }
+
+#undef CASE_CSR_RDQ
+ compute_hflags(env);
+ return v;
+}
+
+target_ulong helper_csr_wrq(CPULOONGARCHState *env, target_ulong val,
+ uint64_t csr)
+{
+ int64_t old_v, v;
+ old_v = -1;
+ v = val;
+
+#define CASE_CSR_WRQ(csr) \
+ case LOONGARCH_CSR_ ## csr: \
+ { \
+ old_v = env->CSR_ ## csr; \
+ env->CSR_ ## csr = v; \
+ break; \
+ }; \
+
+ switch (csr) {
+ CASE_CSR_WRQ(CRMD)
+ CASE_CSR_WRQ(PRMD)
+ CASE_CSR_WRQ(EUEN)
+ CASE_CSR_WRQ(MISC)
+ CASE_CSR_WRQ(ECFG)
+ CASE_CSR_WRQ(ESTAT)
+ CASE_CSR_WRQ(ERA)
+ CASE_CSR_WRQ(BADV)
+ CASE_CSR_WRQ(BADI)
+ CASE_CSR_WRQ(EEPN)
+ CASE_CSR_WRQ(TLBIDX)
+ CASE_CSR_WRQ(TLBEHI)
+ CASE_CSR_WRQ(TLBELO0)
+ CASE_CSR_WRQ(TLBELO1)
+ CASE_CSR_WRQ(TLBWIRED)
+ CASE_CSR_WRQ(GTLBC)
+ CASE_CSR_WRQ(TRGP)
+ CASE_CSR_WRQ(ASID)
+ CASE_CSR_WRQ(PGDL)
+ CASE_CSR_WRQ(PGDH)
+ CASE_CSR_WRQ(PGD)
+ CASE_CSR_WRQ(PWCTL0)
+ CASE_CSR_WRQ(PWCTL1)
+ CASE_CSR_WRQ(STLBPGSIZE)
+ CASE_CSR_WRQ(RVACFG)
+ CASE_CSR_WRQ(CPUID)
+ CASE_CSR_WRQ(PRCFG1)
+ CASE_CSR_WRQ(PRCFG2)
+ CASE_CSR_WRQ(PRCFG3)
+ CASE_CSR_WRQ(KS0)
+ CASE_CSR_WRQ(KS1)
+ CASE_CSR_WRQ(KS2)
+ CASE_CSR_WRQ(KS3)
+ CASE_CSR_WRQ(KS4)
+ CASE_CSR_WRQ(KS5)
+ CASE_CSR_WRQ(KS6)
+ CASE_CSR_WRQ(KS7)
+ CASE_CSR_WRQ(KS8)
+ CASE_CSR_WRQ(TMID)
+ case LOONGARCH_CSR_TCFG:
+ old_v = env->CSR_TCFG;
+ cpu_loongarch_store_stable_timer_config(env, v);
+ break;
+ CASE_CSR_WRQ(TVAL)
+ CASE_CSR_WRQ(CNTC)
+ case LOONGARCH_CSR_TINTCLR:
+ old_v = 0;
+ qemu_irq_lower(env->irq[IRQ_TIMER]);
+ break;
+ CASE_CSR_WRQ(GSTAT)
+ CASE_CSR_WRQ(GCFG)
+ CASE_CSR_WRQ(GINTC)
+ CASE_CSR_WRQ(GCNTC)
+ CASE_CSR_WRQ(LLBCTL)
+ CASE_CSR_WRQ(IMPCTL1)
+ case LOONGARCH_CSR_IMPCTL2:
+ if (v & CSR_IMPCTL2_MTLB) {
+ ls3a5k_flush_vtlb(env);
+ }
+ if (v & CSR_IMPCTL2_STLB) {
+ ls3a5k_flush_ftlb(env);
+ }
+ break;
+ CASE_CSR_WRQ(GNMI)
+ CASE_CSR_WRQ(TLBRENT)
+ CASE_CSR_WRQ(TLBRBADV)
+ CASE_CSR_WRQ(TLBRERA)
+ CASE_CSR_WRQ(TLBRSAVE)
+ CASE_CSR_WRQ(TLBRELO0)
+ CASE_CSR_WRQ(TLBRELO1)
+ CASE_CSR_WRQ(TLBREHI)
+ CASE_CSR_WRQ(TLBRPRMD)
+ CASE_CSR_WRQ(ERRCTL)
+ CASE_CSR_WRQ(ERRINFO)
+ CASE_CSR_WRQ(ERRINFO1)
+ CASE_CSR_WRQ(ERRENT)
+ CASE_CSR_WRQ(ERRERA)
+ CASE_CSR_WRQ(ERRSAVE)
+ CASE_CSR_WRQ(CTAG)
+ CASE_CSR_WRQ(DMWIN0)
+ CASE_CSR_WRQ(DMWIN1)
+ CASE_CSR_WRQ(DMWIN2)
+ CASE_CSR_WRQ(DMWIN3)
+ CASE_CSR_WRQ(PERFCTRL0)
+ CASE_CSR_WRQ(PERFCNTR0)
+ CASE_CSR_WRQ(PERFCTRL1)
+ CASE_CSR_WRQ(PERFCNTR1)
+ CASE_CSR_WRQ(PERFCTRL2)
+ CASE_CSR_WRQ(PERFCNTR2)
+ CASE_CSR_WRQ(PERFCTRL3)
+ CASE_CSR_WRQ(PERFCNTR3)
+ /* debug */
+ CASE_CSR_WRQ(MWPC)
+ CASE_CSR_WRQ(MWPS)
+ CASE_CSR_WRQ(DB0ADDR)
+ CASE_CSR_WRQ(DB0MASK)
+ CASE_CSR_WRQ(DB0CTL)
+ CASE_CSR_WRQ(DB0ASID)
+ CASE_CSR_WRQ(DB1ADDR)
+ CASE_CSR_WRQ(DB1MASK)
+ CASE_CSR_WRQ(DB1CTL)
+ CASE_CSR_WRQ(DB1ASID)
+ CASE_CSR_WRQ(DB2ADDR)
+ CASE_CSR_WRQ(DB2MASK)
+ CASE_CSR_WRQ(DB2CTL)
+ CASE_CSR_WRQ(DB2ASID)
+ CASE_CSR_WRQ(DB3ADDR)
+ CASE_CSR_WRQ(DB3MASK)
+ CASE_CSR_WRQ(DB3CTL)
+ CASE_CSR_WRQ(DB3ASID)
+ CASE_CSR_WRQ(FWPC)
+ CASE_CSR_WRQ(FWPS)
+ CASE_CSR_WRQ(IB0ADDR)
+ CASE_CSR_WRQ(IB0MASK)
+ CASE_CSR_WRQ(IB0CTL)
+ CASE_CSR_WRQ(IB0ASID)
+ CASE_CSR_WRQ(IB1ADDR)
+ CASE_CSR_WRQ(IB1MASK)
+ CASE_CSR_WRQ(IB1CTL)
+ CASE_CSR_WRQ(IB1ASID)
+ CASE_CSR_WRQ(IB2ADDR)
+ CASE_CSR_WRQ(IB2MASK)
+ CASE_CSR_WRQ(IB2CTL)
+ CASE_CSR_WRQ(IB2ASID)
+ CASE_CSR_WRQ(IB3ADDR)
+ CASE_CSR_WRQ(IB3MASK)
+ CASE_CSR_WRQ(IB3CTL)
+ CASE_CSR_WRQ(IB3ASID)
+ CASE_CSR_WRQ(IB4ADDR)
+ CASE_CSR_WRQ(IB4MASK)
+ CASE_CSR_WRQ(IB4CTL)
+ CASE_CSR_WRQ(IB4ASID)
+ CASE_CSR_WRQ(IB5ADDR)
+ CASE_CSR_WRQ(IB5MASK)
+ CASE_CSR_WRQ(IB5CTL)
+ CASE_CSR_WRQ(IB5ASID)
+ CASE_CSR_WRQ(IB6ADDR)
+ CASE_CSR_WRQ(IB6MASK)
+ CASE_CSR_WRQ(IB6CTL)
+ CASE_CSR_WRQ(IB6ASID)
+ CASE_CSR_WRQ(IB7ADDR)
+ CASE_CSR_WRQ(IB7MASK)
+ CASE_CSR_WRQ(IB7CTL)
+ CASE_CSR_WRQ(IB7ASID)
+ CASE_CSR_WRQ(DEBUG)
+ CASE_CSR_WRQ(DERA)
+ CASE_CSR_WRQ(DESAVE)
+ default :
+ assert(0);
+ }
+
+ if (csr == LOONGARCH_CSR_ASID) {
+ if (old_v != v) {
+ tlb_flush(CPU(loongarch_env_get_cpu(env)));
+ }
+ }
+
+#undef CASE_CSR_WRQ
+ compute_hflags(env);
+ return old_v;
+}
+
+target_ulong helper_csr_xchgq(CPULOONGARCHState *env, target_ulong val,
+ target_ulong mask, uint64_t csr)
+{
+ target_ulong v, tmp;
+ v = val & mask;
+
+#define CASE_CSR_XCHGQ(csr) \
+ case LOONGARCH_CSR_ ## csr: \
+ { \
+ val = env->CSR_ ## csr; \
+ env->CSR_ ## csr = (env->CSR_ ## csr) & (~mask); \
+ env->CSR_ ## csr = (env->CSR_ ## csr) | v; \
+ break; \
+ }; \
+
+ switch (csr) {
+ CASE_CSR_XCHGQ(CRMD)
+ CASE_CSR_XCHGQ(PRMD)
+ CASE_CSR_XCHGQ(EUEN)
+ CASE_CSR_XCHGQ(MISC)
+ CASE_CSR_XCHGQ(ECFG)
+ case LOONGARCH_CSR_ESTAT:
+ val = env->CSR_ESTAT;
+ qatomic_and(&env->CSR_ESTAT, ~mask);
+ qatomic_or(&env->CSR_ESTAT, v);
+ break;
+ CASE_CSR_XCHGQ(ERA)
+ CASE_CSR_XCHGQ(BADV)
+ CASE_CSR_XCHGQ(BADI)
+ CASE_CSR_XCHGQ(EEPN)
+ CASE_CSR_XCHGQ(TLBIDX)
+ CASE_CSR_XCHGQ(TLBEHI)
+ CASE_CSR_XCHGQ(TLBELO0)
+ CASE_CSR_XCHGQ(TLBELO1)
+ CASE_CSR_XCHGQ(TLBWIRED)
+ CASE_CSR_XCHGQ(GTLBC)
+ CASE_CSR_XCHGQ(TRGP)
+ CASE_CSR_XCHGQ(ASID)
+ CASE_CSR_XCHGQ(PGDL)
+ CASE_CSR_XCHGQ(PGDH)
+ CASE_CSR_XCHGQ(PGD)
+ CASE_CSR_XCHGQ(PWCTL0)
+ CASE_CSR_XCHGQ(PWCTL1)
+ CASE_CSR_XCHGQ(STLBPGSIZE)
+ CASE_CSR_XCHGQ(RVACFG)
+ CASE_CSR_XCHGQ(CPUID)
+ CASE_CSR_XCHGQ(PRCFG1)
+ CASE_CSR_XCHGQ(PRCFG2)
+ CASE_CSR_XCHGQ(PRCFG3)
+ CASE_CSR_XCHGQ(KS0)
+ CASE_CSR_XCHGQ(KS1)
+ CASE_CSR_XCHGQ(KS2)
+ CASE_CSR_XCHGQ(KS3)
+ CASE_CSR_XCHGQ(KS4)
+ CASE_CSR_XCHGQ(KS5)
+ CASE_CSR_XCHGQ(KS6)
+ CASE_CSR_XCHGQ(KS7)
+ CASE_CSR_XCHGQ(KS8)
+ CASE_CSR_XCHGQ(TMID)
+ case LOONGARCH_CSR_TCFG:
+ val = env->CSR_TCFG;
+ tmp = val & ~mask;
+ tmp |= v;
+ cpu_loongarch_store_stable_timer_config(env, tmp);
+ break;
+ CASE_CSR_XCHGQ(TVAL)
+ CASE_CSR_XCHGQ(CNTC)
+ CASE_CSR_XCHGQ(TINTCLR)
+ CASE_CSR_XCHGQ(GSTAT)
+ CASE_CSR_XCHGQ(GCFG)
+ CASE_CSR_XCHGQ(GINTC)
+ CASE_CSR_XCHGQ(GCNTC)
+ CASE_CSR_XCHGQ(LLBCTL)
+ CASE_CSR_XCHGQ(IMPCTL1)
+ CASE_CSR_XCHGQ(IMPCTL2)
+ CASE_CSR_XCHGQ(GNMI)
+ CASE_CSR_XCHGQ(TLBRENT)
+ CASE_CSR_XCHGQ(TLBRBADV)
+ CASE_CSR_XCHGQ(TLBRERA)
+ CASE_CSR_XCHGQ(TLBRSAVE)
+ CASE_CSR_XCHGQ(TLBRELO0)
+ CASE_CSR_XCHGQ(TLBRELO1)
+ CASE_CSR_XCHGQ(TLBREHI)
+ CASE_CSR_XCHGQ(TLBRPRMD)
+ CASE_CSR_XCHGQ(ERRCTL)
+ CASE_CSR_XCHGQ(ERRINFO)
+ CASE_CSR_XCHGQ(ERRINFO1)
+ CASE_CSR_XCHGQ(ERRENT)
+ CASE_CSR_XCHGQ(ERRERA)
+ CASE_CSR_XCHGQ(ERRSAVE)
+ CASE_CSR_XCHGQ(CTAG)
+ CASE_CSR_XCHGQ(DMWIN0)
+ CASE_CSR_XCHGQ(DMWIN1)
+ CASE_CSR_XCHGQ(DMWIN2)
+ CASE_CSR_XCHGQ(DMWIN3)
+ CASE_CSR_XCHGQ(PERFCTRL0)
+ CASE_CSR_XCHGQ(PERFCNTR0)
+ CASE_CSR_XCHGQ(PERFCTRL1)
+ CASE_CSR_XCHGQ(PERFCNTR1)
+ CASE_CSR_XCHGQ(PERFCTRL2)
+ CASE_CSR_XCHGQ(PERFCNTR2)
+ CASE_CSR_XCHGQ(PERFCTRL3)
+ CASE_CSR_XCHGQ(PERFCNTR3)
+ /* debug */
+ CASE_CSR_XCHGQ(MWPC)
+ CASE_CSR_XCHGQ(MWPS)
+ CASE_CSR_XCHGQ(DB0ADDR)
+ CASE_CSR_XCHGQ(DB0MASK)
+ CASE_CSR_XCHGQ(DB0CTL)
+ CASE_CSR_XCHGQ(DB0ASID)
+ CASE_CSR_XCHGQ(DB1ADDR)
+ CASE_CSR_XCHGQ(DB1MASK)
+ CASE_CSR_XCHGQ(DB1CTL)
+ CASE_CSR_XCHGQ(DB1ASID)
+ CASE_CSR_XCHGQ(DB2ADDR)
+ CASE_CSR_XCHGQ(DB2MASK)
+ CASE_CSR_XCHGQ(DB2CTL)
+ CASE_CSR_XCHGQ(DB2ASID)
+ CASE_CSR_XCHGQ(DB3ADDR)
+ CASE_CSR_XCHGQ(DB3MASK)
+ CASE_CSR_XCHGQ(DB3CTL)
+ CASE_CSR_XCHGQ(DB3ASID)
+ CASE_CSR_XCHGQ(FWPC)
+ CASE_CSR_XCHGQ(FWPS)
+ CASE_CSR_XCHGQ(IB0ADDR)
+ CASE_CSR_XCHGQ(IB0MASK)
+ CASE_CSR_XCHGQ(IB0CTL)
+ CASE_CSR_XCHGQ(IB0ASID)
+ CASE_CSR_XCHGQ(IB1ADDR)
+ CASE_CSR_XCHGQ(IB1MASK)
+ CASE_CSR_XCHGQ(IB1CTL)
+ CASE_CSR_XCHGQ(IB1ASID)
+ CASE_CSR_XCHGQ(IB2ADDR)
+ CASE_CSR_XCHGQ(IB2MASK)
+ CASE_CSR_XCHGQ(IB2CTL)
+ CASE_CSR_XCHGQ(IB2ASID)
+ CASE_CSR_XCHGQ(IB3ADDR)
+ CASE_CSR_XCHGQ(IB3MASK)
+ CASE_CSR_XCHGQ(IB3CTL)
+ CASE_CSR_XCHGQ(IB3ASID)
+ CASE_CSR_XCHGQ(IB4ADDR)
+ CASE_CSR_XCHGQ(IB4MASK)
+ CASE_CSR_XCHGQ(IB4CTL)
+ CASE_CSR_XCHGQ(IB4ASID)
+ CASE_CSR_XCHGQ(IB5ADDR)
+ CASE_CSR_XCHGQ(IB5MASK)
+ CASE_CSR_XCHGQ(IB5CTL)
+ CASE_CSR_XCHGQ(IB5ASID)
+ CASE_CSR_XCHGQ(IB6ADDR)
+ CASE_CSR_XCHGQ(IB6MASK)
+ CASE_CSR_XCHGQ(IB6CTL)
+ CASE_CSR_XCHGQ(IB6ASID)
+ CASE_CSR_XCHGQ(IB7ADDR)
+ CASE_CSR_XCHGQ(IB7MASK)
+ CASE_CSR_XCHGQ(IB7CTL)
+ CASE_CSR_XCHGQ(IB7ASID)
+ CASE_CSR_XCHGQ(DEBUG)
+ CASE_CSR_XCHGQ(DERA)
+ CASE_CSR_XCHGQ(DESAVE)
+ default :
+ assert(0);
+ }
+
+#undef CASE_CSR_XCHGQ
+ compute_hflags(env);
+ return val;
+}
+
+static target_ulong confbus_addr(CPULOONGARCHState *env, int cpuid,
+ target_ulong csr_addr)
+{
+ target_ulong addr;
+ target_ulong node_addr;
+ int cores_per_node = ((0x60018 >> 3) & 0xff) + 1;
+
+ switch (cores_per_node) {
+ case 4:
+ assert(cpuid < 64);
+ node_addr = ((target_ulong)(cpuid & 0x3c) << 42);
+ break;
+ case 8:
+ assert(cpuid < 128);
+ node_addr = ((target_ulong)(cpuid & 0x78) << 41) +
+ ((target_ulong)(cpuid & 0x4) << 14);
+ break;
+ case 16:
+ assert(cpuid < 256);
+ node_addr = ((target_ulong)(cpuid & 0xf0) << 40) +
+ ((target_ulong)(cpuid & 0xc) << 14);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ /*
+ * per core address
+ *0x10xx => ipi
+ * 0x18xx => extioi isr
+ */
+ if (((csr_addr & 0xff00) == 0x1000)) {
+ addr = (csr_addr & 0xff) + (target_ulong)(cpuid << 8);
+ addr = 0x800000001f000000UL + addr;
+ return addr;
+ } else if ((csr_addr & 0xff00) == 0x1800) {
+ addr = (csr_addr & 0xff) + ((target_ulong)(cpuid << 8));
+ addr = 0x800000001f020000UL + addr;
+ return addr;
+ } else if ((csr_addr & 0xff00) >= 0x1400 && (csr_addr & 0xff00) < 0x1d00) {
+ addr = 0x800000001f010000UL + ((csr_addr & 0xfff) - 0x400);
+ return addr;
+ } else if (csr_addr == 0x408) {
+ addr = csr_addr;
+ } else {
+ addr = csr_addr + node_addr;
+ }
+
+ addr = 0x800000001fe00000UL + addr;
+ return addr;
+}
+
+void helper_iocsr(CPULOONGARCHState *env, target_ulong r_addr,
+ target_ulong r_val, uint32_t op)
+{
+ target_ulong addr;
+ target_ulong val = env->active_tc.gpr[r_val];
+ int mask;
+
+ addr = confbus_addr(env, CPU(loongarch_env_get_cpu(env))->cpu_index,
+ env->active_tc.gpr[r_addr]);
+
+ switch (env->active_tc.gpr[r_addr]) {
+ /* IPI send */
+ case 0x1040:
+ if (op != OPC_LARCH_ST_W) {
+ return;
+ }
+ op = OPC_LARCH_ST_W;
+ break;
+
+ /* Mail send */
+ case 0x1048:
+ if (op != OPC_LARCH_ST_D) {
+ return;
+ }
+ op = OPC_LARCH_ST_D;
+ break;
+
+ /* ANY send */
+ case 0x1158:
+ if (op != OPC_LARCH_ST_D) {
+ return;
+ }
+ addr = confbus_addr(env, (val >> 16) & 0x3ff, val & 0xffff);
+ mask = (val >> 27) & 0xf;
+ val = (val >> 32);
+ switch (mask) {
+ case 0:
+ op = OPC_LARCH_ST_W;
+ break;
+ case 0x7:
+ op = OPC_LARCH_ST_B;
+ addr += 3;
+ val >>= 24;
+ break;
+ case 0xb:
+ op = OPC_LARCH_ST_B;
+ addr += 2;
+ val >>= 16;
+ break;
+ case 0xd:
+ op = OPC_LARCH_ST_B;
+ addr += 1;
+ val >>= 8;
+ break;
+ case 0xe:
+ op = OPC_LARCH_ST_B;
+ break;
+ case 0xc:
+ op = OPC_LARCH_ST_H;
+ break;
+ case 0x3:
+ op = OPC_LARCH_ST_H;
+ addr += 2;
+ val >>= 16;
+ break;
+ default:
+ qemu_log("Unsupported any_send mask0x%x\n", mask);
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ switch (op) {
+ case OPC_LARCH_LD_D:
+ env->active_tc.gpr[r_val] = cpu_ldq_data_ra(env, addr,
+ GETPC());
+ break;
+ case OPC_LARCH_LD_W:
+ env->active_tc.gpr[r_val] = cpu_ldl_data_ra(env, addr,
+ GETPC());
+ break;
+ case OPC_LARCH_LD_H:
+ assert(0);
+ break;
+ case OPC_LARCH_LD_B:
+ assert(0);
+ break;
+ case OPC_LARCH_ST_D:
+ cpu_stq_data_ra(env, addr, val, GETPC());
+ break;
+ case OPC_LARCH_ST_W:
+ cpu_stl_data_ra(env, addr, val, GETPC());
+ break;
+ case OPC_LARCH_ST_H:
+ cpu_stb_data_ra(env, addr, val, GETPC());
+ break;
+ case OPC_LARCH_ST_B:
+ cpu_stb_data_ra(env, addr, val, GETPC());
+ break;
+ default:
+ qemu_log("Unknown op 0x%x", op);
+ assert(0);
+ }
+}
+#endif
+
+target_ulong helper_cpucfg(CPULOONGARCHState *env, target_ulong rj)
+{
+ return 0;
+}
+
+
diff --git a/target/loongarch64/fpu.c b/target/loongarch64/fpu.c
new file mode 100644
index 0000000000000000000000000000000000000000..795458205bc408e6b9c787ea6a834e360baceb95
--- /dev/null
+++ b/target/loongarch64/fpu.c
@@ -0,0 +1,28 @@
+/*
+ * loongarch float point emulation helpers for qemu.
+ *
+ * Copyright (c) 2020-2021
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "fpu/softfloat.h"
+
+/* convert loongarch rounding mode in fcsr0 to IEEE library */
+unsigned int ieee_rm[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down
+};
diff --git a/target/loongarch64/fpu_helper.c b/target/loongarch64/fpu_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..42d7f05ca28022125b0ee77a08e4422558139f6f
--- /dev/null
+++ b/target/loongarch64/fpu_helper.c
@@ -0,0 +1,952 @@
+/*
+ * loongarch float point emulation helpers for qemu.
+ *
+ * Copyright (c) 2020-2021
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+
+#define FP_TO_INT32_OVERFLOW 0x7fffffff
+#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
+
+#define FLOAT_CLASS_SIGNALING_NAN 0x001
+#define FLOAT_CLASS_QUIET_NAN 0x002
+#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
+#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
+#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
+#define FLOAT_CLASS_NEGATIVE_ZERO 0x020
+#define FLOAT_CLASS_POSITIVE_INFINITY 0x040
+#define FLOAT_CLASS_POSITIVE_NORMAL 0x080
+#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
+#define FLOAT_CLASS_POSITIVE_ZERO 0x200
+
+target_ulong helper_movfcsr2gr(CPULOONGARCHState *env, uint32_t reg)
+{
+ target_ulong r = 0;
+
+ switch (reg) {
+ case 0:
+ r = (uint32_t)env->active_fpu.fcsr0;
+ break;
+ case 1:
+ r = (env->active_fpu.fcsr0 & FCSR0_M1);
+ break;
+ case 2:
+ r = (env->active_fpu.fcsr0 & FCSR0_M2);
+ break;
+ case 3:
+ r = (env->active_fpu.fcsr0 & FCSR0_M3);
+ break;
+ case 16:
+ r = (uint32_t)env->active_fpu.vcsr16;
+ break;
+ default:
+ printf("%s: warning, fcsr '%d' not supported\n", __func__, reg);
+ assert(0);
+ break;
+ }
+
+ return r;
+}
+
+void helper_movgr2fcsr(CPULOONGARCHState *env, target_ulong arg1,
+ uint32_t fcsr, uint32_t rj)
+{
+ switch (fcsr) {
+ case 0:
+ env->active_fpu.fcsr0 = arg1;
+ break;
+ case 1:
+ env->active_fpu.fcsr0 = (arg1 & FCSR0_M1) |
+ (env->active_fpu.fcsr0 & ~FCSR0_M1);
+ break;
+ case 2:
+ env->active_fpu.fcsr0 = (arg1 & FCSR0_M2) |
+ (env->active_fpu.fcsr0 & ~FCSR0_M2);
+ break;
+ case 3:
+ env->active_fpu.fcsr0 = (arg1 & FCSR0_M3) |
+ (env->active_fpu.fcsr0 & ~FCSR0_M3);
+ break;
+ case 16:
+ env->active_fpu.vcsr16 = arg1;
+ break;
+ default:
+ printf("%s: warning, fcsr '%d' not supported\n", __func__, fcsr);
+ assert(0);
+ break;
+ }
+ restore_fp_status(env);
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+}
+
+void helper_movreg2cf(CPULOONGARCHState *env, uint32_t cd, target_ulong src)
+{
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
+}
+
+void helper_movreg2cf_i32(CPULOONGARCHState *env, uint32_t cd, uint32_t src)
+{
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
+}
+
+void helper_movreg2cf_i64(CPULOONGARCHState *env, uint32_t cd, uint64_t src)
+{
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
+}
+
+target_ulong helper_movcf2reg(CPULOONGARCHState *env, uint32_t cj)
+{
+ return (target_ulong)env->active_fpu.cf[cj & 0x7];
+}
+
+int ieee_ex_to_loongarch(int xcpt)
+{
+ int ret = 0;
+ if (xcpt) {
+ if (xcpt & float_flag_invalid) {
+ ret |= FP_INVALID;
+ }
+ if (xcpt & float_flag_overflow) {
+ ret |= FP_OVERFLOW;
+ }
+ if (xcpt & float_flag_underflow) {
+ ret |= FP_UNDERFLOW;
+ }
+ if (xcpt & float_flag_divbyzero) {
+ ret |= FP_DIV0;
+ }
+ if (xcpt & float_flag_inexact) {
+ ret |= FP_INEXACT;
+ }
+ }
+ return ret;
+}
+
+static inline void update_fcsr0(CPULOONGARCHState *env, uintptr_t pc)
+{
+ int tmp = ieee_ex_to_loongarch(get_float_exception_flags(
+ &env->active_fpu.fp_status));
+
+ SET_FP_CAUSE(env->active_fpu.fcsr0, tmp);
+ if (tmp) {
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+
+ if (GET_FP_ENABLE(env->active_fpu.fcsr0) & tmp) {
+ do_raise_exception(env, EXCP_FPE, pc);
+ } else {
+ UPDATE_FP_FLAGS(env->active_fpu.fcsr0, tmp);
+ }
+ }
+}
+
+/* unary operations, modifying fp status */
+uint64_t helper_float_sqrt_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fdt0;
+}
+
+uint32_t helper_float_sqrt_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_cvtd_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_w(CPULOONGARCHState *env, uint32_t wt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_l(CPULOONGARCHState *env, uint64_t dt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvt_l_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_l_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_cvts_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t fst2;
+
+ fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_w(CPULOONGARCHState *env, uint32_t wt0)
+{
+ uint32_t fst2;
+
+ fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_l(CPULOONGARCHState *env, uint64_t dt0)
+{
+ uint32_t fst2;
+
+ fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvt_w_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_w_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_round_l_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_l_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_w_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_w_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_l_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0,
+ &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_l_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_w_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_w_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_l_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_l_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_w_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_w_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_l_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_l_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_w_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_w_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcsr0(env, GETPC());
+ return wt2;
+}
+
+/* unary operations, not modifying fp status */
+#define FLOAT_UNOP(name) \
+uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
+{ \
+ return float64_ ## name(fdt0); \
+} \
+uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
+{ \
+ return float32_ ## name(fst0); \
+}
+
+FLOAT_UNOP(abs)
+FLOAT_UNOP(chs)
+#undef FLOAT_UNOP
+
+uint64_t helper_float_recip_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
+ update_fcsr0(env, GETPC());
+ return fdt2;
+}
+
+/* Reciprocal: 1.0f / fst0 in single precision; updates FCSR0 flags. */
+uint32_t helper_float_recip_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+    uint32_t fst2;
+
+    fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return fst2;
+}
+
+/* Reciprocal square root: 1.0 / sqrt(fdt0), double precision. */
+uint64_t helper_float_rsqrt_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+    uint64_t fdt2;
+
+    fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+    fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return fdt2;
+}
+
+/* Reciprocal square root: 1.0f / sqrtf(fst0), single precision. */
+uint32_t helper_float_rsqrt_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+    uint32_t fst2;
+
+    fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+    fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return fst2;
+}
+
+/* Round fs to an integral float32 using the current rounding mode. */
+uint32_t helper_float_rint_s(CPULOONGARCHState *env, uint32_t fs)
+{
+    uint32_t fdret;
+
+    fdret = float32_round_to_int(fs, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return fdret;
+}
+
+/* Round fs to an integral float64 using the current rounding mode. */
+uint64_t helper_float_rint_d(CPULOONGARCHState *env, uint64_t fs)
+{
+    uint64_t fdret;
+
+    fdret = float64_round_to_int(fs, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return fdret;
+}
+
+/*
+ * FLOAT_CLASS(name, bits) emits float_<name>() plus its helper wrapper.
+ * It classifies the operand into one of the FLOAT_CLASS_* categories
+ * (NaN kind, sign, and zero/subnormal/normal/infinity), checking the
+ * most specific predicates first.
+ */
+#define FLOAT_CLASS(name, bits) \
+uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \
+                                  float_status *status) \
+{ \
+    if (float ## bits ## _is_signaling_nan(arg, status)) { \
+        return FLOAT_CLASS_SIGNALING_NAN; \
+    } else if (float ## bits ## _is_quiet_nan(arg, status)) { \
+        return FLOAT_CLASS_QUIET_NAN; \
+    } else if (float ## bits ## _is_neg(arg)) { \
+        if (float ## bits ## _is_infinity(arg)) { \
+            return FLOAT_CLASS_NEGATIVE_INFINITY; \
+        } else if (float ## bits ## _is_zero(arg)) { \
+            return FLOAT_CLASS_NEGATIVE_ZERO; \
+        } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
+            return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
+        } else { \
+            return FLOAT_CLASS_NEGATIVE_NORMAL; \
+        } \
+    } else { \
+        if (float ## bits ## _is_infinity(arg)) { \
+            return FLOAT_CLASS_POSITIVE_INFINITY; \
+        } else if (float ## bits ## _is_zero(arg)) { \
+            return FLOAT_CLASS_POSITIVE_ZERO; \
+        } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
+            return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
+        } else { \
+            return FLOAT_CLASS_POSITIVE_NORMAL; \
+        } \
+    } \
+} \
+ \
+uint ## bits ## _t helper_float_ ## name(CPULOONGARCHState *env, \
+                                         uint ## bits ## _t arg) \
+{ \
+    return float_ ## name(arg, &env->active_fpu.fp_status); \
+}
+
+FLOAT_CLASS(class_s, 32)
+FLOAT_CLASS(class_d, 64)
+#undef FLOAT_CLASS
+
+/* binary operations */
+/*
+ * FLOAT_BINOP(name) emits the double and single precision helpers for a
+ * two-operand softfloat operation; both latch exception flags into FCSR0.
+ */
+#define FLOAT_BINOP(name) \
+uint64_t helper_float_ ## name ## _d(CPULOONGARCHState *env, \
+                                     uint64_t fdt0, uint64_t fdt1) \
+{ \
+    uint64_t dt2; \
+ \
+    dt2 = float64_ ## name(fdt0, fdt1, &env->active_fpu.fp_status);\
+    update_fcsr0(env, GETPC()); \
+    return dt2; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPULOONGARCHState *env, \
+                                     uint32_t fst0, uint32_t fst1) \
+{ \
+    uint32_t wt2; \
+ \
+    wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\
+    update_fcsr0(env, GETPC()); \
+    return wt2; \
+}
+
+FLOAT_BINOP(add)
+FLOAT_BINOP(sub)
+FLOAT_BINOP(mul)
+FLOAT_BINOP(div)
+#undef FLOAT_BINOP
+
+/*
+ * Scale fdt0 by 2^n where n is the (signed) integer in fdt1,
+ * clamped to [-0x1000, 0x1000] to keep scalbn's exponent sane.
+ */
+uint64_t helper_float_exp2_d(CPULOONGARCHState *env,
+                             uint64_t fdt0, uint64_t fdt1)
+{
+    uint64_t dt2;
+    int64_t n = (int64_t)fdt1;
+
+    dt2 = float64_scalbn(fdt0,
+                         n > 0x1000 ? 0x1000 :
+                         n < -0x1000 ? -0x1000 : n,
+                         &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return dt2;
+}
+
+/*
+ * Scale fst0 by 2^n where n is the (signed) integer in fst1,
+ * clamped to [-0x200, 0x200] to keep scalbn's exponent sane.
+ */
+uint32_t helper_float_exp2_s(CPULOONGARCHState *env,
+                             uint32_t fst0, uint32_t fst1)
+{
+    uint32_t wt2;
+    int32_t n = (int32_t)fst1;
+
+    wt2 = float32_scalbn(fst0,
+                         n > 0x200 ? 0x200 :
+                         n < -0x200 ? -0x200 : n,
+                         &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return wt2;
+}
+
+/*
+ * FLOAT_MINMAX(name, bits, minmaxfunc) emits helper_float_<name>, a
+ * wrapper over the softfloat minNum/maxNum family (the *nummag variants
+ * compare by magnitude).  Exception flags are latched into FCSR0.
+ */
+#define FLOAT_MINMAX(name, bits, minmaxfunc) \
+uint ## bits ## _t helper_float_ ## name(CPULOONGARCHState *env, \
+                                         uint ## bits ## _t fs, \
+                                         uint ## bits ## _t ft) \
+{ \
+    uint ## bits ## _t fdret; \
+ \
+    fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \
+                                             &env->active_fpu.fp_status); \
+    update_fcsr0(env, GETPC()); \
+    return fdret; \
+}
+
+FLOAT_MINMAX(max_s, 32, maxnum)
+FLOAT_MINMAX(max_d, 64, maxnum)
+FLOAT_MINMAX(maxa_s, 32, maxnummag)
+FLOAT_MINMAX(maxa_d, 64, maxnummag)
+
+FLOAT_MINMAX(min_s, 32, minnum)
+FLOAT_MINMAX(min_d, 64, minnum)
+FLOAT_MINMAX(mina_s, 32, minnummag)
+FLOAT_MINMAX(mina_d, 64, minnummag)
+#undef FLOAT_MINMAX
+
+/*
+ * FLOAT_FMADDSUB(name, bits, muladd_arg) emits the fused multiply-add
+ * helper fs * ft + fd with optional negation of the addend and/or the
+ * result, selected by the float_muladd_negate_* flags.
+ */
+#define FLOAT_FMADDSUB(name, bits, muladd_arg) \
+uint ## bits ## _t helper_float_ ## name(CPULOONGARCHState *env, \
+                                         uint ## bits ## _t fs, \
+                                         uint ## bits ## _t ft, \
+                                         uint ## bits ## _t fd) \
+{ \
+    uint ## bits ## _t fdret; \
+ \
+    fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \
+                                     &env->active_fpu.fp_status); \
+    update_fcsr0(env, GETPC()); \
+    return fdret; \
+}
+
+FLOAT_FMADDSUB(maddf_s, 32, 0)
+FLOAT_FMADDSUB(maddf_d, 64, 0)
+FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_c)
+FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_c)
+FLOAT_FMADDSUB(nmaddf_s, 32, float_muladd_negate_result)
+FLOAT_FMADDSUB(nmaddf_d, 64, float_muladd_negate_result)
+FLOAT_FMADDSUB(nmsubf_s, 32, float_muladd_negate_result | float_muladd_negate_c)
+FLOAT_FMADDSUB(nmsubf_d, 64, float_muladd_negate_result | float_muladd_negate_c)
+#undef FLOAT_FMADDSUB
+
+/* compare operations */
+/*
+ * FOP_CONDN_D(op, cond) emits helper_cmp_d_<op>: evaluate "cond" on the
+ * two double operands, latch softfloat flags into FCSR0, and return an
+ * all-ones mask (-1) if the condition held, otherwise 0.  The s* ops use
+ * the signaling comparisons; the others use the quiet ones.
+ */
+#define FOP_CONDN_D(op, cond) \
+uint64_t helper_cmp_d_ ## op(CPULOONGARCHState *env, uint64_t fdt0, \
+                             uint64_t fdt1) \
+{ \
+    uint64_t c; \
+    c = cond; \
+    update_fcsr0(env, GETPC()); \
+    if (c) { \
+        return -1; \
+    } else { \
+        return 0; \
+    } \
+}
+
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered_quiet() is still called for its flag side effects.
+ */
+FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0,
+                                         &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0,
+                                         &env->active_fpu.fp_status)))
+FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0,
+                                          &env->active_fpu.fp_status)
+                  || float64_eq_quiet(fdt0, fdt1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0,
+                                          &env->active_fpu.fp_status)
+                  || float64_lt_quiet(fdt0, fdt1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0,
+                                          &env->active_fpu.fp_status)
+                  || float64_le_quiet(fdt0, fdt1,
+                                      &env->active_fpu.fp_status)))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered() is still called for its flag side effects.
+ */
+FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0,
+                                    &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0,
+                                    &env->active_fpu.fp_status)))
+FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0,
+                                     &env->active_fpu.fp_status)
+                   || float64_eq(fdt0, fdt1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0,
+                                     &env->active_fpu.fp_status)
+                   || float64_lt(fdt0, fdt1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_D(sle, (float64_le(fdt0, fdt1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0,
+                                     &env->active_fpu.fp_status)
+                   || float64_le(fdt0, fdt1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0,
+                                  &env->active_fpu.fp_status)
+                 || float64_le_quiet(fdt0, fdt1,
+                                     &env->active_fpu.fp_status)))
+FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0,
+                                          &env->active_fpu.fp_status)
+                  || float64_lt_quiet(fdt1, fdt0,
+                                      &env->active_fpu.fp_status)
+                  || float64_lt_quiet(fdt0, fdt1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0,
+                                  &env->active_fpu.fp_status)
+                 || float64_lt_quiet(fdt0, fdt1,
+                                     &env->active_fpu.fp_status)))
+FOP_CONDN_D(sor, (float64_le(fdt1, fdt0,
+                             &env->active_fpu.fp_status)
+                  || float64_le(fdt0, fdt1,
+                                &env->active_fpu.fp_status)))
+FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0,
+                                     &env->active_fpu.fp_status)
+                   || float64_lt(fdt1, fdt0,
+                                 &env->active_fpu.fp_status)
+                   || float64_lt(fdt0, fdt1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0,
+                             &env->active_fpu.fp_status)
+                 || float64_lt(fdt0, fdt1,
+                               &env->active_fpu.fp_status)))
+
+/*
+ * FOP_CONDN_S(op, cond): single-precision twin of FOP_CONDN_D.  Returns
+ * -1 (truncated to 32 bits by the caller's use) if "cond" held, else 0,
+ * latching softfloat flags into FCSR0.
+ */
+#define FOP_CONDN_S(op, cond) \
+uint32_t helper_cmp_s_ ## op(CPULOONGARCHState *env, uint32_t fst0, \
+                             uint32_t fst1) \
+{ \
+    uint64_t c; \
+    c = cond; \
+    update_fcsr0(env, GETPC()); \
+    if (c) { \
+        return -1; \
+    } else { \
+        return 0; \
+    } \
+}
+
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered_quiet() is still called for its flag side effects.
+ */
+FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0,
+                                         &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0,
+                                         &env->active_fpu.fp_status)))
+FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0,
+                                          &env->active_fpu.fp_status)
+                  || float32_eq_quiet(fst0, fst1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0,
+                                          &env->active_fpu.fp_status)
+                  || float32_lt_quiet(fst0, fst1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1,
+                                  &env->active_fpu.fp_status)))
+FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0,
+                                          &env->active_fpu.fp_status)
+                  || float32_le_quiet(fst0, fst1,
+                                      &env->active_fpu.fp_status)))
+/*
+ * NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered() is still called for its flag side effects.
+ */
+FOP_CONDN_S(saf, (float32_unordered(fst1, fst0,
+                                    &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(sun, (float32_unordered(fst1, fst0,
+                                    &env->active_fpu.fp_status)))
+FOP_CONDN_S(seq, (float32_eq(fst0, fst1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0,
+                                     &env->active_fpu.fp_status)
+                   || float32_eq(fst0, fst1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_S(slt, (float32_lt(fst0, fst1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_S(sult, (float32_unordered(fst1, fst0,
+                                     &env->active_fpu.fp_status)
+                   || float32_lt(fst0, fst1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_S(sle, (float32_le(fst0, fst1,
+                             &env->active_fpu.fp_status)))
+FOP_CONDN_S(sule, (float32_unordered(fst1, fst0,
+                                     &env->active_fpu.fp_status)
+                   || float32_le(fst0, fst1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0,
+                                  &env->active_fpu.fp_status)
+                 || float32_le_quiet(fst0, fst1,
+                                     &env->active_fpu.fp_status)))
+FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0,
+                                          &env->active_fpu.fp_status)
+                  || float32_lt_quiet(fst1, fst0,
+                                      &env->active_fpu.fp_status)
+                  || float32_lt_quiet(fst0, fst1,
+                                      &env->active_fpu.fp_status)))
+FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0,
+                                  &env->active_fpu.fp_status)
+                 || float32_lt_quiet(fst0, fst1,
+                                     &env->active_fpu.fp_status)))
+FOP_CONDN_S(sor, (float32_le(fst1, fst0,
+                             &env->active_fpu.fp_status)
+                  || float32_le(fst0, fst1,
+                                &env->active_fpu.fp_status)))
+FOP_CONDN_S(sune, (float32_unordered(fst1, fst0,
+                                     &env->active_fpu.fp_status)
+                   || float32_lt(fst1, fst0,
+                                 &env->active_fpu.fp_status)
+                   || float32_lt(fst0, fst1,
+                                 &env->active_fpu.fp_status)))
+FOP_CONDN_S(sne, (float32_lt(fst1, fst0,
+                             &env->active_fpu.fp_status)
+                 || float32_lt(fst0, fst1,
+                               &env->active_fpu.fp_status)))
+
+/* Base-2 logarithm of fst0 in single precision; updates FCSR0 flags. */
+uint32_t helper_float_logb_s(CPULOONGARCHState *env, uint32_t fst0)
+{
+    uint32_t wt2;
+
+    wt2 = float32_log2(fst0, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return wt2;
+}
+
+/* Base-2 logarithm of fdt0 in double precision; updates FCSR0 flags. */
+uint64_t helper_float_logb_d(CPULOONGARCHState *env, uint64_t fdt0)
+{
+    uint64_t dt2;
+
+    dt2 = float64_log2(fdt0, &env->active_fpu.fp_status);
+    update_fcsr0(env, GETPC());
+    return dt2;
+}
+
+/*
+ * Floating select: return fk if condition flag cf[ca & 0x7] is set,
+ * otherwise fj.  Only the low 3 bits of ca select the flag.
+ */
+target_ulong helper_fsel(CPULOONGARCHState *env, target_ulong fj,
+                         target_ulong fk, uint32_t ca)
+{
+    if (env->active_fpu.cf[ca & 0x7]) {
+        return fk;
+    } else {
+        return fj;
+    }
+}
diff --git a/target/loongarch64/fpu_helper.h b/target/loongarch64/fpu_helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..b6898c2e915424c35e3a35f846e28e3201789607
--- /dev/null
+++ b/target/loongarch64/fpu_helper.h
@@ -0,0 +1,129 @@
+/* loongarch internal definitions and helpers
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef LOONGARCH_FPU_H
+#define LOONGARCH_FPU_H
+
+#include "cpu-csr.h"
+
+
+extern const struct loongarch_def_t loongarch_defs[];
+extern const int loongarch_defs_number;
+
+enum CPULSXDataFormat {
+ DF_BYTE = 0,
+ DF_HALF,
+ DF_WORD,
+ DF_DOUBLE,
+ DF_QUAD
+};
+
+void loongarch_cpu_do_interrupt(CPUState *cpu);
+bool loongarch_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void loongarch_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr) QEMU_NORETURN;
+
+#if !defined(CONFIG_USER_ONLY)
+
+typedef struct r4k_tlb_t r4k_tlb_t;
+/*
+ * One software TLB entry.  The 0/1 field suffixes presumably describe the
+ * even/odd page halves of an R4K-style paired entry — confirm against the
+ * tlb helpers that fill these fields.
+ */
+struct r4k_tlb_t {
+    target_ulong VPN;      /* virtual page number (tag) */
+    uint32_t PageMask;
+    uint16_t ASID;         /* address space ID; ignored when G is set */
+    unsigned int G:1;      /* global: match regardless of ASID */
+    unsigned int C0:3;
+    unsigned int C1:3;
+    unsigned int V0:1;     /* valid bits */
+    unsigned int V1:1;
+    unsigned int D0:1;     /* dirty (writable) bits */
+    unsigned int D1:1;
+    unsigned int XI0:1;    /* execute-inhibit bits */
+    unsigned int XI1:1;
+    unsigned int RI0:1;    /* read-inhibit bits */
+    unsigned int RI1:1;
+    unsigned int EHINV:1;  /* entry invalidated */
+    uint64_t PPN[2];       /* physical page numbers for the two halves */
+};
+
+int no_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+int fixed_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+int r4k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+
+/* loongarch 3a5000 tlb helper function : lisa csr */
+int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ int rw, int access_type);
+void ls3a5k_helper_tlbwr(CPULOONGARCHState *env);
+void ls3a5k_helper_tlbfill(CPULOONGARCHState *env);
+void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env);
+void ls3a5k_helper_tlbrd(CPULOONGARCHState *env);
+void ls3a5k_helper_tlbclr(CPULOONGARCHState *env);
+void ls3a5k_helper_tlbflush(CPULOONGARCHState *env);
+void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx);
+void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr,
+ target_ulong info, int op);
+void ls3a5k_flush_vtlb(CPULOONGARCHState *env);
+void ls3a5k_flush_ftlb(CPULOONGARCHState *env);
+/*void loongarch_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int unused,
+ unsigned size);
+*/
+hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env, target_ulong address,
+ int rw);
+#endif
+
+#define cpu_signal_handler cpu_loongarch_signal_handler
+
+
+/* Hardware interrupts are deliverable iff CSR.CRMD.IE is set. */
+static inline bool cpu_loongarch_hw_interrupts_enabled(CPULOONGARCHState *env)
+{
+    return (env->CSR_CRMD & (1 << CSR_CRMD_IE_SHIFT)) != 0;
+}
+
+
+void loongarch_tcg_init(void);
+
+
+/* helper.c */
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+/* op_helper.c */
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
+int ieee_ex_to_loongarch(int xcpt);
+void update_pagemask(CPULOONGARCHState *env, target_ulong arg1, int32_t *pagemask);
+
+void cpu_loongarch_tlb_flush(CPULOONGARCHState *env);
+void sync_c0_status(CPULOONGARCHState *env, CPULOONGARCHState *cpu, int tc);
+
+void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env, uint32_t exception,
+ int error_code, uintptr_t pc);
+int loongarch_read_qxfer(CPUState *cs, const char *annex,
+ uint8_t *read_buf,
+ unsigned long offset, unsigned long len);
+int loongarch_write_qxfer(CPUState *cs, const char *annex,
+ const uint8_t *write_buf,
+ unsigned long offset, unsigned long len);
+
+/* Raise @exception with error code 0; @pc is the host return address
+ * used to restore guest state.  Never returns. */
+static inline void QEMU_NORETURN do_raise_exception(CPULOONGARCHState *env,
+                                                    uint32_t exception,
+                                                    uintptr_t pc)
+{
+    do_raise_exception_err(env, exception, 0, pc);
+}
+
+#endif
diff --git a/target/loongarch64/gdbstub.c b/target/loongarch64/gdbstub.c
new file mode 100644
index 0000000000000000000000000000000000000000..4013178f45bb048c5d64bb6a29fa03a2031ffe67
--- /dev/null
+++ b/target/loongarch64/gdbstub.c
@@ -0,0 +1,109 @@
+/*
+ * LOONGARCH gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/gdbstub.h"
+#ifdef CONFIG_TCG
+#include "exec/helper-proto.h"
+#endif
+/*
+ * gdbstub: read core register @n into @mem_buf.
+ * Registers 0..31 are the GPRs, 32 is the PC.  Returns the number of
+ * bytes appended, or 0 for an unknown register number.
+ */
+int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+
+    if (0 <= n && n < 32) {
+        return gdb_get_regl(mem_buf, env->active_tc.gpr[n]);
+    } else if (n == 32) {
+        return gdb_get_regl(mem_buf, env->active_tc.PC);
+    }
+    return 0;
+}
+
+/*
+ * gdbstub: write the target-endian value in @mem_buf to core register @n
+ * (0..31 = GPRs, 32 = PC).  Returns the number of bytes consumed, or 0
+ * for an unknown register number.  The comma expressions perform the
+ * store and then yield the consumed size.
+ */
+int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    target_ulong tmp = ldtul_p(mem_buf);
+
+    if (0 <= n && n < 32) {
+        return env->active_tc.gpr[n] = tmp, sizeof(target_ulong);
+    } else if (n == 32) {
+        return env->active_tc.PC = tmp, sizeof(target_ulong);
+    }
+    return 0;
+}
+
+/*
+ * gdbstub: read FPU register @n.  0..31 = 64-bit FPRs, 32..39 = the
+ * 8-bit condition flags cf[0..7], 40 = the 32-bit FCSR0.
+ * Returns bytes appended, or 0 for an unknown register.
+ */
+static int loongarch_gdb_get_fpu(CPULOONGARCHState *env, GByteArray *mem_buf, int n)
+{
+    if (0 <= n && n < 32) {
+        return gdb_get_reg64(mem_buf, env->active_fpu.fpr[n].d);
+    } else if (32 <= n && n < 40) {
+        return gdb_get_reg8(mem_buf, env->active_fpu.cf[n - 32]);
+    } else if (n == 40) {
+        return gdb_get_reg32(mem_buf, env->active_fpu.fcsr0);
+    }
+    return 0;
+}
+
+/*
+ * gdbstub: write FPU register @n from @mem_buf (layout mirrors
+ * loongarch_gdb_get_fpu).  Returns bytes consumed, or 0 when unknown.
+ */
+static int loongarch_gdb_set_fpu(CPULOONGARCHState *env, uint8_t *mem_buf, int n)
+{
+    if (0 <= n && n < 32) {
+        return env->active_fpu.fpr[n].d = ldq_p(mem_buf), 8;
+    } else if (32 <= n && n < 40) {
+        return env->active_fpu.cf[n - 32] = ldub_p(mem_buf), 1;
+    } else if (n == 40) {
+        return env->active_fpu.fcsr0 = ldl_p(mem_buf), 4;
+    }
+    return 0;
+}
+
+/* Register the 41 FPU registers (32 FPRs + 8 CFs + FCSR0) with gdbstub. */
+void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs)
+{
+    gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu,
+                             41, "loongarch-fpu64.xml", 0);
+}
+
+#ifdef CONFIG_TCG
+/*
+ * Serve a gdb qXfer read for the "cpucfg" annex: fill @read_buf with
+ * 32-bit CPUCFG words starting at word offset/4.  Offset and length must
+ * be word-aligned, otherwise 0 (no data) is returned.
+ *
+ * NOTE(review): on success this always reports 32 * 4 bytes regardless of
+ * @len, mirroring the fixed 32-word cpucfg space — confirm against the
+ * gdb client's expectations.
+ */
+int loongarch_read_qxfer(CPUState *cs, const char *annex, uint8_t *read_buf,
+                         unsigned long offset, unsigned long len)
+{
+    if (strncmp(annex, "cpucfg", sizeof("cpucfg") - 1) == 0) {
+        size_t i;
+
+        if (offset % 4 != 0 || len % 4 != 0) {
+            return 0;
+        }
+
+        /* QEMU coding style: loop bodies always take braces. */
+        for (i = offset; i < offset + len; i += 4) {
+            ((uint32_t *)read_buf)[(i - offset) / 4] =
+                helper_cpucfg(&(LOONGARCH_CPU(cs)->env), i / 4);
+        }
+        return 32 * 4;
+    }
+    return 0;
+}
+
+/* gdb qXfer write: no writable annexes are supported; always 0 bytes. */
+int loongarch_write_qxfer(CPUState *cs, const char *annex,
+                          const uint8_t *write_buf, unsigned long offset,
+                          unsigned long len)
+{
+    return 0;
+}
+#endif
diff --git a/target/loongarch64/helper.c b/target/loongarch64/helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..841240e57b052e185804dc103f89a356a25612cc
--- /dev/null
+++ b/target/loongarch64/helper.c
@@ -0,0 +1,727 @@
+/*
+ * LOONGARCH emulation helpers for qemu.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "hw/loongarch/cpudevs.h"
+
+
+#if !defined(CONFIG_USER_ONLY)
+
+/*
+ * Check permissions of a matching TLB entry and, on success, produce the
+ * physical address and page protection.  Returns a TLBRET_* code.
+ */
+static int ls3a5k_map_address_tlb_entry(
+    CPULOONGARCHState *env,
+    hwaddr *physical,
+    int *prot,
+    target_ulong address,
+    int rw,
+    int access_type,
+    ls3a5k_tlb_t *tlb)
+{
+    uint64_t mask = tlb->PageMask;
+    /* Bit just below the pair mask selects the odd (1) vs even (0) half. */
+    int n = !!(address & mask & ~(mask >> 1));
+    uint32_t plv = env->CSR_CRMD & CSR_CRMD_PLV;
+
+    /* Check access rights */
+    if (!(n ? tlb->V1 : tlb->V0)) {
+        return TLBRET_INVALID;
+    }
+
+    if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+        return TLBRET_XI;
+    }
+
+    if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+        return TLBRET_RI;
+    }
+
+    /* Current privilege level must not exceed the entry's PLV. */
+    if (plv > (n ? tlb->PLV1 : tlb->PLV0)) {
+        return TLBRET_PE;
+    }
+
+    /* Stores additionally require the write-enable bit. */
+    if (rw != MMU_DATA_STORE || (n ? tlb->WE1 : tlb->WE0)) {
+        /* PPN address
+         *  4 KB: [47:13]   [12;0]
+         * 16 KB: [47:15]   [14:0]
+         */
+        if (n) {
+            *physical = tlb->PPN1 | (address & (mask >> 1));
+        } else {
+            *physical = tlb->PPN0 | (address & (mask >> 1));
+        }
+        *prot = PAGE_READ;
+        if (n ? tlb->WE1 : tlb->WE0) {
+            *prot |= PAGE_WRITE;
+        }
+        if (!(n ? tlb->XI1 : tlb->XI0)) {
+            *prot |= PAGE_EXEC;
+        }
+        return TLBRET_MATCH;
+    }
+
+    return TLBRET_DIRTY;
+}
+
+/* Loongarch 3A5K -style MMU emulation */
+int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type)
+{
+ uint16_t asid = env->CSR_ASID & 0x3ff;
+ int i;
+ ls3a5k_tlb_t *tlb;
+
+ int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
+ int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
+
+ int ftlb_idx;
+
+ uint64_t mask;
+ uint64_t vpn; /* address to map */
+ uint64_t tag; /* address in TLB entry */
+
+ /* search VTLB */
+ for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+ mask = tlb->PageMask;
+
+ vpn = address & 0xffffffffe000 & ~mask;
+ tag = tlb->VPN & ~mask;
+
+ if ((tlb->G == 1 || tlb->ASID == asid)
+ && vpn == tag
+ && tlb->EHINV != 1)
+ {
+ return ls3a5k_map_address_tlb_entry(env, physical, prot,
+ address, rw, access_type, tlb);
+ }
+ }
+
+ if (ftlb_size == 0) {
+ return TLBRET_NOMATCH;
+ }
+
+ /* search FTLB */
+ mask = env->tlb->mmu.ls3a5k.ftlb_mask;
+ vpn = address & 0xffffffffe000 & ~mask;
+
+ ftlb_idx = (address & 0xffffffffc000) >> 15; /* 16 KB */
+ ftlb_idx = ftlb_idx & 0xff; /* [0,255] */
+
+ for (i = 0; i < 8; ++i) {
+ /* ---------- set 0 1 2 ... 7
+ * ftlb_idx -----------------------------------
+ * 0 | 0 1 2 ... 7
+ * 1 | 8 9 10 ... 15
+ * 2 | 16 17 18 ... 23
+ * ... |
+ * 255 | 2040 2041 2042 ... 2047
+ */
+ tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i];
+ tag = tlb->VPN & ~mask;
+
+ if ((tlb->G == 1 || tlb->ASID == asid)
+ && vpn == tag
+ && tlb->EHINV != 1)
+ {
+ return ls3a5k_map_address_tlb_entry(env, physical, prot,
+ address, rw, access_type, tlb);
+ }
+ }
+
+ return TLBRET_NOMATCH;
+}
+
+/*
+ * Full virtual-to-physical translation: direct-address mode, then the
+ * two direct-map windows, then a canonical-address check, and finally
+ * the TLB.  Returns a TLBRET_* code.
+ */
+static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical,
+                                int *prot, target_ulong real_address,
+                                int rw, int access_type, int mmu_idx)
+{
+    int user_mode = mmu_idx == LARCH_HFLAG_UM;
+    int kernel_mode = !user_mode;
+    unsigned plv, base_c, base_v, tmp;
+
+    /* effective address (modified for KVM T&E kernel segments) */
+    target_ulong address = real_address;
+
+    /* Check PG */
+    if (!(env->CSR_CRMD & CSR_CRMD_PG)) {
+        /* DA mode: identity map the low 48 bits, full access. */
+        *physical = address & 0xffffffffffffUL;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+
+    /* plv bit 0 = kernel, bit 3 = user; matched against the DMW PLV bits. */
+    plv = kernel_mode | (user_mode << 3);
+    base_v = address >> CSR_DMW_BASE_SH;
+    /* Check direct map window 0 */
+    base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH;
+    if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) {
+        *physical = dmwin_va2pa(address);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+    /* Check direct map window 1 */
+    base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH;
+    if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) {
+        *physical = dmwin_va2pa(address);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+    /* Check valid extension: bits [63:47] must be all 0s or all 1s. */
+    tmp = address >> 47;
+    if (!(tmp == 0 || tmp == 0x1ffff)) {
+        return TLBRET_BADADDR;
+    }
+    /* mapped address */
+    return env->tlb->map_address(env, physical, prot, real_address, rw,
+                                 access_type);
+}
+
+/* Flush QEMU's shadow TLB and mark every guest TLB entry in use again. */
+void cpu_loongarch_tlb_flush(CPULOONGARCHState *env)
+{
+    LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
+
+    /* Flush qemu's TLB and discard all shadowed entries. */
+    tlb_flush(CPU(cpu));
+    env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+#endif
+
+/*
+ * Map a TLBRET_* failure code to the architectural exception: record the
+ * faulting address in the relevant CSRs and latch exception_index /
+ * error_code on the CPU.  TLB refills (no matching entry) use the
+ * dedicated TLBR CSRs instead of BADV/TLBEHI.
+ */
+static void raise_mmu_exception(CPULOONGARCHState *env, target_ulong address,
+                                int rw, int tlb_error)
+{
+    CPUState *cs = CPU(loongarch_env_get_cpu(env));
+    int exception = 0, error_code = 0;
+
+    if (rw == MMU_INST_FETCH) {
+        error_code |= EXCP_INST_NOTAVAIL;
+    }
+
+    switch (tlb_error) {
+    default:
+        /* fallthrough: unknown codes are treated as bad addresses */
+    case TLBRET_BADADDR:
+        /* Reference to kernel address from user mode or supervisor mode */
+        /* Reference to supervisor address from user mode */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_AdES;
+        } else {
+            exception = EXCP_AdEL;
+        }
+        break;
+    case TLBRET_NOMATCH:
+        /* No TLB match for a mapped address */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_TLBS;
+        } else {
+            exception = EXCP_TLBL;
+        }
+        error_code |= EXCP_TLB_NOMATCH;
+        break;
+    case TLBRET_INVALID:
+        /* TLB match with no valid bit */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_TLBS;
+        } else {
+            exception = EXCP_TLBL;
+        }
+        break;
+    case TLBRET_DIRTY:
+        /* TLB match but 'D' bit is cleared */
+        exception = EXCP_LTLBL;
+        break;
+    case TLBRET_XI:
+        /* Execute-Inhibit Exception */
+        exception = EXCP_TLBXI;
+        break;
+    case TLBRET_RI:
+        /* Read-Inhibit Exception */
+        exception = EXCP_TLBRI;
+        break;
+    case TLBRET_PE:
+        /* Privileged Exception */
+        exception = EXCP_TLBPE;
+        break;
+    }
+
+    if (env->insn_flags & INSN_LOONGARCH) {
+        if (tlb_error == TLBRET_NOMATCH) {
+            /* Refill path: record into the dedicated TLB-refill CSRs. */
+            env->CSR_TLBRBADV = address;
+            env->CSR_TLBREHI = address & (TARGET_PAGE_MASK << 1);
+            cs->exception_index = exception;
+            env->error_code = error_code;
+            return;
+        }
+    }
+
+    /* Raise exception */
+    env->CSR_BADV = address;
+    cs->exception_index = exception;
+    env->error_code = error_code;
+
+    if (env->insn_flags & INSN_LOONGARCH) {
+        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
+    }
+}
+
+/*
+ * TCG tlb_fill hook: translate @address for @access_type and install the
+ * mapping in QEMU's shadow TLB.  Returns true on success; on failure,
+ * returns false when probing, otherwise raises the guest MMU exception
+ * (does not return).
+ */
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                            MMUAccessType access_type, int mmu_idx,
+                            bool probe, uintptr_t retaddr)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+    hwaddr physical;
+    int prot;
+    int loongarch_access_type;
+#endif
+    int ret = TLBRET_BADADDR;
+
+    qemu_log_mask(CPU_LOG_MMU,
+                  "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " mmu_idx %d\n",
+                  __func__, env->active_tc.PC, address, mmu_idx);
+
+    /* data access */
+#if !defined(CONFIG_USER_ONLY)
+    /* XXX: put correct access by using cpu_restore_state() correctly */
+    loongarch_access_type = ACCESS_INT;
+    ret = get_physical_address(env, &physical, &prot, address,
+                               access_type, loongarch_access_type, mmu_idx);
+    switch (ret) {
+    case TLBRET_MATCH:
+        qemu_log_mask(CPU_LOG_MMU,
+                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
+                      " prot %d asid %ld pc 0x%lx\n",
+                      __func__, address, physical, prot,
+                      env->CSR_ASID, env->active_tc.PC);
+        break;
+    default:
+        qemu_log_mask(CPU_LOG_MMU,
+                      "%s address=%" VADDR_PRIx " ret %d asid %ld pc 0x%lx\n",
+                      __func__, address, ret, env->CSR_ASID, env->active_tc.PC);
+        break;
+    }
+    if (ret == TLBRET_MATCH) {
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
+                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+                     mmu_idx, TARGET_PAGE_SIZE);
+        /*
+         * BUG FIX: the original fell through after a successful
+         * tlb_set_page() and raised an MMU exception for every hit;
+         * a successful fill must return true to the caller.
+         */
+        return true;
+    }
+    if (probe) {
+        return false;
+    }
+#endif
+
+    raise_mmu_exception(env, address, access_type, ret);
+    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/*
+ * Debug/monitor translation: return the physical address for @address,
+ * or -1 after latching the corresponding MMU exception state.
+ */
+hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env,
+                                       target_ulong address, int rw)
+{
+    hwaddr physical;
+    int prot;
+    int access_type;
+    int ret = 0;
+
+    /* data access */
+    access_type = ACCESS_INT;
+    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
+                               cpu_mmu_index(env, false));
+    if (ret != TLBRET_MATCH) {
+        raise_mmu_exception(env, address, rw, ret);
+        return -1LL;
+    } else {
+        return physical;
+    }
+}
+
+/* Human-readable exception names, indexed by EXCP_*; used for logging. */
+static const char * const excp_names[EXCP_LAST + 1] = {
+    [EXCP_RESET] = "reset",
+    [EXCP_SRESET] = "soft reset",
+    [EXCP_NMI] = "non-maskable interrupt",
+    [EXCP_EXT_INTERRUPT] = "interrupt",
+    [EXCP_AdEL] = "address error load",
+    [EXCP_AdES] = "address error store",
+    [EXCP_TLBF] = "TLB refill",
+    [EXCP_IBE] = "instruction bus error",
+    [EXCP_SYSCALL] = "syscall",
+    [EXCP_BREAK] = "break",
+    [EXCP_FPDIS] = "float unit unusable",
+    [EXCP_LSXDIS] = "vector128 unusable",
+    [EXCP_LASXDIS] = "vector256 unusable",
+    [EXCP_RI] = "reserved instruction",
+    [EXCP_OVERFLOW] = "arithmetic overflow",
+    [EXCP_TRAP] = "trap",
+    [EXCP_FPE] = "floating point",
+    [EXCP_LTLBL] = "TLB modify",
+    [EXCP_TLBL] = "TLB load",
+    [EXCP_TLBS] = "TLB store",
+    [EXCP_DBE] = "data bus error",
+    [EXCP_TLBXI] = "TLB execute-inhibit",
+    [EXCP_TLBRI] = "TLB read-inhibit",
+    [EXCP_TLBPE] = "TLB privileged error",   /* fixed typo: "priviledged" */
+};
+#endif
+
+/*
+ * PC to record in the exception return CSR: back up one instruction when
+ * the fault happened in a branch shadow (LARCH_HFLAG_BMASK set) so the
+ * jump is re-executed on return.
+ */
+target_ulong exception_resume_pc(CPULOONGARCHState *env)
+{
+    target_ulong bad_pc;
+
+    bad_pc = env->active_tc.PC;
+    if (env->hflags & LARCH_HFLAG_BMASK) {
+        /* If the exception was raised from a delay slot, come back to
+           the jump.  */
+        bad_pc -= 4;
+    }
+
+    return bad_pc;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Adjust hflags on handler entry; currently nothing to do. */
+static void set_hflags_for_handler(CPULOONGARCHState *env)
+{
+    /* Exception handlers are entered in 32-bit mode. */
+}
+
+/* Record the faulting instruction word into CSR_BADI for the handler. */
+static inline void set_badinstr_registers(CPULOONGARCHState *env)
+{
+    if ((env->insn_flags & INSN_LOONGARCH)) {
+        env->CSR_BADI = cpu_ldl_code(env, env->active_tc.PC);
+        return;
+    }
+}
+#endif
+
+/*
+ * Size in bytes of one vectored-interrupt entry slot, derived from
+ * ECFG.VS (bits [18:16]): 0 means no spacing, otherwise 2^VS
+ * instructions of 4 bytes each.
+ */
+static inline unsigned int get_vint_size(CPULOONGARCHState *env)
+{
+    unsigned int size = 0;
+
+    /* The field is masked to 3 bits, so the default case is unreachable. */
+    switch ((env->CSR_ECFG >> 16) & 0x7) {
+    case 0:
+        break;
+    case 1:
+        size = 2 * 4;   /* #Insts * inst_size */
+        break;
+    case 2:
+        size = 4 * 4;
+        break;
+    case 3:
+        size = 8 * 4;
+        break;
+    case 4:
+        size = 16 * 4;
+        break;
+    case 5:
+        size = 32 * 4;
+        break;
+    case 6:
+        size = 64 * 4;
+        break;
+    case 7:
+        size = 128 * 4;
+        break;
+    default:
+        printf("%s: unexpected value", __func__);
+        assert(0);
+    }
+
+    return size;
+}
+
+/*
+ * True when the pending exception is a TLB refill: a TLB load/store
+ * fault flagged with "no matching entry".
+ */
+#define is_refill(cs, env) (((cs->exception_index == EXCP_TLBL) \
+                            || (cs->exception_index == EXCP_TLBS)) \
+                            && (env->error_code & EXCP_TLB_NOMATCH))
+
/*
 * Deliver the exception or interrupt latched in cs->exception_index to
 * the guest: save the resume PC into CSR_ERA (or CSR_TLBRERA for TLB
 * refills), snapshot the PLV/IE bits of CSR_CRMD into CSR_PRMD (or
 * CSR_TLBRPRMD), record the exception code in CSR_ESTAT.Excode, and
 * redirect execution to the appropriate handler entry point.
 * Finally clears cs->exception_index.
 */
void loongarch_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
    CPULOONGARCHState *env = &cpu->env;
    bool update_badinstr = 0;
    int cause = -1;
    const char *name;

    /* Trace entry (interrupts excluded to avoid flooding the log). */
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown";
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                 " TLBRERA 0x%016lx" " %s exception\n", __func__,
                 env->active_tc.PC, env->CSR_ERA, env->CSR_TLBRERA, name);
    }

    /*
     * First switch: pick a provisional cause code per exception and
     * decide whether CSR_BADI must be updated with the faulting
     * instruction word; all delivery paths funnel into set_ERA.
     */
    switch (cs->exception_index) {
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_NMI:
        /* Non-maskable: resume PC goes to CSR_ERRERA, not CSR_ERA. */
        env->CSR_ERRERA = exception_resume_pc(env);
        env->hflags &= ~LARCH_HFLAG_BMASK;
        env->hflags |= LARCH_HFLAG_64;
        env->hflags &= ~LARCH_HFLAG_AWRAP;
        env->hflags &= ~(LARCH_HFLAG_KSU);
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        goto set_ERA;
    case EXCP_LTLBL:
        cause = 1;
        /* BADI only valid if the faulting insn itself could be fetched. */
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_ERA;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_ERA;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_ERA;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_IBE:
        cause = 6;
        goto set_ERA;
    case EXCP_DBE:
        cause = 7;
        goto set_ERA;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_FPDIS:
    case EXCP_LSXDIS:
    case EXCP_LASXDIS:
        cause = 11;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_ERA;
    case EXCP_TLBXI:
    case EXCP_TLBPE:
        cause = 20;
        goto set_ERA;
    set_ERA:
        /* Refills resume via CSR_TLBRERA (bit 0 marks refill-in-progress). */
        if (is_refill(cs, env)) {
            env->CSR_TLBRERA = exception_resume_pc(env);
            env->CSR_TLBRERA |= 1;
        } else {
            env->CSR_ERA = exception_resume_pc(env);
        }

        if (update_badinstr) {
            set_badinstr_registers(env);
        }
        env->hflags &= ~(LARCH_HFLAG_KSU);

        env->hflags &= ~LARCH_HFLAG_BMASK;
        if (env->insn_flags & INSN_LOONGARCH) {
            /* save PLV and IE */
            if (is_refill(cs, env)) {
                env->CSR_TLBRPRMD &= (~0x7);
                env->CSR_TLBRPRMD |= (env->CSR_CRMD & 0x7);
            } else {
                env->CSR_PRMD &= (~0x7);
                env->CSR_PRMD |= (env->CSR_CRMD & 0x7);
            }

            /* Enter the handler with PLV0 and interrupts disabled. */
            env->CSR_CRMD &= ~(0x7);

            /*
             * Second switch: replace the provisional cause with the
             * architectural EXCCODE_* value used for CSR_ESTAT.Excode
             * and for indexing the vectored entry points.
             */
            switch (cs->exception_index) {
            case EXCP_EXT_INTERRUPT:
                break;
            case EXCP_TLBL:
                if (env->error_code & EXCP_INST_NOTAVAIL) {
                    cause = EXCCODE_TLBI;
                } else {
                    cause = EXCCODE_TLBL;
                }
                break;
            case EXCP_TLBS:
                cause = EXCCODE_TLBS;
                break;
            case EXCP_LTLBL:
                cause = EXCCODE_MOD;
                break;
            case EXCP_TLBRI:
                cause = EXCCODE_TLBRI;
                break;
            case EXCP_TLBXI:
                cause = EXCCODE_TLBXI;
                break;
            case EXCP_TLBPE:
                cause = EXCCODE_TLBPE;
                break;
            case EXCP_AdEL:
            case EXCP_AdES:
            case EXCP_IBE:
            case EXCP_DBE:
                cause = EXCCODE_ADE;
                break;
            case EXCP_SYSCALL:
                cause = EXCCODE_SYS;
                break;
            case EXCP_BREAK:
                cause = EXCCODE_BP;
                break;
            case EXCP_RI:
                cause = EXCCODE_RI;
                break;
            case EXCP_FPDIS:
                cause = EXCCODE_FPDIS;
                break;
            case EXCP_LSXDIS:
                cause = EXCCODE_LSXDIS;
                break;
            case EXCP_LASXDIS:
                cause = EXCCODE_LASXDIS;
                break;
            case EXCP_FPE:
                cause = EXCCODE_FPE;
                break;
            default:
                printf("Error: exception(%d) '%s' has not been supported\n",
                       cs->exception_index, excp_names[cs->exception_index]);
                abort();
            }

            /* Handler PC: base entry + cause * per-vector spacing. */
            uint32_t vec_size = get_vint_size(env);
            env->active_tc.PC = env->CSR_EEPN;
            env->active_tc.PC += cause * vec_size;
            if (is_refill(cs, env)) {
                /* TLB Refill */
                env->active_tc.PC = env->CSR_TLBRENT;
                break; /* Do not modify excode */
            }
            if (cs->exception_index == EXCP_EXT_INTERRUPT) {
                /* Interrupt */
                uint32_t vector = 0;
                uint32_t pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK;
                pending &= env->CSR_ECFG & CSR_ECFG_IPMASK;

                /* Find the highest-priority interrupt. */
                while (pending >>= 1) {
                    vector++;
                }
                env->active_tc.PC = env->CSR_EEPN +
                                    (EXCODE_IP + vector) * vec_size;
                if (qemu_loglevel_mask(CPU_LOG_INT)) {
                    qemu_log("%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
                             " cause %d\n" " A " TARGET_FMT_lx " D "
                             TARGET_FMT_lx " vector = %d ExC %08lx ExS %08lx\n",
                             __func__, env->active_tc.PC, env->CSR_ERA,
                             cause, env->CSR_BADV, env->CSR_DERA, vector,
                             env->CSR_ECFG, env->CSR_ESTAT);
                }
            }
            /* Excode */
            env->CSR_ESTAT = (env->CSR_ESTAT & ~(0x1f << CSR_ESTAT_EXC_SH)) |
                             (cause << CSR_ESTAT_EXC_SH);
        }
        set_hflags_for_handler(env);
        break;
    default:
        abort();
    }
    /* Trace exit (again skipping interrupts). */
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " ERA 0x%08lx" " cause %d%s\n"
                 " ESTAT %08lx EXCFG 0x%08lx BADVA 0x%08lx BADI 0x%08lx \
SYS_NUM %lu cpu %d asid 0x%lx" "\n",
                 __func__, env->active_tc.PC,
                 is_refill(cs, env) ? env->CSR_TLBRERA : env->CSR_ERA,
                 cause,
                 is_refill(cs, env) ? "(refill)" : "",
                 env->CSR_ESTAT, env->CSR_ECFG,
                 is_refill(cs, env) ? env->CSR_TLBRBADV : env->CSR_BADV,
                 env->CSR_BADI, env->active_tc.gpr[11], cs->cpu_index,
                 env->CSR_ASID
                 );
    }
#endif
    cs->exception_index = EXCP_NONE;
}
+
+bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+
+ if (cpu_loongarch_hw_interrupts_enabled(env) &&
+ cpu_loongarch_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ loongarch_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env,
+ uint32_t exception,
+ int error_code,
+ uintptr_t pc)
+{
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
+
+ qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
+ __func__, exception, error_code);
+ cs->exception_index = exception;
+ env->error_code = error_code;
+
+ cpu_loop_exit_restore(cs, pc);
+}
diff --git a/target/loongarch64/helper.h b/target/loongarch64/helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff2026ed82c32f7750010359e1de6fe17fdad854
--- /dev/null
+++ b/target/loongarch64/helper.h
@@ -0,0 +1,168 @@
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+DEF_HELPER_1(raise_exception_debug, noreturn, env)
+
+#if 0
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(ll, tl, env, tl, int)
+DEF_HELPER_3(lld, tl, env, tl, int)
+#endif
+#endif
+
+DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+
+DEF_HELPER_3(crc32, tl, tl, tl, i32)
+DEF_HELPER_3(crc32c, tl, tl, tl, i32)
+
+#ifndef CONFIG_USER_ONLY
+/* LoongISA CSR register */
+DEF_HELPER_2(csr_rdq, tl, env, i64)
+DEF_HELPER_3(csr_wrq, tl, env, tl, i64)
+DEF_HELPER_4(csr_xchgq, tl, env, tl, tl, i64)
+
+#endif /* !CONFIG_USER_ONLY */
+
+/* CP1 functions */
+DEF_HELPER_2(movfcsr2gr, tl, env, i32)
+DEF_HELPER_4(movgr2fcsr, void, env, tl, i32, i32)
+
+DEF_HELPER_2(float_cvtd_s, i64, env, i32)
+DEF_HELPER_2(float_cvtd_w, i64, env, i32)
+DEF_HELPER_2(float_cvtd_l, i64, env, i64)
+DEF_HELPER_2(float_cvts_d, i32, env, i64)
+DEF_HELPER_2(float_cvts_w, i32, env, i32)
+DEF_HELPER_2(float_cvts_l, i32, env, i64)
+
+DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64)
+FOP_PROTO(maddf)
+FOP_PROTO(msubf)
+FOP_PROTO(nmaddf)
+FOP_PROTO(nmsubf)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64)
+FOP_PROTO(max)
+FOP_PROTO(maxa)
+FOP_PROTO(min)
+FOP_PROTO(mina)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \
+DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64)
+FOP_PROTO(cvt)
+FOP_PROTO(round)
+FOP_PROTO(trunc)
+FOP_PROTO(ceil)
+FOP_PROTO(floor)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64)
+FOP_PROTO(sqrt)
+FOP_PROTO(rsqrt)
+FOP_PROTO(recip)
+FOP_PROTO(rint)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_1(float_ ## op ## _s, i32, i32) \
+DEF_HELPER_1(float_ ## op ## _d, i64, i64)
+FOP_PROTO(abs)
+FOP_PROTO(chs)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64)
+FOP_PROTO(add)
+FOP_PROTO(sub)
+FOP_PROTO(mul)
+FOP_PROTO(div)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(cmp_d_ ## op, i64, env, i64, i64) \
+DEF_HELPER_3(cmp_s_ ## op, i32, env, i32, i32)
+FOP_PROTO(af)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(lt)
+FOP_PROTO(ult)
+FOP_PROTO(le)
+FOP_PROTO(ule)
+FOP_PROTO(saf)
+FOP_PROTO(sun)
+FOP_PROTO(seq)
+FOP_PROTO(sueq)
+FOP_PROTO(slt)
+FOP_PROTO(sult)
+FOP_PROTO(sle)
+FOP_PROTO(sule)
+FOP_PROTO(or)
+FOP_PROTO(une)
+FOP_PROTO(ne)
+FOP_PROTO(sor)
+FOP_PROTO(sune)
+FOP_PROTO(sne)
+#undef FOP_PROTO
+
+/* Special functions */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbfill, void, env)
+DEF_HELPER_1(tlbsrch, void, env)
+DEF_HELPER_1(tlbrd, void, env)
+DEF_HELPER_1(tlbclr, void, env)
+DEF_HELPER_1(tlbflush, void, env)
+DEF_HELPER_4(invtlb, void, env, tl, tl, tl)
+DEF_HELPER_1(ertn, void, env)
+DEF_HELPER_5(lddir, void, env, tl, tl, tl, i32)
+DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
+DEF_HELPER_3(drdtime, void, env, tl, tl)
+DEF_HELPER_1(read_pgd, tl, env)
+#endif /* !CONFIG_USER_ONLY */
+DEF_HELPER_2(cpucfg, tl, env, tl)
+DEF_HELPER_1(idle, void, env)
+
+DEF_HELPER_3(float_exp2_s, i32, env, i32, i32)
+DEF_HELPER_3(float_exp2_d, i64, env, i64, i64)
+DEF_HELPER_2(float_logb_s, i32, env, i32)
+DEF_HELPER_2(float_logb_d, i64, env, i64)
+DEF_HELPER_3(movreg2cf, void, env, i32, tl)
+DEF_HELPER_2(movcf2reg, tl, env, i32)
+DEF_HELPER_3(movreg2cf_i32, void, env, i32, i32)
+DEF_HELPER_3(movreg2cf_i64, void, env, i32, i64)
+
+DEF_HELPER_2(cto_w, tl, env, tl)
+DEF_HELPER_2(ctz_w, tl, env, tl)
+DEF_HELPER_2(cto_d, tl, env, tl)
+DEF_HELPER_2(ctz_d, tl, env, tl)
+DEF_HELPER_2(bitrev_w, tl, env, tl)
+DEF_HELPER_2(bitrev_d, tl, env, tl)
+
+DEF_HELPER_2(load_scr, i64, env, i32)
+DEF_HELPER_3(store_scr, void, env, i32, i64)
+
+DEF_HELPER_3(asrtle_d, void, env, tl, tl)
+DEF_HELPER_3(asrtgt_d, void, env, tl, tl)
+
+DEF_HELPER_4(fsel, i64, env, i64, i64, i32)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_4(iocsr, void, env, tl, tl, i32)
+#endif
+DEF_HELPER_3(memtrace_addr, void, env, tl, i32)
+DEF_HELPER_2(memtrace_val, void, env, tl)
diff --git a/target/loongarch64/insn.decode b/target/loongarch64/insn.decode
new file mode 100644
index 0000000000000000000000000000000000000000..f194f70116c32427be522ea3776d6bb449a4fe3a
--- /dev/null
+++ b/target/loongarch64/insn.decode
@@ -0,0 +1,514 @@
+# Fields
+%sd 0:2
+%rj 5:5
+%rd 0:5
+%sj 5:2
+%ptr 5:3
+%rk 10:5
+%sa2 15:2
+%sa3 15:3
+%si5 10:s5
+%code 0:15
+%cond 10:4
+%cond2 0:4
+%ui5 10:5
+%ui6 10:6
+%ui3 10:3
+%ui4 10:4
+%op 5:5
+%ui8 10:8
+%msbw 16:5
+%lsbw 10:5
+%msbd 16:6
+%lsbd 10:6
+%fd 0:5
+%fj 5:5
+%fk 10:5
+%fcsrd 0:5
+%fcsrs 5:5
+%cd 0:3
+%cj 5:3
+%si12 10:s12
+%ui12 10:12
+%csr 10:14
+%cop 0:5
+%level 10:8
+%seq 10:8
+%whint 0:15
+%addr 10:5
+%info 5:5
+%invop 0:5
+%fa 15:5
+%vd 0:5
+%vj 5:5
+%vk 10:5
+%va 15:5
+%xd 0:5
+%xj 5:5
+%xk 10:5
+%xa 15:5
+%fcond 15:5
+%ca 15:3
+%vui5 15:5
+%si16 10:s16
+%si20 5:s20
+%si14 10:s14
+%hint 0:5
+%si9 10:s9
+%si10 10:s10
+%si11 10:s11
+%si8 10:s8
+%idx1 18:1
+%idx2 18:2
+%idx3 18:3
+%idx4 18:4
+%idx 18:5
+%offs21 0:s5 10:16
+%offs16 10:s16
+%offs 0:s10 10:16
+%mode 5:5
+%ui2 10:2
+%ui1 10:1
+%ui7 10:7
+%i13 5:13
+
+# Argument sets
+&fmt_sdrj sd rj
+&fmt_rdsj rd sj
+&fmt_rdrj rd rj
+&fmt_empty
+&fmt_rjrk rj rk
+&fmt_rdrjrksa2 rd rj rk sa2
+&fmt_rdrjrksa3 rd rj rk sa3
+&fmt_rdrjrk rd rj rk
+&fmt_code code
+&fmt_rdrjui5 rd rj ui5
+&fmt_rdrjui6 rd rj ui6
+&fmt_rdrjmsbwlsbw rd rj msbw lsbw
+&fmt_rdrjmsbdlsbd rd rj msbd lsbd
+&fmt_fdfjfk fd fj fk
+&fmt_fdfj fd fj
+&fmt_fdrj fd rj
+&fmt_rdfj rd fj
+&fmt_fcsrdrj fcsrd rj
+&fmt_rdfcsrs rd fcsrs
+&fmt_cdfj cd fj
+&fmt_fdcj fd cj
+&fmt_cdrj cd rj
+&fmt_rdcj rd cj
+&fmt_rdrjsi12 rd rj si12
+&fmt_rdrjui12 rd rj ui12
+&fmt_rdrjcsr rd rj csr
+&fmt_coprjsi12 cop rj si12
+&fmt_rdrjlevel rd rj level
+&fmt_rjseq rj seq
+&fmt_whint whint
+&fmt_invtlb addr info invop
+&fmt_fdfjfkfa fd fj fk fa
+&fmt_cdfjfkfcond cd fj fk fcond
+&fmt_fdfjfkca fd fj fk ca
+&fmt_rdrjsi16 rd rj si16
+&fmt_rdsi20 rd si20
+&fmt_rdrjsi14 rd rj si14
+&fmt_hintrjsi12 hint rj si12
+&fmt_fdrjsi12 fd rj si12
+&fmt_fdrjrk fd rj rk
+&fmt_rjoffs21 rj offs21
+&fmt_cjoffs21 cj offs21
+&fmt_rdrjoffs16 rd rj offs16
+&fmt_offs offs
+&fmt_rjrdoffs16 rj rd offs16
+
+# Formats
+@fmt_sdrj .... ........ ..... ..... ..... ... .. &fmt_sdrj %sd %rj
+@fmt_rdsj .... ........ ..... ..... ... .. ..... &fmt_rdsj %rd %sj
+@fmt_rdrj .... ........ ..... ..... ..... ..... &fmt_rdrj %rd %rj
+@fmt_empty .... ........ ..... ..... ..... ..... &fmt_empty
+@fmt_rjrk .... ........ ..... ..... ..... ..... &fmt_rjrk %rj %rk
+@fmt_rdrjrksa2 .... ........ ... .. ..... ..... ..... &fmt_rdrjrksa2 %rd %rj %rk %sa2
+@fmt_rdrjrksa3 .... ........ .. ... ..... ..... ..... &fmt_rdrjrksa3 %rd %rj %rk %sa3
+@fmt_rdrjrk .... ........ ..... ..... ..... ..... &fmt_rdrjrk %rd %rj %rk
+@fmt_code .... ........ ..... ............... &fmt_code %code
+@fmt_rdrjui5 .... ........ ..... ..... ..... ..... &fmt_rdrjui5 %rd %rj %ui5
+@fmt_rdrjui6 .... ........ .... ...... ..... ..... &fmt_rdrjui6 %rd %rj %ui6
+@fmt_rdrjmsbwlsbw .... ....... ..... . ..... ..... ..... &fmt_rdrjmsbwlsbw %rd %rj %msbw %lsbw
+@fmt_rdrjmsbdlsbd .... ...... ...... ...... ..... ..... &fmt_rdrjmsbdlsbd %rd %rj %msbd %lsbd
+@fmt_fdfjfk .... ........ ..... ..... ..... ..... &fmt_fdfjfk %fd %fj %fk
+@fmt_fdfj .... ........ ..... ..... ..... ..... &fmt_fdfj %fd %fj
+@fmt_fdrj .... ........ ..... ..... ..... ..... &fmt_fdrj %fd %rj
+@fmt_rdfj .... ........ ..... ..... ..... ..... &fmt_rdfj %rd %fj
+@fmt_fcsrdrj .... ........ ..... ..... ..... ..... &fmt_fcsrdrj %fcsrd %rj
+@fmt_rdfcsrs .... ........ ..... ..... ..... ..... &fmt_rdfcsrs %rd %fcsrs
+@fmt_cdfj .... ........ ..... ..... ..... .. ... &fmt_cdfj %cd %fj
+@fmt_fdcj .... ........ ..... ..... .. ... ..... &fmt_fdcj %fd %cj
+@fmt_cdrj .... ........ ..... ..... ..... .. ... &fmt_cdrj %cd %rj
+@fmt_rdcj .... ........ ..... ..... .. ... ..... &fmt_rdcj %rd %cj
+@fmt_rdrjsi12 .... ...... ............ ..... ..... &fmt_rdrjsi12 %rd %rj %si12
+@fmt_rdrjui12 .... ...... ............ ..... ..... &fmt_rdrjui12 %rd %rj %ui12
+@fmt_rdrjcsr .... .... .............. ..... ..... &fmt_rdrjcsr %rd %rj %csr
+@fmt_coprjsi12 .... ...... ............ ..... ..... &fmt_coprjsi12 %cop %rj %si12
+@fmt_rdrjlevel .... ........ .. ........ ..... ..... &fmt_rdrjlevel %rd %rj %level
+@fmt_rjseq .... ........ .. ........ ..... ..... &fmt_rjseq %rj %seq
+@fmt_whint .... ........ ..... ............... &fmt_whint %whint
+@fmt_invtlb ...... ...... ..... ..... ..... ..... &fmt_invtlb %addr %info %invop
+@fmt_fdfjfkfa .... ........ ..... ..... ..... ..... &fmt_fdfjfkfa %fd %fj %fk %fa
+@fmt_cdfjfkfcond .... ........ ..... ..... ..... .. ... &fmt_cdfjfkfcond %cd %fj %fk %fcond
+@fmt_fdfjfkca .... ........ .. ... ..... ..... ..... &fmt_fdfjfkca %fd %fj %fk %ca
+@fmt_rdrjsi16 .... .. ................ ..... ..... &fmt_rdrjsi16 %rd %rj %si16
+@fmt_rdsi20 .... ... .................... ..... &fmt_rdsi20 %rd %si20
+@fmt_rdrjsi14 .... .... .............. ..... ..... &fmt_rdrjsi14 %rd %rj %si14
+@fmt_hintrjsi12 .... ...... ............ ..... ..... &fmt_hintrjsi12 %hint %rj %si12
+@fmt_fdrjsi12 .... ...... ............ ..... ..... &fmt_fdrjsi12 %fd %rj %si12
+@fmt_fdrjrk .... ........ ..... ..... ..... ..... &fmt_fdrjrk %fd %rj %rk
+@fmt_rjoffs21 .... .. ................ ..... ..... &fmt_rjoffs21 %rj %offs21
+@fmt_cjoffs21 .... .. ................ .. ... ..... &fmt_cjoffs21 %cj %offs21
+@fmt_rdrjoffs16 .... .. ................ ..... ..... &fmt_rdrjoffs16 %rd %rj %offs16
+@fmt_offs .... .. .......................... &fmt_offs %offs
+@fmt_rjrdoffs16 .... .. ................ ..... ..... &fmt_rjrdoffs16 %rj %rd %offs16
+
+# Instructions
+
+# Fixed point arithmetic Instructions
+gr2scr 0000 00000000 00000 00010 ..... 000 .. @fmt_sdrj
+scr2gr 0000 00000000 00000 00011 000 .. ..... @fmt_rdsj
+clo_w 0000 00000000 00000 00100 ..... ..... @fmt_rdrj
+clz_w 0000 00000000 00000 00101 ..... ..... @fmt_rdrj
+cto_w 0000 00000000 00000 00110 ..... ..... @fmt_rdrj
+ctz_w 0000 00000000 00000 00111 ..... ..... @fmt_rdrj
+clo_d 0000 00000000 00000 01000 ..... ..... @fmt_rdrj
+clz_d 0000 00000000 00000 01001 ..... ..... @fmt_rdrj
+cto_d 0000 00000000 00000 01010 ..... ..... @fmt_rdrj
+ctz_d 0000 00000000 00000 01011 ..... ..... @fmt_rdrj
+revb_2h 0000 00000000 00000 01100 ..... ..... @fmt_rdrj
+revb_4h 0000 00000000 00000 01101 ..... ..... @fmt_rdrj
+revb_2w 0000 00000000 00000 01110 ..... ..... @fmt_rdrj
+revb_d 0000 00000000 00000 01111 ..... ..... @fmt_rdrj
+revh_2w 0000 00000000 00000 10000 ..... ..... @fmt_rdrj
+revh_d 0000 00000000 00000 10001 ..... ..... @fmt_rdrj
+bitrev_4b 0000 00000000 00000 10010 ..... ..... @fmt_rdrj
+bitrev_8b 0000 00000000 00000 10011 ..... ..... @fmt_rdrj
+bitrev_w 0000 00000000 00000 10100 ..... ..... @fmt_rdrj
+bitrev_d 0000 00000000 00000 10101 ..... ..... @fmt_rdrj
+ext_w_h 0000 00000000 00000 10110 ..... ..... @fmt_rdrj
+ext_w_b 0000 00000000 00000 10111 ..... ..... @fmt_rdrj
+rdtime_d 0000 00000000 00000 11010 ..... ..... @fmt_rdrj
+cpucfg 0000 00000000 00000 11011 ..... ..... @fmt_rdrj
+asrtle_d 0000 00000000 00010 ..... ..... 00000 @fmt_rjrk
+asrtgt_d 0000 00000000 00011 ..... ..... 00000 @fmt_rjrk
+alsl_w 0000 00000000 010 .. ..... ..... ..... @fmt_rdrjrksa2
+alsl_wu 0000 00000000 011 .. ..... ..... ..... @fmt_rdrjrksa2
+bytepick_w 0000 00000000 100 .. ..... ..... ..... @fmt_rdrjrksa2
+bytepick_d 0000 00000000 11 ... ..... ..... ..... @fmt_rdrjrksa3
+add_w 0000 00000001 00000 ..... ..... ..... @fmt_rdrjrk
+add_d 0000 00000001 00001 ..... ..... ..... @fmt_rdrjrk
+sub_w 0000 00000001 00010 ..... ..... ..... @fmt_rdrjrk
+sub_d 0000 00000001 00011 ..... ..... ..... @fmt_rdrjrk
+slt 0000 00000001 00100 ..... ..... ..... @fmt_rdrjrk
+sltu 0000 00000001 00101 ..... ..... ..... @fmt_rdrjrk
+maskeqz 0000 00000001 00110 ..... ..... ..... @fmt_rdrjrk
+masknez 0000 00000001 00111 ..... ..... ..... @fmt_rdrjrk
+nor 0000 00000001 01000 ..... ..... ..... @fmt_rdrjrk
+and 0000 00000001 01001 ..... ..... ..... @fmt_rdrjrk
+or 0000 00000001 01010 ..... ..... ..... @fmt_rdrjrk
+xor 0000 00000001 01011 ..... ..... ..... @fmt_rdrjrk
+orn 0000 00000001 01100 ..... ..... ..... @fmt_rdrjrk
+andn 0000 00000001 01101 ..... ..... ..... @fmt_rdrjrk
+sll_w 0000 00000001 01110 ..... ..... ..... @fmt_rdrjrk
+srl_w 0000 00000001 01111 ..... ..... ..... @fmt_rdrjrk
+sra_w 0000 00000001 10000 ..... ..... ..... @fmt_rdrjrk
+sll_d 0000 00000001 10001 ..... ..... ..... @fmt_rdrjrk
+srl_d 0000 00000001 10010 ..... ..... ..... @fmt_rdrjrk
+sra_d 0000 00000001 10011 ..... ..... ..... @fmt_rdrjrk
+rotr_w 0000 00000001 10110 ..... ..... ..... @fmt_rdrjrk
+rotr_d 0000 00000001 10111 ..... ..... ..... @fmt_rdrjrk
+mul_w 0000 00000001 11000 ..... ..... ..... @fmt_rdrjrk
+mulh_w 0000 00000001 11001 ..... ..... ..... @fmt_rdrjrk
+mulh_wu 0000 00000001 11010 ..... ..... ..... @fmt_rdrjrk
+mul_d 0000 00000001 11011 ..... ..... ..... @fmt_rdrjrk
+mulh_d 0000 00000001 11100 ..... ..... ..... @fmt_rdrjrk
+mulh_du 0000 00000001 11101 ..... ..... ..... @fmt_rdrjrk
+mulw_d_w 0000 00000001 11110 ..... ..... ..... @fmt_rdrjrk
+mulw_d_wu 0000 00000001 11111 ..... ..... ..... @fmt_rdrjrk
+div_w 0000 00000010 00000 ..... ..... ..... @fmt_rdrjrk
+mod_w 0000 00000010 00001 ..... ..... ..... @fmt_rdrjrk
+div_wu 0000 00000010 00010 ..... ..... ..... @fmt_rdrjrk
+mod_wu 0000 00000010 00011 ..... ..... ..... @fmt_rdrjrk
+div_d 0000 00000010 00100 ..... ..... ..... @fmt_rdrjrk
+mod_d 0000 00000010 00101 ..... ..... ..... @fmt_rdrjrk
+div_du 0000 00000010 00110 ..... ..... ..... @fmt_rdrjrk
+mod_du 0000 00000010 00111 ..... ..... ..... @fmt_rdrjrk
+crc_w_b_w 0000 00000010 01000 ..... ..... ..... @fmt_rdrjrk
+crc_w_h_w 0000 00000010 01001 ..... ..... ..... @fmt_rdrjrk
+crc_w_w_w 0000 00000010 01010 ..... ..... ..... @fmt_rdrjrk
+crc_w_d_w 0000 00000010 01011 ..... ..... ..... @fmt_rdrjrk
+crcc_w_b_w 0000 00000010 01100 ..... ..... ..... @fmt_rdrjrk
+crcc_w_h_w 0000 00000010 01101 ..... ..... ..... @fmt_rdrjrk
+crcc_w_w_w 0000 00000010 01110 ..... ..... ..... @fmt_rdrjrk
+crcc_w_d_w 0000 00000010 01111 ..... ..... ..... @fmt_rdrjrk
+break 0000 00000010 10100 ............... @fmt_code
+dbcl 0000 00000010 10101 ............... @fmt_code
+syscall 0000 00000010 10110 ............... @fmt_code
+alsl_d 0000 00000010 110 .. ..... ..... ..... @fmt_rdrjrksa2
+slli_w 0000 00000100 00001 ..... ..... ..... @fmt_rdrjui5
+slli_d 0000 00000100 0001 ...... ..... ..... @fmt_rdrjui6
+srli_w 0000 00000100 01001 ..... ..... ..... @fmt_rdrjui5
+srli_d 0000 00000100 0101 ...... ..... ..... @fmt_rdrjui6
+srai_w 0000 00000100 10001 ..... ..... ..... @fmt_rdrjui5
+srai_d 0000 00000100 1001 ...... ..... ..... @fmt_rdrjui6
+rotri_w 0000 00000100 11001 ..... ..... ..... @fmt_rdrjui5
+rotri_d 0000 00000100 1101 ...... ..... ..... @fmt_rdrjui6
+bstrins_w 0000 0000011 ..... 0 ..... ..... ..... @fmt_rdrjmsbwlsbw
+bstrpick_w 0000 0000011 ..... 1 ..... ..... ..... @fmt_rdrjmsbwlsbw
+bstrins_d 0000 000010 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd
+bstrpick_d 0000 000011 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd
+
+# float Instructions
+fadd_s 0000 00010000 00001 ..... ..... ..... @fmt_fdfjfk
+fadd_d 0000 00010000 00010 ..... ..... ..... @fmt_fdfjfk
+fsub_s 0000 00010000 00101 ..... ..... ..... @fmt_fdfjfk
+fsub_d 0000 00010000 00110 ..... ..... ..... @fmt_fdfjfk
+fmul_s 0000 00010000 01001 ..... ..... ..... @fmt_fdfjfk
+fmul_d 0000 00010000 01010 ..... ..... ..... @fmt_fdfjfk
+fdiv_s 0000 00010000 01101 ..... ..... ..... @fmt_fdfjfk
+fdiv_d 0000 00010000 01110 ..... ..... ..... @fmt_fdfjfk
+fmax_s 0000 00010000 10001 ..... ..... ..... @fmt_fdfjfk
+fmax_d 0000 00010000 10010 ..... ..... ..... @fmt_fdfjfk
+fmin_s 0000 00010000 10101 ..... ..... ..... @fmt_fdfjfk
+fmin_d 0000 00010000 10110 ..... ..... ..... @fmt_fdfjfk
+fmaxa_s 0000 00010000 11001 ..... ..... ..... @fmt_fdfjfk
+fmaxa_d 0000 00010000 11010 ..... ..... ..... @fmt_fdfjfk
+fmina_s 0000 00010000 11101 ..... ..... ..... @fmt_fdfjfk
+fmina_d 0000 00010000 11110 ..... ..... ..... @fmt_fdfjfk
+fscaleb_s 0000 00010001 00001 ..... ..... ..... @fmt_fdfjfk
+fscaleb_d 0000 00010001 00010 ..... ..... ..... @fmt_fdfjfk
+fcopysign_s 0000 00010001 00101 ..... ..... ..... @fmt_fdfjfk
+fcopysign_d 0000 00010001 00110 ..... ..... ..... @fmt_fdfjfk
+fabs_s 0000 00010001 01000 00001 ..... ..... @fmt_fdfj
+fabs_d 0000 00010001 01000 00010 ..... ..... @fmt_fdfj
+fneg_s 0000 00010001 01000 00101 ..... ..... @fmt_fdfj
+fneg_d 0000 00010001 01000 00110 ..... ..... @fmt_fdfj
+flogb_s 0000 00010001 01000 01001 ..... ..... @fmt_fdfj
+flogb_d 0000 00010001 01000 01010 ..... ..... @fmt_fdfj
+fclass_s 0000 00010001 01000 01101 ..... ..... @fmt_fdfj
+fclass_d 0000 00010001 01000 01110 ..... ..... @fmt_fdfj
+fsqrt_s 0000 00010001 01000 10001 ..... ..... @fmt_fdfj
+fsqrt_d 0000 00010001 01000 10010 ..... ..... @fmt_fdfj
+frecip_s 0000 00010001 01000 10101 ..... ..... @fmt_fdfj
+frecip_d 0000 00010001 01000 10110 ..... ..... @fmt_fdfj
+frsqrt_s 0000 00010001 01000 11001 ..... ..... @fmt_fdfj
+frsqrt_d 0000 00010001 01000 11010 ..... ..... @fmt_fdfj
+fmov_s 0000 00010001 01001 00101 ..... ..... @fmt_fdfj
+fmov_d 0000 00010001 01001 00110 ..... ..... @fmt_fdfj
+movgr2fr_w 0000 00010001 01001 01001 ..... ..... @fmt_fdrj
+movgr2fr_d 0000 00010001 01001 01010 ..... ..... @fmt_fdrj
+movgr2frh_w 0000 00010001 01001 01011 ..... ..... @fmt_fdrj
+movfr2gr_s 0000 00010001 01001 01101 ..... ..... @fmt_rdfj
+movfr2gr_d 0000 00010001 01001 01110 ..... ..... @fmt_rdfj
+movfrh2gr_s 0000 00010001 01001 01111 ..... ..... @fmt_rdfj
+movgr2fcsr 0000 00010001 01001 10000 ..... ..... @fmt_fcsrdrj
+movfcsr2gr 0000 00010001 01001 10010 ..... ..... @fmt_rdfcsrs
+movfr2cf 0000 00010001 01001 10100 ..... 00 ... @fmt_cdfj
+movcf2fr 0000 00010001 01001 10101 00 ... ..... @fmt_fdcj
+movgr2cf 0000 00010001 01001 10110 ..... 00 ... @fmt_cdrj
+movcf2gr 0000 00010001 01001 10111 00 ... ..... @fmt_rdcj
+fcvt_s_d 0000 00010001 10010 00110 ..... ..... @fmt_fdfj
+fcvt_d_s 0000 00010001 10010 01001 ..... ..... @fmt_fdfj
+ftintrm_w_s 0000 00010001 10100 00001 ..... ..... @fmt_fdfj
+ftintrm_w_d 0000 00010001 10100 00010 ..... ..... @fmt_fdfj
+ftintrm_l_s 0000 00010001 10100 01001 ..... ..... @fmt_fdfj
+ftintrm_l_d 0000 00010001 10100 01010 ..... ..... @fmt_fdfj
+ftintrp_w_s 0000 00010001 10100 10001 ..... ..... @fmt_fdfj
+ftintrp_w_d 0000 00010001 10100 10010 ..... ..... @fmt_fdfj
+ftintrp_l_s 0000 00010001 10100 11001 ..... ..... @fmt_fdfj
+ftintrp_l_d 0000 00010001 10100 11010 ..... ..... @fmt_fdfj
+ftintrz_w_s 0000 00010001 10101 00001 ..... ..... @fmt_fdfj
+ftintrz_w_d 0000 00010001 10101 00010 ..... ..... @fmt_fdfj
+ftintrz_l_s 0000 00010001 10101 01001 ..... ..... @fmt_fdfj
+ftintrz_l_d 0000 00010001 10101 01010 ..... ..... @fmt_fdfj
+ftintrne_w_s 0000 00010001 10101 10001 ..... ..... @fmt_fdfj
+ftintrne_w_d 0000 00010001 10101 10010 ..... ..... @fmt_fdfj
+ftintrne_l_s 0000 00010001 10101 11001 ..... ..... @fmt_fdfj
+ftintrne_l_d 0000 00010001 10101 11010 ..... ..... @fmt_fdfj
+ftint_w_s 0000 00010001 10110 00001 ..... ..... @fmt_fdfj
+ftint_w_d 0000 00010001 10110 00010 ..... ..... @fmt_fdfj
+ftint_l_s 0000 00010001 10110 01001 ..... ..... @fmt_fdfj
+ftint_l_d 0000 00010001 10110 01010 ..... ..... @fmt_fdfj
+ffint_s_w 0000 00010001 11010 00100 ..... ..... @fmt_fdfj
+ffint_s_l 0000 00010001 11010 00110 ..... ..... @fmt_fdfj
+ffint_d_w 0000 00010001 11010 01000 ..... ..... @fmt_fdfj
+ffint_d_l 0000 00010001 11010 01010 ..... ..... @fmt_fdfj
+frint_s 0000 00010001 11100 10001 ..... ..... @fmt_fdfj
+frint_d 0000 00010001 11100 10010 ..... ..... @fmt_fdfj
+
+# 12 bit immediate Instructions
+slti 0000 001000 ............ ..... ..... @fmt_rdrjsi12
+sltui 0000 001001 ............ ..... ..... @fmt_rdrjsi12
+addi_w 0000 001010 ............ ..... ..... @fmt_rdrjsi12
+addi_d 0000 001011 ............ ..... ..... @fmt_rdrjsi12
+lu52i_d 0000 001100 ............ ..... ..... @fmt_rdrjsi12
+andi 0000 001101 ............ ..... ..... @fmt_rdrjui12
+ori 0000 001110 ............ ..... ..... @fmt_rdrjui12
+xori 0000 001111 ............ ..... ..... @fmt_rdrjui12
+
+# core Instructions
+csrxchg 0000 0100 .............. ..... ..... @fmt_rdrjcsr
+cacop 0000 011000 ............ ..... ..... @fmt_coprjsi12
+lddir 0000 01100100 00 ........ ..... ..... @fmt_rdrjlevel
+ldpte 0000 01100100 01 ........ ..... 00000 @fmt_rjseq
+iocsrrd_b 0000 01100100 10000 00000 ..... ..... @fmt_rdrj
+iocsrrd_h 0000 01100100 10000 00001 ..... ..... @fmt_rdrj
+iocsrrd_w 0000 01100100 10000 00010 ..... ..... @fmt_rdrj
+iocsrrd_d 0000 01100100 10000 00011 ..... ..... @fmt_rdrj
+iocsrwr_b 0000 01100100 10000 00100 ..... ..... @fmt_rdrj
+iocsrwr_h 0000 01100100 10000 00101 ..... ..... @fmt_rdrj
+iocsrwr_w 0000 01100100 10000 00110 ..... ..... @fmt_rdrj
+iocsrwr_d 0000 01100100 10000 00111 ..... ..... @fmt_rdrj
+tlbclr 0000 01100100 10000 01000 00000 00000 @fmt_empty
+tlbflush 0000 01100100 10000 01001 00000 00000 @fmt_empty
+tlbsrch 0000 01100100 10000 01010 00000 00000 @fmt_empty
+tlbrd 0000 01100100 10000 01011 00000 00000 @fmt_empty
+tlbwr 0000 01100100 10000 01100 00000 00000 @fmt_empty
+tlbfill 0000 01100100 10000 01101 00000 00000 @fmt_empty
+ertn 0000 01100100 10000 01110 00000 00000 @fmt_empty
+idle 0000 01100100 10001 ............... @fmt_whint
+invtlb 0000 01100100 10011 ..... ..... ..... @fmt_invtlb
+
+# four-operand Instructions
+fmadd_s 0000 10000001 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmadd_d 0000 10000010 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmsub_s 0000 10000101 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmsub_d 0000 10000110 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmadd_s 0000 10001001 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmadd_d 0000 10001010 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmsub_s 0000 10001101 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmsub_d 0000 10001110 ..... ..... ..... ..... @fmt_fdfjfkfa
+fcmp_cond_s 0000 11000001 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
+fcmp_cond_d 0000 11000010 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
+fsel 0000 11010000 00 ... ..... ..... ..... @fmt_fdfjfkca
+
+# long immediate Instructions
+addu16i_d 0001 00 ................ ..... ..... @fmt_rdrjsi16
+lu12i_w 0001 010 .................... ..... @fmt_rdsi20
+lu32i_d 0001 011 .................... ..... @fmt_rdsi20
+pcaddi 0001 100 .................... ..... @fmt_rdsi20
+pcalau12i 0001 101 .................... ..... @fmt_rdsi20
+pcaddu12i 0001 110 .................... ..... @fmt_rdsi20
+pcaddu18i 0001 111 .................... ..... @fmt_rdsi20
+
+# load/store Instructions
+ll_w 0010 0000 .............. ..... ..... @fmt_rdrjsi14
+sc_w 0010 0001 .............. ..... ..... @fmt_rdrjsi14
+ll_d 0010 0010 .............. ..... ..... @fmt_rdrjsi14
+sc_d 0010 0011 .............. ..... ..... @fmt_rdrjsi14
+ldptr_w 0010 0100 .............. ..... ..... @fmt_rdrjsi14
+stptr_w 0010 0101 .............. ..... ..... @fmt_rdrjsi14
+ldptr_d 0010 0110 .............. ..... ..... @fmt_rdrjsi14
+stptr_d 0010 0111 .............. ..... ..... @fmt_rdrjsi14
+ld_b 0010 100000 ............ ..... ..... @fmt_rdrjsi12
+ld_h 0010 100001 ............ ..... ..... @fmt_rdrjsi12
+ld_w 0010 100010 ............ ..... ..... @fmt_rdrjsi12
+ld_d 0010 100011 ............ ..... ..... @fmt_rdrjsi12
+st_b 0010 100100 ............ ..... ..... @fmt_rdrjsi12
+st_h 0010 100101 ............ ..... ..... @fmt_rdrjsi12
+st_w 0010 100110 ............ ..... ..... @fmt_rdrjsi12
+st_d 0010 100111 ............ ..... ..... @fmt_rdrjsi12
+ld_bu 0010 101000 ............ ..... ..... @fmt_rdrjsi12
+ld_hu 0010 101001 ............ ..... ..... @fmt_rdrjsi12
+ld_wu 0010 101010 ............ ..... ..... @fmt_rdrjsi12
+preld 0010 101011 ............ ..... ..... @fmt_hintrjsi12
+fld_s 0010 101100 ............ ..... ..... @fmt_fdrjsi12
+fst_s 0010 101101 ............ ..... ..... @fmt_fdrjsi12
+fld_d 0010 101110 ............ ..... ..... @fmt_fdrjsi12
+fst_d 0010 101111 ............ ..... ..... @fmt_fdrjsi12
+ldx_b 0011 10000000 00000 ..... ..... ..... @fmt_rdrjrk
+ldx_h 0011 10000000 01000 ..... ..... ..... @fmt_rdrjrk
+ldx_w 0011 10000000 10000 ..... ..... ..... @fmt_rdrjrk
+ldx_d 0011 10000000 11000 ..... ..... ..... @fmt_rdrjrk
+stx_b 0011 10000001 00000 ..... ..... ..... @fmt_rdrjrk
+stx_h 0011 10000001 01000 ..... ..... ..... @fmt_rdrjrk
+stx_w 0011 10000001 10000 ..... ..... ..... @fmt_rdrjrk
+stx_d 0011 10000001 11000 ..... ..... ..... @fmt_rdrjrk
+ldx_bu 0011 10000010 00000 ..... ..... ..... @fmt_rdrjrk
+ldx_hu 0011 10000010 01000 ..... ..... ..... @fmt_rdrjrk
+ldx_wu 0011 10000010 10000 ..... ..... ..... @fmt_rdrjrk
+fldx_s 0011 10000011 00000 ..... ..... ..... @fmt_fdrjrk
+fldx_d 0011 10000011 01000 ..... ..... ..... @fmt_fdrjrk
+fstx_s 0011 10000011 10000 ..... ..... ..... @fmt_fdrjrk
+fstx_d 0011 10000011 11000 ..... ..... ..... @fmt_fdrjrk
+amswap_w 0011 10000110 00000 ..... ..... ..... @fmt_rdrjrk
+amswap_d 0011 10000110 00001 ..... ..... ..... @fmt_rdrjrk
+amadd_w 0011 10000110 00010 ..... ..... ..... @fmt_rdrjrk
+amadd_d 0011 10000110 00011 ..... ..... ..... @fmt_rdrjrk
+amand_w 0011 10000110 00100 ..... ..... ..... @fmt_rdrjrk
+amand_d 0011 10000110 00101 ..... ..... ..... @fmt_rdrjrk
+amor_w 0011 10000110 00110 ..... ..... ..... @fmt_rdrjrk
+amor_d 0011 10000110 00111 ..... ..... ..... @fmt_rdrjrk
+amxor_w 0011 10000110 01000 ..... ..... ..... @fmt_rdrjrk
+amxor_d 0011 10000110 01001 ..... ..... ..... @fmt_rdrjrk
+ammax_w 0011 10000110 01010 ..... ..... ..... @fmt_rdrjrk
+ammax_d 0011 10000110 01011 ..... ..... ..... @fmt_rdrjrk
+ammin_w 0011 10000110 01100 ..... ..... ..... @fmt_rdrjrk
+ammin_d 0011 10000110 01101 ..... ..... ..... @fmt_rdrjrk
+ammax_wu 0011 10000110 01110 ..... ..... ..... @fmt_rdrjrk
+ammax_du 0011 10000110 01111 ..... ..... ..... @fmt_rdrjrk
+ammin_wu 0011 10000110 10000 ..... ..... ..... @fmt_rdrjrk
+ammin_du 0011 10000110 10001 ..... ..... ..... @fmt_rdrjrk
+amswap_db_w 0011 10000110 10010 ..... ..... ..... @fmt_rdrjrk
+amswap_db_d 0011 10000110 10011 ..... ..... ..... @fmt_rdrjrk
+amadd_db_w 0011 10000110 10100 ..... ..... ..... @fmt_rdrjrk
+amadd_db_d 0011 10000110 10101 ..... ..... ..... @fmt_rdrjrk
+amand_db_w 0011 10000110 10110 ..... ..... ..... @fmt_rdrjrk
+amand_db_d 0011 10000110 10111 ..... ..... ..... @fmt_rdrjrk
+amor_db_w 0011 10000110 11000 ..... ..... ..... @fmt_rdrjrk
+amor_db_d 0011 10000110 11001 ..... ..... ..... @fmt_rdrjrk
+amxor_db_w 0011 10000110 11010 ..... ..... ..... @fmt_rdrjrk
+amxor_db_d 0011 10000110 11011 ..... ..... ..... @fmt_rdrjrk
+ammax_db_w 0011 10000110 11100 ..... ..... ..... @fmt_rdrjrk
+ammax_db_d 0011 10000110 11101 ..... ..... ..... @fmt_rdrjrk
+ammin_db_w 0011 10000110 11110 ..... ..... ..... @fmt_rdrjrk
+ammin_db_d 0011 10000110 11111 ..... ..... ..... @fmt_rdrjrk
+ammax_db_wu 0011 10000111 00000 ..... ..... ..... @fmt_rdrjrk
+ammax_db_du 0011 10000111 00001 ..... ..... ..... @fmt_rdrjrk
+ammin_db_wu 0011 10000111 00010 ..... ..... ..... @fmt_rdrjrk
+ammin_db_du 0011 10000111 00011 ..... ..... ..... @fmt_rdrjrk
+dbar 0011 10000111 00100 ............... @fmt_whint
+ibar 0011 10000111 00101 ............... @fmt_whint
+fldgt_s 0011 10000111 01000 ..... ..... ..... @fmt_fdrjrk
+fldgt_d 0011 10000111 01001 ..... ..... ..... @fmt_fdrjrk
+fldle_s 0011 10000111 01010 ..... ..... ..... @fmt_fdrjrk
+fldle_d 0011 10000111 01011 ..... ..... ..... @fmt_fdrjrk
+fstgt_s 0011 10000111 01100 ..... ..... ..... @fmt_fdrjrk
+fstgt_d 0011 10000111 01101 ..... ..... ..... @fmt_fdrjrk
+fstle_s 0011 10000111 01110 ..... ..... ..... @fmt_fdrjrk
+fstle_d 0011 10000111 01111 ..... ..... ..... @fmt_fdrjrk
+ldgt_b 0011 10000111 10000 ..... ..... ..... @fmt_rdrjrk
+ldgt_h 0011 10000111 10001 ..... ..... ..... @fmt_rdrjrk
+ldgt_w 0011 10000111 10010 ..... ..... ..... @fmt_rdrjrk
+ldgt_d 0011 10000111 10011 ..... ..... ..... @fmt_rdrjrk
+ldle_b 0011 10000111 10100 ..... ..... ..... @fmt_rdrjrk
+ldle_h 0011 10000111 10101 ..... ..... ..... @fmt_rdrjrk
+ldle_w 0011 10000111 10110 ..... ..... ..... @fmt_rdrjrk
+ldle_d 0011 10000111 10111 ..... ..... ..... @fmt_rdrjrk
+stgt_b 0011 10000111 11000 ..... ..... ..... @fmt_rdrjrk
+stgt_h 0011 10000111 11001 ..... ..... ..... @fmt_rdrjrk
+stgt_w 0011 10000111 11010 ..... ..... ..... @fmt_rdrjrk
+stgt_d 0011 10000111 11011 ..... ..... ..... @fmt_rdrjrk
+stle_b 0011 10000111 11100 ..... ..... ..... @fmt_rdrjrk
+stle_h 0011 10000111 11101 ..... ..... ..... @fmt_rdrjrk
+stle_w 0011 10000111 11110 ..... ..... ..... @fmt_rdrjrk
+stle_d 0011 10000111 11111 ..... ..... ..... @fmt_rdrjrk
+
+# jump Instructions
+beqz 0100 00 ................ ..... ..... @fmt_rjoffs21
+bnez 0100 01 ................ ..... ..... @fmt_rjoffs21
+bceqz 0100 10 ................ 00 ... ..... @fmt_cjoffs21
+bcnez 0100 10 ................ 01 ... ..... @fmt_cjoffs21
+jirl 0100 11 ................ ..... ..... @fmt_rdrjoffs16
+b 0101 00 .......................... @fmt_offs
+bl 0101 01 .......................... @fmt_offs
+beq 0101 10 ................ ..... ..... @fmt_rjrdoffs16
+bne 0101 11 ................ ..... ..... @fmt_rjrdoffs16
+blt 0110 00 ................ ..... ..... @fmt_rjrdoffs16
+bge 0110 01 ................ ..... ..... @fmt_rjrdoffs16
+bltu 0110 10 ................ ..... ..... @fmt_rjrdoffs16
+bgeu 0110 11 ................ ..... ..... @fmt_rjrdoffs16
diff --git a/target/loongarch64/instmap.h b/target/loongarch64/instmap.h
new file mode 100644
index 0000000000000000000000000000000000000000..6e85847f8a4ced32b0b958a84e4a12ae3c291f15
--- /dev/null
+++ b/target/loongarch64/instmap.h
@@ -0,0 +1,216 @@
+/*
+ * Loongarch emulation for qemu: instruction opcode
+ *
+ * Copyright (c) 2020-2021
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_LARCH_INSTMAP_H
+#define TARGET_LARCH_INSTMAP_H
+
+enum {
+ /* fix opcodes */
+ OPC_LARCH_CLO_W = (0x000004 << 10),
+ OPC_LARCH_CLZ_W = (0x000005 << 10),
+ OPC_LARCH_CLO_D = (0x000008 << 10),
+ OPC_LARCH_CLZ_D = (0x000009 << 10),
+ OPC_LARCH_REVB_2H = (0x00000C << 10),
+ OPC_LARCH_REVB_4H = (0x00000D << 10),
+ OPC_LARCH_REVH_D = (0x000011 << 10),
+ OPC_LARCH_BREV_4B = (0x000012 << 10),
+ OPC_LARCH_BREV_8B = (0x000013 << 10),
+ OPC_LARCH_EXT_WH = (0x000016 << 10),
+ OPC_LARCH_EXT_WB = (0x000017 << 10),
+
+ OPC_LARCH_ADD_W = (0x00020 << 15),
+ OPC_LARCH_ADD_D = (0x00021 << 15),
+ OPC_LARCH_SUB_W = (0x00022 << 15),
+ OPC_LARCH_SUB_D = (0x00023 << 15),
+ OPC_LARCH_SLT = (0x00024 << 15),
+ OPC_LARCH_SLTU = (0x00025 << 15),
+ OPC_LARCH_MASKEQZ = (0x00026 << 15),
+ OPC_LARCH_MASKNEZ = (0x00027 << 15),
+ OPC_LARCH_NOR = (0x00028 << 15),
+ OPC_LARCH_AND = (0x00029 << 15),
+ OPC_LARCH_OR = (0x0002A << 15),
+ OPC_LARCH_XOR = (0x0002B << 15),
+ OPC_LARCH_SLL_W = (0x0002E << 15),
+ OPC_LARCH_SRL_W = (0x0002F << 15),
+ OPC_LARCH_SRA_W = (0x00030 << 15),
+ OPC_LARCH_SLL_D = (0x00031 << 15),
+ OPC_LARCH_SRL_D = (0x00032 << 15),
+ OPC_LARCH_SRA_D = (0x00033 << 15),
+ OPC_LARCH_ROTR_W = (0x00036 << 15),
+ OPC_LARCH_ROTR_D = (0x00037 << 15),
+ OPC_LARCH_MUL_W = (0x00038 << 15),
+ OPC_LARCH_MULH_W = (0x00039 << 15),
+ OPC_LARCH_MULH_WU = (0x0003A << 15),
+ OPC_LARCH_MUL_D = (0x0003B << 15),
+ OPC_LARCH_MULH_D = (0x0003C << 15),
+ OPC_LARCH_MULH_DU = (0x0003D << 15),
+ OPC_LARCH_DIV_W = (0x00040 << 15),
+ OPC_LARCH_MOD_W = (0x00041 << 15),
+ OPC_LARCH_DIV_WU = (0x00042 << 15),
+ OPC_LARCH_MOD_WU = (0x00043 << 15),
+ OPC_LARCH_DIV_D = (0x00044 << 15),
+ OPC_LARCH_MOD_D = (0x00045 << 15),
+ OPC_LARCH_DIV_DU = (0x00046 << 15),
+ OPC_LARCH_MOD_DU = (0x00047 << 15),
+ OPC_LARCH_SRLI_W = (0x00089 << 15),
+ OPC_LARCH_SRAI_W = (0x00091 << 15),
+ OPC_LARCH_ROTRI_W = (0x00099 << 15),
+
+ OPC_LARCH_ALSL_W = (0x0002 << 17),
+ OPC_LARCH_ALSL_D = (0x0016 << 17),
+
+ OPC_LARCH_TRINS_W = (0x003 << 21) | (0x0 << 15),
+ OPC_LARCH_TRPICK_W = (0x003 << 21) | (0x1 << 15),
+};
+
+enum {
+ /* float opcodes */
+ OPC_LARCH_FABS_S = (0x004501 << 10),
+ OPC_LARCH_FABS_D = (0x004502 << 10),
+ OPC_LARCH_FNEG_S = (0x004505 << 10),
+ OPC_LARCH_FNEG_D = (0x004506 << 10),
+ OPC_LARCH_FCLASS_S = (0x00450D << 10),
+ OPC_LARCH_FCLASS_D = (0x00450E << 10),
+ OPC_LARCH_FSQRT_S = (0x004511 << 10),
+ OPC_LARCH_FSQRT_D = (0x004512 << 10),
+ OPC_LARCH_FRECIP_S = (0x004515 << 10),
+ OPC_LARCH_FRECIP_D = (0x004516 << 10),
+ OPC_LARCH_FRSQRT_S = (0x004519 << 10),
+ OPC_LARCH_FRSQRT_D = (0x00451A << 10),
+ OPC_LARCH_FMOV_S = (0x004525 << 10),
+ OPC_LARCH_FMOV_D = (0x004526 << 10),
+ OPC_LARCH_GR2FR_W = (0x004529 << 10),
+ OPC_LARCH_GR2FR_D = (0x00452A << 10),
+ OPC_LARCH_GR2FRH_W = (0x00452B << 10),
+ OPC_LARCH_FR2GR_S = (0x00452D << 10),
+ OPC_LARCH_FR2GR_D = (0x00452E << 10),
+ OPC_LARCH_FRH2GR_S = (0x00452F << 10),
+
+ OPC_LARCH_FCVT_S_D = (0x004646 << 10),
+ OPC_LARCH_FCVT_D_S = (0x004649 << 10),
+ OPC_LARCH_FTINTRM_W_S = (0x004681 << 10),
+ OPC_LARCH_FTINTRM_W_D = (0x004682 << 10),
+ OPC_LARCH_FTINTRM_L_S = (0x004689 << 10),
+ OPC_LARCH_FTINTRM_L_D = (0x00468A << 10),
+ OPC_LARCH_FTINTRP_W_S = (0x004691 << 10),
+ OPC_LARCH_FTINTRP_W_D = (0x004692 << 10),
+ OPC_LARCH_FTINTRP_L_S = (0x004699 << 10),
+ OPC_LARCH_FTINTRP_L_D = (0x00469A << 10),
+ OPC_LARCH_FTINTRZ_W_S = (0x0046A1 << 10),
+ OPC_LARCH_FTINTRZ_W_D = (0x0046A2 << 10),
+ OPC_LARCH_FTINTRZ_L_S = (0x0046A9 << 10),
+ OPC_LARCH_FTINTRZ_L_D = (0x0046AA << 10),
+ OPC_LARCH_FTINTRNE_W_S = (0x0046B1 << 10),
+ OPC_LARCH_FTINTRNE_W_D = (0x0046B2 << 10),
+ OPC_LARCH_FTINTRNE_L_S = (0x0046B9 << 10),
+ OPC_LARCH_FTINTRNE_L_D = (0x0046BA << 10),
+ OPC_LARCH_FTINT_W_S = (0x0046C1 << 10),
+ OPC_LARCH_FTINT_W_D = (0x0046C2 << 10),
+ OPC_LARCH_FTINT_L_S = (0x0046C9 << 10),
+ OPC_LARCH_FTINT_L_D = (0x0046CA << 10),
+ OPC_LARCH_FFINT_S_W = (0x004744 << 10),
+ OPC_LARCH_FFINT_S_L = (0x004746 << 10),
+ OPC_LARCH_FFINT_D_W = (0x004748 << 10),
+ OPC_LARCH_FFINT_D_L = (0x00474A << 10),
+ OPC_LARCH_FRINT_S = (0x004791 << 10),
+ OPC_LARCH_FRINT_D = (0x004792 << 10),
+
+ OPC_LARCH_FADD_S = (0x00201 << 15),
+ OPC_LARCH_FADD_D = (0x00202 << 15),
+ OPC_LARCH_FSUB_S = (0x00205 << 15),
+ OPC_LARCH_FSUB_D = (0x00206 << 15),
+ OPC_LARCH_FMUL_S = (0x00209 << 15),
+ OPC_LARCH_FMUL_D = (0x0020A << 15),
+ OPC_LARCH_FDIV_S = (0x0020D << 15),
+ OPC_LARCH_FDIV_D = (0x0020E << 15),
+ OPC_LARCH_FMAX_S = (0x00211 << 15),
+ OPC_LARCH_FMAX_D = (0x00212 << 15),
+ OPC_LARCH_FMIN_S = (0x00215 << 15),
+ OPC_LARCH_FMIN_D = (0x00216 << 15),
+ OPC_LARCH_FMAXA_S = (0x00219 << 15),
+ OPC_LARCH_FMAXA_D = (0x0021A << 15),
+ OPC_LARCH_FMINA_S = (0x0021D << 15),
+ OPC_LARCH_FMINA_D = (0x0021E << 15),
+};
+
+enum {
+ /* 12 bit immediate opcodes */
+ OPC_LARCH_SLTI = (0x008 << 22),
+ OPC_LARCH_SLTIU = (0x009 << 22),
+ OPC_LARCH_ADDI_W = (0x00A << 22),
+ OPC_LARCH_ADDI_D = (0x00B << 22),
+ OPC_LARCH_ANDI = (0x00D << 22),
+ OPC_LARCH_ORI = (0x00E << 22),
+ OPC_LARCH_XORI = (0x00F << 22),
+};
+
+enum {
+ /* load/store opcodes */
+ OPC_LARCH_FLDX_S = (0x07060 << 15),
+ OPC_LARCH_FLDX_D = (0x07068 << 15),
+ OPC_LARCH_FSTX_S = (0x07070 << 15),
+ OPC_LARCH_FSTX_D = (0x07078 << 15),
+ OPC_LARCH_FLDGT_S = (0x070E8 << 15),
+ OPC_LARCH_FLDGT_D = (0x070E9 << 15),
+ OPC_LARCH_FLDLE_S = (0x070EA << 15),
+ OPC_LARCH_FLDLE_D = (0x070EB << 15),
+ OPC_LARCH_FSTGT_S = (0x070EC << 15),
+ OPC_LARCH_FSTGT_D = (0x070ED << 15),
+ OPC_LARCH_FSTLE_S = (0x070EE << 15),
+ OPC_LARCH_FSTLE_D = (0x070EF << 15),
+
+ OPC_LARCH_LD_B = (0x0A0 << 22),
+ OPC_LARCH_LD_H = (0x0A1 << 22),
+ OPC_LARCH_LD_W = (0x0A2 << 22),
+ OPC_LARCH_LD_D = (0x0A3 << 22),
+ OPC_LARCH_ST_B = (0x0A4 << 22),
+ OPC_LARCH_ST_H = (0x0A5 << 22),
+ OPC_LARCH_ST_W = (0x0A6 << 22),
+ OPC_LARCH_ST_D = (0x0A7 << 22),
+ OPC_LARCH_LD_BU = (0x0A8 << 22),
+ OPC_LARCH_LD_HU = (0x0A9 << 22),
+ OPC_LARCH_LD_WU = (0x0AA << 22),
+ OPC_LARCH_FLD_S = (0x0AC << 22),
+ OPC_LARCH_FST_S = (0x0AD << 22),
+ OPC_LARCH_FLD_D = (0x0AE << 22),
+ OPC_LARCH_FST_D = (0x0AF << 22),
+
+ OPC_LARCH_LL_W = (0x20 << 24),
+ OPC_LARCH_SC_W = (0x21 << 24),
+ OPC_LARCH_LL_D = (0x22 << 24),
+ OPC_LARCH_SC_D = (0x23 << 24),
+ OPC_LARCH_LDPTR_W = (0x24 << 24),
+ OPC_LARCH_STPTR_W = (0x25 << 24),
+ OPC_LARCH_LDPTR_D = (0x26 << 24),
+ OPC_LARCH_STPTR_D = (0x27 << 24),
+};
+
+enum {
+ /* jump opcodes */
+ OPC_LARCH_BEQZ = (0x10 << 26),
+ OPC_LARCH_BNEZ = (0x11 << 26),
+ OPC_LARCH_B = (0x14 << 26),
+ OPC_LARCH_BEQ = (0x16 << 26),
+ OPC_LARCH_BNE = (0x17 << 26),
+ OPC_LARCH_BLT = (0x18 << 26),
+ OPC_LARCH_BGE = (0x19 << 26),
+ OPC_LARCH_BLTU = (0x1A << 26),
+ OPC_LARCH_BGEU = (0x1B << 26),
+};
+
+#endif
diff --git a/target/loongarch64/internal.h b/target/loongarch64/internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..274916d7a9db959d2ba23868ada8077680e13a8d
--- /dev/null
+++ b/target/loongarch64/internal.h
@@ -0,0 +1,184 @@
+#ifndef LOONGARCH_INTERNAL_H
+#define LOONGARCH_INTERNAL_H
+
+#include "cpu-csr.h"
+
+/* MMU types, the first four entries have the same layout as the
+ CP0C0_MT field. */
+enum loongarch_mmu_types {
+ MMU_TYPE_NONE,
+ MMU_TYPE_LS3A5K, /* LISA CSR */
+};
+
+
+
+struct loongarch_def_t {
+ const char *name;
+ int32_t CSR_PRid;
+ int32_t FCSR0;
+ int32_t FCSR0_rw_bitmask;
+ int32_t PABITS;
+ CPU_LOONGARCH_CSR
+ uint64_t insn_flags;
+ enum loongarch_mmu_types mmu_type;
+ int cpu_cfg[64];
+};
+
+/* loongarch 3a5000 TLB entry */
+struct ls3a5k_tlb_t {
+ target_ulong VPN;
+ uint64_t PageMask; /* CSR_TLBIDX[29:24] */
+ uint32_t PageSize;
+ uint16_t ASID;
+ unsigned int G:1; /* CSR_TLBLO[6] */
+
+ unsigned int C0:3; /* CSR_TLBLO[5:4] */
+ unsigned int C1:3;
+
+ unsigned int V0:1; /* CSR_TLBLO[0] */
+ unsigned int V1:1;
+
+ unsigned int WE0:1; /* CSR_TLBLO[1] */
+ unsigned int WE1:1;
+
+ unsigned int XI0:1; /* CSR_TLBLO[62] */
+ unsigned int XI1:1;
+
+ unsigned int RI0:1; /* CSR_TLBLO[61] */
+ unsigned int RI1:1;
+
+ unsigned int EHINV:1;/* CSR_TLBIDX[31] */
+
+ unsigned int PLV0:2; /* CSR_TLBLO[3:2] */
+ unsigned int PLV1:2;
+
+ unsigned int RPLV0:1;
+ unsigned int RPLV1:1; /* CSR_TLBLO[63] */
+
+ uint64_t PPN0; /* CSR_TLBLO[47:12] */
+ uint64_t PPN1; /* CSR_TLBLO[47:12] */
+};
+typedef struct ls3a5k_tlb_t ls3a5k_tlb_t;
+
+
+struct CPULOONGARCHTLBContext {
+ uint32_t nb_tlb;
+ uint32_t tlb_in_use;
+ int (*map_address)(struct CPULOONGARCHState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+ void (*helper_tlbwr)(struct CPULOONGARCHState *env);
+ void (*helper_tlbfill)(struct CPULOONGARCHState *env);
+ void (*helper_tlbsrch)(struct CPULOONGARCHState *env);
+ void (*helper_tlbrd)(struct CPULOONGARCHState *env);
+ void (*helper_tlbclr)(struct CPULOONGARCHState *env);
+ void (*helper_tlbflush)(struct CPULOONGARCHState *env);
+ void (*helper_invtlb)(struct CPULOONGARCHState *env, target_ulong addr,
+ target_ulong info, int op);
+ union {
+ struct {
+ uint64_t ftlb_mask;
+ uint32_t ftlb_size; /* at most : 8 * 256 = 2048 */
+ uint32_t vtlb_size; /* at most : 64 */
+ ls3a5k_tlb_t tlb[2048 + 64]; /* at most : 2048 FTLB + 64 VTLB */
+ } ls3a5k;
+ } mmu;
+};
+
+enum {
+ TLBRET_PE = -7,
+ TLBRET_XI = -6,
+ TLBRET_RI = -5,
+ TLBRET_DIRTY = -4,
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+
+extern unsigned int ieee_rm[];
+
+static inline void restore_rounding_mode(CPULOONGARCHState *env)
+{
+ set_float_rounding_mode(ieee_rm[(env->active_fpu.fcsr0 >> FCSR0_RM) & 0x3],
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPULOONGARCHState *env)
+{
+ set_flush_to_zero(0, &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPULOONGARCHState *env)
+{
+ restore_rounding_mode(env);
+ restore_flush_mode(env);
+}
+static inline void compute_hflags(CPULOONGARCHState *env)
+{
+ env->hflags &= ~(LARCH_HFLAG_64 | LARCH_HFLAG_FPU | LARCH_HFLAG_KSU |
+ LARCH_HFLAG_AWRAP | LARCH_HFLAG_LSX | LARCH_HFLAG_LASX);
+
+ env->hflags |= (env->CSR_CRMD & CSR_CRMD_PLV);
+ env->hflags |= LARCH_HFLAG_64;
+
+ if (env->CSR_EUEN & CSR_EUEN_FPEN) {
+ env->hflags |= LARCH_HFLAG_FPU;
+ }
+ if (env->CSR_EUEN & CSR_EUEN_LSXEN) {
+ env->hflags |= LARCH_HFLAG_LSX;
+ }
+ if (env->CSR_EUEN & CSR_EUEN_LASXEN) {
+ env->hflags |= LARCH_HFLAG_LASX;
+ }
+ if (env->CSR_EUEN & CSR_EUEN_LBTEN) {
+ env->hflags |= LARCH_HFLAG_LBT;
+ }
+}
+
+/* Check if there is pending and not masked out interrupt */
+static inline bool cpu_loongarch_hw_interrupts_pending(CPULOONGARCHState *env)
+{
+ int32_t pending;
+ int32_t status;
+ bool r;
+
+ pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK;
+ status = env->CSR_ECFG & CSR_ECFG_IPMASK;
+
+ /* Configured with compatibility or VInt (Vectored Interrupts)
+ treats the pending lines as individual interrupt lines, the status
+ lines are individual masks. */
+ r = (pending & status) != 0;
+
+ return r;
+}
+
+
+/* stabletimer.c */
+uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high);
+uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env);
+uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env);
+void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env, uint64_t value);
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, DumpState *opaque);
+
+void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+
+/* TODO QOM'ify CPU reset and remove */
+void cpu_state_reset(CPULOONGARCHState *s);
+void cpu_loongarch_realize_env(CPULOONGARCHState *env);
+
+int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n);
+int loongarch_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+#ifdef CONFIG_TCG
+#include "fpu_helper.h"
+#endif
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_loongarch_cpu;
+hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+#endif
+
+#endif
diff --git a/target/loongarch64/kvm.c b/target/loongarch64/kvm.c
new file mode 100644
index 0000000000000000000000000000000000000000..0eaabe39436c57a43d38fef1ee58a8623ab69c27
--- /dev/null
+++ b/target/loongarch64/kvm.c
@@ -0,0 +1,1419 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "qemu/main-loop.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "sysemu/cpus.h"
+#include "kvm_larch.h"
+#include "exec/memattrs.h"
+#include "exec/gdbstub.h"
+
+#define DEBUG_KVM 0
+/* A 16384-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 2047 kvm_msr_entry structs */
+#define CSR_BUF_SIZE 16384
+
+#define DPRINTF(fmt, ...) \
+ do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
+
+/*
+ * Define loongarch kvm version.
+ * Add version number when
+ * qemu/kvm interface changed
+ */
+#define KVM_LOONGARCH_VERSION 1
+
+static struct {
+ target_ulong addr;
+ int len;
+ int type;
+} inst_breakpoint[8], data_breakpoint[8];
+
+int nb_data_breakpoint = 0, nb_inst_breakpoint = 0;
+static int kvm_loongarch_version_cap;
+
+/* Hardware breakpoint control register
+ * 4:1 plv0-plv3 enable
+ * 6:5 config virtualization mode
+ * 9:8 load store */
+static const int type_code[] = {
+ [GDB_BREAKPOINT_HW] = 0x5e,
+ [GDB_WATCHPOINT_READ] = (0x5e | 1 << 8),
+ [GDB_WATCHPOINT_WRITE] = (0x5e | 1 << 9),
+ [GDB_WATCHPOINT_ACCESS] = (0x5e | 1 << 8 | 1 << 9)
+};
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static void kvm_loongarch_update_state(void *opaque, bool running, RunState state);
+static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id, uint64_t *addr);
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+ return cs->cpu_index;
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ /* LOONGARCH has 128 signals */
+ kvm_set_sigmask_len(s, 16);
+
+ kvm_loongarch_version_cap = kvm_check_extension(s, KVM_CAP_LOONGARCH_VZ);
+
+ if (kvm_loongarch_version_cap != KVM_LOONGARCH_VERSION) {
+ warn_report("QEMU/KVM version not match, qemu_la_version: lvz-%d,\
+ kvm_la_version: lvz-%d \n",
+ KVM_LOONGARCH_VERSION, kvm_loongarch_version_cap);
+ }
+ return 0;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+ return 0;
+}
+
+static void kvm_csr_set_addr(uint64_t **addr, uint32_t index, uint64_t *p)
+{
+ addr[index] = p;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ uint64_t **addr;
+ CPULOONGARCHState *env = &cpu->env;
+ int ret = 0;
+
+ kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_FPU, 0, 0);
+ kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_LSX, 0, 0);
+
+ cpu->cpuStateEntry = qemu_add_vm_change_state_handler(kvm_loongarch_update_state, cs);
+ cpu->kvm_csr_buf = g_malloc0(CSR_BUF_SIZE + CSR_BUF_SIZE);
+
+ addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE;
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CRMD, &env->CSR_CRMD);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRMD, &env->CSR_PRMD);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_EUEN, &env->CSR_EUEN);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MISC, &env->CSR_MISC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ECFG, &env->CSR_ECFG);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ESTAT, &env->CSR_ESTAT);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERA, &env->CSR_ERA);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_BADV, &env->CSR_BADV);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_BADI, &env->CSR_BADI);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_EEPN, &env->CSR_EEPN);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBIDX, &env->CSR_TLBIDX);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBEHI, &env->CSR_TLBEHI);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO0, &env->CSR_TLBELO0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO1, &env->CSR_TLBELO1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GTLBC, &env->CSR_GTLBC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TRGP, &env->CSR_TRGP);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ASID, &env->CSR_ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDL, &env->CSR_PGDL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDH, &env->CSR_PGDH);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGD, &env->CSR_PGD);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL0, &env->CSR_PWCTL0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL1, &env->CSR_PWCTL1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_STLBPGSIZE, &env->CSR_STLBPGSIZE);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_RVACFG, &env->CSR_RVACFG);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CPUID, &env->CSR_CPUID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG1, &env->CSR_PRCFG1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG2, &env->CSR_PRCFG2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG3, &env->CSR_PRCFG3);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS0, &env->CSR_KS0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS1, &env->CSR_KS1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS2, &env->CSR_KS2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS3, &env->CSR_KS3);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS4, &env->CSR_KS4);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS5, &env->CSR_KS5);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS6, &env->CSR_KS6);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS7, &env->CSR_KS7);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TMID, &env->CSR_TMID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CNTC, &env->CSR_CNTC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TINTCLR, &env->CSR_TINTCLR);
+
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GSTAT, &env->CSR_GSTAT);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GCFG, &env->CSR_GCFG);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GINTC, &env->CSR_GINTC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GCNTC, &env->CSR_GCNTC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_LLBCTL, &env->CSR_LLBCTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL1, &env->CSR_IMPCTL1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL2, &env->CSR_IMPCTL2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GNMI, &env->CSR_GNMI);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRENT, &env->CSR_TLBRENT);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRBADV, &env->CSR_TLBRBADV);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRERA, &env->CSR_TLBRERA);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRSAVE, &env->CSR_TLBRSAVE);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO0, &env->CSR_TLBRELO0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO1, &env->CSR_TLBRELO1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBREHI, &env->CSR_TLBREHI);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRPRMD, &env->CSR_TLBRPRMD);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRCTL, &env->CSR_ERRCTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO, &env->CSR_ERRINFO);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO1, &env->CSR_ERRINFO1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRENT, &env->CSR_ERRENT);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRERA, &env->CSR_ERRERA);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRSAVE, &env->CSR_ERRSAVE);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CTAG, &env->CSR_CTAG);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN0, &env->CSR_DMWIN0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN1, &env->CSR_DMWIN1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN2, &env->CSR_DMWIN2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN3, &env->CSR_DMWIN3);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL0, &env->CSR_PERFCTRL0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR0, &env->CSR_PERFCNTR0);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL1, &env->CSR_PERFCTRL1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR1, &env->CSR_PERFCNTR1);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL2, &env->CSR_PERFCTRL2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR2, &env->CSR_PERFCNTR2);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL3, &env->CSR_PERFCTRL3);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR3, &env->CSR_PERFCNTR3);
+
+ /* debug */
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPC, &env->CSR_MWPC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPS, &env->CSR_MWPS);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ADDR, &env->CSR_DB0ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0MASK, &env->CSR_DB0MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0CTL, &env->CSR_DB0CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ASID, &env->CSR_DB0ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ADDR, &env->CSR_DB1ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1MASK, &env->CSR_DB1MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1CTL, &env->CSR_DB1CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ASID, &env->CSR_DB1ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ADDR, &env->CSR_DB2ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2MASK, &env->CSR_DB2MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2CTL, &env->CSR_DB2CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ASID, &env->CSR_DB2ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ADDR, &env->CSR_DB3ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3MASK, &env->CSR_DB3MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3CTL, &env->CSR_DB3CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ASID, &env->CSR_DB3ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPC, &env->CSR_FWPC);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPS, &env->CSR_FWPS);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ADDR, &env->CSR_IB0ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0MASK, &env->CSR_IB0MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0CTL, &env->CSR_IB0CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ASID, &env->CSR_IB0ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ADDR, &env->CSR_IB1ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1MASK, &env->CSR_IB1MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1CTL, &env->CSR_IB1CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ASID, &env->CSR_IB1ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ADDR, &env->CSR_IB2ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2MASK, &env->CSR_IB2MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2CTL, &env->CSR_IB2CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ASID, &env->CSR_IB2ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ADDR, &env->CSR_IB3ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3MASK, &env->CSR_IB3MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3CTL, &env->CSR_IB3CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ASID, &env->CSR_IB3ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ADDR, &env->CSR_IB4ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4MASK, &env->CSR_IB4MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4CTL, &env->CSR_IB4CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ASID, &env->CSR_IB4ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ADDR, &env->CSR_IB5ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5MASK, &env->CSR_IB5MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5CTL, &env->CSR_IB5CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ASID, &env->CSR_IB5ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ADDR, &env->CSR_IB6ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6MASK, &env->CSR_IB6MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6CTL, &env->CSR_IB6CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ASID, &env->CSR_IB6ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ADDR, &env->CSR_IB7ADDR);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7MASK, &env->CSR_IB7MASK);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7CTL, &env->CSR_IB7CTL);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ASID, &env->CSR_IB7ASID);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DEBUG, &env->CSR_DEBUG);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DERA, &env->CSR_DERA);
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DESAVE, &env->CSR_DESAVE);
+
+ DPRINTF("%s\n", __func__);
+ return ret;
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+
+ g_free(cpu->kvm_csr_buf);
+ cpu->kvm_csr_buf = NULL;
+ return 0;
+}
+
+static void kvm_csr_buf_reset(LOONGARCHCPU *cpu)
+{
+ memset(cpu->kvm_csr_buf, 0, CSR_BUF_SIZE);
+}
+
+static void kvm_csr_entry_add(LOONGARCHCPU *cpu, uint32_t index, uint64_t value)
+{
+ struct kvm_msrs *msrs = cpu->kvm_csr_buf;
+ void *limit = ((void *)msrs) + CSR_BUF_SIZE;
+ struct kvm_csr_entry *entry = &msrs->entries[msrs->ncsrs];
+
+ assert((void *)(entry + 1) <= limit);
+
+ entry->index = index;
+ entry->reserved = 0;
+ entry->data = value;
+ msrs->ncsrs++;
+}
+
+void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu)
+{
+ int ret = 0;
+ uint64_t reset = 1;
+
+ if (CPU(cpu)->kvm_fd > 0) {
+ ret = kvm_larch_putq(CPU(cpu), KVM_REG_LOONGARCH_VCPU_RESET, &reset);
+ if (ret < 0) {
+ error_report("%s reset vcpu failed:%d", __func__, ret);
+ }
+ }
+
+ DPRINTF("%s\n", __func__);
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+ int n;
+ if (kvm_sw_breakpoints_active(cpu)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+ if (nb_data_breakpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ for (n = 0; n < nb_data_breakpoint; n++) {
+ dbg->arch.data_breakpoint[n].addr = data_breakpoint[n].addr;
+ dbg->arch.data_breakpoint[n].mask = 0;
+ dbg->arch.data_breakpoint[n].asid = 0;
+ dbg->arch.data_breakpoint[n].ctrl = type_code[data_breakpoint[n].type];
+ }
+ dbg->arch.data_bp_nums = nb_data_breakpoint;
+ } else {
+ dbg->arch.data_bp_nums = 0;
+ }
+ if (nb_inst_breakpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ for (n = 0; n < nb_inst_breakpoint; n++) {
+ dbg->arch.inst_breakpoint[n].addr = inst_breakpoint[n].addr;
+ dbg->arch.inst_breakpoint[n].mask = 0;
+ dbg->arch.inst_breakpoint[n].asid = 0;
+ dbg->arch.inst_breakpoint[n].ctrl = type_code[inst_breakpoint[n].type];
+ }
+ dbg->arch.inst_bp_nums = nb_inst_breakpoint;
+ } else {
+ dbg->arch.inst_bp_nums = 0;
+ }
+}
+
+static const unsigned int brk_insn = 0x002b8005;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ DPRINTF("%s\n", __func__);
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+ error_report("%s failed", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static uint32_t brk;
+
+ DPRINTF("%s\n", __func__);
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+ brk != brk_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ error_report("%s failed", __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int find_hw_breakpoint(uint64_t addr, int len, int type)
+{
+ int n;
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ if (nb_inst_breakpoint == 0) {
+ return -1;
+ }
+ for (n = 0; n < nb_inst_breakpoint; n++) {
+ if (inst_breakpoint[n].addr == addr && inst_breakpoint[n].type == type) {
+ return n;
+ }
+ }
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ if (nb_data_breakpoint == 0) {
+ return -1;
+ }
+ for (n = 0; n < nb_data_breakpoint; n++) {
+ if (data_breakpoint[n].addr == addr && data_breakpoint[n].type == type &&
+ data_breakpoint[n].len == len) {
+ return n;
+ }
+ }
+ break;
+ default:
+ return -1;
+ }
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ len = 1;
+ if (nb_inst_breakpoint == 8) {
+ return -ENOBUFS;
+ }
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
+ return -EEXIST;
+ }
+ inst_breakpoint[nb_inst_breakpoint].addr = addr;
+ inst_breakpoint[nb_inst_breakpoint].len = len;
+ inst_breakpoint[nb_inst_breakpoint].type = type;
+ nb_inst_breakpoint++;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ switch (len) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ if (addr & (len - 1)) {
+ return -EINVAL;
+ }
+ if (nb_data_breakpoint == 8) {
+ return -ENOBUFS;
+ }
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
+ return -EEXIST;
+ }
+ data_breakpoint[nb_data_breakpoint].addr = addr;
+ data_breakpoint[nb_data_breakpoint].len = len;
+ data_breakpoint[nb_data_breakpoint].type = type;
+ nb_data_breakpoint++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+ n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+ if (n < 0) {
+ printf("err not find remove target\n");
+ return -ENOENT;
+ }
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ nb_inst_breakpoint--;
+ inst_breakpoint[n] = inst_breakpoint[nb_inst_breakpoint];
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ nb_data_breakpoint--;
+ data_breakpoint[n] = data_breakpoint[nb_data_breakpoint];
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ DPRINTF("%s\n", __func__);
+ nb_data_breakpoint = 0;
+ nb_inst_breakpoint = 0;
+}
+
+static inline int cpu_loongarch_io_interrupts_pending(LOONGARCHCPU *cpu)
+{
+ CPULOONGARCHState *env = &cpu->env;
+
+ return env->CSR_ESTAT & (0x1 << 2);
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ int r;
+ struct kvm_loongarch_interrupt intr;
+
+ qemu_mutex_lock_iothread();
+
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_loongarch_io_interrupts_pending(cpu)) {
+ intr.cpu = -1;
+ intr.irq = 2;
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+ if (r < 0) {
+ error_report("%s: cpu %d: failed to inject IRQ %x",
+ __func__, cs->cpu_index, intr.irq);
+ }
+ }
+
+ qemu_mutex_unlock_iothread();
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
+{
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+ CPULOONGARCHState *env = &cpu->env;
+ int i;
+ bool ret = false;
+ kvm_cpu_synchronize_state(cs);
+ if (cs->singlestep_enabled) {
+ return true;
+ }
+ if (kvm_find_sw_breakpoint(cs, env->active_tc.PC)) {
+ return true;
+ }
+ /* hw breakpoint */
+ if (run->debug.arch.exception == EXCCODE_WATCH) {
+ for (i = 0; i < 8; i++) {
+ if (run->debug.arch.fwps & (1 << i)) {
+ ret = true;
+ break;
+ }
+ }
+ for (i = 0; i < 8; i++) {
+ if (run->debug.arch.mwps & (1 << i)) {
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = data_breakpoint[i].addr;
+ switch (data_breakpoint[i].type) {
+ case GDB_WATCHPOINT_READ:
+ ret = true;
+ hw_watchpoint.flags = BP_MEM_READ;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ ret = true;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ ret = true;
+ hw_watchpoint.flags = BP_MEM_ACCESS;
+ break;
+ }
+ }
+ }
+ run->debug.arch.exception = 0;
+ run->debug.arch.fwps = 0;
+ run->debug.arch.mwps = 0;
+ }
+ return ret;
+}
+
+/*
+ * Dispatch KVM exit reasons not handled by the generic code.
+ * Returns EXCP_DEBUG to enter the gdbstub, 0 to resume the guest,
+ * negative to stop the vCPU.
+ */
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+    int ret;
+
+    DPRINTF("%s\n", __func__);
+    switch (run->exit_reason) {
+    case KVM_EXIT_HYPERCALL:
+        /* Hypercalls are accepted and unconditionally report success. */
+        DPRINTF("handle LOONGARCH hypercall\n");
+        ret = 0;
+        run->hypercall.ret = ret;
+        break;
+
+    case KVM_EXIT_DEBUG:
+        ret = 0;
+        if (kvm_loongarch_handle_debug(cs, run)) {
+            ret = EXCP_DEBUG;
+        }
+        break;
+    default:
+        error_report("%s: unknown exit reason %d",
+                     __func__, run->exit_reason);
+        ret = -1;
+        break;
+    }
+
+    return ret;
+}
+
+/* Always stop the vCPU when KVM reports an emulation failure. */
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+    DPRINTF("%s\n", __func__);
+    return true;
+}
+/*
+#if 0
+int kvmloongarch_load_kernel(CPUState *env, void *ram_base)
+{
+ int ret;
+
+ ret = kvm_vcpu_ioctl(env, KVM_LOAD_KERNEL, ram_base);
+
+ return ret;
+}
+#endif
+*/
+/* No architecture-specific IRQ routing setup is required. */
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+/*
+ * Assert (@level != 0) or deassert a core interrupt line for @cpu.
+ * The sign of intr.irq encodes the level: positive raises the line,
+ * negative lowers it.  No-op when KVM is not in use.
+ */
+int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_loongarch_interrupt intr;
+
+    if (!kvm_enabled()) {
+        return 0;
+    }
+
+    intr.cpu = -1;  /* NOTE(review): -1 presumably targets the vCPU the ioctl runs on */
+
+    if (level) {
+        intr.irq = irq;
+    } else {
+        intr.irq = -irq;
+    }
+
+    /* NOTE(review): ioctl result is ignored; injection failures are silent. */
+    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+    return 0;
+}
+
+/*
+ * Send (level != 0) or clear an inter-processor interrupt targeting
+ * @cpu.  The destination is identified by cpu_index in the payload;
+ * the ioctl is issued on the calling vCPU's fd when one exists,
+ * otherwise on the destination's own fd.
+ */
+int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level)
+{
+    CPUState *cs = current_cpu;
+    CPUState *dest_cs = CPU(cpu);
+    struct kvm_loongarch_interrupt intr;
+
+    if (!kvm_enabled()) {
+        return 0;
+    }
+
+    intr.cpu = dest_cs->cpu_index;
+
+    /* Negative irq encodes "deassert", as in kvm_loongarch_set_interrupt(). */
+    if (level) {
+        intr.irq = irq;
+    } else {
+        intr.irq = -irq;
+    }
+
+    DPRINTF("%s: IRQ: %d\n", __func__, intr.irq);
+    /* Not on a vCPU thread (e.g. main loop): use the destination's fd. */
+    if (!current_cpu) {
+        cs = dest_cs;
+    }
+    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+    return 0;
+}
+
+/* Write a signed 32-bit value to a KVM register via KVM_SET_ONE_REG. */
+static inline int kvm_loongarch_put_one_reg(CPUState *cs, uint64_t reg_id,
+                                            int32_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
+}
+
+/* Write an unsigned 32-bit value to a KVM register via KVM_SET_ONE_REG. */
+static inline int kvm_loongarch_put_one_ureg(CPUState *cs, uint64_t reg_id,
+                                             uint32_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
+}
+
+/*
+ * Write a target_ulong value via KVM_SET_ONE_REG.  The value is
+ * widened into a 64-bit stack temporary so the kernel always sees a
+ * full 64-bit buffer regardless of target_ulong's width.
+ */
+static inline int kvm_loongarch_put_one_ulreg(CPUState *cs, uint64_t reg_id,
+                                              target_ulong *addr)
+{
+    uint64_t val64 = *addr;
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)&val64
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
+}
+
+/* Write a signed 64-bit value to a KVM register via KVM_SET_ONE_REG. */
+static inline int kvm_loongarch_put_one_reg64(CPUState *cs, int64_t reg_id,
+                                              int64_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
+}
+
+/* Write an unsigned 64-bit value to a KVM register via KVM_SET_ONE_REG. */
+static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id,
+                                 uint64_t *addr)
+{
+    struct kvm_one_reg one_reg;
+
+    one_reg.id = reg_id;
+    one_reg.addr = (uintptr_t)addr;
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &one_reg);
+}
+
+/* Read a signed 32-bit value from a KVM register via KVM_GET_ONE_REG. */
+static inline int kvm_loongarch_get_one_reg(CPUState *cs, uint64_t reg_id,
+                                            int32_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+/* Read an unsigned 32-bit value from a KVM register via KVM_GET_ONE_REG. */
+static inline int kvm_loongarch_get_one_ureg(CPUState *cs, uint64_t reg_id,
+                                             uint32_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+/*
+ * Read a target_ulong register through a 64-bit buffer and narrow it
+ * back on success; *addr is left untouched on failure.
+ */
+static inline int kvm_loongarch_get_one_ulreg(CPUState *cs, uint64_t reg_id,
+                                              target_ulong *addr)
+{
+    uint64_t buf = 0;
+    struct kvm_one_reg one_reg;
+    int err;
+
+    one_reg.id = reg_id;
+    one_reg.addr = (uintptr_t)&buf;
+
+    err = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &one_reg);
+    if (err >= 0) {
+        *addr = buf;
+    }
+    return err;
+}
+
+/* Read a signed 64-bit value from a KVM register via KVM_GET_ONE_REG. */
+static inline int kvm_loongarch_get_one_reg64(CPUState *cs, int64_t reg_id,
+                                              int64_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+/* Read an unsigned 64-bit value from a KVM register via KVM_GET_ONE_REG. */
+static inline int kvm_larch_getq(CPUState *cs, uint64_t reg_id,
+                                 uint64_t *addr)
+{
+    struct kvm_one_reg csrreg = {
+        .id = reg_id,
+        .addr = (uintptr_t)addr
+    };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+/*
+ * Read-modify-write a 32-bit KVM register: update only the bits
+ * selected by @mask so they match *@addr, skipping the write when
+ * nothing would change.
+ */
+static inline int kvm_loongarch_change_one_reg(CPUState *cs, uint64_t reg_id,
+                                               int32_t *addr, int32_t mask)
+{
+    int32_t cur, diff;
+    int err;
+
+    err = kvm_loongarch_get_one_reg(cs, reg_id, &cur);
+    if (err < 0) {
+        return err;
+    }
+
+    /* Bits inside the mask that differ from the requested value. */
+    diff = (*addr ^ cur) & mask;
+    if (diff == 0) {
+        return 0;   /* already in the requested state */
+    }
+
+    cur ^= diff;
+    return kvm_loongarch_put_one_reg(cs, reg_id, &cur);
+}
+
+/*
+ * Read-modify-write a 64-bit KVM register: only the bits selected by
+ * @mask are updated to match *@addr; the write is skipped when the
+ * register already holds the requested bits.
+ */
+static inline int kvm_loongarch_change_one_reg64(CPUState *cs, uint64_t reg_id,
+                                                 int64_t *addr, int64_t mask)
+{
+    int err;
+    int64_t tmp, change;
+
+    err = kvm_loongarch_get_one_reg64(cs, reg_id, &tmp);
+    if (err < 0) {
+        /* Generic message: the old "CSR_CONFIG7" text was a copy/paste leftover. */
+        DPRINTF("%s: Failed to get reg 0x%" PRIx64 " (%d)\n", __func__,
+                reg_id, err);
+        return err;
+    }
+
+    /* only change bits in mask */
+    change = (*addr ^ tmp) & mask;
+    if (!change) {
+        return 0;
+    }
+
+    tmp = tmp ^ change;
+    return kvm_loongarch_put_one_reg64(cs, reg_id, &tmp);
+}
+/*
+ * VM change-state handler: save the guest timer counter when the VM
+ * stops and restore it when it resumes, so the counter does not drift
+ * while the VM is paused.
+ */
+static void kvm_loongarch_update_state(void *opaque, bool running, RunState state)
+{
+    CPUState *cs = opaque;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    int ret;
+
+    /*
+     * If state is already dirty (synced to QEMU) then the KVM timer state is
+     * already saved and can be restored when it is synced back to KVM.
+     */
+    if (!running) {
+        ret = kvm_larch_getq(cs, KVM_REG_LOONGARCH_COUNTER,
+                             &cpu->counter_value);
+        if (ret < 0) {
+            /* error_report for consistency with the rest of this file. */
+            error_report("%s: Failed to get counter_value (%d)",
+                         __func__, ret);
+        }
+    } else {
+        ret = kvm_larch_putq(cs, KVM_REG_LOONGARCH_COUNTER,
+                             &cpu->counter_value);
+        if (ret < 0) {
+            error_report("%s: Failed to put counter_value (%d)",
+                         __func__, ret);
+        }
+    }
+}
+
+/*
+ * Copy QEMU's FPU state (FCSR, 32 FP registers, 8 condition flags,
+ * VCSR) into the kernel with a single KVM_SET_FPU ioctl.
+ * Returns 0 on success or the negative ioctl error.
+ */
+static int kvm_loongarch_put_fpu_registers(CPUState *cs, int level)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int err, ret = 0;
+    unsigned int i;
+    struct kvm_fpu fpu;
+
+    fpu.fcsr = env->active_fpu.fcsr0;
+    for (i = 0; i < 32; i++) {
+        memcpy(&fpu.fpr[i], &env->active_fpu.fpr[i], sizeof(struct kvm_fpureg));
+    }
+    /* Condition flags are packed one per byte into fpu.fcc. */
+    for (i = 0; i < 8; i++) {
+        ((char *)&fpu.fcc)[i] = env->active_fpu.cf[i];
+    }
+    fpu.vcsr = env->active_fpu.vcsr16;
+
+    err = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
+    if (err < 0) {
+        /* This is the SET path; the old message said "get". */
+        DPRINTF("%s: Failed to set FPU (%d)\n", __func__, err);
+        ret = err;
+    }
+
+    return ret;
+}
+
+/*
+ * Fetch the kernel's FPU state with KVM_GET_FPU and unpack it into
+ * QEMU's CPU state.  Env is left untouched if the ioctl fails.
+ */
+static int kvm_loongarch_get_fpu_registers(CPUState *cs)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int err, ret = 0;
+    unsigned int i;
+    struct kvm_fpu fpu;
+
+    err = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
+    if (err < 0) {
+        DPRINTF("%s: Failed to get FPU (%d)\n", __func__, err);
+        ret = err;
+    } else {
+        env->active_fpu.fcsr0 = fpu.fcsr;
+        for (i = 0; i < 32; i++) {
+            memcpy(&env->active_fpu.fpr[i], &fpu.fpr[i], sizeof(struct kvm_fpureg));
+        }
+        /* Condition flags come back packed one per byte in fpu.fcc. */
+        for (i = 0; i < 8; i++) {
+            env->active_fpu.cf[i] = ((char *)&fpu.fcc)[i];
+        }
+        env->active_fpu.vcsr16 = fpu.vcsr;
+    }
+
+    return ret;
+}
+
+/*
+ * Write a 64-bit CSR (by CSR index, wrapped into a ONE_REG id with
+ * KVM_IOC_CSRID) and evaluate to the ioctl result.  Uses a GCC/Clang
+ * statement expression; failures are only logged via DPRINTF.
+ */
+#define KVM_PUT_ONE_UREG64(cs, regidx, addr) \
+    ({ \
+        int err; \
+        uint64_t csrid = 0; \
+        csrid = (KVM_IOC_CSRID(regidx)); \
+        err = kvm_larch_putq(cs, csrid, addr); \
+        if (err < 0) { \
+            DPRINTF("%s: Failed to put regidx 0x%x err:%d\n", __func__, regidx, err); \
+        } \
+        err; \
+    })
+
+
+/*
+ * Upload the full CSR set from QEMU's CPU state to KVM in one batch.
+ * Entries are accumulated in cpu->kvm_csr_buf and pushed with a single
+ * ioctl; the two timer CSRs are written last, via individual ONE_REG
+ * accesses, because TCFG enables the guest timer.
+ *
+ * NOTE(review): this port appears to reuse the KVM_SET_MSRS ioctl
+ * number for CSR batches -- confirm against the kernel headers.
+ * NOTE(review): the batch ioctl returns the number of CSRs processed,
+ * and that count is OR-ed with the ONE_REG error codes below, so the
+ * return value is only meaningful as "negative means failure".
+ */
+static int kvm_loongarch_put_csr_registers(CPUState *cs, int level)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int ret = 0;
+
+    /* @level is unused: all CSRs are written on every sync. */
+    (void)level;
+
+    kvm_csr_buf_reset(cpu);
+
+    /* Base architectural CSRs (mode, exception, TLB, ASID, page walk). */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, env->CSR_CRMD);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, env->CSR_PRMD);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, env->CSR_EUEN);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, env->CSR_MISC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, env->CSR_ECFG);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, env->CSR_ESTAT);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, env->CSR_ERA);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, env->CSR_BADV);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, env->CSR_BADI);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, env->CSR_EEPN);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, env->CSR_TLBIDX);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, env->CSR_TLBEHI);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, env->CSR_TLBELO0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, env->CSR_TLBELO1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, env->CSR_GTLBC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, env->CSR_TRGP);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, env->CSR_ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, env->CSR_PGDL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, env->CSR_PGDH);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, env->CSR_PGD);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, env->CSR_PWCTL0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, env->CSR_PWCTL1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, env->CSR_STLBPGSIZE);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, env->CSR_RVACFG);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, env->CSR_CPUID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, env->CSR_PRCFG1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, env->CSR_PRCFG2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, env->CSR_PRCFG3);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, env->CSR_KS0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, env->CSR_KS1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, env->CSR_KS2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, env->CSR_KS3);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, env->CSR_KS4);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, env->CSR_KS5);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, env->CSR_KS6);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, env->CSR_KS7);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, env->CSR_TMID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, env->CSR_CNTC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, env->CSR_TINTCLR);
+
+    /* Guest-mode, implementation-control, TLB-refill and error CSRs. */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, env->CSR_GSTAT);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, env->CSR_GCFG);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, env->CSR_GINTC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, env->CSR_GCNTC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, env->CSR_LLBCTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, env->CSR_IMPCTL1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, env->CSR_IMPCTL2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, env->CSR_GNMI);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, env->CSR_TLBRENT);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, env->CSR_TLBRBADV);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, env->CSR_TLBRERA);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, env->CSR_TLBRSAVE);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, env->CSR_TLBRELO0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, env->CSR_TLBRELO1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, env->CSR_TLBREHI);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, env->CSR_TLBRPRMD);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, env->CSR_ERRCTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, env->CSR_ERRINFO);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, env->CSR_ERRINFO1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, env->CSR_ERRENT);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, env->CSR_ERRERA);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, env->CSR_ERRSAVE);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, env->CSR_CTAG);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, env->CSR_DMWIN0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, env->CSR_DMWIN1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, env->CSR_DMWIN2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, env->CSR_DMWIN3);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, env->CSR_PERFCTRL0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, env->CSR_PERFCNTR0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, env->CSR_PERFCTRL1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, env->CSR_PERFCNTR1);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, env->CSR_PERFCTRL2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, env->CSR_PERFCNTR2);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, env->CSR_PERFCTRL3);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, env->CSR_PERFCNTR3);
+
+    /* debug */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, env->CSR_MWPC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, env->CSR_MWPS);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, env->CSR_DB0ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, env->CSR_DB0MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, env->CSR_DB0CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, env->CSR_DB0ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, env->CSR_DB1ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, env->CSR_DB1MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, env->CSR_DB1CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, env->CSR_DB1ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, env->CSR_DB2ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, env->CSR_DB2MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, env->CSR_DB2CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, env->CSR_DB2ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, env->CSR_DB3ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, env->CSR_DB3MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, env->CSR_DB3CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, env->CSR_DB3ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, env->CSR_FWPC);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, env->CSR_FWPS);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, env->CSR_IB0ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, env->CSR_IB0MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, env->CSR_IB0CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, env->CSR_IB0ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, env->CSR_IB1ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, env->CSR_IB1MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, env->CSR_IB1CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, env->CSR_IB1ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, env->CSR_IB2ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, env->CSR_IB2MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, env->CSR_IB2CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, env->CSR_IB2ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, env->CSR_IB3ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, env->CSR_IB3MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, env->CSR_IB3CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, env->CSR_IB3ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, env->CSR_IB4ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, env->CSR_IB4MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, env->CSR_IB4CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, env->CSR_IB4ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, env->CSR_IB5ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, env->CSR_IB5MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, env->CSR_IB5CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, env->CSR_IB5ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, env->CSR_IB6ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, env->CSR_IB6MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, env->CSR_IB6CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, env->CSR_IB6ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, env->CSR_IB7ADDR);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, env->CSR_IB7MASK);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, env->CSR_IB7CTL);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, env->CSR_IB7ASID);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, env->CSR_DEBUG);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, env->CSR_DERA);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, env->CSR_DESAVE);
+
+    /* Batch write; on partial success report the first CSR that failed. */
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_MSRS, cpu->kvm_csr_buf);
+    if (ret < cpu->kvm_csr_buf->ncsrs) {
+        struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret];
+        printf("error: failed to set CSR 0x%" PRIx32 " to 0x%" PRIx64"\n",
+               (uint32_t)e->index, (uint64_t)e->data);
+    }
+
+    /*
+     * timer cfg must be put at last since it is used to enable
+     * guest timer
+     */
+    ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL);
+    ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG);
+    return ret;
+}
+
+/*
+ * Read a 64-bit CSR (by CSR index, wrapped into a ONE_REG id with
+ * KVM_IOC_CSRID) and evaluate to the ioctl result.  Uses a GCC/Clang
+ * statement expression; failures are only logged via DPRINTF.
+ */
+#define KVM_GET_ONE_UREG64(cs, regidx, addr) \
+    ({ \
+        int err; \
+        uint64_t csrid = 0; \
+        csrid = (KVM_IOC_CSRID(regidx)); \
+        err = kvm_larch_getq(cs, csrid, addr); \
+        if (err < 0) { \
+            /* this is the GET path; the old message said "put" */ \
+            DPRINTF("%s: Failed to get regidx 0x%x err:%d\n", __func__, regidx, err); \
+        } \
+        err; \
+    })
+
+/*
+ * Fetch the full CSR set from KVM into QEMU's CPU state.  The request
+ * list is built with zero placeholders, fetched with one batch ioctl
+ * (which returns the number of entries it filled), and scattered back
+ * through the pointer table located just past the csr buffer.
+ *
+ * NOTE(review): addr[] is assumed to be an index->&env->CSR_x table
+ * maintained by kvm_csr_entry_add() at kvm_csr_buf + CSR_BUF_SIZE --
+ * confirm against that helper's definition.
+ * NOTE(review): the return value mixes the batch count with ONE_REG
+ * error codes via |=, so it is only meaningful as "negative = failure".
+ */
+static int kvm_loongarch_get_csr_registers(CPUState *cs)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int ret = 0, i;
+    struct kvm_csr_entry *csrs = cpu->kvm_csr_buf->entries;
+    uint64_t **addr;
+
+    kvm_csr_buf_reset(cpu);
+    addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE;
+
+    /* Base architectural CSRs. */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, 0);
+
+    /* Timer, guest-mode, TLB-refill and error CSRs. */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, 0);
+
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, 0);
+
+    /* debug */
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, 0);
+
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, cpu->kvm_csr_buf);
+    if (ret < cpu->kvm_csr_buf->ncsrs) {
+        struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret];
+        printf("error: failed to get CSR 0x%" PRIx32"\n",
+               (uint32_t)e->index);
+    }
+
+    /* Scatter the returned values into env via the index->address table. */
+    for (i = 0; i < ret; i++) {
+        uint32_t index = csrs[i].index;
+        if (addr[index]) {
+            *addr[index] = csrs[i].data;
+        } else {
+            /* Report the CSR index (not the loop counter, an int). */
+            printf("Failed to get addr CSR 0x%"PRIx32"\n", index);
+        }
+    }
+
+    ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL);
+    ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG);
+    return ret;
+}
+
+/*
+ * Tell KVM the guest-physical address of the paravirtual-time area.
+ * Silently succeeds when the kernel lacks the PVTIME attribute.
+ */
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu)
+{
+    CPULOONGARCHState *env = &cpu->env;
+    int err;
+    struct kvm_device_attr attr = {
+        .group = KVM_LARCH_VCPU_PVTIME_CTRL,
+        .attr = KVM_LARCH_VCPU_PVTIME_IPA,
+        .addr = (uint64_t)&env->st.guest_addr,
+    };
+
+    /* Device-attr ioctls take a pointer; passing attr by value was a bug. */
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        /* It's ok even though kvm has not such attr */
+        return 0;
+    }
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        error_report("PVTIME IPA: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
+        return err;
+    }
+
+    return 0;
+}
+
+/*
+ * Read back the paravirtual-time area address from KVM.
+ * Silently succeeds when the kernel lacks the PVTIME attribute.
+ */
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu)
+{
+    CPULOONGARCHState *env = &cpu->env;
+    int err;
+    struct kvm_device_attr attr = {
+        .group = KVM_LARCH_VCPU_PVTIME_CTRL,
+        .attr = KVM_LARCH_VCPU_PVTIME_IPA,
+        .addr = (uint64_t)&env->st.guest_addr,
+    };
+
+    /* Device-attr ioctls take a pointer; passing attr by value was a bug. */
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        /* It's ok even though kvm has not such attr */
+        return 0;
+    }
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        error_report("PVTIME IPA: KVM_GET_DEVICE_ATTR: %s", strerror(-err));
+        return err;
+    }
+
+    return 0;
+}
+
+
+/*
+ * Push the LBT (binary translation) scratch registers, flags and FP
+ * top-of-stack to KVM.  Individual errors are OR-ed together, so the
+ * result only distinguishes "all ok" from "something failed".
+ */
+static int kvm_loongarch_put_lbt_registers(CPUState *cs)
+{
+    int ret = 0;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+    return ret;
+}
+
+/*
+ * Fetch the LBT scratch registers, flags and FP top-of-stack from KVM.
+ * Errors are OR-ed together, mirroring the put path.
+ */
+static int kvm_loongarch_get_lbt_registers(CPUState *cs)
+{
+    int ret = 0;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+    return ret;
+}
+
+/*
+ * Push QEMU's register state into KVM: GPRs and PC first, then CSRs,
+ * FPU and LBT state.  Returns the first fatal (negative) error.
+ */
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    struct kvm_regs regs;
+    int ret;
+    int i;
+
+    /* Set the registers based on QEMU's view of things */
+    for (i = 0; i < 32; i++) {
+        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
+    }
+
+    regs.pc = (int64_t)(target_long)env->active_tc.PC;
+
+    /* "&regs" -- the previous text was mojibake from an HTML entity. */
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = kvm_loongarch_put_csr_registers(cs, level);
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = kvm_loongarch_put_fpu_registers(cs, level);
+    if (ret < 0) {
+        return ret;
+    }
+
+    /* NOTE(review): LBT errors are ignored here, matching the get path. */
+    kvm_loongarch_put_lbt_registers(cs);
+    return ret;
+}
+
+/*
+ * Refresh QEMU's register state from KVM: GPRs and PC first, then
+ * CSRs, FPU and LBT state (whose errors are currently ignored).
+ */
+int kvm_arch_get_registers(CPUState *cs)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int ret = 0;
+    struct kvm_regs regs;
+    int i;
+
+    /* Get the current register set as KVM seems it */
+    /* "&regs" -- the previous text was mojibake from an HTML entity. */
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    for (i = 0; i < 32; i++) {
+        env->active_tc.gpr[i] = regs.gpr[i];
+    }
+
+    env->active_tc.PC = regs.pc;
+
+    kvm_loongarch_get_csr_registers(cs);
+    kvm_loongarch_get_fpu_registers(cs);
+    kvm_loongarch_get_lbt_registers(cs);
+
+    return ret;
+}
+
+/* No architecture-specific MSI route fixup is needed. */
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data, PCIDevice *dev)
+{
+    return 0;
+}
+
+/* Nothing to do after an MSI route is added. */
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+                                int vector, PCIDevice *dev)
+{
+    return 0;
+}
+
+/* vCPUs can always be reset under this KVM port. */
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+    return true;
+}
+
+/* No per-virq teardown is required. */
+int kvm_arch_release_virq_post(int virq)
+{
+    return 0;
+}
+
+/* MSI-data to GSI translation is not supported; must never be called. */
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+    abort();
+}
diff --git a/target/loongarch64/kvm_larch.h b/target/loongarch64/kvm_larch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a56026d10c6e9d8625901fa3cf831322af90e9b8
--- /dev/null
+++ b/target/loongarch64/kvm_larch.h
@@ -0,0 +1,41 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal
+*/
+
+#ifndef KVM_LOONGARCH_H
+#define KVM_LOONGARCH_H
+
+/**
+ * kvm_loongarch_reset_vcpu:
+ * @cpu: LOONGARCHCPU
+ *
+ * Called at reset time to set kernel registers to their initial values.
+ */
+void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu);
+
+/* Raise (level != 0) or lower a core interrupt line on @cpu. */
+int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level);
+/* Raise or lower an inter-processor interrupt targeting @cpu. */
+int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level);
+
+/* Sync the paravirtual-time area address with KVM; no-op if unsupported. */
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu);
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu);
+
+/* Fallbacks for kernel headers that lack these KVM_INTERRUPT encodings. */
+#ifndef KVM_INTERRUPT_SET
+#define KVM_INTERRUPT_SET -1
+#endif
+
+#ifndef KVM_INTERRUPT_UNSET
+#define KVM_INTERRUPT_UNSET -2
+#endif
+
+#ifndef KVM_INTERRUPT_SET_LEVEL
+#define KVM_INTERRUPT_SET_LEVEL -3
+#endif
+
+#endif /* KVM_LOONGARCH_H */
diff --git a/target/loongarch64/larch-defs.h b/target/loongarch64/larch-defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3a61cf2557420c04556ce7cefe6ecf2fe5e95ef
--- /dev/null
+++ b/target/loongarch64/larch-defs.h
@@ -0,0 +1,27 @@
+#ifndef QEMU_LOONGARCH_DEFS_H
+#define QEMU_LOONGARCH_DEFS_H
+
+/* If we want to use host float regs... */
+/* #define USE_HOST_FLOAT_REGS */
+
+/* Real pages are variable size... */
+#define TARGET_PAGE_BITS 14
+
+/* Total TLB entries modelled (NOTE(review): presumably STLB + MTLB; confirm). */
+#define LOONGARCH_TLB_MAX 2112
+
+/* 64-bit target with 48-bit physical and virtual address spaces. */
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+
+/*
+ * bit definitions for insn_flags (ISAs/ASEs flags)
+ * ------------------------------------------------
+ */
+#define ISA_LARCH32 0x00000001ULL
+#define ISA_LARCH64 0x00000002ULL
+#define INSN_LOONGARCH 0x00010000ULL
+
+/* 64-bit CPUs implement the 32-bit ISA as a subset. */
+#define CPU_LARCH32 (ISA_LARCH32)
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
+
+#endif /* QEMU_LOONGARCH_DEFS_H */
diff --git a/target/loongarch64/machine.c b/target/loongarch64/machine.c
new file mode 100644
index 0000000000000000000000000000000000000000..dea6a7034c2799c9209108af653c8eb1c911de7f
--- /dev/null
+++ b/target/loongarch64/machine.c
@@ -0,0 +1,423 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "hw/hw.h"
+#include "kvm_larch.h"
+#include "migration/cpu.h"
+#include "linux/kvm.h"
+#include "sysemu/kvm.h"
+#include "qemu/error-report.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ LOONGARCHCPU *cpu = opaque;
+ CPULOONGARCHState *env = &cpu->env;
+ int r = 0;
+
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
+#ifdef CONFIG_KVM
+ struct kvm_loongarch_vcpu_state vcpu_state;
+ int i;
+
+ vcpu_state.online_vcpus = cpu->online_vcpus;
+ vcpu_state.is_migrate = cpu->is_migrate;
+ vcpu_state.cpu_freq = cpu->cpu_freq;
+ vcpu_state.count_ctl = cpu->count_ctl;
+ vcpu_state.pending_exceptions = cpu->pending_exceptions;
+ vcpu_state.pending_exceptions_clr = cpu->pending_exceptions_clr;
+ for (i = 0; i < 4; i++) {
+ vcpu_state.core_ext_ioisr[i] = cpu->core_ext_ioisr[i];
+ }
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_SET_VCPU_STATE, &vcpu_state);
+ if (r) {
+ error_report("set vcpu state failed %d", r);
+ }
+
+ kvm_loongarch_put_pvtime(cpu);
+#endif
+
+ restore_fp_status(env);
+ compute_hflags(env);
+
+ return r;
+}
+
+static int cpu_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+ LOONGARCHCPU *cpu = opaque;
+ struct kvm_loongarch_vcpu_state vcpu_state;
+ int i, r = 0;
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_GET_VCPU_STATE, &vcpu_state);
+ if (r < 0) {
+ error_report("get vcpu state failed %d", r);
+ return r;
+ }
+
+ cpu->online_vcpus = vcpu_state.online_vcpus;
+ cpu->is_migrate = vcpu_state.is_migrate;
+ cpu->cpu_freq = vcpu_state.cpu_freq;
+ cpu->count_ctl = vcpu_state.count_ctl;
+ cpu->pending_exceptions = vcpu_state.pending_exceptions;
+ cpu->pending_exceptions_clr = vcpu_state.pending_exceptions_clr;
+ for (i = 0; i < 4; i++) {
+ cpu->core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i];
+ }
+
+ kvm_loongarch_get_pvtime(cpu);
+#endif
+ return 0;
+}
+
+/* FPU state */
+
+static int get_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ fpr_t *v = pv;
+ qemu_get_be64s(f, &v->d);
+ return 0;
+}
+
+static int put_fpr(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ fpr_t *v = pv;
+ qemu_put_be64s(f, &v->d);
+ return 0;
+}
+
+const VMStateInfo vmstate_info_fpr = {
+ .name = "fpr",
+ .get = get_fpr,
+ .put = put_fpr,
+};
+
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
+
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+static VMStateField vmstate_fpu_fields[] = {
+ VMSTATE_FPR_ARRAY(fpr, CPULOONGARCHFPUContext, 32),
+ VMSTATE_UINT32(fcsr0, CPULOONGARCHFPUContext),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+const VMStateDescription vmstate_inactive_fpu = {
+ .name = "cpu/inactive_fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+/* TC state */
+
+static VMStateField vmstate_tc_fields[] = {
+ VMSTATE_UINTTL_ARRAY(gpr, TCState, 32),
+ VMSTATE_UINTTL(PC, TCState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_tc = {
+ .name = "cpu/tc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_tc_fields
+};
+
+const VMStateDescription vmstate_inactive_tc = {
+ .name = "cpu/inactive_tc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_tc_fields
+};
+
+/* TLB state */
+
+static int get_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field)
+{
+ ls3a5k_tlb_t *v = pv;
+ uint32_t flags;
+
+ qemu_get_betls(f, &v->VPN);
+ qemu_get_be64s(f, &v->PageMask);
+ qemu_get_be32s(f, &v->PageSize);
+ qemu_get_be16s(f, &v->ASID);
+ qemu_get_be32s(f, &flags);
+ v->RPLV1 = (flags >> 21) & 1;
+ v->RPLV0 = (flags >> 20) & 1;
+ v->PLV1 = (flags >> 18) & 3;
+ v->PLV0 = (flags >> 16) & 3;
+ v->EHINV = (flags >> 15) & 1;
+ v->RI1 = (flags >> 14) & 1;
+ v->RI0 = (flags >> 13) & 1;
+ v->XI1 = (flags >> 12) & 1;
+ v->XI0 = (flags >> 11) & 1;
+ v->WE1 = (flags >> 10) & 1;
+ v->WE0 = (flags >> 9) & 1;
+ v->V1 = (flags >> 8) & 1;
+ v->V0 = (flags >> 7) & 1;
+ v->C1 = (flags >> 4) & 7;
+ v->C0 = (flags >> 1) & 7;
+ v->G = (flags >> 0) & 1;
+ qemu_get_be64s(f, &v->PPN0);
+ qemu_get_be64s(f, &v->PPN1);
+
+ return 0;
+}
+
+static int put_tlb(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, JSONWriter *vmdesc)
+{
+ ls3a5k_tlb_t *v = pv;
+
+ uint16_t asid = v->ASID;
+ uint32_t flags = ((v->RPLV1 << 21) |
+ (v->RPLV0 << 20) |
+ (v->PLV1 << 18) |
+ (v->PLV0 << 16) |
+ (v->EHINV << 15) |
+ (v->RI1 << 14) |
+ (v->RI0 << 13) |
+ (v->XI1 << 12) |
+ (v->XI0 << 11) |
+ (v->WE1 << 10) |
+ (v->WE0 << 9) |
+ (v->V1 << 8) |
+ (v->V0 << 7) |
+ (v->C1 << 4) |
+ (v->C0 << 1) |
+ (v->G << 0));
+
+ qemu_put_betls(f, &v->VPN);
+ qemu_put_be64s(f, &v->PageMask);
+ qemu_put_be32s(f, &v->PageSize);
+ qemu_put_be16s(f, &asid);
+ qemu_put_be32s(f, &flags);
+ qemu_put_be64s(f, &v->PPN0);
+ qemu_put_be64s(f, &v->PPN1);
+
+ return 0;
+}
+
+const VMStateInfo vmstate_info_tlb = {
+ .name = "tlb_entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, ls3a5k_tlb_t)
+
+#define VMSTATE_TLB_ARRAY(_f, _s, _n) \
+ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
+
+const VMStateDescription vmstate_tlb = {
+ .name = "cpu/tlb",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(nb_tlb, CPULOONGARCHTLBContext),
+ VMSTATE_UINT32(tlb_in_use, CPULOONGARCHTLBContext),
+ VMSTATE_TLB_ARRAY(mmu.ls3a5k.tlb, CPULOONGARCHTLBContext, LOONGARCH_TLB_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* LOONGARCH CPU state */
+
+const VMStateDescription vmstate_loongarch_cpu = {
+ .name = "cpu",
+ .version_id = 15,
+ .minimum_version_id = 15,
+ .post_load = cpu_post_load,
+ .pre_save = cpu_pre_save,
+ .fields = (VMStateField[]) {
+ /* Active TC */
+ VMSTATE_STRUCT(env.active_tc, LOONGARCHCPU, 1, vmstate_tc, TCState),
+
+ /* Active FPU */
+ VMSTATE_STRUCT(env.active_fpu, LOONGARCHCPU, 1, vmstate_fpu,
+ CPULOONGARCHFPUContext),
+
+ /* TLB */
+ VMSTATE_STRUCT_POINTER(env.tlb, LOONGARCHCPU, vmstate_tlb,
+ CPULOONGARCHTLBContext),
+ /* CPU metastate */
+ VMSTATE_UINT32(env.current_tc, LOONGARCHCPU),
+ VMSTATE_INT32(env.error_code, LOONGARCHCPU),
+ VMSTATE_UINTTL(env.btarget, LOONGARCHCPU),
+ VMSTATE_UINTTL(env.bcond, LOONGARCHCPU),
+
+ VMSTATE_UINT64(env.lladdr, LOONGARCHCPU),
+
+ /* PV time */
+ VMSTATE_UINT64(env.st.guest_addr, LOONGARCHCPU),
+
+ /* Remaining CSR registers */
+ VMSTATE_UINT64(env.CSR_CRMD, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PRMD, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_EUEN, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_MISC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ECFG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ESTAT, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERA, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_BADV, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_BADI, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_EEPN, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBIDX, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBEHI, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBELO0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBELO1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBWIRED, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GTLBC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TRGP, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PGDL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PGDH, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PGD, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PWCTL0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PWCTL1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_STLBPGSIZE, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_RVACFG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_CPUID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PRCFG1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PRCFG2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PRCFG3, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS3, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS4, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS5, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS6, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_KS7, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TMID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TCFG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TVAL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_CNTC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TINTCLR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GSTAT, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GCFG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GINTC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GCNTC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_LLBCTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IMPCTL1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IMPCTL2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_GNMI, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRENT, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRBADV, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRERA, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRSAVE, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRELO0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRELO1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBREHI, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_TLBRPRMD, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRCTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRINFO, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRINFO1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRENT, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRERA, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_ERRSAVE, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_CTAG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DMWIN0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DMWIN1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DMWIN2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DMWIN3, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCTRL0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCNTR0, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCTRL1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCNTR1, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCTRL2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCNTR2, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCTRL3, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_PERFCNTR3, LOONGARCHCPU),
+ /* debug */
+ VMSTATE_UINT64(env.CSR_MWPC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_MWPS, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB0ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB0MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB0CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB0ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB1ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB1MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB1CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB1ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB2ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB2MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB2CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB2ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB3ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB3MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB3CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DB3ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_FWPC, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_FWPS, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB0ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB0MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB0CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB0ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB1ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB1MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB1CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB1ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB2ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB2MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB2CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB2ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB3ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB3MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB3CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB3ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB4ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB4MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB4CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB4ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB5ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB5MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB5CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB5ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB6ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB6MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB6CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB6ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB7ADDR, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB7MASK, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB7CTL, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_IB7ASID, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DEBUG, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DERA, LOONGARCHCPU),
+ VMSTATE_UINT64(env.CSR_DESAVE, LOONGARCHCPU),
+
+ VMSTATE_STRUCT_ARRAY(env.fpus, LOONGARCHCPU, LOONGARCH_FPU_MAX, 1,
+ vmstate_inactive_fpu, CPULOONGARCHFPUContext),
+ VMSTATE_UINT8(online_vcpus, LOONGARCHCPU),
+ VMSTATE_UINT8(is_migrate, LOONGARCHCPU),
+ VMSTATE_UINT64(counter_value, LOONGARCHCPU),
+ VMSTATE_UINT32(cpu_freq, LOONGARCHCPU),
+ VMSTATE_UINT32(count_ctl, LOONGARCHCPU),
+ VMSTATE_UINT64(pending_exceptions, LOONGARCHCPU),
+ VMSTATE_UINT64(pending_exceptions_clr, LOONGARCHCPU),
+ VMSTATE_UINT64_ARRAY(core_ext_ioisr, LOONGARCHCPU, 4),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
diff --git a/target/loongarch64/meson.build b/target/loongarch64/meson.build
new file mode 100644
index 0000000000000000000000000000000000000000..6badf4484e610792e9930d995f9a245abc991622
--- /dev/null
+++ b/target/loongarch64/meson.build
@@ -0,0 +1,35 @@
+loongarch_user_ss = ss.source_set()
+loongarch_softmmu_ss = ss.source_set()
+loongarch_ss = ss.source_set()
+loongarch_ss.add(files(
+ 'cpu.c',
+ 'fpu.c',
+ 'gdbstub.c',
+))
+
+gen = [
+ decodetree.process('insn.decode', extra_args: [ '--decode', 'decode_insn',
+ '--insnwidth', '32' ])
+]
+
+loongarch_ss.add(gen)
+loongarch_ss.add(when: 'CONFIG_TCG', if_true: files(
+ 'helper.c',
+ 'translate.c',
+ 'op_helper.c',
+ 'fpu_helper.c',
+ 'tlb_helper.c',
+ 'csr_helper.c',
+))
+
+loongarch_softmmu_ss.add(when: 'CONFIG_SOFTMMU', if_true: files(
+ 'machine.c',
+ 'stabletimer.c',
+ 'arch_dump.c',
+))
+
+loongarch_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
+
+target_arch += {'loongarch64': loongarch_ss}
+target_softmmu_arch += {'loongarch64': loongarch_softmmu_ss}
+target_user_arch += {'loongarch64': loongarch_user_ss}
diff --git a/target/loongarch64/op_helper.c b/target/loongarch64/op_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..9a34c0d25ee32718173229d88d795e49c465762e
--- /dev/null
+++ b/target/loongarch64/op_helper.c
@@ -0,0 +1,533 @@
+/*
+ * LOONGARCH emulation helpers for qemu.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "sysemu/kvm.h"
+#include "qemu/crc32c.h"
+#include <zlib.h>
+#include "hw/irq.h"
+#include "hw/core/cpu.h"
+#include "instmap.h"
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void helper_raise_exception_err(CPULOONGARCHState *env, uint32_t exception,
+ int error_code)
+{
+ do_raise_exception_err(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPULOONGARCHState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, GETPC());
+}
+
+void helper_raise_exception_debug(CPULOONGARCHState *env)
+{
+ do_raise_exception(env, EXCP_DEBUG, 0);
+}
+
+static void raise_exception(CPULOONGARCHState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, 0);
+}
+
+#if defined(CONFIG_USER_ONLY)
+#define HELPER_LD(name, insn, type) \
+static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \
+ int mem_idx, uintptr_t retaddr) \
+{ \
+ return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
+}
+#else
+
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
+#define MMU_KNOSMAP_IDX 2
+#define HF_CPL_SHIFT 0
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define AC_MASK 0x00040000
+#define MMU_KSMAP_IDX 0
+static inline int cpu_mmu_index_kernel(CPULOONGARCHState *env)
+{
+ return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
+ ((env->hflags & HF_CPL_MASK) < 3 && (env->hflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
+#define cpu_ldl_kernel_ra(e, p, r) \
+ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
+
+#define HELPER_LD(name, insn, type) \
+static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \
+ int mem_idx, uintptr_t retaddr) \
+{ \
+/* \
+ switch (mem_idx) { \
+ case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
+ case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
+ default: \
+ case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
+ case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
+ } \
+*/ \
+}
+#endif
+#if 0
+HELPER_LD(lw, ldl, int32_t)
+HELPER_LD(ld, ldq, int64_t)
+#endif
+#undef HELPER_LD
+
+#if defined(CONFIG_USER_ONLY)
+#define HELPER_ST(name, insn, type) \
+static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \
+ type val, int mem_idx, uintptr_t retaddr) \
+{ \
+/* \
+ cpu_##insn##_data_ra(env, addr, val, retaddr); \
+*/ \
+}
+#else
+#define HELPER_ST(name, insn, type) \
+static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \
+ type val, int mem_idx, uintptr_t retaddr) \
+{ \
+/* \
+ switch (mem_idx) { \
+ case 0: \
+ cpu_##insn##_kernel_ra(env, addr, val, retaddr); \
+ break; \
+ case 1: \
+ cpu_##insn##_super_ra(env, addr, val, retaddr); \
+ break; \
+ default: \
+ case 2: \
+ cpu_##insn##_user_ra(env, addr, val, retaddr); \
+ break; \
+ case 3: \
+ cpu_##insn##_error_ra(env, addr, val, retaddr); \
+ break; \
+ } \
+*/ \
+}
+#endif
+#if 0
+HELPER_ST(sb, stb, uint8_t)
+HELPER_ST(sw, stl, uint32_t)
+HELPER_ST(sd, stq, uint64_t)
+#endif
+#undef HELPER_ST
+
+static inline target_ulong bitswap(target_ulong v)
+{
+ v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
+ ((v & (target_ulong)0x5555555555555555ULL) << 1);
+ v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
+ ((v & (target_ulong)0x3333333333333333ULL) << 2);
+ v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
+ ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
+ return v;
+}
+
+target_ulong helper_dbitswap(target_ulong rt)
+{
+ return bitswap(rt);
+}
+
+target_ulong helper_bitswap(target_ulong rt)
+{
+ return (int32_t)bitswap(rt);
+}
+
+/* these crc32 functions are based on target/arm/helper-a64.c */
+target_ulong helper_crc32(target_ulong val, target_ulong m, uint32_t sz)
+{
+ uint8_t buf[8];
+ target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
+
+ m &= mask;
+ stq_le_p(buf, m);
+ return (int32_t) (crc32(val ^ 0xffffffff, buf, sz) ^ 0xffffffff);
+}
+
+target_ulong helper_crc32c(target_ulong val, target_ulong m, uint32_t sz)
+{
+ uint8_t buf[8];
+ target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
+ m &= mask;
+ stq_le_p(buf, m);
+ return (int32_t) (crc32c(val, buf, sz) ^ 0xffffffff);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+#define HELPER_LD_ATOMIC(name, insn, almask) \
+target_ulong helper_##name(CPULOONGARCHState *env, target_ulong arg, int mem_idx) \
+{ \
+/* \
+ if (arg & almask) { \
+ env->CSR_BADV = arg; \
+ do_raise_exception(env, EXCP_AdEL, GETPC()); \
+ } \
+ env->lladdr = arg; \
+ env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
+ return env->llval; \
+*/ \
+}
+#if 0
+HELPER_LD_ATOMIC(ll, lw, 0x3)
+HELPER_LD_ATOMIC(lld, ld, 0x7)
+#endif
+#undef HELPER_LD_ATOMIC
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void helper_drdtime(CPULOONGARCHState *env, target_ulong rd, target_ulong rs)
+{
+ env->active_tc.gpr[rd] = cpu_loongarch_get_stable_counter(env);
+ env->active_tc.gpr[rs] = env->CSR_TMID;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+static void debug_pre_ertn(CPULOONGARCHState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx,
+ env->active_tc.PC, env->CSR_ERA);
+ qemu_log("\n");
+ }
+}
+
+static void debug_post_ertn(CPULOONGARCHState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx,
+ env->active_tc.PC, env->CSR_ERA);
+ }
+}
+
+static void set_pc(CPULOONGARCHState *env, target_ulong error_pc)
+{
+ env->active_tc.PC = error_pc & ~(target_ulong)1;
+}
+
+static inline void exception_return(CPULOONGARCHState *env)
+{
+ debug_pre_ertn(env);
+
+ if (cpu_refill_state(env)) {
+ env->CSR_CRMD &= (~0x7);
+ env->CSR_CRMD |= (env->CSR_TLBRPRMD & 0x7);
+ /* Clear Refill flag and set pc */
+ env->CSR_TLBRERA &= (~0x1);
+ set_pc(env, env->CSR_TLBRERA);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ qemu_log("%s: TLBRERA 0x%lx\n", __func__, env->CSR_TLBRERA);
+ }
+ } else {
+ env->CSR_CRMD &= (~0x7);
+ env->CSR_CRMD |= (env->CSR_PRMD & 0x7);
+ /* Clear Refill flag and set pc*/
+ set_pc(env, env->CSR_ERA);
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ qemu_log("%s: ERA 0x%lx\n", __func__, env->CSR_ERA);
+ }
+ }
+
+ compute_hflags(env);
+ debug_post_ertn(env);
+}
+
+void helper_ertn(CPULOONGARCHState *env)
+{
+ exception_return(env);
+ env->lladdr = 1;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_idle(CPULOONGARCHState *env)
+{
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
+
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+ /* Last instruction in the block, PC was updated before
+ - no need to recover PC and icount */
+ raise_exception(env, EXCP_HLT);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void loongarch_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ while(1);
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_store_scr(CPULOONGARCHState *env, uint32_t n, target_ulong val)
+{
+ env->scr[n & 0x3] = val;
+}
+
+target_ulong helper_load_scr(CPULOONGARCHState *env, uint32_t n)
+{
+ return env->scr[n & 0x3];
+}
+
+/* loongarch assert op */
+void helper_asrtle_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt)
+{
+ if (rs > rt) {
+ do_raise_exception(env, EXCP_AdEL, GETPC());
+ }
+}
+
+void helper_asrtgt_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt)
+{
+ if (rs <= rt) {
+ do_raise_exception(env, EXCP_AdEL, GETPC());
+ }
+}
+
+target_ulong helper_cto_w(CPULOONGARCHState *env, target_ulong a0)
+{
+ uint32_t v = (uint32_t)a0;
+ int temp = 0;
+
+ while ((v & 0x1) == 1) {
+ temp++;
+ v = v >> 1;
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_ctz_w(CPULOONGARCHState *env, target_ulong a0)
+{
+ uint32_t v = (uint32_t)a0;
+
+ if (v == 0) {
+ return 32;
+ }
+
+ int temp = 0;
+ while ((v & 0x1) == 0) {
+ temp++;
+ v = v >> 1;
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_cto_d(CPULOONGARCHState *env, target_ulong a0)
+{
+ uint64_t v = a0;
+ int temp = 0;
+
+ while ((v & 0x1) == 1) {
+ temp++;
+ v = v >> 1;
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_ctz_d(CPULOONGARCHState *env, target_ulong a0)
+{
+ uint64_t v = a0;
+
+ if (v == 0) {
+ return 64;
+ }
+
+ int temp = 0;
+ while ((v & 0x1) == 0) {
+ temp++;
+ v = v >> 1;
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_bitrev_w(CPULOONGARCHState *env, target_ulong a0)
+{
+ int32_t v = (int32_t)a0;
+ const int SIZE = 32;
+ uint8_t bytes[SIZE];
+
+ int i;
+ for (i = 0; i < SIZE; i++) {
+ bytes[i] = v & 0x1;
+ v = v >> 1;
+ }
+ /* v == 0 */
+ for (i = 0; i < SIZE; i++) {
+ v = v | ((uint32_t)bytes[i] << (SIZE - 1 - i));
+ }
+
+ return (target_ulong)(int32_t)v;
+}
+
+target_ulong helper_bitrev_d(CPULOONGARCHState *env, target_ulong a0)
+{
+ uint64_t v = a0;
+ const int SIZE = 64;
+ uint8_t bytes[SIZE];
+
+ int i;
+ for (i = 0; i < SIZE; i++) {
+ bytes[i] = v & 0x1;
+ v = v >> 1;
+ }
+ /* v == 0 */
+ for (i = 0; i < SIZE; i++) {
+ v = v | ((uint64_t)bytes[i] << (SIZE - 1 - i));
+ }
+
+ return (target_ulong)v;
+}
+
+void helper_memtrace_addr(CPULOONGARCHState *env,
+ target_ulong address, uint32_t op)
+{
+ qemu_log("[cpu %d asid 0x%lx pc 0x%lx] addr 0x%lx op",
+ CPU(loongarch_env_get_cpu(env))->cpu_index,
+ env->CSR_ASID, env->active_tc.PC, address);
+ switch (op) {
+ case OPC_LARCH_LDPTR_D:
+ qemu_log("OPC_LARCH_LDPTR_D");
+ break;
+ case OPC_LARCH_LD_D:
+ qemu_log("OPC_LARCH_LD_D");
+ break;
+ case OPC_LARCH_LDPTR_W:
+ qemu_log("OPC_LARCH_LDPTR_W");
+ break;
+ case OPC_LARCH_LD_W:
+ qemu_log("OPC_LARCH_LD_W");
+ break;
+ case OPC_LARCH_LD_H:
+ qemu_log("OPC_LARCH_LD_H");
+ break;
+ case OPC_LARCH_LD_B:
+ qemu_log("OPC_LARCH_LD_B");
+ break;
+ case OPC_LARCH_LD_WU:
+ qemu_log("OPC_LARCH_LD_WU");
+ break;
+ case OPC_LARCH_LD_HU:
+ qemu_log("OPC_LARCH_LD_HU");
+ break;
+ case OPC_LARCH_LD_BU:
+ qemu_log("OPC_LARCH_LD_BU");
+ break;
+ case OPC_LARCH_STPTR_D:
+ qemu_log("OPC_LARCH_STPTR_D");
+ break;
+ case OPC_LARCH_ST_D:
+ qemu_log("OPC_LARCH_ST_D");
+ break;
+ case OPC_LARCH_STPTR_W:
+ qemu_log("OPC_LARCH_STPTR_W");
+ break;
+ case OPC_LARCH_ST_W:
+ qemu_log("OPC_LARCH_ST_W");
+ break;
+ case OPC_LARCH_ST_H:
+ qemu_log("OPC_LARCH_ST_H");
+ break;
+ case OPC_LARCH_ST_B:
+ qemu_log("OPC_LARCH_ST_B");
+ break;
+ case OPC_LARCH_FLD_S:
+ qemu_log("OPC_LARCH_FLD_S");
+ break;
+ case OPC_LARCH_FLD_D:
+ qemu_log("OPC_LARCH_FLD_D");
+ break;
+ case OPC_LARCH_FST_S:
+ qemu_log("OPC_LARCH_FST_S");
+ break;
+ case OPC_LARCH_FST_D:
+ qemu_log("OPC_LARCH_FST_D");
+ break;
+ case OPC_LARCH_FLDX_S:
+ qemu_log("OPC_LARCH_FLDX_S");
+ break;
+ case OPC_LARCH_FLDGT_S:
+ qemu_log("OPC_LARCH_FLDGT_S");
+ break;
+ case OPC_LARCH_FLDLE_S:
+ qemu_log("OPC_LARCH_FLDLE_S");
+        break;
+ case OPC_LARCH_FSTX_S:
+ qemu_log("OPC_LARCH_FSTX_S");
+ break;
+ case OPC_LARCH_FSTGT_S:
+ qemu_log("OPC_LARCH_FSTGT_S");
+ break;
+ case OPC_LARCH_FSTLE_S:
+ qemu_log("OPC_LARCH_FSTLE_S");
+ break;
+ case OPC_LARCH_FLDX_D:
+ qemu_log("OPC_LARCH_FLDX_D");
+ break;
+ case OPC_LARCH_FLDGT_D:
+ qemu_log("OPC_LARCH_FLDGT_D");
+ break;
+ case OPC_LARCH_FLDLE_D:
+ qemu_log("OPC_LARCH_FLDLE_D");
+        break;
+ case OPC_LARCH_FSTX_D:
+ qemu_log("OPC_LARCH_FSTX_D");
+ break;
+ case OPC_LARCH_FSTGT_D:
+ qemu_log("OPC_LARCH_FSTGT_D");
+ break;
+ case OPC_LARCH_FSTLE_D:
+ qemu_log("OPC_LARCH_FSTLE_D");
+ break;
+ case OPC_LARCH_LL_W:
+ qemu_log("OPC_LARCH_LL_W");
+ break;
+ case OPC_LARCH_LL_D:
+ qemu_log("OPC_LARCH_LL_D");
+ break;
+ default:
+ qemu_log("0x%x", op);
+ }
+}
+
+void helper_memtrace_val(CPULOONGARCHState *env, target_ulong val)
+{
+ qemu_log("val 0x%lx\n", val);
+}
diff --git a/target/loongarch64/stabletimer.c b/target/loongarch64/stabletimer.c
new file mode 100644
index 0000000000000000000000000000000000000000..b86fecf899a1ca65add389e33ef36c2fb6fa377d
--- /dev/null
+++ b/target/loongarch64/stabletimer.c
@@ -0,0 +1,122 @@
+/*
+ * QEMU LOONGARCH timer support
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/loongarch/cpudevs.h"
+#include "qemu/timer.h"
+#include "sysemu/kvm.h"
+#include "internal.h"
+#include "hw/irq.h"
+
+
+#ifdef DEBUG_TIMER
+#define debug_timer(fmt, args...) printf("%s(%d)-%s -> " #fmt "\n", \
+ __FILE__, __LINE__, __func__, ##args);
+#else
+#define debug_timer(fmt, args...)
+#endif
+
+#define TIMER_PERIOD 10 /* 10 ns period for 100 Mhz frequency */
+#define STABLETIMER_TICK_MASK 0xfffffffffffcUL
+#define STABLETIMER_ENABLE 0x1UL
+#define STABLETIMER_PERIOD 0x2UL
+
+/* return random value in [low, high] */
+uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high)
+{
+ static uint32_t seed = 5;
+ static uint32_t prev_idx;
+ uint32_t idx;
+ uint32_t nb_rand_tlb = high - low + 1;
+
+ do {
+ seed = 1103515245 * seed + 12345;
+ idx = (seed >> 16) % nb_rand_tlb + low;
+ } while (idx == prev_idx);
+ prev_idx = idx;
+
+ return idx;
+}
+
+/* LOONGARCH timer */
+uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD;
+}
+
+uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env)
+{
+ uint64_t now, expire;
+
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ expire = timer_expire_time_ns(env->timer);
+
+ return (expire - now) / TIMER_PERIOD;
+}
+
+void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env,
+ uint64_t value)
+{
+ uint64_t now, next;
+
+ env->CSR_TCFG = value;
+ if (value & STABLETIMER_ENABLE) {
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next = now + (value & STABLETIMER_TICK_MASK) * TIMER_PERIOD;
+ timer_mod(env->timer, next);
+ }
+ debug_timer("0x%lx 0x%lx now 0x%lx, next 0x%lx",
+ value, env->CSR_TCFG, now, next);
+}
+
+static void loongarch_stable_timer_cb(void *opaque)
+{
+ CPULOONGARCHState *env;
+ uint64_t now, next;
+
+ env = opaque;
+ debug_timer();
+ if (env->CSR_TCFG & STABLETIMER_PERIOD) {
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ next = now + (env->CSR_TCFG & STABLETIMER_TICK_MASK) * TIMER_PERIOD;
+ timer_mod(env->timer, next);
+ } else {
+ env->CSR_TCFG &= ~STABLETIMER_ENABLE;
+ }
+
+ qemu_irq_raise(env->irq[IRQ_TIMER]);
+
+}
+
+void cpu_loongarch_clock_init(LOONGARCHCPU *cpu)
+{
+ CPULOONGARCHState *env = &cpu->env;
+
+ /*
+ * If we're in KVM mode, don't create the periodic timer, that is handled in
+ * kernel.
+ */
+ if (!kvm_enabled()) {
+ env->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ &loongarch_stable_timer_cb, env);
+ }
+}
diff --git a/target/loongarch64/tlb_helper.c b/target/loongarch64/tlb_helper.c
new file mode 100644
index 0000000000000000000000000000000000000000..f5e68349a9a1e8f665f51745f53b68160c5eb919
--- /dev/null
+++ b/target/loongarch64/tlb_helper.c
@@ -0,0 +1,729 @@
+/*
+ * loongarch tlb emulation helpers for qemu.
+ *
+ * Copyright (c) 2020
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <https://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#ifndef CONFIG_USER_ONLY
+
/*
 * Template for mem_idx-dispatched load helpers used by the TLB refill
 * path.  NOTE(review): the body is entirely commented out, so
 * do_##name expands to a non-void function with no return statement;
 * it is only instantiated inside "#if 0" below, so no live code calls
 * it — confirm before re-enabling either block.
 */
#define HELPER_LD(name, insn, type) \
static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \
                             int mem_idx, uintptr_t retaddr) \
{ \
/* \
    switch (mem_idx) { \
    case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
    case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
    default: \
    case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
    case 3: return (type) cpu_##insn##_error_ra(env, addr, retaddr); \
    } \
*/ \
}
#if 0
HELPER_LD(lw, ldl, int32_t)
HELPER_LD(ld, ldq, int64_t)
#endif
+
/*
 * LDDIR helper: walk one page-directory level during software TLB
 * refill, loading the next-level table pointer into gpr[rt].
 * Currently stubbed out (#if 0) — the helper is a no-op.
 */
void helper_lddir(CPULOONGARCHState *env, target_ulong base, target_ulong rt,
                  target_ulong level, uint32_t mem_idx)
{
#if 0
    target_ulong pointer = env->active_tc.gpr[base];
    target_ulong badvaddr;
    target_ulong index;
    target_ulong vaddr;
    int shift;

    badvaddr = env->CSR_TLBRBADV;

    /* Directory-entry width: 0:8B, 1:16B, 2:32B, 3:64B */
    shift = (env->CSR_PWCTL0 >> 30) & 0x3;
    shift = (shift + 1) * 3;

    switch (level) {
    case 1:
        /* Index bits for dir1 come from PWCTL0 base/width fields. */
        index = (badvaddr >> ((env->CSR_PWCTL0 >> 10) & 0x1f)) & \
                ((1 << ((env->CSR_PWCTL0 >> 15) & 0x1f)) - 1);
        break;
    case 3:
        /* Index bits for dir3 come from PWCTL1 base/width fields. */
        index = (badvaddr >> ((env->CSR_PWCTL1 >> 0) & 0x3f)) & \
                ((1 << ((env->CSR_PWCTL1 >> 6) & 0x3f)) - 1);
        break;
    default:
        do_raise_exception(env, EXCP_RI, GETPC());
        return;
    }

    vaddr = pointer | index << shift;
    env->active_tc.gpr[rt] = do_ld(env, vaddr, mem_idx, GETPC());
    return;
#endif
}
+
/*
 * LDPTE helper: load a (even/odd) page-table entry into
 * CSR_TLBRELO0/1 during software TLB refill, handling huge pages.
 * Currently stubbed out (#if 0) — the helper is a no-op.
 */
void helper_ldpte(CPULOONGARCHState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
#if 0
    target_ulong pointer = env->active_tc.gpr[base];
    target_ulong vaddr;
    target_ulong tmp0 = 1;
    target_ulong ptindex, ptoffset0, ptoffset1;
    target_ulong pagesize;
    target_ulong badv;
    int shift;
    bool huge = pointer & LOONGARCH_PAGE_HUGE;

    if (huge) {
        /* Huge page: pointer already holds the physical address. */
        tmp0 = pointer ^ LOONGARCH_PAGE_HUGE;
        /* Relocate the global bit into the TLBLO position. */
        tmp0 |= ((tmp0 & LOONGARCH_HUGE_GLOBAL)
                >> (LOONGARCH_HUGE_GLOBAL_SH - CSR_TLBLO0_GLOBAL_SHIFT));
        pagesize = (env->CSR_PWCTL0 & 0x1f) +
                   ((env->CSR_PWCTL0 >> 5) & 0x1f) - 1;
        if (odd) {
            /* Odd half maps the second half of the huge page. */
            tmp0 += (1 << pagesize);
        }
    } else {
        /* PTE width: 0:8B, 1:16B, 2:32B, 3:64B */
        shift = (env->CSR_PWCTL0 >> 30) & 0x3;
        shift = (shift + 1) * 3;
        badv = env->CSR_TLBRBADV;

        ptindex = (badv >> (env->CSR_PWCTL0 & 0x1f)) &
                  ((1 << ((env->CSR_PWCTL0 >> 5) & 0x1f)) - 1);
        ptindex = ptindex & ~0x1; /* clear bit 0: align to the even PTE */
        ptoffset0 = ptindex << shift;
        ptoffset1 = (ptindex + 1) << shift;

        vaddr = pointer | (odd ? ptoffset1 : ptoffset0);
        tmp0 = do_ld(env, vaddr, mem_idx, GETPC());
        pagesize = (env->CSR_PWCTL0 & 0x1f);
    }
    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    /* Record the page size in the low 6 bits of CSR_TLBREHI. */
    env->CSR_TLBREHI = env->CSR_TLBREHI & (~0x3f);
    env->CSR_TLBREHI = env->CSR_TLBREHI | pagesize;
#endif
    return;
}
+
/*
 * Return the page-directory base for the faulting address: PGDH for
 * addresses with bit 63 set, PGDL otherwise.
 */
target_ulong helper_read_pgd(CPULOONGARCHState *env)
{
    uint64_t badv;

    /* Expected to run only in the TLB-refill context. */
    assert(env->CSR_TLBRERA & 0x1);

    /* NOTE(review): given the assert above, the else branch is dead in
     * debug builds; it only takes effect when built with NDEBUG. */
    if (env->CSR_TLBRERA & 0x1) {
        badv = env->CSR_TLBRBADV;
    } else {
        badv = env->CSR_BADV;
    }

    if ((badv >> 63) & 0x1) {
        return env->CSR_PGDH;
    } else {
        return env->CSR_PGDL;
    }
}
+
+/* TLB management */
/*
 * Convert an encoded page size (log2 of bytes, 12..30 = 4KB..1GB)
 * into the even/odd double-page mask: (1 << (pagesize + 1)) - 1.
 */
static uint64_t ls3a5k_pagesize_to_mask(int pagesize)
{
    /*
     * Valid range is 4KB (12) .. 1GB (30).  The original test used
     * "< 12 && > 30", which is always false, so invalid sizes were
     * never rejected.
     */
    if (pagesize < 12 || pagesize > 30) {
        printf("[ERROR] unsupported page size %d\n", pagesize);
        exit(-1);
    }

    /*
     * Shift as 64-bit: for 1GB pages the shift count is 31, which
     * overflows a 32-bit signed int (undefined behavior).
     */
    return (1ULL << (pagesize + 1)) - 1;
}
+
/*
 * Populate @tlb from the CSR TLB-write state.  Uses the TLBR* CSRs
 * when executing in TLB-refill context (CSR_TLBRERA.IsTLBR), the
 * normal TLB CSRs otherwise.  FTLB entries always use the fixed FTLB
 * page mask; VTLB entries use the mask derived from the CSR page size.
 */
static void ls3a5k_fill_tlb_entry(CPULOONGARCHState *env,
                                  ls3a5k_tlb_t *tlb, int is_ftlb)
{
    uint64_t page_mask; /* 0000...00001111...1111 */
    uint32_t page_size;
    uint64_t entryhi;
    uint64_t lo0, lo1;

    if (env->CSR_TLBRERA & 0x1) {
        /* TLB-refill context: source the TLBR* CSRs. */
        page_size = env->CSR_TLBREHI & 0x3f;
        entryhi = env->CSR_TLBREHI;
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        page_size = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f;
        entryhi = env->CSR_TLBEHI;
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    if (page_size == 0) {
        printf("Warning: page_size is 0\n");
    }

    /* 15-12 11-8 7-4 3-0
     * 4KB: 0001 1111 1111 1111 // double 4KB mask [12:0]
     * 16KB: 0111 1111 1111 1111 // double 16KB mask [14:0]
     */
    if (is_ftlb) {
        page_mask = env->tlb->mmu.ls3a5k.ftlb_mask;
    } else {
        page_mask = ls3a5k_pagesize_to_mask(page_size);
    }

    /* VPN covers bits [47:13] minus the per-entry page-mask bits. */
    tlb->VPN = entryhi & 0xffffffffe000 & ~page_mask;

    tlb->ASID = env->CSR_ASID & 0x3ff; /* CSR_ASID[9:0] */
    tlb->EHINV = 0;
    /* Entry is global only when both halves have the G bit set. */
    tlb->G = (lo0 >> CSR_TLBLO0_GLOBAL_SHIFT) & /* CSR_TLBLO[6] */
             (lo1 >> CSR_TLBLO1_GLOBAL_SHIFT) & 1;

    tlb->PageMask = page_mask;
    tlb->PageSize = page_size;

    /* Even (lo0) half of the pair. */
    tlb->V0 = (lo0 >> CSR_TLBLO0_V_SHIFT) & 0x1; /* [0] */
    tlb->WE0 = (lo0 >> CSR_TLBLO0_WE_SHIFT) & 0x1; /* [1] */
    tlb->PLV0 = (lo0 >> CSR_TLBLO0_PLV_SHIFT) & 0x3; /* [3:2] */
    tlb->C0 = (lo0 >> CSR_TLBLO0_CCA_SHIFT) & 0x3; /* [5:4] */
    tlb->PPN0 = (lo0 & 0xfffffffff000 & ~(page_mask >> 1));
    tlb->RI0 = (lo0 >> CSR_TLBLO0_RI_SHIFT) & 0x1; /* [61] */
    tlb->XI0 = (lo0 >> CSR_TLBLO0_XI_SHIFT) & 0x1; /* [62] */
    tlb->RPLV0 = (lo0 >> CSR_TLBLO0_RPLV_SHIFT) & 0x1; /* [63] */

    /* Odd (lo1) half of the pair. */
    tlb->V1 = (lo1 >> CSR_TLBLO1_V_SHIFT) & 0x1; /* [0] */
    tlb->WE1 = (lo1 >> CSR_TLBLO1_WE_SHIFT) & 0x1; /* [1] */
    tlb->PLV1 = (lo1 >> CSR_TLBLO1_PLV_SHIFT) & 0x3; /* [3:2] */
    tlb->C1 = (lo1 >> CSR_TLBLO1_CCA_SHIFT) & 0x3; /* [5:4] */
    tlb->PPN1 = (lo1 & 0xfffffffff000 & ~(page_mask >> 1));
    tlb->RI1 = (lo1 >> CSR_TLBLO1_RI_SHIFT) & 0x1; /* [61] */
    tlb->XI1 = (lo1 >> CSR_TLBLO1_XI_SHIFT) & 0x1; /* [62] */
    tlb->RPLV1 = (lo1 >> CSR_TLBLO1_RPLV_SHIFT) & 0x1; /* [63] */
}
+
+static void ls3a5k_fill_tlb(CPULOONGARCHState *env, int idx, bool tlbwr)
+{
+ ls3a5k_tlb_t *tlb;
+
+ tlb = &env->tlb->mmu.ls3a5k.tlb[idx];
+ if (tlbwr) {
+ if ((env->CSR_TLBIDX >> CSR_TLBIDX_EHINV_SHIFT) & 0x1) {
+ tlb->EHINV = 1;
+ return;
+ }
+ }
+
+ if (idx < 2048) {
+ ls3a5k_fill_tlb_entry(env, tlb, 1);
+ } else {
+ ls3a5k_fill_tlb_entry(env, tlb, 0);
+ }
+}
+
+void ls3a5k_flush_vtlb(CPULOONGARCHState *env)
+{
+ uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
+ uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
+ int i;
+
+ ls3a5k_tlb_t *tlb;
+
+ for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+ tlb->EHINV = 1;
+ }
+
+ cpu_loongarch_tlb_flush(env);
+}
+
+void ls3a5k_flush_ftlb(CPULOONGARCHState *env)
+{
+ uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
+ int i;
+
+ ls3a5k_tlb_t *tlb;
+
+ for (i = 0; i < ftlb_size; ++i) {
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+ tlb->EHINV = 1;
+ }
+
+ cpu_loongarch_tlb_flush(env);
+}
+
/*
 * TLBCLR: invalidate non-global entries matching the current ASID.
 * If CSR_TLBIDX points into the FTLB, only that FTLB set (8 ways) is
 * scanned; if it points into the VTLB, all VTLB entries are scanned.
 */
void ls3a5k_helper_tlbclr(CPULOONGARCHState *env)
{
    int i;
    uint16_t asid;
    int vsize, fsize, index;
    int start = 0, end = -1;  /* empty range when index is out of bounds */

    asid = env->CSR_ASID & 0x3ff;
    vsize = env->tlb->mmu.ls3a5k.vtlb_size;
    fsize = env->tlb->mmu.ls3a5k.ftlb_size;
    index = env->CSR_TLBIDX & CSR_TLBIDX_IDX;

    if (index < fsize) {
        /* FTLB. One line per operation */
        int set = index % 256;
        start = set * 8;
        end = start + 7;
    } else if (index < (fsize + vsize)) {
        /* VTLB. All entries */
        start = fsize;
        end = fsize + vsize - 1;
    } else {
        /* Ignore */
    }

    for (i = start; i <= end; i++) {
        ls3a5k_tlb_t *tlb;
        tlb = &env->tlb->mmu.ls3a5k.tlb[i];
        if (!tlb->G && tlb->ASID == asid) {
            tlb->EHINV = 1;
        }
    }

    cpu_loongarch_tlb_flush(env);
}
+
/*
 * TLBFLUSH: unconditionally invalidate the range selected by
 * CSR_TLBIDX — one FTLB set (8 ways) or the whole VTLB.  Same range
 * selection as ls3a5k_helper_tlbclr, but without the ASID filter.
 */
void ls3a5k_helper_tlbflush(CPULOONGARCHState *env)
{
    int i;
    int vsize, fsize, index;
    int start = 0, end = -1;  /* empty range when index is out of bounds */

    vsize = env->tlb->mmu.ls3a5k.vtlb_size;
    fsize = env->tlb->mmu.ls3a5k.ftlb_size;
    index = env->CSR_TLBIDX & CSR_TLBIDX_IDX;

    if (index < fsize) {
        /* FTLB. One line per operation */
        int set = index % 256;
        start = set * 8;
        end = start + 7;
    } else if (index < (fsize + vsize)) {
        /* VTLB. All entries */
        start = fsize;
        end = fsize + vsize - 1;
    } else {
        /* Ignore */
    }

    for (i = start; i <= end; i++) {
        env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
    }

    cpu_loongarch_tlb_flush(env);
}
+
+void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr,
+ target_ulong info, int op)
+{
+ uint32_t asid = info & 0x3ff;
+ int i;
+
+ switch (op) {
+ case 0:
+ case 1:
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
+ }
+ break;
+ case 4: {
+ int i;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+
+ if (!tlb->G && tlb->ASID == asid) {
+ tlb->EHINV = 1;
+ }
+ }
+ break;
+ }
+
+ case 5: {
+ int i;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+ uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask;
+
+ if (!tlb->G && tlb->ASID == asid && vpn == tlb->VPN) {
+ tlb->EHINV = 1;
+ }
+ }
+ break;
+ }
+ case 6: {
+ int i;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+ uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask;
+
+ if ((tlb->G || tlb->ASID == asid) && vpn == tlb->VPN) {
+ tlb->EHINV = 1;
+ }
+ }
+ break;
+ }
+ default:
+ helper_raise_exception(env, EXCP_RI);
+ }
+
+ cpu_loongarch_tlb_flush(env);
+}
+
/*
 * Flush QEMU's softmmu mappings for both halves of a TLB entry, one
 * target page at a time.  Each entry maps an even/odd pair of pages.
 */
static void ls3a5k_invalidate_tlb_entry(CPULOONGARCHState *env,
                                        ls3a5k_tlb_t *tlb)
{
    LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong addr;
    target_ulong end;
    target_ulong mask;

    mask = tlb->PageMask; /* 000...000111...111 */

    if (tlb->V0) {
        /* Even half: [VPN, VPN | mask>>1]. */
        addr = tlb->VPN & ~mask; /* xxx...xxx[0]000..0000 */
        end = addr | (mask >> 1); /* xxx...xxx[0]111..1111 */
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }

    if (tlb->V1) {
        /* Odd half starts just past the even half. */
        /* xxx...xxx[1]000..0000 */
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
        end = addr | mask; /* xxx...xxx[1]111..1111 */
        /* NOTE(review): "addr - 1 < end" differs from the V0 loop's
         * "addr < end" — presumably guards against addr wrapping at
         * the top of the address space; confirm intent. */
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
+
+void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx)
+{
+ ls3a5k_tlb_t *tlb;
+ int asid = env->CSR_ASID & 0x3ff;
+ tlb = &env->tlb->mmu.ls3a5k.tlb[idx];
+ if (tlb->G == 0 && tlb->ASID != asid) {
+ return;
+ }
+ ls3a5k_invalidate_tlb_entry(env, tlb);
+}
+
/*
 * TLBWR: write the CSR state into the entry selected by CSR_TLBIDX.
 * FTLB indices are stored set-major in CSR_TLBIDX but way-major in
 * the tlb[] array, so convert before touching the array.
 */
void ls3a5k_helper_tlbwr(CPULOONGARCHState *env)
{
    int idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX; /* [11:0] */

    /* Convert idx if in FTLB */
    if (idx < env->tlb->mmu.ls3a5k.ftlb_size) {
        /*
         * 0 3 6      0 1 2
         * 1 4 7  =>  3 4 5
         * 2 5 8      6 7 8
         */
        int set = idx % 256;
        int way = idx / 256;
        idx = set * 8 + way;
    }
    /* Drop stale softmmu mappings before overwriting the entry. */
    ls3a5k_invalidate_tlb(env, idx);
    ls3a5k_fill_tlb(env, idx, true);
}
+
/*
 * TLBFILL: write the CSR state into a hardware-chosen entry.  Pages
 * of the fixed FTLB size go to the FTLB (set chosen by VA, way chosen
 * randomly); all other sizes go to a random non-wired VTLB slot.
 */
void ls3a5k_helper_tlbfill(CPULOONGARCHState *env)
{
    uint64_t mask;
    uint64_t address;
    int idx;
    int set, ftlb_idx;

    uint64_t entryhi;
    uint32_t pagesize;

    if (env->CSR_TLBRERA & 0x1) {
        /* TLB-refill context: source the TLBR* CSRs. */
        entryhi = env->CSR_TLBREHI & ~0x3f;
        pagesize = env->CSR_TLBREHI & 0x3f;
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f;
    }

    uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
    uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;

    mask = ls3a5k_pagesize_to_mask(pagesize);

    if (mask == env->tlb->mmu.ls3a5k.ftlb_mask &&
        env->tlb->mmu.ls3a5k.ftlb_size > 0) {
        /* only write into FTLB */
        address = entryhi & 0xffffffffe000; /* [47:13] */

        /* choose one way randomly */
        set = cpu_loongarch_get_random_ls3a5k_tlb(0, 7);

        /* index in one set */
        ftlb_idx = (address >> 15) & 0xff; /* [0,255] */

        /* final idx */
        idx = ftlb_idx * 8 + set; /* max is 7 + 8 * 255 = 2047 */
    } else {
        /* only write into VTLB, skipping the wired entries */
        int wired_nr = env->CSR_TLBWIRED & 0x3f;
        idx = cpu_loongarch_get_random_ls3a5k_tlb(
            ftlb_size + wired_nr, ftlb_size + vtlb_size - 1);
    }

    ls3a5k_invalidate_tlb(env, idx);
    ls3a5k_fill_tlb(env, idx, false);
}
+
/*
 * TLBSRCH: look up CSR_TLBEHI in the VTLB, then in the FTLB set
 * selected by the VA.  On a hit, CSR_TLBIDX receives the (set-major)
 * index and page size; on a miss, only its EHINV bit is set.
 */
void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env)
{
    uint64_t mask;
    uint64_t vpn;
    uint64_t tag;
    uint16_t asid;

    int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
    int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
    int i;
    int ftlb_idx; /* [0,255] 2^8 0xff */

    ls3a5k_tlb_t *tlb;

    asid = env->CSR_ASID & 0x3ff;

    /* search VTLB */
    for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
        tlb = &env->tlb->mmu.ls3a5k.tlb[i];
        mask = tlb->PageMask;

        vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask;
        tag = tlb->VPN & ~mask;

        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag && tlb->EHINV != 1)
        {
            env->CSR_TLBIDX = (i & 0xfff) |
                ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
            goto _MATCH_OUT_;
        }
    }

    if (ftlb_size == 0) {
        goto _NO_MATCH_OUT_;
    }

    /* search FTLB: the VA picks the set, scan its 8 ways */
    mask = env->tlb->mmu.ls3a5k.ftlb_mask;
    vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask;

    ftlb_idx = (env->CSR_TLBEHI & 0xffffffffe000) >> 15; /* 16 KB */
    ftlb_idx = ftlb_idx & 0xff; /* [0,255] */

    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i];
        tag = tlb->VPN & ~mask;

        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag && tlb->EHINV != 1)
        {
            /* Report the set-major index (way * 256 + set). */
            env->CSR_TLBIDX = ((i * 256 + ftlb_idx) & 0xfff) |
                ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
            goto _MATCH_OUT_;
        }
    }

_NO_MATCH_OUT_:
    env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT;
_MATCH_OUT_:
    return;
}
+
/*
 * TLBRD: read the entry selected by CSR_TLBIDX back into the TLB CSRs
 * (TLBEHI/TLBELO0/TLBELO1/ASID).  Invalid entries set TLBIDX.EHINV
 * and clear the data CSRs.  Reading an entry with a different ASID
 * flushes cached translations, since CSR_ASID is overwritten below.
 */
void ls3a5k_helper_tlbrd(CPULOONGARCHState *env)
{
    ls3a5k_tlb_t *tlb;
    int idx;
    uint16_t asid;

    idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX;
    /* FTLB indices are set-major in CSR_TLBIDX; convert to way-major. */
    if (idx < env->tlb->mmu.ls3a5k.ftlb_size) {
        int set = idx % 256;
        int way = idx / 256;
        idx = set * 8 + way;
    }

    tlb = &env->tlb->mmu.ls3a5k.tlb[idx];

    asid = env->CSR_ASID & 0x3ff;

    if (asid != tlb->ASID) {
        cpu_loongarch_tlb_flush(env);
    }

    if (tlb->EHINV) {
        /* invalid TLB entry */
        env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT;
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
    } else {
        /* valid TLB entry: reassemble the CSR images from the fields */
        env->CSR_TLBIDX = (env->CSR_TLBIDX & 0xfff) |
                          ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
        env->CSR_TLBEHI = tlb->VPN;
        env->CSR_TLBELO0 = (tlb->V0 << CSR_TLBLO0_V_SHIFT) |
                           (tlb->WE0 << CSR_TLBLO0_WE_SHIFT) |
                           (tlb->PLV0 << CSR_TLBLO0_PLV_SHIFT) |
                           (tlb->C0 << CSR_TLBLO0_CCA_SHIFT) |
                           (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) |
                           (tlb->PPN0 & 0xfffffffff000) |
                           ((uint64_t)tlb->RI0 << CSR_TLBLO0_RI_SHIFT) |
                           ((uint64_t)tlb->XI0 << CSR_TLBLO0_XI_SHIFT) |
                           ((uint64_t)tlb->RPLV0 << CSR_TLBLO0_RPLV_SHIFT);
        env->CSR_TLBELO1 = (tlb->V1 << CSR_TLBLO1_V_SHIFT) |
                           (tlb->WE1 << CSR_TLBLO1_WE_SHIFT) |
                           (tlb->PLV1 << CSR_TLBLO1_PLV_SHIFT) |
                           (tlb->C1 << CSR_TLBLO1_CCA_SHIFT) |
                           (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) |
                           (tlb->PPN1 & 0xfffffffff000) |
                           ((uint64_t)tlb->RI1 << CSR_TLBLO1_RI_SHIFT) |
                           ((uint64_t)tlb->XI1 << CSR_TLBLO1_XI_SHIFT) |
                           ((uint64_t)tlb->RPLV1 << CSR_TLBLO1_RPLV_SHIFT);
        env->CSR_ASID = (tlb->ASID << CSR_ASID_ASID_SHIFT) |
                        (env->CSR_ASID & 0xff0000);
    }
}
+
/* TLBWR dispatch through the per-model TLB ops table. */
void helper_tlbwr(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbwr(env);
}
+
/* TLBFILL dispatch through the per-model TLB ops table. */
void helper_tlbfill(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbfill(env);
}
+
/* TLBSRCH dispatch through the per-model TLB ops table. */
void helper_tlbsrch(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbsrch(env);
}
+
/* TLBRD dispatch through the per-model TLB ops table. */
void helper_tlbrd(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbrd(env);
}
+
/* TLBCLR dispatch through the per-model TLB ops table. */
void helper_tlbclr(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbclr(env);
}
+
/* TLBFLUSH dispatch through the per-model TLB ops table. */
void helper_tlbflush(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbflush(env);
}
+
/* INVTLB dispatch through the per-model TLB ops table. */
void helper_invtlb(CPULOONGARCHState *env, target_ulong addr, target_ulong info,
                   target_ulong op)
{
    env->tlb->helper_invtlb(env, addr, info, op);
}
+
/*
 * Initialize the LS3A5000 MMU model: 64-entry fully-associative VTLB
 * plus a 2048-entry (256 sets x 8 ways) FTLB with a fixed 16KB page
 * size.  All entries start invalid, and the per-model helper table is
 * wired up.
 */
static void ls3a5k_mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
{
    /* number of VTLB */
    env->tlb->nb_tlb = 64;
    env->tlb->mmu.ls3a5k.vtlb_size = 64;

    /* number of FTLB */
    env->tlb->nb_tlb += 2048;
    env->tlb->mmu.ls3a5k.ftlb_size = 2048;
    env->tlb->mmu.ls3a5k.ftlb_mask = (1 << 15) - 1; /* 16 KB */
    /*
     * page_size    |    ftlb_mask         | party field
     * ----------------------------------------------------------------
     * 4 KB = 12 | ( 1 << 13 ) - 1 = [12:0] | [12]
     * 16 KB = 14 | ( 1 << 15 ) - 1 = [14:0] | [14]
     * 64 KB = 16 | ( 1 << 17 ) - 1 = [16:0] | [16]
     * 256 KB = 18 | ( 1 << 19 ) - 1 = [18:0] | [18]
     * 1 MB = 20 | ( 1 << 21 ) - 1 = [20:0] | [20]
     * 4 MB = 22 | ( 1 << 23 ) - 1 = [22:0] | [22]
     * 16 MB = 24 | ( 1 << 25 ) - 1 = [24:0] | [24]
     * 64 MB = 26 | ( 1 << 27 ) - 1 = [26:0] | [26]
     * 256 MB = 28 | ( 1 << 29 ) - 1 = [28:0] | [28]
     * 1 GB = 30 | ( 1 << 31 ) - 1 = [30:0] | [30]
     * ----------------------------------------------------------------
     * take party field index as @n. eg. For 16 KB, n = 14
     * ----------------------------------------------------------------
     * tlb->VPN = TLBEHI & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * tlb->PPN = TLBLO0 & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * tlb->PPN = TLBLO1 & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * ----------------------------------------------------------------
     * On mapping :
     * > vpn = address & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * > tag = tlb->VPN & ~mask = [47:n+1]
     * ----------------------------------------------------------------
     * physical address = [47:n+1] | [n:0]
     * physical address = tlb->PPN0 | (address & mask)
     * physical address = tlb->PPN1 | (address & mask)
     */

    /* Start with every entry marked invalid. */
    int i;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
    }

    /* TLB's helper functions */
    env->tlb->map_address = &ls3a5k_map_address;
    env->tlb->helper_tlbwr = ls3a5k_helper_tlbwr;
    env->tlb->helper_tlbfill = ls3a5k_helper_tlbfill;
    env->tlb->helper_tlbsrch = ls3a5k_helper_tlbsrch;
    env->tlb->helper_tlbrd = ls3a5k_helper_tlbrd;
    env->tlb->helper_tlbclr = ls3a5k_helper_tlbclr;
    env->tlb->helper_tlbflush = ls3a5k_helper_tlbflush;
    env->tlb->helper_invtlb = ls3a5k_helper_invtlb;
}
+
+void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
+{
+ env->tlb = g_malloc0(sizeof(CPULOONGARCHTLBContext));
+
+ switch (def->mmu_type) {
+ case MMU_TYPE_LS3A5K:
+ ls3a5k_mmu_init(env, def);
+ break;
+ default:
+ cpu_abort(CPU(loongarch_env_get_cpu(env)), "MMU type not supported\n");
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/loongarch64/trace-events b/target/loongarch64/trace-events
new file mode 100644
index 0000000000000000000000000000000000000000..e0bca4f82eaa02a45d680c070646db5c02e9c18e
--- /dev/null
+++ b/target/loongarch64/trace-events
@@ -0,0 +1,3 @@
+# See docs/devel/tracing.txt for syntax documentation.
+
+# target/loongarch64/translate.c
diff --git a/target/loongarch64/trans.inc.c b/target/loongarch64/trans.inc.c
new file mode 100644
index 0000000000000000000000000000000000000000..e50670be470ab61fd68013ccdac0f09924f1bd61
--- /dev/null
+++ b/target/loongarch64/trans.inc.c
@@ -0,0 +1,3472 @@
/* SYSCALL: raise the system-call exception and end the TB. */
static bool trans_syscall(DisasContext *ctx, arg_syscall *a)
{
    generate_exception_end(ctx, EXCP_SYSCALL);
    return true;
}
+
/* BREAK: raise the breakpoint exception and end the TB. */
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    generate_exception_end(ctx, EXCP_BREAK);
    return true;
}
+
static bool trans_dbcl(DisasContext *ctx, arg_dbcl *a)
{
    /*
     * The DBCL (debug break) instruction is not supported under TCG,
     * so treat it as a reserved instruction.
     */
    generate_exception_end(ctx, EXCP_RI);
    return true;
}
+
/* ADDI.W: 32-bit add-immediate, via gen_arith_imm. */
static bool trans_addi_w(DisasContext *ctx, arg_addi_w *a)
{
    gen_arith_imm(ctx, OPC_LARCH_ADDI_W, a->rd, a->rj, a->si12);
    return true;
}
+
/* ADDI.D: 64-bit add-immediate, via gen_arith_imm. */
static bool trans_addi_d(DisasContext *ctx, arg_addi_d *a)
{
    gen_arith_imm(ctx, OPC_LARCH_ADDI_D, a->rd, a->rj, a->si12);
    return true;
}
+
+static bool trans_slli_d(DisasContext *ctx, arg_slli_d *a)
+{
+ if (a->rd == 0) {
+ /* Nop */
+ return true;
+ }
+
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ tcg_gen_shli_tl(cpu_gpr[a->rd], t0, a->ui6);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
/* ANDI: bitwise AND with zero-extended immediate, via gen_logic_imm. */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    gen_logic_imm(ctx, OPC_LARCH_ANDI, a->rd, a->rj, a->ui12);
    return true;
}
+
/* SRLI.D: rd = rj >> ui6 (64-bit logical shift right by immediate).
 * NOTE(review): unlike trans_slli_d there is no rd == 0 early-out, so
 * this writes cpu_gpr[0] directly — confirm r0 is handled elsewhere. */
static bool trans_srli_d(DisasContext *ctx, arg_srli_d *a)
{
    TCGv t0 = tcg_temp_new();

    gen_load_gpr(t0, a->rj);
    tcg_gen_shri_tl(cpu_gpr[a->rd], t0, a->ui6);

    tcg_temp_free(t0);
    return true;
}
+
+static bool trans_slli_w(DisasContext *ctx, arg_slli_w *a)
+{
+ if (a->rd == 0) {
+ /* Nop */
+ return true;
+ }
+
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ tcg_gen_shli_tl(t0, t0, a->ui5);
+ tcg_gen_ext32s_tl(cpu_gpr[a->rd], t0);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
/* ADDU16I.D: rd = rj + (si16 << 16); plain move-immediate when rj is r0. */
static bool trans_addu16i_d(DisasContext *ctx, arg_addu16i_d *a)
{
    if (a->rj != 0) {
        tcg_gen_addi_tl(cpu_gpr[a->rd], cpu_gpr[a->rj], a->si16 << 16);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rd], a->si16 << 16);
    }
    return true;
}
+
/* LU12I.W: rd = si20 << 12. */
static bool trans_lu12i_w(DisasContext *ctx, arg_lu12i_w *a)
{
    tcg_gen_movi_tl(cpu_gpr[a->rd], a->si20 << 12);
    return true;
}
+
/* LU32I.D: replace rd's upper 32 bits with si20 (concat keeps rd[31:0]
 * as the low half and si20 as the high half). */
static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a)
{
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_movi_tl(t0, a->si20);
    tcg_gen_concat_tl_i64(t1, cpu_gpr[a->rd], t0);
    gen_store_gpr(t1, a->rd);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
+static bool trans_pcaddi(DisasContext *ctx, arg_pcaddi *a)
+{
+ target_ulong pc = ctx->base.pc_next;
+ target_ulong addr = pc + (a->si20 << 2);
+ tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
+ return true;
+}
+
/* PCALAU12I: rd = (pc + (si20 << 12)) with the low 12 bits cleared. */
static bool trans_pcalau12i(DisasContext *ctx, arg_pcalau12i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = (pc + (a->si20 << 12)) & ~0xfff;
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
+
/* PCADDU12I: rd = pc + (si20 << 12). */
static bool trans_pcaddu12i(DisasContext *ctx, arg_pcaddu12i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = pc + (a->si20 << 12);
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
+
/* PCADDU18I: rd = pc + (si20 << 18); widened before shifting so the
 * 18-bit shift cannot overflow a 32-bit int. */
static bool trans_pcaddu18i(DisasContext *ctx, arg_pcaddu18i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = pc + ((target_ulong)(a->si20) << 18);
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
+
/* SLTI: signed set-less-than immediate, via gen_slt_imm. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    gen_slt_imm(ctx, OPC_LARCH_SLTI, a->rd, a->rj, a->si12);
    return true;
}
+
/* SLTUI: unsigned set-less-than immediate, via gen_slt_imm. */
static bool trans_sltui(DisasContext *ctx, arg_sltui *a)
{
    gen_slt_imm(ctx, OPC_LARCH_SLTIU, a->rd, a->rj, a->si12);
    return true;
}
+
+static bool trans_lu52i_d(DisasContext *ctx, arg_lu52i_d *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t1, a->rj);
+
+ tcg_gen_movi_tl(t0, a->si12);
+ tcg_gen_shli_tl(t0, t0, 52);
+ tcg_gen_andi_tl(t1, t1, 0xfffffffffffffU);
+ tcg_gen_or_tl(cpu_gpr[a->rd], t0, t1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
/* ORI: bitwise OR with zero-extended immediate, via gen_logic_imm. */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    gen_logic_imm(ctx, OPC_LARCH_ORI, a->rd, a->rj, a->ui12);
    return true;
}
+
/* XORI: bitwise XOR with zero-extended immediate, via gen_logic_imm. */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    gen_logic_imm(ctx, OPC_LARCH_XORI, a->rd, a->rj, a->ui12);
    return true;
}
+
+static bool trans_bstrins_d(DisasContext *ctx, arg_bstrins_d *a)
+{
+ int lsb = a->lsbd;
+ int msb = a->msbd;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ if (lsb > msb) {
+ return false;
+ }
+
+ gen_load_gpr(t1, a->rj);
+ gen_load_gpr(t0, a->rd);
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
+ gen_store_gpr(t0, a->rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
+static bool trans_bstrpick_d(DisasContext *ctx, arg_bstrpick_d *a)
+{
+ int lsb = a->lsbd;
+ int msb = a->msbd;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ if (lsb > msb) {
+ return false;
+ }
+
+ gen_load_gpr(t1, a->rj);
+ gen_load_gpr(t0, a->rd);
+ tcg_gen_extract_tl(t0, t1, lsb, msb - lsb + 1);
+ gen_store_gpr(t0, a->rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
/* BSTRINS.W: 32-bit bit-string insert, via gen_bitops.
 * NOTE(review): no lsbw > msbw validity check here, unlike the _d
 * variant and bstrpick_w — confirm gen_bitops tolerates it. */
static bool trans_bstrins_w(DisasContext *ctx, arg_bstrins_w *a)
{
    gen_bitops(ctx, OPC_LARCH_TRINS_W, a->rd, a->rj, a->lsbw, a->msbw);
    return true;
}
+
/* BSTRPICK.W: 32-bit bit-string extract, via gen_bitops; rejects
 * lsbw > msbw as an invalid encoding. */
static bool trans_bstrpick_w(DisasContext *ctx, arg_bstrpick_w *a)
{
    if (a->lsbw > a->msbw) {
        return false;
    }
    gen_bitops(ctx, OPC_LARCH_TRPICK_W,
               a->rd, a->rj, a->lsbw, a->msbw - a->lsbw);
    return true;
}
+
/* LDPTR.W: word load at rj + (si14 << 2). */
static bool trans_ldptr_w(DisasContext *ctx, arg_ldptr_w *a)
{
    gen_ld(ctx, OPC_LARCH_LDPTR_W, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* STPTR.W: word store at rj + (si14 << 2). */
static bool trans_stptr_w(DisasContext *ctx, arg_stptr_w *a)
{
    gen_st(ctx, OPC_LARCH_STPTR_W, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* LDPTR.D: doubleword load at rj + (si14 << 2). */
static bool trans_ldptr_d(DisasContext *ctx, arg_ldptr_d *a)
{
    gen_ld(ctx, OPC_LARCH_LDPTR_D, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* STPTR.D: doubleword store at rj + (si14 << 2). */
static bool trans_stptr_d(DisasContext *ctx, arg_stptr_d *a)
{
    gen_st(ctx, OPC_LARCH_STPTR_D, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* LD.B: byte load at rj + si12, via gen_ld. */
static bool trans_ld_b(DisasContext *ctx, arg_ld_b *a)
{
    gen_ld(ctx, OPC_LARCH_LD_B, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.H: halfword load at rj + si12, via gen_ld. */
static bool trans_ld_h(DisasContext *ctx, arg_ld_h *a)
{
    gen_ld(ctx, OPC_LARCH_LD_H, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.W: word load at rj + si12, via gen_ld. */
static bool trans_ld_w(DisasContext *ctx, arg_ld_w *a)
{
    gen_ld(ctx, OPC_LARCH_LD_W, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.D: doubleword load at rj + si12, via gen_ld. */
static bool trans_ld_d(DisasContext *ctx, arg_ld_d *a)
{
    gen_ld(ctx, OPC_LARCH_LD_D, a->rd, a->rj, a->si12);
    return true;
}
+
/* ST.B: byte store at rj + si12, via gen_st. */
static bool trans_st_b(DisasContext *ctx, arg_st_b *a)
{
    gen_st(ctx, OPC_LARCH_ST_B, a->rd, a->rj, a->si12);
    return true;
}
+
/* ST.H: halfword store at rj + si12, via gen_st. */
static bool trans_st_h(DisasContext *ctx, arg_st_h *a)
{
    gen_st(ctx, OPC_LARCH_ST_H, a->rd, a->rj, a->si12);
    return true;
}
+
/* ST.W: word store at rj + si12, via gen_st. */
static bool trans_st_w(DisasContext *ctx, arg_st_w *a)
{
    gen_st(ctx, OPC_LARCH_ST_W, a->rd, a->rj, a->si12);
    return true;
}
+
/* ST.D: doubleword store at rj + si12, via gen_st. */
static bool trans_st_d(DisasContext *ctx, arg_st_d *a)
{
    gen_st(ctx, OPC_LARCH_ST_D, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.BU: unsigned byte load at rj + si12, via gen_ld. */
static bool trans_ld_bu(DisasContext *ctx, arg_ld_bu *a)
{
    gen_ld(ctx, OPC_LARCH_LD_BU, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.HU: unsigned halfword load at rj + si12, via gen_ld. */
static bool trans_ld_hu(DisasContext *ctx, arg_ld_hu *a)
{
    gen_ld(ctx, OPC_LARCH_LD_HU, a->rd, a->rj, a->si12);
    return true;
}
+
/* LD.WU: unsigned word load at rj + si12, via gen_ld. */
static bool trans_ld_wu(DisasContext *ctx, arg_ld_wu *a)
{
    gen_ld(ctx, OPC_LARCH_LD_WU, a->rd, a->rj, a->si12);
    return true;
}
+
/* PRELD: prefetch hint — architecturally allowed to do nothing. */
static bool trans_preld(DisasContext *ctx, arg_preld *a)
{
    /* Treat as NOP. */
    return true;
}
+
/* LL.W: load-linked word at rj + (si14 << 2). */
static bool trans_ll_w(DisasContext *ctx, arg_ll_w *a)
{
    gen_ld(ctx, OPC_LARCH_LL_W, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* SC.W: store-conditional word at rj + (si14 << 2). */
static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
    gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TESL, false);
    return true;
}
+
/* LL.D: load-linked doubleword at rj + (si14 << 2). */
static bool trans_ll_d(DisasContext *ctx, arg_ll_d *a)
{
    gen_ld(ctx, OPC_LARCH_LL_D, a->rd, a->rj, a->si14 << 2);
    return true;
}
+
/* SC.D: store-conditional doubleword at rj + (si14 << 2). */
static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
    gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TEQ, false);
    return true;
}
+
/* FLD.S: single-precision FP load at rj + si12. */
static bool trans_fld_s(DisasContext *ctx, arg_fld_s *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FLD_S, a->fd, a->rj, a->si12);
    return true;
}
+
/* FST.S: single-precision FP store at rj + si12. */
static bool trans_fst_s(DisasContext *ctx, arg_fst_s *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FST_S, a->fd, a->rj, a->si12);
    return true;
}
+
/* FLD.D: double-precision FP load at rj + si12. */
static bool trans_fld_d(DisasContext *ctx, arg_fld_d *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FLD_D, a->fd, a->rj, a->si12);
    return true;
}
+
/* FST.D: double-precision FP store at rj + si12. */
static bool trans_fst_d(DisasContext *ctx, arg_fst_d *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FST_D, a->fd, a->rj, a->si12);
    return true;
}
+
/* LDX.B: signed byte load at rj + rk. */
static bool trans_ldx_b(DisasContext *ctx, arg_ldx_b *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_SB);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* LDX.H: signed halfword load at rj + rk. */
static bool trans_ldx_h(DisasContext *ctx, arg_ldx_h *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESW | ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
+static bool trans_ldx_w(DisasContext *ctx, arg_ldx_w *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int mem_idx = ctx->mem_idx;
+
+ gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESL | ctx->default_tcg_memop_mask);
+ gen_store_gpr(t1, a->rd);
+ tcg_temp_free(t0);
+ return true;
+}
+
+static bool trans_ldx_d(DisasContext *ctx, arg_ldx_d *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int mem_idx = ctx->mem_idx;
+
+ gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask);
+ gen_store_gpr(t1, a->rd);
+ tcg_temp_free(t1);
+ return true;
+}
+
/* STX.B: byte store at rj + rk. */
static bool trans_stx_b(DisasContext *ctx, arg_stx_b *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* STX.H: halfword store at rj + rk. */
static bool trans_stx_h(DisasContext *ctx, arg_stx_h *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
                       ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* STX.W: word store at rj + rk. */
static bool trans_stx_w(DisasContext *ctx, arg_stx_w *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
                       ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* STX.D: doubleword store at rj + rk. */
static bool trans_stx_d(DisasContext *ctx, arg_stx_d *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
                       ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* LDX.BU: unsigned byte load at rj + rk. */
static bool trans_ldx_bu(DisasContext *ctx, arg_ldx_bu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* LDX.HU: unsigned halfword load at rj + rk. */
static bool trans_ldx_hu(DisasContext *ctx, arg_ldx_hu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUW |
                       ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
/* LDX.WU: unsigned word load at rj + rk. */
static bool trans_ldx_wu(DisasContext *ctx, arg_ldx_wu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUL |
                       ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
+
+/*
+ * Indexed FP loads/stores (FLDX/FSTX, single and double); all work is
+ * delegated to gen_flt3_ldst. Loads pass fd as the destination operand
+ * (2nd argument), stores pass it as the source operand (3rd argument).
+ */
+static bool trans_fldx_s(DisasContext *ctx, arg_fldx_s *a)
+{
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDX_S, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fldx_d(DisasContext *ctx, arg_fldx_d *a)
+{
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDX_D, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstx_s(DisasContext *ctx, arg_fstx_s *a)
+{
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTX_S, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstx_d(DisasContext *ctx, arg_fstx_d *a)
+{
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTX_D, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+/*
+ * 32-bit AM* atomic read-modify-write: GPR[rd] = sign-extended old value
+ * at address GPR[rj], memory updated with op(old, GPR[rk]).
+ * rd aliasing rj or rk (with rd != 0) is rejected and decode fails,
+ * so the illegal-instruction path is taken.
+ * NOTE(review): printf from the translator writes to QEMU's stdout;
+ * qemu_log_mask(LOG_GUEST_ERROR, ...) would be the conventional channel
+ * — confirm against the rest of this target.
+ */
+#define TRANS_AM_W(name, op) \
+static bool trans_ ## name(DisasContext *ctx, arg_ ## name * a) \
+{ \
+ if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) { \
+ printf("%s: warning, register equal\n", __func__); \
+ return false; \
+ } \
+ int mem_idx = ctx->mem_idx; \
+ TCGv addr = tcg_temp_new(); \
+ TCGv val = tcg_temp_new(); \
+ TCGv ret = tcg_temp_new(); \
+\
+ gen_load_gpr(addr, a->rj); \
+ gen_load_gpr(val, a->rk); \
+ tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx, MO_TESL | \
+ ctx->default_tcg_memop_mask); \
+ gen_store_gpr(ret, a->rd); \
+\
+ tcg_temp_free(addr); \
+ tcg_temp_free(val); \
+ tcg_temp_free(ret); \
+ return true; \
+}
+/*
+ * 64-bit variant of TRANS_AM_W: identical structure, MO_TEQ access.
+ * See the notes on TRANS_AM_W regarding the rd-aliasing reject and the
+ * printf diagnostic.
+ */
+#define TRANS_AM_D(name, op) \
+static bool trans_ ## name(DisasContext *ctx, arg_ ## name * a) \
+{ \
+ if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) { \
+ printf("%s: warning, register equal\n", __func__); \
+ return false; \
+ } \
+ int mem_idx = ctx->mem_idx; \
+ TCGv addr = tcg_temp_new(); \
+ TCGv val = tcg_temp_new(); \
+ TCGv ret = tcg_temp_new(); \
+\
+ gen_load_gpr(addr, a->rj); \
+ gen_load_gpr(val, a->rk); \
+ tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx, MO_TEQ | \
+ ctx->default_tcg_memop_mask); \
+ gen_store_gpr(ret, a->rd); \
+\
+ tcg_temp_free(addr); \
+ tcg_temp_free(val); \
+ tcg_temp_free(ret); \
+ return true; \
+}
+#define TRANS_AM(name, op) \
+ TRANS_AM_W(name##_w, op) \
+ TRANS_AM_D(name##_d, op)
+TRANS_AM(amswap, xchg) /* trans_amswap_w, trans_amswap_d */
+TRANS_AM(amadd, fetch_add) /* trans_amadd_w, trans_amadd_d */
+TRANS_AM(amand, fetch_and) /* trans_amand_w, trans_amand_d */
+TRANS_AM(amor, fetch_or) /* trans_amor_w, trans_amor_d */
+TRANS_AM(amxor, fetch_xor) /* trans_amxor_w, trans_amxor_d */
+TRANS_AM(ammax, fetch_smax) /* trans_ammax_w, trans_ammax_d */
+TRANS_AM(ammin, fetch_smin) /* trans_ammin_w, trans_ammin_d */
+TRANS_AM_W(ammax_wu, fetch_umax) /* trans_ammax_wu */
+TRANS_AM_D(ammax_du, fetch_umax) /* trans_ammax_du */
+TRANS_AM_W(ammin_wu, fetch_umin) /* trans_ammin_wu */
+TRANS_AM_D(ammin_du, fetch_umin) /* trans_ammin_du */
+#undef TRANS_AM
+#undef TRANS_AM_W
+#undef TRANS_AM_D
+
+/*
+ * AM*_DB.W: as TRANS_AM_W, but with a full memory barrier emitted ahead
+ * of the atomic access (the "_db" barrier-hint forms).
+ * NOTE(review): only a leading barrier is emitted; presumably the TCG
+ * atomic op itself provides the trailing ordering — confirm against the
+ * LoongArch am*_db acquire/release semantics.
+ */
+#define TRANS_AM_DB_W(name, op) \
+static bool trans_ ## name(DisasContext *ctx, arg_ ## name * a) \
+{ \
+ if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) { \
+ printf("%s: warning, register equal\n", __func__); \
+ return false; \
+ } \
+ int mem_idx = ctx->mem_idx; \
+ TCGv addr = tcg_temp_new(); \
+ TCGv val = tcg_temp_new(); \
+ TCGv ret = tcg_temp_new(); \
+\
+ gen_sync(0x10); /* TCG_MO_ALL */ \
+ gen_load_gpr(addr, a->rj); \
+ gen_load_gpr(val, a->rk); \
+ tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx, MO_TESL | \
+ ctx->default_tcg_memop_mask); \
+ gen_store_gpr(ret, a->rd); \
+\
+ tcg_temp_free(addr); \
+ tcg_temp_free(val); \
+ tcg_temp_free(ret); \
+ return true; \
+}
+/* 64-bit variant of TRANS_AM_DB_W (MO_TEQ access); same barrier note. */
+#define TRANS_AM_DB_D(name, op) \
+static bool trans_ ## name(DisasContext *ctx, arg_ ## name * a) \
+{ \
+ if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) { \
+ printf("%s: warning, register equal\n", __func__); \
+ return false; \
+ } \
+ int mem_idx = ctx->mem_idx; \
+ TCGv addr = tcg_temp_new(); \
+ TCGv val = tcg_temp_new(); \
+ TCGv ret = tcg_temp_new(); \
+\
+ gen_sync(0x10); /* TCG_MO_ALL */ \
+ gen_load_gpr(addr, a->rj); \
+ gen_load_gpr(val, a->rk); \
+ tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx, MO_TEQ | \
+ ctx->default_tcg_memop_mask); \
+ gen_store_gpr(ret, a->rd); \
+\
+ tcg_temp_free(addr); \
+ tcg_temp_free(val); \
+ tcg_temp_free(ret); \
+ return true; \
+}
+#define TRANS_AM_DB(name, op) \
+ TRANS_AM_DB_W(name##_db_w, op) \
+ TRANS_AM_DB_D(name##_db_d, op)
+TRANS_AM_DB(amswap, xchg) /* trans_amswap_db_w, trans_amswap_db_d */
+TRANS_AM_DB(amadd, fetch_add) /* trans_amadd_db_w, trans_amadd_db_d */
+TRANS_AM_DB(amand, fetch_and) /* trans_amand_db_w, trans_amand_db_d */
+TRANS_AM_DB(amor, fetch_or) /* trans_amor_db_w, trans_amor_db_d */
+TRANS_AM_DB(amxor, fetch_xor) /* trans_amxor_db_w, trans_amxor_db_d */
+TRANS_AM_DB(ammax, fetch_smax) /* trans_ammax_db_w, trans_ammax_db_d */
+TRANS_AM_DB(ammin, fetch_smin) /* trans_ammin_db_w, trans_ammin_db_d */
+TRANS_AM_DB_W(ammax_db_wu, fetch_umax) /* trans_ammax_db_wu */
+TRANS_AM_DB_D(ammax_db_du, fetch_umax) /* trans_ammax_db_du */
+TRANS_AM_DB_W(ammin_db_wu, fetch_umin) /* trans_ammin_db_wu */
+TRANS_AM_DB_D(ammin_db_du, fetch_umin) /* trans_ammin_db_du */
+#undef TRANS_AM_DB
+#undef TRANS_AM_DB_W
+#undef TRANS_AM_DB_D
+
+/* DBAR: data barrier; the hint field is forwarded to gen_sync. */
+static bool trans_dbar(DisasContext *ctx, arg_dbar * a)
+{
+ gen_sync(a->whint);
+ return true;
+}
+
+/* IBAR: instruction barrier. */
+static bool trans_ibar(DisasContext *ctx, arg_ibar *a)
+{
+ /*
+ * IBAR is a no-op in QEMU (translated code is always coherent),
+ * however we need to end the translation block so that subsequent
+ * instructions are re-fetched.
+ */
+ ctx->base.is_jmp = DISAS_STOP;
+ return true;
+}
+
+/*
+ * Bound-check helpers for the *GT/*LE guarded memory instructions:
+ * raise an exception via the asrtgt_d/asrtle_d helpers unless
+ * GPR[rj] > GPR[rk] (ASRTGT) resp. GPR[rj] <= GPR[rk] (ASRTLE).
+ * Both expand in a trans_* body where 'a' is in scope.
+ */
+#define ASRTGT \
+do { \
+ TCGv t1 = tcg_temp_new(); \
+ TCGv t2 = tcg_temp_new(); \
+ gen_load_gpr(t1, a->rj); \
+ gen_load_gpr(t2, a->rk); \
+ gen_helper_asrtgt_d(cpu_env, t1, t2); \
+ tcg_temp_free(t1); \
+ tcg_temp_free(t2); \
+} while (0)
+
+#define ASRTLE \
+do {\
+ TCGv t1 = tcg_temp_new(); \
+ TCGv t2 = tcg_temp_new(); \
+ gen_load_gpr(t1, a->rj); \
+ gen_load_gpr(t2, a->rk); \
+ gen_helper_asrtle_d(cpu_env, t1, t2); \
+ tcg_temp_free(t1); \
+ tcg_temp_free(t2); \
+} while (0)
+
+/*
+ * Guarded FP loads/stores: run the ASRTGT/ASRTLE bound check first
+ * (which may raise), then emit the access like the plain FLDX/FSTX.
+ */
+static bool trans_fldgt_s(DisasContext *ctx, arg_fldgt_s *a)
+{
+ ASRTGT;
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_S, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fldgt_d(DisasContext *ctx, arg_fldgt_d *a)
+{
+ ASRTGT;
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_D, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fldle_s(DisasContext *ctx, arg_fldle_s *a)
+{
+ ASRTLE;
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_S, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fldle_d(DisasContext *ctx, arg_fldle_d *a)
+{
+ ASRTLE;
+ gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_D, a->fd, 0, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstgt_s(DisasContext *ctx, arg_fstgt_s *a)
+{
+ ASRTGT;
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_S, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstgt_d(DisasContext *ctx, arg_fstgt_d *a)
+{
+ ASRTGT;
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_D, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstle_s(DisasContext *ctx, arg_fstle_s *a)
+{
+ ASRTLE;
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_S, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_fstle_d(DisasContext *ctx, arg_fstle_d *a)
+{
+ ASRTLE;
+ gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_D, 0, a->fd, a->rj, a->rk);
+ return true;
+}
+
+/*
+ * Build an arg_<name> named 'arg' from the current trans_* argument 'a'
+ * so a guarded instruction can reuse the matching trans_ldx_*/trans_stx_*.
+ * The rd/rj/rk fields are common to all these decodetree arg structs.
+ */
+#define DECL_ARG(name) \
+ arg_ ## name arg = { \
+ .rd = a->rd, \
+ .rj = a->rj, \
+ .rk = a->rk, \
+ };
+
+/*
+ * Guarded integer loads/stores (LDGT/LDLE/STGT/STLE, b/h/w/d): perform
+ * the ASRTGT/ASRTLE bound check, then delegate to the corresponding
+ * plain indexed translator through a rebuilt arg struct.
+ */
+static bool trans_ldgt_b(DisasContext *ctx, arg_ldgt_b *a)
+{
+ ASRTGT;
+ DECL_ARG(ldx_b)
+ trans_ldx_b(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldgt_h(DisasContext *ctx, arg_ldgt_h *a)
+{
+ ASRTGT;
+ DECL_ARG(ldx_h)
+ trans_ldx_h(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldgt_w(DisasContext *ctx, arg_ldgt_w *a)
+{
+ ASRTGT;
+ DECL_ARG(ldx_w)
+ trans_ldx_w(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldgt_d(DisasContext *ctx, arg_ldgt_d *a)
+{
+ ASRTGT;
+ DECL_ARG(ldx_d)
+ trans_ldx_d(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldle_b(DisasContext *ctx, arg_ldle_b *a)
+{
+ ASRTLE;
+ DECL_ARG(ldx_b)
+ trans_ldx_b(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldle_h(DisasContext *ctx, arg_ldle_h *a)
+{
+ ASRTLE;
+ DECL_ARG(ldx_h)
+ trans_ldx_h(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldle_w(DisasContext *ctx, arg_ldle_w *a)
+{
+ ASRTLE;
+ DECL_ARG(ldx_w)
+ trans_ldx_w(ctx, &arg);
+ return true;
+}
+
+static bool trans_ldle_d(DisasContext *ctx, arg_ldle_d *a)
+{
+ ASRTLE;
+ DECL_ARG(ldx_d)
+ trans_ldx_d(ctx, &arg);
+ return true;
+}
+
+static bool trans_stgt_b(DisasContext *ctx, arg_stgt_b *a)
+{
+ ASRTGT;
+ DECL_ARG(stx_b)
+ trans_stx_b(ctx, &arg);
+ return true;
+}
+
+static bool trans_stgt_h(DisasContext *ctx, arg_stgt_h *a)
+{
+ ASRTGT;
+ DECL_ARG(stx_h)
+ trans_stx_h(ctx, &arg);
+ return true;
+}
+
+static bool trans_stgt_w(DisasContext *ctx, arg_stgt_w *a)
+{
+ ASRTGT;
+ DECL_ARG(stx_w)
+ trans_stx_w(ctx, &arg);
+ return true;
+}
+
+static bool trans_stgt_d(DisasContext *ctx, arg_stgt_d *a)
+{
+ ASRTGT;
+ DECL_ARG(stx_d)
+ trans_stx_d(ctx, &arg);
+ return true;
+}
+
+static bool trans_stle_b(DisasContext *ctx, arg_stle_b *a)
+{
+ ASRTLE;
+ DECL_ARG(stx_b)
+ trans_stx_b(ctx, &arg);
+ return true;
+}
+
+static bool trans_stle_h(DisasContext *ctx, arg_stle_h *a)
+{
+ ASRTLE;
+ DECL_ARG(stx_h)
+ trans_stx_h(ctx, &arg);
+ return true;
+}
+
+static bool trans_stle_w(DisasContext *ctx, arg_stle_w *a)
+{
+ ASRTLE;
+ DECL_ARG(stx_w)
+ trans_stx_w(ctx, &arg);
+ return true;
+}
+
+static bool trans_stle_d(DisasContext *ctx, arg_stle_d *a)
+{
+ ASRTLE;
+ DECL_ARG(stx_d)
+ trans_stx_d(ctx, &arg);
+ return true;
+}
+
+#undef ASRTGT
+#undef ASRTLE
+#undef DECL_ARG
+
+/*
+ * BEQZ/BNEZ: compare GPR[rj] against zero and branch by the
+ * sign-extended 21-bit offset scaled by 4; gen_compute_branch does the
+ * comparison and branch bookkeeping.
+ */
+static bool trans_beqz(DisasContext *ctx, arg_beqz *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BEQZ, 4, a->rj, 0, a->offs21 << 2);
+ return true;
+}
+
+static bool trans_bnez(DisasContext *ctx, arg_bnez *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BNEZ, 4, a->rj, 0, a->offs21 << 2);
+ return true;
+}
+
+/*
+ * BCEQZ/BCNEZ: branch on FP condition flag cj. The flag is read into a
+ * GPR-sized temp via the movcf2reg helper, the branch condition is
+ * latched in 'bcond', and LARCH_HFLAG_BC tells the TB epilogue to emit
+ * the conditional branch to btarget.
+ */
+static bool trans_bceqz(DisasContext *ctx, arg_bceqz *a)
+{
+ TCGv_i32 cj = tcg_const_i32(a->cj);
+ TCGv v0 = tcg_temp_new();
+ TCGv v1 = tcg_const_i64(0);
+
+ gen_helper_movcf2reg(v0, cpu_env, cj);
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, v0, v1);
+ ctx->hflags |= LARCH_HFLAG_BC;
+ ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);
+
+ tcg_temp_free_i32(cj);
+ tcg_temp_free(v0);
+ tcg_temp_free(v1);
+ return true;
+}
+
+static bool trans_bcnez(DisasContext *ctx, arg_bcnez *a)
+{
+ TCGv_i32 cj = tcg_const_i32(a->cj);
+ TCGv v0 = tcg_temp_new();
+ TCGv v1 = tcg_const_i64(0);
+
+ gen_helper_movcf2reg(v0, cpu_env, cj);
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, v0, v1);
+ ctx->hflags |= LARCH_HFLAG_BC;
+ ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);
+
+ tcg_temp_free_i32(cj);
+ tcg_temp_free(v0);
+ tcg_temp_free(v1);
+ return true;
+}
+
+/*
+ * Unconditional and compare-and-branch instructions. Offsets are
+ * sign-extended immediates scaled by 4 (instructions are 4 bytes).
+ */
+static bool trans_b(DisasContext *ctx, arg_b *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_B, 4, 0, 0, a->offs << 2);
+ return true;
+}
+
+/* BL: branch and link; the return address goes to r1 (ra). */
+static bool trans_bl(DisasContext *ctx, arg_bl *a)
+{
+ ctx->btarget = ctx->base.pc_next + (a->offs << 2);
+ tcg_gen_movi_tl(cpu_gpr[1], ctx->base.pc_next + 4);
+ ctx->hflags |= LARCH_HFLAG_B;
+ gen_branch(ctx, 4);
+ return true;
+}
+
+static bool trans_blt(DisasContext *ctx, arg_blt *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BLT, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+static bool trans_bge(DisasContext *ctx, arg_bge *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BGE, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BLTU, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BGEU, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+static bool trans_beq(DisasContext *ctx, arg_beq *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BEQ, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+static bool trans_bne(DisasContext *ctx, arg_bne *a)
+{
+ gen_compute_branch(ctx, OPC_LARCH_BNE, 4, a->rj, a->rd, a->offs16 << 2);
+ return true;
+}
+
+/*
+ * JIRL: indirect jump to GPR[rj] + (offs16 << 2), optionally linking
+ * the return address into GPR[rd] (skipped for r0).
+ */
+static bool trans_jirl(DisasContext *ctx, arg_jirl *a)
+{
+ gen_base_offset_addr(ctx, btarget, a->rj, a->offs16 << 2);
+ if (a->rd != 0) {
+ tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->base.pc_next + 4);
+ }
+ ctx->hflags |= LARCH_HFLAG_BR;
+ gen_branch(ctx, 4);
+
+ return true;
+}
+
+/*
+ * Fused multiply-add family (FMADD/FMSUB/FNMADD/FNMSUB, s/d):
+ * fd = float_<op>(fj, fk, fa) via the corresponding helper.
+ * 'bits' selects the 32- or 64-bit FPR access width.
+ * The original expansion called check_cp1_enabled() twice; once, before
+ * any temps are allocated, is sufficient.
+ */
+#define TRANS_F4FR(name, fmt, op, bits) \
+static bool trans_ ## name ## _ ## fmt(DisasContext *ctx, \
+ arg_##name##_##fmt * a) \
+{ \
+ check_cp1_enabled(ctx); \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp2 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp3 = tcg_temp_new_i ## bits(); \
+ gen_load_fpr ## bits(ctx, fp0, a->fj); \
+ gen_load_fpr ## bits(ctx, fp1, a->fk); \
+ gen_load_fpr ## bits(ctx, fp2, a->fa); \
+ gen_helper_float_ ## op ## _ ## fmt(fp3, \
+ cpu_env, fp0, fp1, fp2); \
+ gen_store_fpr ## bits(ctx, fp3, a->fd); \
+ tcg_temp_free_i ## bits(fp3); \
+ tcg_temp_free_i ## bits(fp2); \
+ tcg_temp_free_i ## bits(fp1); \
+ tcg_temp_free_i ## bits(fp0); \
+ return true; \
+}
+
+TRANS_F4FR(fmadd , s, maddf , 32) /* trans_fmadd_s */
+TRANS_F4FR(fmadd , d, maddf , 64) /* trans_fmadd_d */
+TRANS_F4FR(fmsub , s, msubf , 32) /* trans_fmsub_s */
+TRANS_F4FR(fmsub , d, msubf , 64) /* trans_fmsub_d */
+TRANS_F4FR(fnmadd, s, nmaddf, 32) /* trans_fnmadd_s */
+TRANS_F4FR(fnmadd, d, nmaddf, 64) /* trans_fnmadd_d */
+TRANS_F4FR(fnmsub, s, nmsubf, 32) /* trans_fnmsub_s */
+TRANS_F4FR(fnmsub, d, nmsubf, 64) /* trans_fnmsub_d */
+#undef TRANS_F4FR
+
+/*
+ * Three-operand FP arithmetic (add/sub/mul/div/max/min/maxa/mina, s/d):
+ * all delegate to gen_farith. Note the operand order: fk is passed
+ * before fj, matching gen_farith's expected argument layout.
+ */
+static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s * a)
+{
+ gen_farith(ctx, OPC_LARCH_FADD_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FADD_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FSUB_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FSUB_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMUL_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMUL_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FDIV_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FDIV_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMAX_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMAX_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMIN_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMIN_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmaxa_s(DisasContext *ctx, arg_fmaxa_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMAXA_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmaxa_d(DisasContext *ctx, arg_fmaxa_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMAXA_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmina_s(DisasContext *ctx, arg_fmina_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMINA_S, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmina_d(DisasContext *ctx, arg_fmina_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMINA_D, a->fk, a->fj, a->fd, 0);
+ return true;
+}
+
+/*
+ * FSCALEB: scale fj by 2^fk using the float_exp2 helper
+ * (result written back to fd).
+ */
+static bool trans_fscaleb_s(DisasContext *ctx, arg_fscaleb_s *a)
+{
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr32(ctx, fp0, a->fj);
+ gen_load_fpr32(ctx, fp1, a->fk);
+ gen_helper_float_exp2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, a->fd);
+ tcg_temp_free_i32(fp0);
+ return true;
+}
+
+static bool trans_fscaleb_d(DisasContext *ctx, arg_fscaleb_d *a)
+{
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, fp0, a->fj);
+ gen_load_fpr64(ctx, fp1, a->fk);
+ gen_helper_float_exp2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, a->fd);
+ tcg_temp_free_i64(fp0);
+ return true;
+}
+
+/*
+ * FCOPYSIGN: fd = magnitude of fj with the sign of fk. The deposit
+ * keeps fp1's (fk's) top/sign bit and overwrites the lower bits
+ * (exponent+mantissa) with fp0's (fj's).
+ */
+static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a)
+{
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr32(ctx, fp0, a->fj);
+ gen_load_fpr32(ctx, fp1, a->fk);
+ tcg_gen_deposit_i32(fp2, fp1, fp0, 0, 31);
+ gen_store_fpr32(ctx, fp2, a->fd);
+
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ return true;
+}
+
+static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a)
+{
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, fp0, a->fj);
+ gen_load_fpr64(ctx, fp1, a->fk);
+ tcg_gen_deposit_i64(fp2, fp1, fp0, 0, 63);
+ gen_store_fpr64(ctx, fp2, a->fd);
+
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ return true;
+}
+
+/*
+ * Unary FP operations (abs/neg/class/sqrt/recip/rsqrt/mov, s/d):
+ * single-source gen_farith wrappers with fj -> fd. FLOGB goes through
+ * an explicit helper since gen_farith has no opcode for it.
+ */
+static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FABS_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FABS_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FNEG_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FNEG_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+/* FLOGB: fd = base-2 logarithm of fj via the float_logb helper. */
+static bool trans_flogb_s(DisasContext *ctx, arg_flogb_s *a)
+{
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr32(ctx, fp0, a->fj);
+ gen_helper_float_logb_s(fp1, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp1, a->fd);
+
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ return true;
+}
+
+static bool trans_flogb_d(DisasContext *ctx, arg_flogb_d *a)
+{
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, fp0, a->fj);
+ gen_helper_float_logb_d(fp1, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp1, a->fd);
+
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ return true;
+}
+
+static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FCLASS_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FCLASS_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FSQRT_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FSQRT_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frecip_s(DisasContext *ctx, arg_frecip_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRECIP_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frecip_d(DisasContext *ctx, arg_frecip_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRECIP_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frsqrt_s(DisasContext *ctx, arg_frsqrt_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRSQRT_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frsqrt_d(DisasContext *ctx, arg_frsqrt_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRSQRT_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmov_s(DisasContext *ctx, arg_fmov_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMOV_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fmov_d(DisasContext *ctx, arg_fmov_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FMOV_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+/*
+ * GPR <-> FPR / FCSR moves. The gen_cp1-based ones just forward the
+ * register numbers; the FCSR accessors go through helpers since they
+ * may change FP state visible to the translator.
+ */
+static bool trans_movgr2fr_w(DisasContext *ctx, arg_movgr2fr_w *a)
+{
+ gen_cp1(ctx, OPC_LARCH_GR2FR_W, a->rj, a->fd);
+ return true;
+}
+
+static bool trans_movgr2fr_d(DisasContext *ctx, arg_movgr2fr_d *a)
+{
+ gen_cp1(ctx, OPC_LARCH_GR2FR_D, a->rj, a->fd);
+ return true;
+}
+
+static bool trans_movgr2frh_w(DisasContext *ctx, arg_movgr2frh_w *a)
+{
+ gen_cp1(ctx, OPC_LARCH_GR2FRH_W, a->rj, a->fd);
+ return true;
+}
+
+static bool trans_movfr2gr_s(DisasContext *ctx, arg_movfr2gr_s *a)
+{
+ gen_cp1(ctx, OPC_LARCH_FR2GR_S, a->rd, a->fj);
+ return true;
+}
+
+static bool trans_movfr2gr_d(DisasContext *ctx, arg_movfr2gr_d *a)
+{
+ gen_cp1(ctx, OPC_LARCH_FR2GR_D, a->rd, a->fj);
+ return true;
+}
+
+static bool trans_movfrh2gr_s(DisasContext *ctx, arg_movfrh2gr_s *a)
+{
+ gen_cp1(ctx, OPC_LARCH_FRH2GR_S, a->rd, a->fj);
+ return true;
+}
+
+/* MOVGR2FCSR: write GPR[rj] into FCSR field fcsrd via helper. */
+static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a)
+{
+ TCGv t0 = tcg_temp_new();
+
+ check_cp1_enabled(ctx);
+ gen_load_gpr(t0, a->rj);
+ save_cpu_state(ctx, 0);
+ {
+ TCGv_i32 fs_tmp = tcg_const_i32(a->fcsrd);
+ gen_helper_0e2i(movgr2fcsr, t0, fs_tmp, a->rj);
+ tcg_temp_free_i32(fs_tmp);
+ }
+ /* Stop translation as we may have changed hflags */
+ ctx->base.is_jmp = DISAS_STOP;
+
+ tcg_temp_free(t0);
+ return true;
+}
+
+/* MOVFCSR2GR: read FCSR field fcsrs into GPR[rd] via helper. */
+static bool trans_movfcsr2gr(DisasContext *ctx, arg_movfcsr2gr *a)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_helper_1e0i(movfcsr2gr, t0, a->fcsrs);
+ gen_store_gpr(t0, a->rd);
+ tcg_temp_free(t0);
+ return true;
+}
+
+/* MOVFR2CF: set FP condition flag cd from FPR[fj]. */
+static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a)
+{
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i32 cd = tcg_const_i32(a->cd);
+
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, fp0, a->fj);
+ gen_helper_movreg2cf(cpu_env, cd, fp0);
+
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i32(cd);
+ return true;
+}
+
+/*
+ * MOVCF2FR: read FP condition flag cj into FPR[fd].
+ * Fix: the 'cj' constant temp was leaked — every sibling translator
+ * (movfr2cf, movgr2cf, movcf2gr) frees its const temp; do the same here.
+ */
+static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 cj = tcg_const_i32(a->cj);
+
+ check_cp1_enabled(ctx);
+ gen_helper_movcf2reg(t0, cpu_env, cj);
+ gen_store_fpr64(ctx, t0, a->fd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(cj);
+ return true;
+}
+
+/* MOVGR2CF: set FP condition flag cd from GPR[rj]. */
+static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 cd = tcg_const_i32(a->cd);
+
+ check_cp1_enabled(ctx);
+ gen_load_gpr(t0, a->rj);
+ gen_helper_movreg2cf(cpu_env, cd, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(cd);
+ return true;
+}
+
+/*
+ * MOVCF2GR: read FP condition flag cj into GPR[rd].
+ * NOTE(review): writes cpu_gpr[a->rd] directly with no rd == 0 guard,
+ * unlike the gen_store_gpr-based translators — confirm decode or
+ * cpu_gpr[0] handling makes this safe.
+ */
+static bool trans_movcf2gr(DisasContext *ctx, arg_movcf2gr *a)
+{
+ TCGv_i32 cj = tcg_const_i32(a->cj);
+
+ check_cp1_enabled(ctx);
+ gen_helper_movcf2reg(cpu_gpr[a->rd], cpu_env, cj);
+
+ tcg_temp_free_i32(cj);
+ return true;
+}
+
+/*
+ * FP format conversion and rounding wrappers: FCVT, FTINT{RM,RP,RZ,RNE},
+ * FTINT, FFINT and FRINT, all delegating to gen_farith with fj -> fd.
+ * NOTE(review): the ftintrm _w_ variants take arg_ftintrm_l_s/_l_d
+ * parameter types — presumably the decodetree formats are shared, so the
+ * structs are layout-identical; confirm against the generated decoder.
+ */
+static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FCVT_S_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FCVT_D_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrm_w_s(DisasContext *ctx, arg_ftintrm_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRM_W_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrm_w_d(DisasContext *ctx, arg_ftintrm_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRM_W_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrm_l_s(DisasContext *ctx, arg_ftintrm_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRM_L_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrm_l_d(DisasContext *ctx, arg_ftintrm_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRM_L_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrp_w_s(DisasContext *ctx, arg_ftintrp_w_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRP_W_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrp_w_d(DisasContext *ctx, arg_ftintrp_w_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRP_W_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrp_l_s(DisasContext *ctx, arg_ftintrp_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRP_L_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrp_l_d(DisasContext *ctx, arg_ftintrp_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRP_L_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrz_w_s(DisasContext *ctx, arg_ftintrz_w_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_W_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrz_w_d(DisasContext *ctx, arg_ftintrz_w_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_W_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrz_l_s(DisasContext *ctx, arg_ftintrz_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_L_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrz_l_d(DisasContext *ctx, arg_ftintrz_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_L_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrne_w_s(DisasContext *ctx, arg_ftintrne_w_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_W_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrne_w_d(DisasContext *ctx, arg_ftintrne_w_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_W_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrne_l_s(DisasContext *ctx, arg_ftintrne_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_L_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftintrne_l_d(DisasContext *ctx, arg_ftintrne_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_L_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftint_w_s(DisasContext *ctx, arg_ftint_w_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINT_W_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftint_w_d(DisasContext *ctx, arg_ftint_w_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINT_W_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftint_l_s(DisasContext *ctx, arg_ftint_l_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINT_L_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ftint_l_d(DisasContext *ctx, arg_ftint_l_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FTINT_L_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ffint_s_w(DisasContext *ctx, arg_ffint_s_w *a)
+{
+ gen_farith(ctx, OPC_LARCH_FFINT_S_W, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ffint_s_l(DisasContext *ctx, arg_ffint_s_l *a)
+{
+ gen_farith(ctx, OPC_LARCH_FFINT_S_L, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ffint_d_w(DisasContext *ctx, arg_ffint_d_w *a)
+{
+ gen_farith(ctx, OPC_LARCH_FFINT_D_W, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_ffint_d_l(DisasContext *ctx, arg_ffint_d_l *a)
+{
+ gen_farith(ctx, OPC_LARCH_FFINT_D_L, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frint_s(DisasContext *ctx, arg_frint_s *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRINT_S, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+static bool trans_frint_d(DisasContext *ctx, arg_frint_d *a)
+{
+ gen_farith(ctx, OPC_LARCH_FRINT_D, 0, a->fj, a->fd, 0);
+ return true;
+}
+
+/* ALSL.W: GPR[rd] = sign-extended ((GPR[rj] << (sa2+1)) + GPR[rk]). */
+static bool trans_alsl_w(DisasContext *ctx, arg_alsl_w *a)
+{
+ gen_lsa(ctx, OPC_LARCH_ALSL_W, a->rd, a->rj, a->rk, a->sa2);
+ return true;
+}
+
+/*
+ * ALSL.WU: like ALSL.W but zero-extends the 32-bit result.
+ * NOTE(review): writes cpu_gpr[a->rd] directly with no rd == 0 guard —
+ * confirm this is safe, other translators use gen_store_gpr.
+ */
+static bool trans_alsl_wu(DisasContext *ctx, arg_alsl_wu *a)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ gen_load_gpr(t1, a->rk);
+ tcg_gen_shli_tl(t0, t0, a->sa2 + 1);
+ tcg_gen_add_tl(t0, t0, t1);
+ tcg_gen_ext32u_tl(cpu_gpr[a->rd], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+
+ return true;
+}
+
+static bool trans_alsl_d(DisasContext *ctx, arg_alsl_d *a)
+{
+ check_larch_64(ctx);
+ gen_lsa(ctx, OPC_LARCH_ALSL_D, a->rd, a->rj, a->rk, a->sa2);
+ return true;
+}
+
+/* BYTEPICK: concatenate-and-extract by sa bytes, via gen_align. */
+static bool trans_bytepick_w(DisasContext *ctx, arg_bytepick_w *a)
+{
+ gen_align(ctx, 32, a->rd, a->rj, a->rk, a->sa2);
+ return true;
+}
+
+static bool trans_bytepick_d(DisasContext *ctx, arg_bytepick_d *a)
+{
+ check_larch_64(ctx);
+ gen_align(ctx, 64, a->rd, a->rj, a->rk, a->sa3);
+ return true;
+}
+
+/*
+ * Integer ALU wrappers: add/sub, set-on-less-than, conditional move
+ * (maskeqz/masknez), and the bitwise logic ops, all delegating to the
+ * shared gen_* emitters. 64-bit-only forms call check_larch_64 first.
+ */
+static bool trans_add_w(DisasContext *ctx, arg_add_w *a)
+{
+ gen_arith(ctx, OPC_LARCH_ADD_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_sub_w(DisasContext *ctx, arg_sub_w *a)
+{
+ gen_arith(ctx, OPC_LARCH_SUB_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_add_d(DisasContext *ctx, arg_add_d *a)
+{
+ gen_arith(ctx, OPC_LARCH_ADD_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_sub_d(DisasContext *ctx, arg_sub_d *a)
+{
+ check_larch_64(ctx);
+ gen_arith(ctx, OPC_LARCH_SUB_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_slt(DisasContext *ctx, arg_slt *a)
+{
+ gen_slt(ctx, OPC_LARCH_SLT, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
+{
+ gen_slt(ctx, OPC_LARCH_SLTU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_maskeqz(DisasContext *ctx, arg_maskeqz *a)
+{
+ gen_cond_move(ctx, OPC_LARCH_MASKEQZ, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_masknez(DisasContext *ctx, arg_masknez *a)
+{
+ gen_cond_move(ctx, OPC_LARCH_MASKNEZ, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_nor(DisasContext *ctx, arg_nor *a)
+{
+ gen_logic(ctx, OPC_LARCH_NOR, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_and(DisasContext *ctx, arg_and *a)
+{
+ gen_logic(ctx, OPC_LARCH_AND, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_or(DisasContext *ctx, arg_or *a)
+{
+ gen_logic(ctx, OPC_LARCH_OR, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
+{
+ gen_logic(ctx, OPC_LARCH_XOR, a->rd, a->rj, a->rk);
+ return true;
+}
+
+/*
+ * ORN: GPR[rd] = GPR[rj] | ~GPR[rk].
+ * Fix: go through gen_load_gpr/gen_store_gpr (which handle register 0)
+ * instead of touching cpu_gpr[] directly — the original read
+ * cpu_gpr[a->rj] and wrote cpu_gpr[a->rd] unguarded, unlike every
+ * sibling translator. tcg_gen_orc_tl is the single-op form of or-not.
+ */
+static bool trans_orn(DisasContext *ctx, arg_orn *a)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t0, a->rk);
+ gen_load_gpr(t1, a->rj);
+ tcg_gen_orc_tl(t0, t1, t0);
+ gen_store_gpr(t0, a->rd);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
+/*
+ * ANDN: GPR[rd] = GPR[rj] & ~GPR[rk].
+ * Fix: write the result via gen_store_gpr (guards register 0) instead
+ * of cpu_gpr[a->rd] directly; tcg_gen_andc_tl fuses the not+and.
+ */
+static bool trans_andn(DisasContext *ctx, arg_andn *a)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, a->rk);
+ gen_load_gpr(t1, a->rj);
+ tcg_gen_andc_tl(t0, t1, t0);
+ gen_store_gpr(t0, a->rd);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
+/*
+ * Shift and rotate wrappers. Note the operand order passed to
+ * gen_shift: (rd, rk, rj) — rk before rj, matching gen_shift's
+ * expected argument layout.
+ */
+static bool trans_sll_w(DisasContext *ctx, arg_sll_w *a)
+{
+ gen_shift(ctx, OPC_LARCH_SLL_W, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_srl_w(DisasContext *ctx, arg_srl_w *a)
+{
+ gen_shift(ctx, OPC_LARCH_SRL_W, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_sra_w(DisasContext *ctx, arg_sra_w *a)
+{
+ gen_shift(ctx, OPC_LARCH_SRA_W, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_sll_d(DisasContext *ctx, arg_sll_d *a)
+{
+ check_larch_64(ctx);
+ gen_shift(ctx, OPC_LARCH_SLL_D, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_srl_d(DisasContext *ctx, arg_srl_d *a)
+{
+ check_larch_64(ctx);
+ gen_shift(ctx, OPC_LARCH_SRL_D, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_sra_d(DisasContext *ctx, arg_sra_d *a)
+{
+ check_larch_64(ctx);
+ gen_shift(ctx, OPC_LARCH_SRA_D, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_rotr_w(DisasContext *ctx, arg_rotr_w *a)
+{
+ gen_shift(ctx, OPC_LARCH_ROTR_W, a->rd, a->rk, a->rj);
+ return true;
+}
+
+static bool trans_rotr_d(DisasContext *ctx, arg_rotr_d *a)
+{
+ check_larch_64(ctx);
+ gen_shift(ctx, OPC_LARCH_ROTR_D, a->rd, a->rk, a->rj);
+ return true;
+}
+
+/*
+ * CRC wrappers: gen_crc32(ctx, rd, rj, rk, size-in-bytes, flag) where
+ * flag 0 = CRC.W.* and flag 1 = CRCC.W.* (the second polynomial).
+ */
+static bool trans_crc_w_b_w(DisasContext *ctx, arg_crc_w_b_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 0);
+ return true;
+}
+
+static bool trans_crc_w_h_w(DisasContext *ctx, arg_crc_w_h_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 0);
+ return true;
+}
+
+static bool trans_crc_w_w_w(DisasContext *ctx, arg_crc_w_w_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 0);
+ return true;
+}
+
+static bool trans_crc_w_d_w(DisasContext *ctx, arg_crc_w_d_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 0);
+ return true;
+}
+
+static bool trans_crcc_w_b_w(DisasContext *ctx, arg_crcc_w_b_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 1);
+ return true;
+}
+
+static bool trans_crcc_w_h_w(DisasContext *ctx, arg_crcc_w_h_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 1);
+ return true;
+}
+
+static bool trans_crcc_w_w_w(DisasContext *ctx, arg_crcc_w_w_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 1);
+ return true;
+}
+
+static bool trans_crcc_w_d_w(DisasContext *ctx, arg_crcc_w_d_w *a)
+{
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 1);
+ return true;
+}
+
+/*
+ * Multiply family.  These reuse gen_r6_muldiv(), which (by its name)
+ * appears to be the MIPS-R6-style mul/div generator retargeted for
+ * LoongArch opcodes — result-only, no hi/lo register pair.
+ */
+static bool trans_mul_w(DisasContext *ctx, arg_mul_w *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_MUL_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mulh_w(DisasContext *ctx, arg_mulh_w *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mulh_wu(DisasContext *ctx, arg_mulh_wu *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_WU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+/* 64-bit forms are LA64-only. */
+static bool trans_mul_d(DisasContext *ctx, arg_mul_d *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_MUL_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mulh_d(DisasContext *ctx, arg_mulh_d *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mulh_du(DisasContext *ctx, arg_mulh_du *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_DU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+/* MULW.D.W: GR[rd] = sign_extend32(GR[rj]) * sign_extend32(GR[rk]),
+ * keeping the full 64-bit product. */
+static bool trans_mulw_d_w(DisasContext *ctx, arg_mulw_d_w *a)
+{
+ TCGv_i64 lhs = tcg_temp_new_i64();
+ TCGv_i64 rhs = tcg_temp_new_i64();
+ TCGv_i64 prod = tcg_temp_new_i64();
+
+ gen_load_gpr(lhs, a->rj);
+ tcg_gen_ext32s_i64(lhs, lhs);
+ gen_load_gpr(rhs, a->rk);
+ tcg_gen_ext32s_i64(rhs, rhs);
+ tcg_gen_mul_i64(prod, lhs, rhs);
+ gen_store_gpr(prod, a->rd);
+
+ tcg_temp_free_i64(prod);
+ tcg_temp_free_i64(rhs);
+ tcg_temp_free_i64(lhs);
+ return true;
+}
+
+/* MULW.D.WU: GR[rd] = zero_extend32(GR[rj]) * zero_extend32(GR[rk]),
+ * keeping the full 64-bit product. */
+static bool trans_mulw_d_wu(DisasContext *ctx, arg_mulw_d_wu *a)
+{
+ TCGv_i64 lhs = tcg_temp_new_i64();
+ TCGv_i64 rhs = tcg_temp_new_i64();
+ TCGv_i64 prod = tcg_temp_new_i64();
+
+ gen_load_gpr(lhs, a->rj);
+ tcg_gen_ext32u_i64(lhs, lhs);
+ gen_load_gpr(rhs, a->rk);
+ tcg_gen_ext32u_i64(rhs, rhs);
+ tcg_gen_mul_i64(prod, lhs, rhs);
+ gen_store_gpr(prod, a->rd);
+
+ tcg_temp_free_i64(prod);
+ tcg_temp_free_i64(rhs);
+ tcg_temp_free_i64(lhs);
+ return true;
+}
+
+/*
+ * Divide / modulo family, signed and unsigned, 32- and 64-bit.
+ * Division-by-zero semantics are whatever gen_r6_muldiv() implements
+ * (presumably result-undefined, no trap — TODO confirm).
+ */
+static bool trans_div_w(DisasContext *ctx, arg_div_w *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mod_w(DisasContext *ctx, arg_mod_w *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_W, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_div_wu(DisasContext *ctx, arg_div_wu *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_WU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mod_wu(DisasContext *ctx, arg_mod_wu *a)
+{
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_WU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+/* 64-bit forms are LA64-only. */
+static bool trans_div_d(DisasContext *ctx, arg_div_d *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mod_d(DisasContext *ctx, arg_mod_d *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_D, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_div_du(DisasContext *ctx, arg_div_du *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_DU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+static bool trans_mod_du(DisasContext *ctx, arg_mod_du *a)
+{
+ check_larch_64(ctx);
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_DU, a->rd, a->rj, a->rk);
+ return true;
+}
+
+/* do not update CP0.BadVaddr */
+/* ASRTLE.D: bound-check assert, raises an exception in the helper when
+ * GR[rj] > GR[rk].  Exception details live in gen_helper_asrtle_d. */
+static bool trans_asrtle_d(DisasContext *ctx, arg_asrtle_d * a)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ gen_load_gpr(t1, a->rj);
+ gen_load_gpr(t2, a->rk);
+ gen_helper_asrtle_d(cpu_env, t1, t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ return true;
+}
+
+/* do not update CP0.BadVaddr */
+/* ASRTGT.D: as above, but asserts GR[rj] > GR[rk]. */
+static bool trans_asrtgt_d(DisasContext *ctx, arg_asrtgt_d * a)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ gen_load_gpr(t1, a->rj);
+ gen_load_gpr(t2, a->rk);
+ gen_helper_asrtgt_d(cpu_env, t1, t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ return true;
+}
+
+#ifdef CONFIG_USER_ONLY
+/* LBT scratch registers are not exposed in user mode; reject so the
+ * decoder treats these as illegal instructions. */
+static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a)
+{
+ return false;
+}
+
+static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a)
+{
+ return false;
+}
+#else
+/* GR2SCR: copy GR[rj] into LBT scratch register sd (via helper). */
+static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a)
+{
+ TCGv_i32 sd = tcg_const_i32(a->sd);
+ TCGv val = tcg_temp_new();
+ check_lbt_enabled(ctx);
+ gen_load_gpr(val, a->rj);
+ gen_helper_store_scr(cpu_env, sd, val);
+ tcg_temp_free_i32(sd);
+ tcg_temp_free(val);
+ return true;
+}
+
+/* SCR2GR: copy LBT scratch register sj into GR[rd]. */
+static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a)
+{
+ /* Writes to r0 are discarded, so skip codegen entirely. */
+ if (a->rd == 0) {
+ /* Nop */
+ return true;
+ }
+
+ TCGv_i32 tsj = tcg_const_i32(a->sj);
+ check_lbt_enabled(ctx);
+ gen_helper_load_scr(cpu_gpr[a->rd], cpu_env, tsj);
+ tcg_temp_free_i32(tsj);
+ return true;
+}
+#endif
+
+/*
+ * Count leading/trailing ones and zeros.  The leading counts go
+ * through the inline gen_cl() generator, while the trailing counts
+ * call out to helpers.
+ */
+static bool trans_clo_w(DisasContext *ctx, arg_clo_w *a)
+{
+ gen_cl(ctx, OPC_LARCH_CLO_W, a->rd, a->rj);
+ return true;
+}
+
+static bool trans_clz_w(DisasContext *ctx, arg_clz_w *a)
+{
+ gen_cl(ctx, OPC_LARCH_CLZ_W, a->rd, a->rj);
+ return true;
+}
+
+static bool trans_cto_w(DisasContext *ctx, arg_cto_w *a)
+{
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ gen_helper_cto_w(cpu_gpr[a->rd], cpu_env, t0);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
+static bool trans_ctz_w(DisasContext *ctx, arg_ctz_w *a)
+{
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ gen_helper_ctz_w(cpu_gpr[a->rd], cpu_env, t0);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
+/* 64-bit forms are LA64-only. */
+static bool trans_clo_d(DisasContext *ctx, arg_clo_d *a)
+{
+ check_larch_64(ctx);
+ gen_cl(ctx, OPC_LARCH_CLO_D, a->rd, a->rj);
+ return true;
+}
+
+static bool trans_clz_d(DisasContext *ctx, arg_clz_d *a)
+{
+ check_larch_64(ctx);
+ gen_cl(ctx, OPC_LARCH_CLZ_D, a->rd, a->rj);
+ return true;
+}
+
+/* NOTE(review): cto_d/ctz_d have no check_larch_64() guard unlike
+ * clo_d/clz_d — confirm whether that is intentional. */
+static bool trans_cto_d(DisasContext *ctx, arg_cto_d *a)
+{
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ gen_helper_cto_d(cpu_gpr[a->rd], cpu_env, t0);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
+static bool trans_ctz_d(DisasContext *ctx, arg_ctz_d *a)
+{
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, a->rj);
+ gen_helper_ctz_d(cpu_gpr[a->rd], cpu_env, t0);
+
+ tcg_temp_free(t0);
+ return true;
+}
+
+/*
+ * Byte/halfword reversal and bit reversal.  Implementations are split
+ * between gen_bshfl(), the handle_rev* generators and helpers;
+ * note the argument order for gen_bshfl is (ctx, op, rj, rd).
+ */
+static bool trans_revb_2h(DisasContext *ctx, arg_revb_2h *a)
+{
+ gen_bshfl(ctx, OPC_LARCH_REVB_2H, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_revb_4h(DisasContext *ctx, arg_revb_4h *a)
+{
+ check_larch_64(ctx);
+ gen_bshfl(ctx, OPC_LARCH_REVB_4H, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_revb_2w(DisasContext *ctx, arg_revb_2w *a)
+{
+ handle_rev32(ctx, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_revb_d(DisasContext *ctx, arg_revb_d *a)
+{
+ handle_rev64(ctx, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_revh_2w(DisasContext *ctx, arg_revh_2w *a)
+{
+ handle_rev16(ctx, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_revh_d(DisasContext *ctx, arg_revh_d *a)
+{
+ check_larch_64(ctx);
+ gen_bshfl(ctx, OPC_LARCH_REVH_D, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_bitrev_4b(DisasContext *ctx, arg_bitrev_4b *a)
+{
+ gen_bitswap(ctx, OPC_LARCH_BREV_4B, a->rd, a->rj);
+ return true;
+}
+
+static bool trans_bitrev_8b(DisasContext *ctx, arg_bitrev_8b *a)
+{
+ check_larch_64(ctx);
+ gen_bitswap(ctx, OPC_LARCH_BREV_8B, a->rd, a->rj);
+ return true;
+}
+
+static bool trans_bitrev_w(DisasContext *ctx, arg_bitrev_w *a)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ gen_helper_bitrev_w(cpu_gpr[a->rd], cpu_env, t0);
+ tcg_temp_free(t0);
+ return true;
+}
+
+static bool trans_bitrev_d(DisasContext *ctx, arg_bitrev_d *a)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ gen_helper_bitrev_d(cpu_gpr[a->rd], cpu_env, t0);
+ tcg_temp_free(t0);
+ return true;
+}
+
+/* EXT.W.H / EXT.W.B: sign-extend halfword/byte into GR[rd]. */
+static bool trans_ext_w_h(DisasContext *ctx, arg_ext_w_h *a)
+{
+ gen_bshfl(ctx, OPC_LARCH_EXT_WH, a->rj, a->rd);
+ return true;
+}
+
+static bool trans_ext_w_b(DisasContext *ctx, arg_ext_w_b *a)
+{
+ gen_bshfl(ctx, OPC_LARCH_EXT_WB, a->rj, a->rd);
+ return true;
+}
+
+/*
+ * Immediate shifts/rotates.  32-bit forms take a 5-bit immediate via
+ * gen_shift_imm(); the 64-bit forms inline the TCG op with the 6-bit
+ * immediate directly.
+ */
+static bool trans_srli_w(DisasContext *ctx, arg_srli_w *a)
+{
+ gen_shift_imm(ctx, OPC_LARCH_SRLI_W, a->rd, a->rj, a->ui5);
+ return true;
+}
+
+static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a)
+{
+ gen_shift_imm(ctx, OPC_LARCH_SRAI_W, a->rd, a->rj, a->ui5);
+ return true;
+}
+
+static bool trans_srai_d(DisasContext *ctx, arg_srai_d *a)
+{
+ TCGv t0;
+ check_larch_64(ctx);
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ tcg_gen_sari_tl(cpu_gpr[a->rd], t0, a->ui6);
+ tcg_temp_free(t0);
+ return true;
+}
+
+static bool trans_rotri_w(DisasContext *ctx, arg_rotri_w *a)
+{
+ gen_shift_imm(ctx, OPC_LARCH_ROTRI_W, a->rd, a->rj, a->ui5);
+ return true;
+}
+
+static bool trans_rotri_d(DisasContext *ctx, arg_rotri_d *a)
+{
+ TCGv t0;
+ check_larch_64(ctx);
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ tcg_gen_rotri_tl(cpu_gpr[a->rd], t0, a->ui6);
+ tcg_temp_free(t0);
+ return true;
+}
+
+/* FCMP.cond.S: FP compare, single; writes condition flag cd.
+ * check_cp1_enabled() guards against use with the FPU disabled. */
+static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
+{
+ check_cp1_enabled(ctx);
+ gen_fcmp_s(ctx, a->fcond, a->fk, a->fj, a->cd);
+ return true;
+}
+
+/* FCMP.cond.D: FP compare, double. */
+static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
+{
+ check_cp1_enabled(ctx);
+ gen_fcmp_d(ctx, a->fcond, a->fk, a->fj, a->cd);
+ return true;
+}
+
+/* FSEL: FR[fd] = condition-flag ca ? FR[fk] : FR[fj] (selection done
+ * in the helper). */
+static bool trans_fsel(DisasContext *ctx, arg_fsel *a)
+{
+ TCGv_i64 fj = tcg_temp_new_i64();
+ TCGv_i64 fk = tcg_temp_new_i64();
+ TCGv_i64 fd = tcg_temp_new_i64();
+ TCGv_i32 ca = tcg_const_i32(a->ca);
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, fj, a->fj);
+ gen_load_fpr64(ctx, fk, a->fk);
+ gen_helper_fsel(fd, cpu_env, fj, fk, ca);
+ gen_store_fpr64(ctx, fd, a->fd);
+ tcg_temp_free_i64(fj);
+ tcg_temp_free_i64(fk);
+ tcg_temp_free_i64(fd);
+ tcg_temp_free_i32(ca);
+ return true;
+}
+
+#include "cpu-csr.h"
+
+#ifdef CONFIG_USER_ONLY
+
+static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
+{
+ return false;
+}
+
+#else
+
+
+/*
+ * Expand to one switch "case" that reads CSR <name> into cpu_gpr[rd].
+ * The case label deliberately sits inside the do/while(0) body (same
+ * trick as Duff's device); the caller writes the trailing "break;".
+ */
+#define GEN_CSRRQ_CASE(name) \
+ do { \
+ case LOONGARCH_CSR_ ## name: \
+ gen_csr_rdq(ctx, cpu_gpr[rd], LOONGARCH_CSR_ ## name);\
+ } while (0)
+
+static bool trans_csrrd(DisasContext *ctx, unsigned rd, unsigned csr)
+{
+ switch (csr) {
+ GEN_CSRRQ_CASE(CRMD);
+ break;
+ GEN_CSRRQ_CASE(PRMD);
+ break;
+ GEN_CSRRQ_CASE(EUEN);
+ break;
+ GEN_CSRRQ_CASE(MISC);
+ break;
+ GEN_CSRRQ_CASE(ECFG);
+ break;
+ GEN_CSRRQ_CASE(ESTAT);
+ break;
+ GEN_CSRRQ_CASE(ERA);
+ break;
+ GEN_CSRRQ_CASE(BADV);
+ break;
+ GEN_CSRRQ_CASE(BADI);
+ break;
+ GEN_CSRRQ_CASE(EEPN);
+ break;
+ GEN_CSRRQ_CASE(TLBIDX);
+ break;
+ GEN_CSRRQ_CASE(TLBEHI);
+ break;
+ GEN_CSRRQ_CASE(TLBELO0);
+ break;
+ GEN_CSRRQ_CASE(TLBELO1);
+ break;
+ GEN_CSRRQ_CASE(TLBWIRED);
+ break;
+ GEN_CSRRQ_CASE(GTLBC);
+ break;
+ GEN_CSRRQ_CASE(TRGP);
+ break;
+ GEN_CSRRQ_CASE(ASID);
+ break;
+ GEN_CSRRQ_CASE(PGDL);
+ break;
+ GEN_CSRRQ_CASE(PGDH);
+ break;
+ case LOONGARCH_CSR_PGD:
+ gen_helper_read_pgd(cpu_gpr[rd], cpu_env);
+ break;
+ GEN_CSRRQ_CASE(PWCTL0);
+ break;
+ GEN_CSRRQ_CASE(PWCTL1);
+ break;
+ GEN_CSRRQ_CASE(STLBPGSIZE);
+ break;
+ GEN_CSRRQ_CASE(RVACFG);
+ break;
+ GEN_CSRRQ_CASE(CPUID);
+ break;
+ GEN_CSRRQ_CASE(PRCFG1);
+ break;
+ GEN_CSRRQ_CASE(PRCFG2);
+ break;
+ GEN_CSRRQ_CASE(PRCFG3);
+ break;
+ GEN_CSRRQ_CASE(KS0);
+ break;
+ GEN_CSRRQ_CASE(KS1);
+ break;
+ GEN_CSRRQ_CASE(KS2);
+ break;
+ GEN_CSRRQ_CASE(KS3);
+ break;
+ GEN_CSRRQ_CASE(KS4);
+ break;
+ GEN_CSRRQ_CASE(KS5);
+ break;
+ GEN_CSRRQ_CASE(KS6);
+ break;
+ GEN_CSRRQ_CASE(KS7);
+ break;
+ GEN_CSRRQ_CASE(KS8);
+ break;
+ GEN_CSRRQ_CASE(TMID);
+ break;
+ GEN_CSRRQ_CASE(TCFG);
+ break;
+ GEN_CSRRQ_CASE(TVAL);
+ break;
+ GEN_CSRRQ_CASE(CNTC);
+ break;
+ GEN_CSRRQ_CASE(TINTCLR);
+ break;
+ GEN_CSRRQ_CASE(GSTAT);
+ break;
+ GEN_CSRRQ_CASE(GCFG);
+ break;
+ GEN_CSRRQ_CASE(GINTC);
+ break;
+ GEN_CSRRQ_CASE(GCNTC);
+ break;
+ GEN_CSRRQ_CASE(LLBCTL);
+ break;
+ GEN_CSRRQ_CASE(IMPCTL1);
+ break;
+ GEN_CSRRQ_CASE(IMPCTL2);
+ break;
+ GEN_CSRRQ_CASE(GNMI);
+ break;
+ GEN_CSRRQ_CASE(TLBRENT);
+ break;
+ GEN_CSRRQ_CASE(TLBRBADV);
+ break;
+ GEN_CSRRQ_CASE(TLBRERA);
+ break;
+ GEN_CSRRQ_CASE(TLBRSAVE);
+ break;
+ GEN_CSRRQ_CASE(TLBRELO0);
+ break;
+ GEN_CSRRQ_CASE(TLBRELO1);
+ break;
+ GEN_CSRRQ_CASE(TLBREHI);
+ break;
+ GEN_CSRRQ_CASE(TLBRPRMD);
+ break;
+ GEN_CSRRQ_CASE(ERRCTL);
+ break;
+ GEN_CSRRQ_CASE(ERRINFO);
+ break;
+ GEN_CSRRQ_CASE(ERRINFO1);
+ break;
+ GEN_CSRRQ_CASE(ERRENT);
+ break;
+ GEN_CSRRQ_CASE(ERRERA);
+ break;
+ GEN_CSRRQ_CASE(ERRSAVE);
+ break;
+ GEN_CSRRQ_CASE(CTAG);
+ break;
+ GEN_CSRRQ_CASE(DMWIN0);
+ break;
+ GEN_CSRRQ_CASE(DMWIN1);
+ break;
+ GEN_CSRRQ_CASE(DMWIN2);
+ break;
+ GEN_CSRRQ_CASE(DMWIN3);
+ break;
+ GEN_CSRRQ_CASE(PERFCTRL0);
+ break;
+ GEN_CSRRQ_CASE(PERFCNTR0);
+ break;
+ GEN_CSRRQ_CASE(PERFCTRL1);
+ break;
+ GEN_CSRRQ_CASE(PERFCNTR1);
+ break;
+ GEN_CSRRQ_CASE(PERFCTRL2);
+ break;
+ GEN_CSRRQ_CASE(PERFCNTR2);
+ break;
+ GEN_CSRRQ_CASE(PERFCTRL3);
+ break;
+ GEN_CSRRQ_CASE(PERFCNTR3);
+ break;
+ /* debug */
+ GEN_CSRRQ_CASE(MWPC);
+ break;
+ GEN_CSRRQ_CASE(MWPS);
+ break;
+ GEN_CSRRQ_CASE(DB0ADDR);
+ break;
+ GEN_CSRRQ_CASE(DB0MASK);
+ break;
+ GEN_CSRRQ_CASE(DB0CTL);
+ break;
+ GEN_CSRRQ_CASE(DB0ASID);
+ break;
+ GEN_CSRRQ_CASE(DB1ADDR);
+ break;
+ GEN_CSRRQ_CASE(DB1MASK);
+ break;
+ GEN_CSRRQ_CASE(DB1CTL);
+ break;
+ GEN_CSRRQ_CASE(DB1ASID);
+ break;
+ GEN_CSRRQ_CASE(DB2ADDR);
+ break;
+ GEN_CSRRQ_CASE(DB2MASK);
+ break;
+ GEN_CSRRQ_CASE(DB2CTL);
+ break;
+ GEN_CSRRQ_CASE(DB2ASID);
+ break;
+ GEN_CSRRQ_CASE(DB3ADDR);
+ break;
+ GEN_CSRRQ_CASE(DB3MASK);
+ break;
+ GEN_CSRRQ_CASE(DB3CTL);
+ break;
+ GEN_CSRRQ_CASE(DB3ASID);
+ break;
+ GEN_CSRRQ_CASE(FWPC);
+ break;
+ GEN_CSRRQ_CASE(FWPS);
+ break;
+ GEN_CSRRQ_CASE(IB0ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB0MASK);
+ break;
+ GEN_CSRRQ_CASE(IB0CTL);
+ break;
+ GEN_CSRRQ_CASE(IB0ASID);
+ break;
+ GEN_CSRRQ_CASE(IB1ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB1MASK);
+ break;
+ GEN_CSRRQ_CASE(IB1CTL);
+ break;
+ GEN_CSRRQ_CASE(IB1ASID);
+ break;
+ GEN_CSRRQ_CASE(IB2ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB2MASK);
+ break;
+ GEN_CSRRQ_CASE(IB2CTL);
+ break;
+ GEN_CSRRQ_CASE(IB2ASID);
+ break;
+ GEN_CSRRQ_CASE(IB3ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB3MASK);
+ break;
+ GEN_CSRRQ_CASE(IB3CTL);
+ break;
+ GEN_CSRRQ_CASE(IB3ASID);
+ break;
+ GEN_CSRRQ_CASE(IB4ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB4MASK);
+ break;
+ GEN_CSRRQ_CASE(IB4CTL);
+ break;
+ GEN_CSRRQ_CASE(IB4ASID);
+ break;
+ GEN_CSRRQ_CASE(IB5ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB5MASK);
+ break;
+ GEN_CSRRQ_CASE(IB5CTL);
+ break;
+ GEN_CSRRQ_CASE(IB5ASID);
+ break;
+ GEN_CSRRQ_CASE(IB6ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB6MASK);
+ break;
+ GEN_CSRRQ_CASE(IB6CTL);
+ break;
+ GEN_CSRRQ_CASE(IB6ASID);
+ break;
+ GEN_CSRRQ_CASE(IB7ADDR);
+ break;
+ GEN_CSRRQ_CASE(IB7MASK);
+ break;
+ GEN_CSRRQ_CASE(IB7CTL);
+ break;
+ GEN_CSRRQ_CASE(IB7ASID);
+ break;
+ GEN_CSRRQ_CASE(DEBUG);
+ break;
+ GEN_CSRRQ_CASE(DERA);
+ break;
+ GEN_CSRRQ_CASE(DESAVE);
+ break;
+ default:
+ return false;
+ }
+
+ #undef GEN_CSRRQ_CASE
+
+ return true;
+}
+
+/*
+ * Expand to one switch "case" that writes cpu_gpr[rd] to CSR <name>.
+ * As with GEN_CSRRQ_CASE, the case label lives inside the do/while(0)
+ * and the caller supplies the "break;".
+ */
+#define GEN_CSRWQ_CASE(name) \
+do { \
+ case LOONGARCH_CSR_ ## name: \
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_ ## name); \
+} while (0)
+
+static bool trans_csrwr(DisasContext *ctx, unsigned rd, unsigned csr)
+{
+
+ switch (csr) {
+ case LOONGARCH_CSR_CRMD:
+ save_cpu_state(ctx, 1);
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_CRMD);
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+ GEN_CSRWQ_CASE(PRMD);
+ break;
+ case LOONGARCH_CSR_EUEN:
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_EUEN);
+ /* Stop translation */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+ GEN_CSRWQ_CASE(MISC);
+ break;
+ GEN_CSRWQ_CASE(ECFG);
+ break;
+ GEN_CSRWQ_CASE(ESTAT);
+ break;
+ GEN_CSRWQ_CASE(ERA);
+ break;
+ GEN_CSRWQ_CASE(BADV);
+ break;
+ GEN_CSRWQ_CASE(BADI);
+ break;
+ GEN_CSRWQ_CASE(EEPN);
+ break;
+ GEN_CSRWQ_CASE(TLBIDX);
+ break;
+ GEN_CSRWQ_CASE(TLBEHI);
+ break;
+ GEN_CSRWQ_CASE(TLBELO0);
+ break;
+ GEN_CSRWQ_CASE(TLBELO1);
+ break;
+ GEN_CSRWQ_CASE(TLBWIRED);
+ break;
+ GEN_CSRWQ_CASE(GTLBC);
+ break;
+ GEN_CSRWQ_CASE(TRGP);
+ break;
+ GEN_CSRWQ_CASE(ASID);
+ break;
+ GEN_CSRWQ_CASE(PGDL);
+ break;
+ GEN_CSRWQ_CASE(PGDH);
+ break;
+ GEN_CSRWQ_CASE(PGD);
+ break;
+ GEN_CSRWQ_CASE(PWCTL0);
+ break;
+ GEN_CSRWQ_CASE(PWCTL1);
+ break;
+ GEN_CSRWQ_CASE(STLBPGSIZE);
+ break;
+ GEN_CSRWQ_CASE(RVACFG);
+ break;
+ GEN_CSRWQ_CASE(CPUID);
+ break;
+ GEN_CSRWQ_CASE(PRCFG1);
+ break;
+ GEN_CSRWQ_CASE(PRCFG2);
+ break;
+ GEN_CSRWQ_CASE(PRCFG3);
+ break;
+ GEN_CSRWQ_CASE(KS0);
+ break;
+ GEN_CSRWQ_CASE(KS1);
+ break;
+ GEN_CSRWQ_CASE(KS2);
+ break;
+ GEN_CSRWQ_CASE(KS3);
+ break;
+ GEN_CSRWQ_CASE(KS4);
+ break;
+ GEN_CSRWQ_CASE(KS5);
+ break;
+ GEN_CSRWQ_CASE(KS6);
+ break;
+ GEN_CSRWQ_CASE(KS7);
+ break;
+ GEN_CSRWQ_CASE(KS8);
+ break;
+ GEN_CSRWQ_CASE(TMID);
+ break;
+ GEN_CSRWQ_CASE(TCFG);
+ break;
+ GEN_CSRWQ_CASE(TVAL);
+ break;
+ GEN_CSRWQ_CASE(CNTC);
+ break;
+ GEN_CSRWQ_CASE(TINTCLR);
+ break;
+ GEN_CSRWQ_CASE(GSTAT);
+ break;
+ GEN_CSRWQ_CASE(GCFG);
+ break;
+ GEN_CSRWQ_CASE(GINTC);
+ break;
+ GEN_CSRWQ_CASE(GCNTC);
+ break;
+ GEN_CSRWQ_CASE(LLBCTL);
+ break;
+ GEN_CSRWQ_CASE(IMPCTL1);
+ break;
+ GEN_CSRWQ_CASE(IMPCTL2);
+ break;
+ GEN_CSRWQ_CASE(GNMI);
+ break;
+ GEN_CSRWQ_CASE(TLBRENT);
+ break;
+ GEN_CSRWQ_CASE(TLBRBADV);
+ break;
+ GEN_CSRWQ_CASE(TLBRERA);
+ break;
+ GEN_CSRWQ_CASE(TLBRSAVE);
+ break;
+ GEN_CSRWQ_CASE(TLBRELO0);
+ break;
+ GEN_CSRWQ_CASE(TLBRELO1);
+ break;
+ GEN_CSRWQ_CASE(TLBREHI);
+ break;
+ GEN_CSRWQ_CASE(TLBRPRMD);
+ break;
+ GEN_CSRWQ_CASE(ERRCTL);
+ break;
+ GEN_CSRWQ_CASE(ERRINFO);
+ break;
+ GEN_CSRWQ_CASE(ERRINFO1);
+ break;
+ GEN_CSRWQ_CASE(ERRENT);
+ break;
+ GEN_CSRWQ_CASE(ERRERA);
+ break;
+ GEN_CSRWQ_CASE(ERRSAVE);
+ break;
+ GEN_CSRWQ_CASE(CTAG);
+ break;
+ GEN_CSRWQ_CASE(DMWIN0);
+ break;
+ GEN_CSRWQ_CASE(DMWIN1);
+ break;
+ GEN_CSRWQ_CASE(DMWIN2);
+ break;
+ GEN_CSRWQ_CASE(DMWIN3);
+ break;
+ GEN_CSRWQ_CASE(PERFCTRL0);
+ break;
+ GEN_CSRWQ_CASE(PERFCNTR0);
+ break;
+ GEN_CSRWQ_CASE(PERFCTRL1);
+ break;
+ GEN_CSRWQ_CASE(PERFCNTR1);
+ break;
+ GEN_CSRWQ_CASE(PERFCTRL2);
+ break;
+ GEN_CSRWQ_CASE(PERFCNTR2);
+ break;
+ GEN_CSRWQ_CASE(PERFCTRL3);
+ break;
+ GEN_CSRWQ_CASE(PERFCNTR3);
+ break;
+ /* debug */
+ GEN_CSRWQ_CASE(MWPC);
+ break;
+ GEN_CSRWQ_CASE(MWPS);
+ break;
+ GEN_CSRWQ_CASE(DB0ADDR);
+ break;
+ GEN_CSRWQ_CASE(DB0MASK);
+ break;
+ GEN_CSRWQ_CASE(DB0CTL);
+ break;
+ GEN_CSRWQ_CASE(DB0ASID);
+ break;
+ GEN_CSRWQ_CASE(DB1ADDR);
+ break;
+ GEN_CSRWQ_CASE(DB1MASK);
+ break;
+ GEN_CSRWQ_CASE(DB1CTL);
+ break;
+ GEN_CSRWQ_CASE(DB1ASID);
+ break;
+ GEN_CSRWQ_CASE(DB2ADDR);
+ break;
+ GEN_CSRWQ_CASE(DB2MASK);
+ break;
+ GEN_CSRWQ_CASE(DB2CTL);
+ break;
+ GEN_CSRWQ_CASE(DB2ASID);
+ break;
+ GEN_CSRWQ_CASE(DB3ADDR);
+ break;
+ GEN_CSRWQ_CASE(DB3MASK);
+ break;
+ GEN_CSRWQ_CASE(DB3CTL);
+ break;
+ GEN_CSRWQ_CASE(DB3ASID);
+ break;
+ GEN_CSRWQ_CASE(FWPC);
+ break;
+ GEN_CSRWQ_CASE(FWPS);
+ break;
+ GEN_CSRWQ_CASE(IB0ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB0MASK);
+ break;
+ GEN_CSRWQ_CASE(IB0CTL);
+ break;
+ GEN_CSRWQ_CASE(IB0ASID);
+ break;
+ GEN_CSRWQ_CASE(IB1ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB1MASK);
+ break;
+ GEN_CSRWQ_CASE(IB1CTL);
+ break;
+ GEN_CSRWQ_CASE(IB1ASID);
+ break;
+ GEN_CSRWQ_CASE(IB2ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB2MASK);
+ break;
+ GEN_CSRWQ_CASE(IB2CTL);
+ break;
+ GEN_CSRWQ_CASE(IB2ASID);
+ break;
+ GEN_CSRWQ_CASE(IB3ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB3MASK);
+ break;
+ GEN_CSRWQ_CASE(IB3CTL);
+ break;
+ GEN_CSRWQ_CASE(IB3ASID);
+ break;
+ GEN_CSRWQ_CASE(IB4ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB4MASK);
+ break;
+ GEN_CSRWQ_CASE(IB4CTL);
+ break;
+ GEN_CSRWQ_CASE(IB4ASID);
+ break;
+ GEN_CSRWQ_CASE(IB5ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB5MASK);
+ break;
+ GEN_CSRWQ_CASE(IB5CTL);
+ break;
+ GEN_CSRWQ_CASE(IB5ASID);
+ break;
+ GEN_CSRWQ_CASE(IB6ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB6MASK);
+ break;
+ GEN_CSRWQ_CASE(IB6CTL);
+ break;
+ GEN_CSRWQ_CASE(IB6ASID);
+ break;
+ GEN_CSRWQ_CASE(IB7ADDR);
+ break;
+ GEN_CSRWQ_CASE(IB7MASK);
+ break;
+ GEN_CSRWQ_CASE(IB7CTL);
+ break;
+ GEN_CSRWQ_CASE(IB7ASID);
+ break;
+ GEN_CSRWQ_CASE(DEBUG);
+ break;
+ GEN_CSRWQ_CASE(DERA);
+ break;
+ GEN_CSRWQ_CASE(DESAVE);
+ break;
+ default:
+ return false;
+ }
+
+ #undef GEN_CSRWQ_CASE
+
+ return true;
+}
+
+/*
+ * Expand to one switch "case" that exchanges CSR <name> with GR[rj],
+ * discarding the old value (via `zero`) when rd == 0.
+ *
+ * NOTE(review): trans_csrxchg allocates `zero` with tcg_const_tl()
+ * before its rj==0 / rj==1 early returns and before the default:
+ * "return false" path, and only frees it on the success path — those
+ * paths appear to leak the temporary; confirm and fix.
+ */
+#define GEN_CSRXQ_CASE(name) \
+do { \
+ case LOONGARCH_CSR_ ## name: \
+ if (rd == 0) { \
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], \
+ LOONGARCH_CSR_ ## name); \
+ } else { \
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], \
+ LOONGARCH_CSR_ ## name); \
+ } \
+} while (0)
+
+static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
+{
+ unsigned rd, rj, csr;
+ TCGv zero = tcg_const_tl(0);
+ rd = a->rd;
+ rj = a->rj;
+ csr = a->csr;
+
+ if (rj == 0) {
+ return trans_csrrd(ctx, rd, csr);
+ } else if (rj == 1) {
+ return trans_csrwr(ctx, rd, csr);
+ }
+
+ switch (csr) {
+ case LOONGARCH_CSR_CRMD:
+ save_cpu_state(ctx, 1);
+ if (rd == 0) {
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_CRMD);
+ } else {
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_CRMD);
+ }
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+
+ GEN_CSRXQ_CASE(PRMD);
+ break;
+ case LOONGARCH_CSR_EUEN:
+ if (rd == 0) {
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_EUEN);
+ } else {
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_EUEN);
+ }
+ /* Stop translation */
+ gen_save_pc(ctx->base.pc_next + 4);
+ ctx->base.is_jmp = DISAS_EXIT;
+ break;
+ GEN_CSRXQ_CASE(MISC);
+ break;
+ GEN_CSRXQ_CASE(ECFG);
+ break;
+ GEN_CSRXQ_CASE(ESTAT);
+ break;
+ GEN_CSRXQ_CASE(ERA);
+ break;
+ GEN_CSRXQ_CASE(BADV);
+ break;
+ GEN_CSRXQ_CASE(BADI);
+ break;
+ GEN_CSRXQ_CASE(EEPN);
+ break;
+ GEN_CSRXQ_CASE(TLBIDX);
+ break;
+ GEN_CSRXQ_CASE(TLBEHI);
+ break;
+ GEN_CSRXQ_CASE(TLBELO0);
+ break;
+ GEN_CSRXQ_CASE(TLBELO1);
+ break;
+ GEN_CSRXQ_CASE(TLBWIRED);
+ break;
+ GEN_CSRXQ_CASE(GTLBC);
+ break;
+ GEN_CSRXQ_CASE(TRGP);
+ break;
+ GEN_CSRXQ_CASE(ASID);
+ break;
+ GEN_CSRXQ_CASE(PGDL);
+ break;
+ GEN_CSRXQ_CASE(PGDH);
+ break;
+ GEN_CSRXQ_CASE(PGD);
+ break;
+ GEN_CSRXQ_CASE(PWCTL0);
+ break;
+ GEN_CSRXQ_CASE(PWCTL1);
+ break;
+ GEN_CSRXQ_CASE(STLBPGSIZE);
+ break;
+ GEN_CSRXQ_CASE(RVACFG);
+ break;
+ GEN_CSRXQ_CASE(CPUID);
+ break;
+ GEN_CSRXQ_CASE(PRCFG1);
+ break;
+ GEN_CSRXQ_CASE(PRCFG2);
+ break;
+ GEN_CSRXQ_CASE(PRCFG3);
+ break;
+ GEN_CSRXQ_CASE(KS0);
+ break;
+ GEN_CSRXQ_CASE(KS1);
+ break;
+ GEN_CSRXQ_CASE(KS2);
+ break;
+ GEN_CSRXQ_CASE(KS3);
+ break;
+ GEN_CSRXQ_CASE(KS4);
+ break;
+ GEN_CSRXQ_CASE(KS5);
+ break;
+ GEN_CSRXQ_CASE(KS6);
+ break;
+ GEN_CSRXQ_CASE(KS7);
+ break;
+ GEN_CSRXQ_CASE(KS8);
+ break;
+ GEN_CSRXQ_CASE(TMID);
+ break;
+ GEN_CSRXQ_CASE(TCFG);
+ break;
+ GEN_CSRXQ_CASE(TVAL);
+ break;
+ GEN_CSRXQ_CASE(CNTC);
+ break;
+ GEN_CSRXQ_CASE(TINTCLR);
+ break;
+ GEN_CSRXQ_CASE(GSTAT);
+ break;
+ GEN_CSRXQ_CASE(GCFG);
+ break;
+ GEN_CSRXQ_CASE(GINTC);
+ break;
+ GEN_CSRXQ_CASE(GCNTC);
+ break;
+ GEN_CSRXQ_CASE(LLBCTL);
+ break;
+ GEN_CSRXQ_CASE(IMPCTL1);
+ break;
+ GEN_CSRXQ_CASE(IMPCTL2);
+ break;
+ GEN_CSRXQ_CASE(GNMI);
+ break;
+ GEN_CSRXQ_CASE(TLBRENT);
+ break;
+ GEN_CSRXQ_CASE(TLBRBADV);
+ break;
+ GEN_CSRXQ_CASE(TLBRERA);
+ break;
+ GEN_CSRXQ_CASE(TLBRSAVE);
+ break;
+ GEN_CSRXQ_CASE(TLBRELO0);
+ break;
+ GEN_CSRXQ_CASE(TLBRELO1);
+ break;
+ GEN_CSRXQ_CASE(TLBREHI);
+ break;
+ GEN_CSRXQ_CASE(TLBRPRMD);
+ break;
+ GEN_CSRXQ_CASE(ERRCTL);
+ break;
+ GEN_CSRXQ_CASE(ERRINFO);
+ break;
+ GEN_CSRXQ_CASE(ERRINFO1);
+ break;
+ GEN_CSRXQ_CASE(ERRENT);
+ break;
+ GEN_CSRXQ_CASE(ERRERA);
+ break;
+ GEN_CSRXQ_CASE(ERRSAVE);
+ break;
+ GEN_CSRXQ_CASE(CTAG);
+ break;
+ GEN_CSRXQ_CASE(DMWIN0);
+ break;
+ GEN_CSRXQ_CASE(DMWIN1);
+ break;
+ GEN_CSRXQ_CASE(DMWIN2);
+ break;
+ GEN_CSRXQ_CASE(DMWIN3);
+ break;
+ GEN_CSRXQ_CASE(PERFCTRL0);
+ break;
+ GEN_CSRXQ_CASE(PERFCNTR0);
+ break;
+ GEN_CSRXQ_CASE(PERFCTRL1);
+ break;
+ GEN_CSRXQ_CASE(PERFCNTR1);
+ break;
+ GEN_CSRXQ_CASE(PERFCTRL2);
+ break;
+ GEN_CSRXQ_CASE(PERFCNTR2);
+ break;
+ GEN_CSRXQ_CASE(PERFCTRL3);
+ break;
+ GEN_CSRXQ_CASE(PERFCNTR3);
+ break;
+ /* debug */
+ GEN_CSRXQ_CASE(MWPC);
+ break;
+ GEN_CSRXQ_CASE(MWPS);
+ break;
+ GEN_CSRXQ_CASE(DB0ADDR);
+ break;
+ GEN_CSRXQ_CASE(DB0MASK);
+ break;
+ GEN_CSRXQ_CASE(DB0CTL);
+ break;
+ GEN_CSRXQ_CASE(DB0ASID);
+ break;
+ GEN_CSRXQ_CASE(DB1ADDR);
+ break;
+ GEN_CSRXQ_CASE(DB1MASK);
+ break;
+ GEN_CSRXQ_CASE(DB1CTL);
+ break;
+ GEN_CSRXQ_CASE(DB1ASID);
+ break;
+ GEN_CSRXQ_CASE(DB2ADDR);
+ break;
+ GEN_CSRXQ_CASE(DB2MASK);
+ break;
+ GEN_CSRXQ_CASE(DB2CTL);
+ break;
+ GEN_CSRXQ_CASE(DB2ASID);
+ break;
+ GEN_CSRXQ_CASE(DB3ADDR);
+ break;
+ GEN_CSRXQ_CASE(DB3MASK);
+ break;
+ GEN_CSRXQ_CASE(DB3CTL);
+ break;
+ GEN_CSRXQ_CASE(DB3ASID);
+ break;
+ GEN_CSRXQ_CASE(FWPC);
+ break;
+ GEN_CSRXQ_CASE(FWPS);
+ break;
+ GEN_CSRXQ_CASE(IB0ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB0MASK);
+ break;
+ GEN_CSRXQ_CASE(IB0CTL);
+ break;
+ GEN_CSRXQ_CASE(IB0ASID);
+ break;
+ GEN_CSRXQ_CASE(IB1ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB1MASK);
+ break;
+ GEN_CSRXQ_CASE(IB1CTL);
+ break;
+ GEN_CSRXQ_CASE(IB1ASID);
+ break;
+ GEN_CSRXQ_CASE(IB2ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB2MASK);
+ break;
+ GEN_CSRXQ_CASE(IB2CTL);
+ break;
+ GEN_CSRXQ_CASE(IB2ASID);
+ break;
+ GEN_CSRXQ_CASE(IB3ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB3MASK);
+ break;
+ GEN_CSRXQ_CASE(IB3CTL);
+ break;
+ GEN_CSRXQ_CASE(IB3ASID);
+ break;
+ GEN_CSRXQ_CASE(IB4ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB4MASK);
+ break;
+ GEN_CSRXQ_CASE(IB4CTL);
+ break;
+ GEN_CSRXQ_CASE(IB4ASID);
+ break;
+ GEN_CSRXQ_CASE(IB5ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB5MASK);
+ break;
+ GEN_CSRXQ_CASE(IB5CTL);
+ break;
+ GEN_CSRXQ_CASE(IB5ASID);
+ break;
+ GEN_CSRXQ_CASE(IB6ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB6MASK);
+ break;
+ GEN_CSRXQ_CASE(IB6CTL);
+ break;
+ GEN_CSRXQ_CASE(IB6ASID);
+ break;
+ GEN_CSRXQ_CASE(IB7ADDR);
+ break;
+ GEN_CSRXQ_CASE(IB7MASK);
+ break;
+ GEN_CSRXQ_CASE(IB7CTL);
+ break;
+ GEN_CSRXQ_CASE(IB7ASID);
+ break;
+ GEN_CSRXQ_CASE(DEBUG);
+ break;
+ GEN_CSRXQ_CASE(DERA);
+ break;
+ GEN_CSRXQ_CASE(DESAVE);
+ break;
+ default:
+ return false;
+ }
+
+ #undef GEN_CSRXQ_CASE
+ tcg_temp_free(zero);
+ return true;
+}
+
+#endif
+
+/* CACOP: cache maintenance — caches are not modelled, so accept and
+ * do nothing. */
+static bool trans_cacop(DisasContext *ctx, arg_cacop *a)
+{
+ /* Treat as NOP. */
+ return true;
+}
+
+#ifdef CONFIG_USER_ONLY
+
+/*
+ * User-mode stubs: page-table walk and IOCSR instructions are
+ * privileged, so returning false presumably makes the decoder raise
+ * an illegal-instruction exception — confirm decoder behavior.
+ */
+static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a)
+{
+ return false;
+}
+
+static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
+{
+ return false;
+}
+
+static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a)
+{
+ return false;
+}
+
+static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a)
+{
+ return false;
+}
+
+static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a)
+{
+ return false;
+}
+
+static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a)
+{
+ return false;
+}
+
+static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a)
+{
+ return false;
+}
+
+static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a)
+{
+ return false;
+}
+
+static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a)
+{
+ return false;
+}
+
+static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a)
+{
+ return false;
+}
+#else
+
+/*
+ * LDPTE: load a page-table entry during software TLB refill; the work
+ * is done in the ldpte helper, which receives the register number,
+ * seq field and current memory index as constants.
+ */
+static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a)
+{
+ TCGv t0, t1;
+ TCGv_i32 t2;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->seq);
+ t2 = tcg_const_i32(ctx->mem_idx);
+ gen_helper_ldpte(cpu_env, t0, t1, t2);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(t2);
+
+ return true;
+}
+
+/*
+ * LDDIR: load a page-directory entry at the given level; delegated to
+ * the lddir helper with register numbers, level and memory index as
+ * constants.
+ */
+static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
+{
+ TCGv t0, t1, t2;
+ TCGv_i32 t3;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ t2 = tcg_const_tl(a->level);
+ t3 = tcg_const_i32(ctx->mem_idx);
+ gen_helper_lddir(cpu_env, t0, t1, t2, t3);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free_i32(t3);
+
+ return true;
+}
+
+/* NOTE(review): byte/halfword IOCSR reads are rejected even in system
+ * mode while the other widths are implemented — confirm whether this
+ * is intentional or simply not yet implemented. */
+static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a)
+{
+ return false;
+}
+
+static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a)
+{
+ return false;
+}
+
+/* IOCSRRD.W: 32-bit IOCSR read, performed by the iocsr helper (the
+ * register numbers are passed as constants). */
+static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a)
+{
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_W);
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(iocsr_op);
+ return true;
+}
+
+/* IOCSRRD.D: 64-bit IOCSR read via the iocsr helper. */
+static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a)
+{
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_D);
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(iocsr_op);
+ return true;
+}
+
+/* IOCSRWR.B: 8-bit IOCSR write via the iocsr helper. */
+static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a)
+{
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_B);
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(iocsr_op);
+ return true;
+}
+
+/* NOTE(review): halfword IOCSR write is rejected while b/w/d are
+ * implemented — confirm intentional. */
+static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a)
+{
+ return false;
+}
+
+/* IOCSRWR.W: 32-bit IOCSR write via the iocsr helper. */
+static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a)
+{
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_W);
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(iocsr_op);
+ return true;
+}
+
+/* IOCSRWR.D: 64-bit IOCSR write via the iocsr helper. */
+static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a)
+{
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_D);
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rj);
+ t1 = tcg_const_tl(a->rd);
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
+ /* Free the constant temporaries (previously leaked). */
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(iocsr_op);
+ return true;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+
+#ifdef CONFIG_USER_ONLY
+
+/* Stamp out a user-mode stub that rejects a privileged instruction. */
+#define GEN_FALSE_TRANS(name) \
+static bool trans_##name(DisasContext *ctx, arg_##name * a) \
+{ \
+ return false; \
+}
+
+/* TLB maintenance and exception return are privileged: reject them
+ * all in user mode. */
+GEN_FALSE_TRANS(tlbclr)
+GEN_FALSE_TRANS(invtlb)
+GEN_FALSE_TRANS(tlbflush)
+GEN_FALSE_TRANS(tlbsrch)
+GEN_FALSE_TRANS(tlbrd)
+GEN_FALSE_TRANS(tlbwr)
+GEN_FALSE_TRANS(tlbfill)
+GEN_FALSE_TRANS(ertn)
+
+#else
+
+/* TLB maintenance: all state lives in env, so these are plain helper
+ * calls with no operands. */
+static bool trans_tlbclr(DisasContext *ctx, arg_tlbclr *a)
+{
+ gen_helper_tlbclr(cpu_env);
+ return true;
+}
+
+static bool trans_tlbflush(DisasContext *ctx, arg_tlbflush *a)
+{
+ gen_helper_tlbflush(cpu_env);
+ return true;
+}
+
+/* INVTLB: targeted invalidation; address and info come from GPRs, the
+ * sub-operation is an immediate. */
+static bool trans_invtlb(DisasContext *ctx, arg_invtlb *a)
+{
+ TCGv addr = tcg_temp_new();
+ TCGv info = tcg_temp_new();
+ TCGv op = tcg_const_tl(a->invop);
+
+ gen_load_gpr(addr, a->addr);
+ gen_load_gpr(info, a->info);
+ gen_helper_invtlb(cpu_env, addr, info, op);
+
+ tcg_temp_free(addr);
+ tcg_temp_free(info);
+ tcg_temp_free(op);
+ return true;
+}
+
+static bool trans_tlbsrch(DisasContext *ctx, arg_tlbsrch *a)
+{
+ gen_helper_tlbsrch(cpu_env);
+ return true;
+}
+
+static bool trans_tlbrd(DisasContext *ctx, arg_tlbrd *a)
+{
+ gen_helper_tlbrd(cpu_env);
+ return true;
+}
+
+static bool trans_tlbwr(DisasContext *ctx, arg_tlbwr *a)
+{
+ gen_helper_tlbwr(cpu_env);
+ return true;
+}
+
+static bool trans_tlbfill(DisasContext *ctx, arg_tlbfill *a)
+{
+ gen_helper_tlbfill(cpu_env);
+ return true;
+}
+
+/* ERTN: exception return.  The helper rewrites privilege state and
+ * PC, so translation must stop and re-enter the main loop. */
+static bool trans_ertn(DisasContext *ctx, arg_ertn *a)
+{
+ gen_helper_ertn(cpu_env);
+ ctx->base.is_jmp = DISAS_EXIT;
+ return true;
+}
+
+#endif /* CONFIG_USER_ONLY */
+
+/* IDLE: wait for interrupt. */
+static bool trans_idle(DisasContext *ctx, arg_idle *a)
+{
+ /*
+ * pc_next is temporarily advanced past this instruction while the
+ * CPU state is saved — presumably so the saved PC resumes after
+ * IDLE on wakeup — then restored (TODO confirm against
+ * save_cpu_state semantics).
+ */
+ ctx->base.pc_next += 4;
+ save_cpu_state(ctx, 1);
+ ctx->base.pc_next -= 4;
+ gen_helper_idle(cpu_env);
+ /* The helper halts the CPU; nothing after it can execute. */
+ ctx->base.is_jmp = DISAS_NORETURN;
+ return true;
+}
+
+#ifdef CONFIG_USER_ONLY
+
+/* RDTIME.D is accepted but ignored in user mode. */
+static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a)
+{
+ /* Nop */
+ return true;
+}
+
+#else
+
+/* RDTIME.D: read the stable counter (and counter ID) via helper. */
+static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a)
+{
+ TCGv t0, t1;
+ t0 = tcg_const_tl(a->rd);
+ t1 = tcg_const_tl(a->rj);
+ gen_helper_drdtime(cpu_env, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return true;
+}
+
+#endif
+
+/* CPUCFG: read CPU configuration word selected by GR[rj]. */
+static bool trans_cpucfg(DisasContext *ctx, arg_cpucfg *a)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, a->rj);
+ gen_helper_cpucfg(cpu_gpr[a->rd], cpu_env, t0);
+ tcg_temp_free(t0);
+ return true;
+}
diff --git a/target/loongarch64/translate.c b/target/loongarch64/translate.c
new file mode 100644
index 0000000000000000000000000000000000000000..fe122e4c31ddc3754b4a4c1ec9446924aa9ccef8
--- /dev/null
+++ b/target/loongarch64/translate.c
@@ -0,0 +1,2892 @@
+/*
+ * LOONGARCH emulation for QEMU - main translation routines
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg/tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "hw/loongarch/cpudevs.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "semihosting/semihost.h"
+
+#include "trace-tcg.h"
+#include "exec/translator.h"
+#include "exec/log.h"
+
+#include "instmap.h"
+
+/* Set to 1 to enable the LOG_DISAS/LARCH_INVAL debug logging below. */
+#define LARCH_DEBUG_DISAS 0
+
+/* Values for the fmt field in FP instructions */
+enum {
+    /* 0 - 15 are reserved */
+    FMT_S = 16, /* single fp */
+    FMT_D = 17, /* double fp */
+};
+
+/* TCG globals backing guest CPU state (allocated once at startup). */
+static TCGv cpu_gpr[32], cpu_PC; /* general registers and PC */
+static TCGv btarget, bcond; /* pending branch target and condition */
+static TCGv cpu_lladdr, cpu_llval; /* LL/SC linked address and value */
+static TCGv_i32 hflags; /* mirror of the CPU hflags word */
+static TCGv_i32 fpu_fcsr0; /* fcsr0 (FP control/status) */
+static TCGv_i64 fpu_f64[32]; /* FP registers, 64-bit backing */
+
+/*
+ * Helper-call wrappers. Naming scheme: <R>e<N>i means the helper takes
+ * R result operands (before cpu_env) and the last argument is a constant
+ * that is materialized as a temporary i32 for the call and freed after.
+ */
+#define gen_helper_0e0i(name, arg) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ gen_helper_##name(cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_0e1i(name, arg1, arg2) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
+ gen_helper_##name(cpu_env, arg1, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_1e0i(name, ret, arg1) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg1); \
+ gen_helper_##name(ret, cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_1e1i(name, ret, arg1, arg2) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
+ gen_helper_##name(ret, cpu_env, arg1, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
+ gen_helper_##name(cpu_env, arg1, arg2, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
+ gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg4); \
+ gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+/* Per-TB translation state, extending the common DisasContextBase. */
+typedef struct DisasContext {
+ DisasContextBase base;
+ target_ulong saved_pc; /* last PC value synced to cpu_PC */
+ target_ulong page_start;
+ uint32_t opcode;
+ uint64_t insn_flags; /* ISA feature bits, tested by check_insn() */
+ /* Routine used to access memory */
+ int mem_idx;
+ MemOp default_tcg_memop_mask;
+ uint32_t hflags, saved_hflags; /* live hflags and last value synced */
+ target_ulong btarget; /* pending branch target (LARCH_HFLAG_BMASK) */
+} DisasContext;
+
+/* Target-specific TB termination reasons. */
+#define DISAS_STOP DISAS_TARGET_0
+#define DISAS_EXIT DISAS_TARGET_1
+
+/* Disassembly-time trace output; compiled out unless LARCH_DEBUG_DISAS. */
+#define LOG_DISAS(...) \
+ do { \
+ if (LARCH_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+/* Log an invalid-opcode encounter (debug builds only). */
+#define LARCH_INVAL(op) \
+ do { \
+ if (LARCH_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, \
+ TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \
+ ctx->base.pc_next, ctx->opcode, op, \
+ ctx->opcode >> 26, ctx->opcode & 0x3F, \
+ ((ctx->opcode >> 16) & 0x1F)); \
+ } \
+ } while (0)
+
+/* General purpose registers moves. */
+/* Copy general register 'reg' into temp 't'; $r0 always reads as zero. */
+static inline void gen_load_gpr(TCGv t, int reg)
+{
+ if (reg != 0) {
+ tcg_gen_mov_tl(t, cpu_gpr[reg]);
+ } else {
+ tcg_gen_movi_tl(t, 0);
+ }
+}
+
+/* Write temp 't' to general register 'reg'; writes to $r0 are discarded. */
+static inline void gen_store_gpr(TCGv t, int reg)
+{
+ if (reg == 0) {
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[reg], t);
+}
+
+/* Write an immediate PC value into the cpu_PC global. */
+static inline void gen_save_pc(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_PC, pc);
+}
+
+/*
+ * Sync lazily-tracked state (PC, hflags, pending branch target) back
+ * into the CPU globals. Only values that changed since the last sync
+ * are written; called before helper calls and exceptions.
+ */
+static inline void save_cpu_state(DisasContext *ctx, int do_save_pc)
+{
+ LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
+ if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) {
+ gen_save_pc(ctx->base.pc_next);
+ ctx->saved_pc = ctx->base.pc_next;
+ }
+ if (ctx->hflags != ctx->saved_hflags) {
+ tcg_gen_movi_i32(hflags, ctx->hflags);
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & LARCH_HFLAG_BMASK) {
+ case LARCH_HFLAG_BR:
+ break;
+ case LARCH_HFLAG_BC:
+ case LARCH_HFLAG_B:
+ /* Branch target is known at translate time for these kinds. */
+ tcg_gen_movi_tl(btarget, ctx->btarget);
+ break;
+ }
+ }
+}
+
+/*
+ * Reload translation-time copies from the CPU state, the inverse of
+ * save_cpu_state(); used when starting translation with env up to date.
+ */
+static inline void restore_cpu_state(CPULOONGARCHState *env, DisasContext *ctx)
+{
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & LARCH_HFLAG_BMASK) {
+ case LARCH_HFLAG_BR:
+ break;
+ case LARCH_HFLAG_BC:
+ case LARCH_HFLAG_B:
+ ctx->btarget = env->btarget;
+ break;
+ }
+}
+
+/*
+ * Raise guest exception 'excp' with error code 'err'. CPU state is
+ * synced first; the helper does not return, so the TB ends with
+ * DISAS_NORETURN.
+ */
+static inline void generate_exception_err(DisasContext *ctx, int excp, int err)
+{
+ TCGv_i32 texcp = tcg_const_i32(excp);
+ TCGv_i32 terr = tcg_const_i32(err);
+ save_cpu_state(ctx, 1);
+ gen_helper_raise_exception_err(cpu_env, texcp, terr);
+ tcg_temp_free_i32(terr);
+ tcg_temp_free_i32(texcp);
+ ctx->base.is_jmp = DISAS_NORETURN;
+}
+
+/* Raise guest exception 'excp' with a zero error code. */
+static inline void generate_exception_end(DisasContext *ctx, int excp)
+{
+ generate_exception_err(ctx, excp, 0);
+}
+
+/* Floating point register moves. */
+/* Read the low 32 bits of FP register 'reg'. */
+static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
+}
+
+/* Write the low 32 bits of FP register 'reg'; the high half is kept. */
+static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ TCGv_i64 t64;
+ t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
+ tcg_temp_free_i64(t64);
+}
+
+/* Read the high 32 bits of FP register 'reg'. */
+static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ tcg_gen_extrh_i64_i32(t, fpu_f64[reg]);
+}
+
+/* Write the high 32 bits of FP register 'reg'; the low half is kept. */
+static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
+ tcg_temp_free_i64(t64);
+}
+
+/* Read the full 64-bit FP register 'reg'. */
+static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ tcg_gen_mov_i64(t, fpu_f64[reg]);
+}
+
+/* Write the full 64-bit FP register 'reg'. */
+static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ tcg_gen_mov_i64(fpu_f64[reg], t);
+}
+
+/*
+ * Bit position of FP condition code 'cc': cc0 is bit 23, cc1..cc7
+ * occupy bits 25..31.
+ */
+static inline int get_fp_bit(int cc)
+{
+ return cc ? cc + 24 : 23;
+}
+
+/* Addresses computation */
+/*
+ * ret = arg0 + arg1. When 32-bit address wrapping is enabled
+ * (LARCH_HFLAG_AWRAP) the result is sign-extended to 32 bits.
+ */
+static inline void gen_op_addr_add(DisasContext *ctx,
+ TCGv ret, TCGv arg0, TCGv arg1)
+{
+ tcg_gen_add_tl(ret, arg0, arg1);
+
+ if (ctx->hflags & LARCH_HFLAG_AWRAP) {
+ tcg_gen_ext32s_i64(ret, ret);
+ }
+}
+
+/* Immediate-offset variant of gen_op_addr_add(). */
+static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base,
+ target_long ofs)
+{
+ tcg_gen_addi_tl(ret, base, ofs);
+
+ if (ctx->hflags & LARCH_HFLAG_AWRAP) {
+ tcg_gen_ext32s_i64(ret, ret);
+ }
+}
+
+/* Sign-extract the low 32-bits to a target_long. */
+/* Sign-extract the low 32 bits of 'arg' into a target_long. */
+static inline void gen_move_low32(TCGv ret, TCGv_i64 arg)
+{
+ tcg_gen_ext32s_i64(ret, arg);
+}
+
+/* Sign-extract the high 32 bits of 'arg' into a target_long. */
+static inline void gen_move_high32(TCGv ret, TCGv_i64 arg)
+{
+ tcg_gen_sari_i64(ret, arg, 32);
+}
+
+/*
+ * Unit-enable checks. Each raises the matching "unit disabled"
+ * exception when the corresponding hflag is clear; in user-only builds
+ * all units are assumed enabled and the checks compile to nothing.
+ */
+
+/* FPU enabled? Raises EXCP_FPDIS otherwise. */
+static inline void check_cp1_enabled(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!(ctx->hflags & LARCH_HFLAG_FPU))) {
+ generate_exception_err(ctx, EXCP_FPDIS, 1);
+ }
+#endif
+}
+
+/* LSX (128-bit SIMD) enabled? Raises EXCP_LSXDIS otherwise. */
+static inline void check_lsx_enabled(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!(ctx->hflags & LARCH_HFLAG_LSX))) {
+ generate_exception_err(ctx, EXCP_LSXDIS, 1);
+ }
+#endif
+}
+
+/* LASX (256-bit SIMD) enabled? Raises EXCP_LASXDIS otherwise. */
+static inline void check_lasx_enabled(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!(ctx->hflags & LARCH_HFLAG_LASX))) {
+ generate_exception_err(ctx, EXCP_LASXDIS, 1);
+ }
+#endif
+}
+
+/* LBT (binary translation extension) enabled? Raises EXCP_BTDIS otherwise. */
+static inline void check_lbt_enabled(DisasContext *ctx)
+{
+#ifndef CONFIG_USER_ONLY
+ if (unlikely(!(ctx->hflags & LARCH_HFLAG_LBT))) {
+ generate_exception_err(ctx, EXCP_BTDIS, 1);
+ }
+#endif
+}
+
+/* This code generates a "reserved instruction" exception if the
+ CPU does not support the instruction set corresponding to flags. */
+/* Raise EXCP_RI unless the CPU supports all ISA bits in 'flags'. */
+static inline void check_insn(DisasContext *ctx, uint64_t flags)
+{
+ if (unlikely(!(ctx->insn_flags & flags))) {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+}
+
+/* Raise EXCP_RI if any bit in 'flags' marks the instruction as removed. */
+static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags)
+{
+ if (unlikely(ctx->insn_flags & flags)) {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+}
+
+/*
+ * The Linux kernel traps certain reserved instruction exceptions to
+ * emulate the corresponding instructions. QEMU is the kernel in user
+ * mode, so those traps are emulated by accepting the instructions.
+ *
+ * A reserved instruction exception is generated for flagged CPUs if
+ * QEMU runs in system mode.
+ */
+static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags)
+{
+#ifndef CONFIG_USER_ONLY
+ check_insn_opc_removed(ctx, flags);
+#endif
+}
+
+/* Raise EXCP_RI unless 64-bit instructions are enabled (LARCH_HFLAG_64). */
+static inline void check_larch_64(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & LARCH_HFLAG_64))) {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+}
+
+/* Define small wrappers for gen_load_fpr* so that we have a uniform
+ calling interface for 32 and 64-bit FPRs. No sense in changing
+ all callers for gen_load_fpr32 when we need the CTX parameter for
+ this one use. */
+#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y)
+#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y)
+/*
+ * Emit an FP compare: load fs/ft, dispatch on condition number 'n' to
+ * the matching cmp helper (af/saf/lt/slt/... pairs are quiet/signaling
+ * variants), then STORE moves the result into condition flag 'cd'.
+ * Condition numbers with no case are invalid encodings and abort().
+ */
+#define FCOP_CONDNS(fmt, ifmt, bits, STORE) \
+static inline void gen_fcmp_ ## fmt(DisasContext *ctx, int n, \
+ int ft, int fs, int cd) \
+{ \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ TCGv_i32 fcc = tcg_const_i32(cd); \
+ check_cp1_enabled(ctx); \
+ gen_ldcmp_fpr ## bits(ctx, fp0, fs); \
+ gen_ldcmp_fpr ## bits(ctx, fp1, ft); \
+ switch (n) { \
+ case 0: \
+ gen_helper_cmp_ ## fmt ## _af(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 1: \
+ gen_helper_cmp_ ## fmt ## _saf(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 2: \
+ gen_helper_cmp_ ## fmt ## _lt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 3: \
+ gen_helper_cmp_ ## fmt ## _slt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 4: \
+ gen_helper_cmp_ ## fmt ## _eq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 5: \
+ gen_helper_cmp_ ## fmt ## _seq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 6: \
+ gen_helper_cmp_ ## fmt ## _le(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 7: \
+ gen_helper_cmp_ ## fmt ## _sle(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 8: \
+ gen_helper_cmp_ ## fmt ## _un(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 9: \
+ gen_helper_cmp_ ## fmt ## _sun(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 10: \
+ gen_helper_cmp_ ## fmt ## _ult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 11: \
+ gen_helper_cmp_ ## fmt ## _sult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 12: \
+ gen_helper_cmp_ ## fmt ## _ueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 13: \
+ gen_helper_cmp_ ## fmt ## _sueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 14: \
+ gen_helper_cmp_ ## fmt ## _ule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 15: \
+ gen_helper_cmp_ ## fmt ## _sule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 16: \
+ gen_helper_cmp_ ## fmt ## _ne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 17: \
+ gen_helper_cmp_ ## fmt ## _sne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 20: \
+ gen_helper_cmp_ ## fmt ## _or(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 21: \
+ gen_helper_cmp_ ## fmt ## _sor(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 24: \
+ gen_helper_cmp_ ## fmt ## _une(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 25: \
+ gen_helper_cmp_ ## fmt ## _sune(fp0, cpu_env, fp0, fp1); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ STORE; \
+ tcg_temp_free_i ## bits(fp0); \
+ tcg_temp_free_i ## bits(fp1); \
+ tcg_temp_free_i32(fcc); \
+}
+
+FCOP_CONDNS(d, FMT_D, 64, gen_helper_movreg2cf_i64(cpu_env, fcc, fp0))
+FCOP_CONDNS(s, FMT_S, 32, gen_helper_movreg2cf_i32(cpu_env, fcc, fp0))
+#undef FCOP_CONDNS
+#undef gen_ldcmp_fpr32
+#undef gen_ldcmp_fpr64
+
+/* load/store instructions. */
+/*
+ * Linked-load (LL) emitters: in user mode the load is emulated inline
+ * and lladdr/llval are recorded directly in env; in system mode a
+ * helper does the work. NOTE(review): the instantiations below are
+ * disabled with #if 0, so no op_ld_ll/op_ld_lld is currently defined —
+ * see the LL.W/LL.D cases in gen_ld().
+ */
+#ifdef CONFIG_USER_ONLY
+#define OP_LD_ATOMIC(insn, fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
+ DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_mov_tl(t0, arg1); \
+ tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
+ tcg_gen_st_tl(t0, cpu_env, \
+ offsetof(CPULOONGARCHState, lladdr)); \
+ tcg_gen_st_tl(ret, cpu_env, \
+ offsetof(CPULOONGARCHState, llval)); \
+ tcg_temp_free(t0); \
+}
+#else
+#define OP_LD_ATOMIC(insn, fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \
+ DisasContext *ctx) \
+{ \
+ gen_helper_1e1i(insn, ret, arg1, mem_idx); \
+}
+#endif
+#if 0
+OP_LD_ATOMIC(ll, ld32s);
+OP_LD_ATOMIC(lld, ld64);
+#endif
+#undef OP_LD_ATOMIC
+
+/*
+ * addr = GPR[base] + offset, folding the base==0 and offset==0 special
+ * cases down to a single move/load.
+ */
+static void gen_base_offset_addr(DisasContext *ctx, TCGv addr,
+ int base, int offset)
+{
+ if (base == 0) {
+ tcg_gen_movi_tl(addr, offset);
+ return;
+ }
+ if (offset == 0) {
+ gen_load_gpr(addr, base);
+ return;
+ }
+ tcg_gen_movi_tl(addr, offset);
+ gen_op_addr_add(ctx, addr, cpu_gpr[base], addr);
+}
+
+/* Load */
+/*
+ * Integer loads: rt = mem[GPR[base] + offset], with width/signedness
+ * selected by opc. NOTE(review): the LL.W/LL.D paths have their
+ * op_ld_ll/op_ld_lld calls #if 0'd, so they currently store the
+ * computed *address* into rt instead of a loaded value and never set
+ * lladdr/llval — confirm LL/SC support is completed elsewhere.
+ */
+static void gen_ld(DisasContext *ctx, uint32_t opc,
+ int rt, int base, int offset)
+{
+ TCGv t0;
+ int mem_idx = ctx->mem_idx;
+
+ t0 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ switch (opc) {
+ case OPC_LARCH_LD_WU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LDPTR_D:
+ case OPC_LARCH_LD_D:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LL_D:
+#if 0
+ op_ld_lld(t0, t0, mem_idx, ctx);
+#endif
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LDPTR_W:
+ case OPC_LARCH_LD_W:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LD_H:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TESW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LD_HU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LD_B:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_SB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LD_BU:
+ tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_UB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LARCH_LL_W:
+#if 0
+ op_ld_ll(t0, t0, mem_idx, ctx);
+#endif
+ gen_store_gpr(t0, rt);
+ break;
+ }
+
+ tcg_temp_free(t0);
+}
+
+/* Store */
+/* Integer stores: mem[GPR[base] + offset] = rt, width selected by opc. */
+static void gen_st(DisasContext *ctx, uint32_t opc, int rt,
+ int base, int offset)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ int mem_idx = ctx->mem_idx;
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case OPC_LARCH_STPTR_D:
+ case OPC_LARCH_ST_D:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_LARCH_STPTR_W:
+ case OPC_LARCH_ST_W:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_LARCH_ST_H:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_LARCH_ST_B:
+ tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Store conditional */
+/*
+ * Store-conditional (SC): succeeds only if the address matches the one
+ * recorded by the preceding LL. On mismatch rt is set to 0; otherwise a
+ * cmpxchg against the remembered llval decides success, and rt receives
+ * the 0/1 outcome.
+ */
+static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
+ MemOp tcg_mo, bool eva)
+{
+ TCGv addr, t0, val;
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *done = gen_new_label();
+
+ t0 = tcg_temp_new();
+ addr = tcg_temp_new();
+ /* compare the address against that of the preceding LL */
+ gen_base_offset_addr(ctx, addr, base, offset);
+ tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
+ tcg_temp_free(addr);
+ tcg_gen_movi_tl(t0, 0);
+ gen_store_gpr(t0, rt);
+ tcg_gen_br(done);
+
+ gen_set_label(l1);
+ /* generate cmpxchg */
+ val = tcg_temp_new();
+ gen_load_gpr(val, rt);
+ tcg_gen_atomic_cmpxchg_tl(t0, cpu_lladdr, cpu_llval, val,
+ eva ? LARCH_HFLAG_UM : ctx->mem_idx, tcg_mo);
+ tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(val);
+
+ gen_set_label(done);
+ tcg_temp_free(t0);
+}
+
+/* Load and store */
+/*
+ * FP load/store with the address already computed in t0. Width (32/64)
+ * and direction are selected by opc.
+ */
+static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft,
+ TCGv t0)
+{
+    /* Don't do NOP if destination is zero: we must perform the actual
+       memory access. */
+ switch (opc) {
+ case OPC_LARCH_FLD_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr32(ctx, fp0, ft);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LARCH_FST_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, ft);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LARCH_FLD_D:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr64(ctx, fp0, ft);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_LARCH_FST_D:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, ft);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ default:
+ LARCH_INVAL("flt_ldst");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+/* FP load/store from GPR[rs] + imm; checks the FPU is enabled first. */
+static void gen_fp_ldst(DisasContext *ctx, uint32_t op, int rt,
+ int rs, int16_t imm)
+{
+ TCGv t0 = tcg_temp_new();
+
+ check_cp1_enabled(ctx);
+ gen_base_offset_addr(ctx, t0, rs, imm);
+ gen_flt_ldst(ctx, op, rt, t0);
+ tcg_temp_free(t0);
+}
+
+/* Arithmetic with immediate operand */
+/*
+ * addi.w / addi.d with a sign-extended immediate. The .w form keeps
+ * the result sign-extended to 32 bits; neither form traps on overflow.
+ */
+static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+
+ if (rt == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+ switch (opc) {
+ case OPC_LARCH_ADDI_W:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ case OPC_LARCH_ADDI_D:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ }
+}
+
+/* Logic with immediate operand */
+/*
+ * andi/ori/xori with a zero-extended immediate. rt == 0 is a NOP;
+ * rs == 0 reads as zero, so the result is folded to a constant move.
+ */
+static void gen_logic_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (uint16_t)imm;
+
+ if (rt == 0) {
+ /* No destination: nothing to do. */
+ return;
+ }
+ switch (opc) {
+ case OPC_LARCH_ANDI:
+ if (rs == 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], 0);
+ } else {
+ tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ }
+ break;
+ case OPC_LARCH_ORI:
+ if (rs == 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ } else {
+ tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ }
+ break;
+ case OPC_LARCH_XORI:
+ if (rs == 0) {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ } else {
+ tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/* Set on less than with immediate operand */
+/* slti/sltui: rt = (rs < imm) with a sign-extended immediate. */
+static void gen_slt_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+ TCGv t0;
+
+ if (rt == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_LARCH_SLTI:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_LARCH_SLTIU:
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Shifts with immediate operand */
+/*
+ * 32-bit shifts by an immediate (amount masked to 5 bits). Results are
+ * kept sign-extended to 32 bits. NOTE(review): SRAI_W shifts rs as-is —
+ * assumes rs already holds a sign-extended 32-bit value, per the .w
+ * register convention; confirm callers guarantee this.
+ */
+static void gen_shift_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = ((uint16_t)imm) & 0x1f;
+ TCGv t0;
+
+ if (rt == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_LARCH_SRAI_W:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_LARCH_SRLI_W:
+ if (uimm != 0) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_LARCH_ROTRI_W:
+ if (uimm != 0) {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_rotri_i32(t1, t1, uimm);
+ tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
+ tcg_temp_free_i32(t1);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Arithmetic */
+/*
+ * add/sub, .w and .d forms. $r0 sources are constant-folded; .w results
+ * are sign-extended to 32 bits. None of these variants trap on overflow.
+ */
+static void gen_arith(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_LARCH_ADD_W:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_LARCH_SUB_W:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_LARCH_ADD_D:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_LARCH_SUB_D:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Conditional move */
+/*
+ * maskeqz/masknez: conditional zeroing moves.
+ *   maskeqz: rd = (rt != 0) ? rs : 0
+ *   masknez: rd = (rt == 0) ? rs : 0
+ */
+static void gen_cond_move(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv cond, zero, src;
+
+ if (rd == 0) {
+ /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ cond = tcg_temp_new();
+ src = tcg_temp_new();
+ zero = tcg_const_tl(0);
+ gen_load_gpr(cond, rt);
+ gen_load_gpr(src, rs);
+ switch (opc) {
+ case OPC_LARCH_MASKEQZ:
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], cond, zero, src, zero);
+ break;
+ case OPC_LARCH_MASKNEZ:
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], cond, zero, src, zero);
+ break;
+ }
+ tcg_temp_free(zero);
+ tcg_temp_free(src);
+ tcg_temp_free(cond);
+}
+
+/* Logic */
+/* and/nor/or/xor with $r0 sources folded to constants at translate time. */
+static void gen_logic(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_LARCH_AND:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_LARCH_NOR:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+            /* nor($r0, $r0) == all ones */
+ tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
+ }
+ break;
+ case OPC_LARCH_OR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_LARCH_XOR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Set on lower than */
+/* slt/sltu: rd = (rs < rt), signed or unsigned comparison. */
+static void gen_slt(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv lhs, rhs;
+
+ if (rd == 0) {
+ /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ lhs = tcg_temp_new();
+ rhs = tcg_temp_new();
+ gen_load_gpr(lhs, rs);
+ gen_load_gpr(rhs, rt);
+ switch (opc) {
+ case OPC_LARCH_SLT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], lhs, rhs);
+ break;
+ case OPC_LARCH_SLTU:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], lhs, rhs);
+ break;
+ }
+ tcg_temp_free(rhs);
+ tcg_temp_free(lhs);
+}
+
+/* Shifts */
+/*
+ * Register-amount shifts and rotates: the amount in rs is masked to
+ * 5 bits (.w) or 6 bits (.d); .w results are sign-extended to 32 bits.
+ */
+static void gen_shift(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+        /* Writes to $r0 are NOPs. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_LARCH_SLL_W:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shl_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_LARCH_SRA_W:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_LARCH_SRL_W:
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_LARCH_ROTR_W:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_andi_i32(t2, t2, 0x1f);
+ tcg_gen_rotr_i32(t2, t3, t2);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_LARCH_SLL_D:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_LARCH_SRA_D:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_LARCH_SRL_D:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_LARCH_ROTR_D:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Load from a constant address into GPR 'reg' with the given memop. */
+static inline void gen_r6_ld(target_long addr, int reg, int memidx,
+ MemOp memop)
+{
+ TCGv t0 = tcg_const_tl(addr);
+ tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ gen_store_gpr(t0, reg);
+ tcg_temp_free(t0);
+}
+
+/*
+ * Multiply/divide/modulo family. For the divide/modulo ops the divisor
+ * is replaced with 1 (via movcond) when it is zero, or when the pair is
+ * INT_MIN / -1 for the signed forms, so the host-side TCG div/rem op
+ * never sees an input that would be undefined behavior on the host.
+ * NOTE(review): the guest-visible result for those inputs is therefore
+ * the dividend — assumed acceptable because the architecture leaves
+ * these results unpredictable; confirm against the ISA manual.
+ */
+static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+        /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case OPC_LARCH_DIV_W:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+            /* t2 = (t0 == INT_MIN && t1 == -1) || t1 == 0 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MOD_W:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_DIV_WU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+            /* Replace a zero divisor with 1. */
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MOD_WU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MUL_W:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mul_i32(t2, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_LARCH_MULH_W:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+            /* High half of the 32x32 signed product. */
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_LARCH_MULH_WU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_LARCH_DIV_D:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MOD_D:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_DIV_DU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MOD_DU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MUL_D:
+ tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_LARCH_MULH_D:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_LARCH_MULH_DU:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ default:
+ LARCH_INVAL("r6 mul/div");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_cl(DisasContext *ctx, uint32_t opc,
+ int rd, int rs)
+{
+ TCGv t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = cpu_gpr[rd];
+ gen_load_gpr(t0, rs);
+
+ switch (opc) {
+ case OPC_LARCH_CLO_W:
+ case OPC_LARCH_CLO_D:
+ tcg_gen_not_tl(t0, t0);
+ break;
+ }
+
+ switch (opc) {
+ case OPC_LARCH_CLO_W:
+ case OPC_LARCH_CLZ_W:
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_clzi_tl(t0, t0, TARGET_LONG_BITS);
+ tcg_gen_subi_tl(t0, t0, TARGET_LONG_BITS - 32);
+ break;
+ case OPC_LARCH_CLO_D:
+ case OPC_LARCH_CLZ_D:
+ tcg_gen_clzi_i64(t0, t0, 64);
+ break;
+ }
+}
+
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->base.singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
/*
 * Emit a jump to guest address @dest, using direct TB chaining (slot @n)
 * when use_goto_tb() allows it, otherwise an indirect TB lookup.
 */
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Direct chain: goto_tb must come before the PC update and exit. */
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        /* Indirect: update PC, then look the destination TB up at runtime. */
        gen_save_pc(dest);
        if (ctx->base.singlestep_enabled) {
            save_cpu_state(ctx, 0);
            gen_helper_raise_exception_debug(cpu_env);
        }
        tcg_gen_lookup_and_goto_ptr();
    }
}
+
/* Branches */
/*
 * Decode a (conditional) branch: load its operands, compute the branch
 * condition into `bcond` where needed, record the target in ctx->btarget
 * and set the LARCH_HFLAG_B/BC hflags that gen_branch() later consumes.
 * A branch inside another branch's delay/condition window raises RI.
 */
static void gen_compute_branch(DisasContext *ctx, uint32_t opc,
                               int insn_bytes,
                               int rs, int rt, int32_t offset)
{
    target_ulong btgt = -1;
    int bcond_compute = 0;
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    if (ctx->hflags & LARCH_HFLAG_BMASK) {
        /* Already inside a branch: nested branches are invalid. */
#ifdef LARCH_DEBUG_DISAS
        LOG_DISAS("Branch at PC 0x"
                  TARGET_FMT_lx "\n", ctx->base.pc_next);
#endif
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

    /* Load needed operands */
    switch (opc) {
    case OPC_LARCH_BLT:
    case OPC_LARCH_BGE:
    case OPC_LARCH_BLTU:
    case OPC_LARCH_BGEU:
        gen_load_gpr(t0, rs);
        gen_load_gpr(t1, rt);
        bcond_compute = 1;
        btgt = ctx->base.pc_next + offset;
        break;
    case OPC_LARCH_BEQZ:
    case OPC_LARCH_B:
    case OPC_LARCH_BEQ:
    case OPC_LARCH_BNEZ:
    case OPC_LARCH_BNE:
        /* Compare two registers */
        if (rs != rt) {
            gen_load_gpr(t0, rs);
            gen_load_gpr(t1, rt);
            bcond_compute = 1;
        }
        /* rs == rt: condition is statically known, handled below. */
        btgt = ctx->base.pc_next + offset;
        break;
    default:
        LARCH_INVAL("branch/jump");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }
    if (bcond_compute == 0) {
        /* No condition to be computed */
        switch (opc) {
        case OPC_LARCH_BEQZ: /* rx == rx */
        case OPC_LARCH_B:
        case OPC_LARCH_BEQ:
            /* Always take */
            ctx->hflags |= LARCH_HFLAG_B;
            break;
        case OPC_LARCH_BNEZ:
        case OPC_LARCH_BNE:
            /* rx != rx is never true: fall through. */
            /* Treat as NOP. */
            goto out;
        default:
            LARCH_INVAL("branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }
    } else {
        /* Runtime condition: evaluate it into the global `bcond`. */
        switch (opc) {
        case OPC_LARCH_BLT:
            tcg_gen_setcond_tl(TCG_COND_LT, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BGE:
            tcg_gen_setcond_tl(TCG_COND_GE, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BLTU:
            tcg_gen_setcond_tl(TCG_COND_LTU, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BGEU:
            tcg_gen_setcond_tl(TCG_COND_GEU, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BEQZ:
        case OPC_LARCH_B:
        case OPC_LARCH_BEQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BNEZ:
        case OPC_LARCH_BNE:
            tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
            goto not_likely;
        not_likely:
            ctx->hflags |= LARCH_HFLAG_BC;
            break;
        default:
            LARCH_INVAL("conditional branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }
    }

    ctx->btarget = btgt;

 out:
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
+
/* special3 bitfield operations */
/*
 * Bitfield extract (TRPICK.W) and insert (TRINS.W) on 32-bit values.
 * @lsb/@msb delimit the field; malformed encodings raise RI via the
 * `fail` path inside the default case.
 */
static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt,
                       int rs, int lsb, int msb)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    gen_load_gpr(t1, rs);
    switch (opc) {
    case OPC_LARCH_TRPICK_W:
        if (lsb + msb > 31) {
            goto fail;
        }
        if (msb != 31) {
            tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
        } else {
            /*
             * The two checks together imply that lsb == 0,
             * so this is a simple sign-extension.
             */
            tcg_gen_ext32s_tl(t0, t1);
        }
        break;
    case OPC_LARCH_TRINS_W:
        if (lsb > msb) {
            goto fail;
        }
        /* Insert rs<msb-lsb:0> into rt<msb:lsb>, then sign-extend. */
        gen_load_gpr(t0, rt);
        tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
        tcg_gen_ext32s_tl(t0, t0);
        break;
    default:
fail:
        LARCH_INVAL("bitops");
        generate_exception_end(ctx, EXCP_RI);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        return;
    }
    gen_store_gpr(t0, rt);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
+
/*
 * Byte/halfword shuffle operations: byte-reverse within halfwords
 * (REVB.2H/REVB.4H), halfword-reverse within the doubleword (REVH.D),
 * and sub-word sign extensions (EXT.WB/EXT.WH). rt is the source,
 * rd the destination; unknown opcodes raise RI.
 */
static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
{
    TCGv t0;

    if (rd == 0) {
        /* If no destination, treat it as a NOP. */
        return;
    }

    t0 = tcg_temp_new();
    gen_load_gpr(t0, rt);
    switch (op2) {
    case OPC_LARCH_REVB_2H:
        /* Swap the two bytes of each 16-bit half of the low word. */
        {
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_const_tl(0x00FF00FF);

            tcg_gen_shri_tl(t1, t0, 8);
            tcg_gen_and_tl(t1, t1, t2);
            tcg_gen_and_tl(t0, t0, t2);
            tcg_gen_shli_tl(t0, t0, 8);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
        }
        break;
    case OPC_LARCH_EXT_WB:
        tcg_gen_ext8s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_EXT_WH:
        tcg_gen_ext16s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_REVB_4H:
        /* Swap the two bytes of each of the four 16-bit lanes. */
        {
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL);

            tcg_gen_shri_tl(t1, t0, 8);
            tcg_gen_and_tl(t1, t1, t2);
            tcg_gen_and_tl(t0, t0, t2);
            tcg_gen_shli_tl(t0, t0, 8);
            tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
        }
        break;
    case OPC_LARCH_REVH_D:
        /* Reverse the four 16-bit lanes: swap within words, then words. */
        {
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL);

            tcg_gen_shri_tl(t1, t0, 16);
            tcg_gen_and_tl(t1, t1, t2);
            tcg_gen_and_tl(t0, t0, t2);
            tcg_gen_shli_tl(t0, t0, 16);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_shri_tl(t1, t0, 32);
            tcg_gen_shli_tl(t0, t0, 32);
            tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
        }
        break;
    default:
        LARCH_INVAL("bsfhl");
        generate_exception_end(ctx, EXCP_RI);
        tcg_temp_free(t0);
        return;
    }
    tcg_temp_free(t0);
}
+
/* REV with sf==1, opcode==3 ("REV64") */
/* Byte-reverse the full 64-bit value of rn into rd. */
static void handle_rev64(DisasContext *ctx,
                         unsigned int rn, unsigned int rd)
{
    tcg_gen_bswap64_i64(cpu_gpr[rd], cpu_gpr[rn]);
}
+
+/* REV with sf==0, opcode==2
+ * REV32 (sf==1, opcode==2)
+ */
+static void handle_rev32(DisasContext *ctx,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_load_gpr(tcg_rd, rd);
+
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ gen_load_gpr(tcg_rn, rn);
+
+ /* bswap32_i64 requires zero high word */
+ tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
+ tcg_gen_bswap32_i64(tcg_rd, tcg_tmp, TCG_BSWAP_OZ);
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_OZ);
+ tcg_gen_concat32_i64(cpu_gpr[rd], tcg_rd, tcg_tmp);
+
+ tcg_temp_free_i64(tcg_tmp);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+}
+
+/* REV16 */
+static void handle_rev16(DisasContext *ctx, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ gen_load_gpr(tcg_rd, rd);
+ gen_load_gpr(tcg_rn, rn);
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 mask = tcg_const_i64(0x0000ffff0000ffffull);
+
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
+ tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
+ tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, 16);
+ tcg_gen_or_i64(cpu_gpr[rd], tcg_rd, tcg_tmp);
+
+ tcg_temp_free_i64(mask);
+ tcg_temp_free_i64(tcg_tmp);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+}
+
+static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt,
+ int imm2)
+{
+ TCGv t0;
+ TCGv t1;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, imm2 + 1);
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
+ if (opc == OPC_LARCH_ALSL_W) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+
+ return;
+}
+
/*
 * Extract a @wordsz-bit field from the concatenation {rs:rt} starting
 * @bits bits up, i.e. rd = (rs:rt) >> (wordsz - bits), truncated to
 * wordsz bits. bits == 0 degenerates to a copy of rt, bits == wordsz
 * to a copy of rs.
 */
static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs,
                           int rt, int bits)
{
    TCGv t0;
    if (rd == 0) {
        /* Treat as NOP. */
        return;
    }
    t0 = tcg_temp_new();
    if (bits == 0 || bits == wordsz) {
        /* Degenerate cases: the result is one source register unchanged. */
        if (bits == 0) {
            gen_load_gpr(t0, rt);
        } else {
            gen_load_gpr(t0, rs);
        }
        switch (wordsz) {
        case 32:
            tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
            break;
        case 64:
            tcg_gen_mov_tl(cpu_gpr[rd], t0);
            break;
        }
    } else {
        TCGv t1 = tcg_temp_new();
        gen_load_gpr(t0, rt);
        gen_load_gpr(t1, rs);
        switch (wordsz) {
        case 32:
            /* Concatenate rs:rt into 64 bits, shift, keep the low word. */
            {
                TCGv_i64 t2 = tcg_temp_new_i64();
                tcg_gen_concat_tl_i64(t2, t1, t0);
                tcg_gen_shri_i64(t2, t2, 32 - bits);
                gen_move_low32(cpu_gpr[rd], t2);
                tcg_temp_free_i64(t2);
            }
            break;
        case 64:
            /* rd = (rt << bits) | (rs >> (64 - bits)). */
            tcg_gen_shli_tl(t0, t0, bits);
            tcg_gen_shri_tl(t1, t1, 64 - bits);
            tcg_gen_or_tl(cpu_gpr[rd], t1, t0);
            break;
        }
        tcg_temp_free(t1);
    }

    tcg_temp_free(t0);
}
+
/* Byte-granular wrapper around gen_align_bits: @bp is a byte offset. */
static void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt,
                      int bp)
{
    gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8);
}
+
+static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt)
+{
+ TCGv t0;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ switch (opc) {
+ case OPC_LARCH_BREV_4B:
+ gen_helper_bitswap(cpu_gpr[rd], t0);
+ break;
+ case OPC_LARCH_BREV_8B:
+ gen_helper_dbitswap(cpu_gpr[rd], t0);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
/*
 * Moves between the general-purpose and floating-point register files
 * (FR2GR/GR2FR and the high-half FRH2GR/GR2FRH variants). rt is the
 * GPR, fs the FPR. Unknown opcodes raise RI.
 */
static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs)
{
    TCGv t0 = tcg_temp_new();
    check_cp1_enabled(ctx);

    switch (opc) {
    case OPC_LARCH_FR2GR_S:
        /* Sign-extend the 32-bit FPR value into the GPR. */
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            tcg_gen_ext_i32_tl(t0, fp0);
            tcg_temp_free_i32(fp0);
        }
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FR_W:
        /* Truncate the GPR to 32 bits and store it in the FPR. */
        gen_load_gpr(t0, rt);
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(fp0, t0);
            gen_store_fpr32(ctx, fp0, fs);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FR2GR_D:
        gen_load_fpr64(ctx, t0, fs);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FR_D:
        gen_load_gpr(t0, rt);
        gen_store_fpr64(ctx, t0, fs);
        break;
    case OPC_LARCH_FRH2GR_S:
        /* Read the high 32 bits of the FPR, sign-extended. */
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32h(ctx, fp0, fs);
            tcg_gen_ext_i32_tl(t0, fp0);
            tcg_temp_free_i32(fp0);
        }
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FRH_W:
        /* Write the low 32 bits of the GPR to the FPR's high half. */
        gen_load_gpr(t0, rt);
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(fp0, t0);
            gen_store_fpr32h(ctx, fp0, fs);
            tcg_temp_free_i32(fp0);
        }
        break;
    default:
        LARCH_INVAL("cp1 move");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

 out:
    tcg_temp_free(t0);
}
+
/*
 * Paired-single conditional move: copy the low and high 32-bit halves
 * of fs to fd independently, each guarded by its own FCSR condition bit
 * (cc for the low half, cc+1 for the high half). @tf selects move-on-true
 * vs move-on-false; the branch condition is inverted to *skip* the move.
 */
static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd,
                                int cc, int tf)
{
    int cond;
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    if (tf) {
        cond = TCG_COND_EQ;
    } else {
        cond = TCG_COND_NE;
    }

    /* Low half: skip the copy when condition bit cc doesn't match. */
    tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc));
    tcg_gen_brcondi_i32(cond, t0, 0, l1);
    gen_load_fpr32(ctx, t0, fs);
    gen_store_fpr32(ctx, t0, fd);
    gen_set_label(l1);

    /* High half: same, guarded by condition bit cc + 1. */
    tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc + 1));
    tcg_gen_brcondi_i32(cond, t0, 0, l2);
    gen_load_fpr32h(ctx, t0, fs);
    gen_store_fpr32h(ctx, t0, fd);
    tcg_temp_free_i32(t0);
    gen_set_label(l2);
}
+
/*
 * FPU arithmetic, move and conversion operations, single and double
 * precision. Loads the source FPR(s), invokes the matching softfloat
 * helper for @opc and stores the result to @fd. @cc is unused by the
 * opcodes handled here; unknown opcodes raise a Reserved Instruction
 * exception.
 */
static void gen_farith(DisasContext *ctx, uint32_t opc,
                       int ft, int fs, int fd, int cc)
{
    check_cp1_enabled(ctx);
    switch (opc) {
    /* Single-precision arithmetic */
    case OPC_LARCH_FADD_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i32(fp1);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FSUB_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i32(fp1);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMUL_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i32(fp1);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FDIV_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i32(fp1);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FSQRT_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FABS_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_abs_s(fp0, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMOV_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FNEG_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_chs_s(fp0, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    /* Single -> integer conversions with explicit rounding mode */
    case OPC_LARCH_FTINTRNE_L_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_round_l_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FTINTRZ_L_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FTINTRP_L_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FTINTRM_L_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FTINTRNE_W_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_round_w_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FTINTRZ_W_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FTINTRP_W_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FTINTRM_W_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FRECIP_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_recip_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FRSQRT_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FRINT_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_rint_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FCLASS_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_class_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMIN_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();
            TCGv_i32 fp2 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
            gen_store_fpr32(ctx, fp2, fd);
            tcg_temp_free_i32(fp2);
            tcg_temp_free_i32(fp1);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMINA_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();
            TCGv_i32 fp2 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
            gen_store_fpr32(ctx, fp2, fd);
            tcg_temp_free_i32(fp2);
            tcg_temp_free_i32(fp1);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMAX_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
            gen_store_fpr32(ctx, fp1, fd);
            tcg_temp_free_i32(fp1);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FMAXA_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            TCGv_i32 fp1 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            gen_load_fpr32(ctx, fp1, ft);
            gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
            gen_store_fpr32(ctx, fp1, fd);
            tcg_temp_free_i32(fp1);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FCVT_D_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FTINT_W_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FTINT_L_S:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    /* Double-precision arithmetic */
    case OPC_LARCH_FADD_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i64(fp1);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FSUB_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i64(fp1);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMUL_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i64(fp1);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FDIV_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
            tcg_temp_free_i64(fp1);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FSQRT_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FABS_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_abs_d(fp0, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMOV_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FNEG_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_chs_d(fp0, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    /* Double -> integer conversions with explicit rounding mode */
    case OPC_LARCH_FTINTRNE_L_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_round_l_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FTINTRZ_L_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FTINTRP_L_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FTINTRM_L_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FTINTRNE_W_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_round_w_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FTINTRZ_W_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FTINTRP_W_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FTINTRM_W_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FRECIP_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_recip_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FRSQRT_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FRINT_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_rint_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FCLASS_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_class_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMIN_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
            gen_store_fpr64(ctx, fp1, fd);
            tcg_temp_free_i64(fp1);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMINA_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
            gen_store_fpr64(ctx, fp1, fd);
            tcg_temp_free_i64(fp1);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMAX_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
            gen_store_fpr64(ctx, fp1, fd);
            tcg_temp_free_i64(fp1);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FMAXA_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            TCGv_i64 fp1 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            gen_load_fpr64(ctx, fp1, ft);
            gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
            gen_store_fpr64(ctx, fp1, fd);
            tcg_temp_free_i64(fp1);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FCVT_S_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_cvts_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FTINT_W_D:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FTINT_L_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    /* Integer -> floating-point conversions */
    case OPC_LARCH_FFINT_S_W:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            gen_load_fpr32(ctx, fp0, fs);
            gen_helper_float_cvts_w(fp0, cpu_env, fp0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FFINT_D_W:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr32(ctx, fp32, fs);
            gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
            tcg_temp_free_i32(fp32);
            gen_store_fpr64(ctx, fp64, fd);
            tcg_temp_free_i64(fp64);
        }
        break;
    case OPC_LARCH_FFINT_S_L:
        {
            TCGv_i32 fp32 = tcg_temp_new_i32();
            TCGv_i64 fp64 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp64, fs);
            gen_helper_float_cvts_l(fp32, cpu_env, fp64);
            tcg_temp_free_i64(fp64);
            gen_store_fpr32(ctx, fp32, fd);
            tcg_temp_free_i32(fp32);
        }
        break;
    case OPC_LARCH_FFINT_D_L:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();

            gen_load_fpr64(ctx, fp0, fs);
            gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    default:
        LARCH_INVAL("farith");
        generate_exception_end(ctx, EXCP_RI);
        return;
    }
}
+
/* Coprocessor 3 (FPU) */
/*
 * FP load/store with register + register addressing (FLDX/FSTX and the
 * bound-checked FLDGT/FLDLE/FSTGT/FSTLE forms). The effective address
 * is cpu_gpr[base] + cpu_gpr[index], with r0 treated as zero.
 */
static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc,
                          int fd, int fs, int base, int index)
{
    TCGv t0 = tcg_temp_new();

    check_cp1_enabled(ctx);
    /* r0 reads as zero, so skip the add when either operand is r0. */
    if (base == 0) {
        gen_load_gpr(t0, index);
    } else if (index == 0) {
        gen_load_gpr(t0, base);
    } else {
        gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]);
    }

    /*
     * Don't do NOP if destination is zero: we must perform the actual
     * memory access.
     */
    switch (opc) {
    case OPC_LARCH_FLDX_S:
    case OPC_LARCH_FLDGT_S:
    case OPC_LARCH_FLDLE_S:
        /* 32-bit FP load: load into t0, truncate, store to FPR. */
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
            tcg_gen_trunc_tl_i32(fp0, t0);
            gen_store_fpr32(ctx, fp0, fd);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FLDX_D:
    case OPC_LARCH_FLDGT_D:
    case OPC_LARCH_FLDLE_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
            gen_store_fpr64(ctx, fp0, fd);
            tcg_temp_free_i64(fp0);
        }
        break;
    case OPC_LARCH_FSTX_S:
    case OPC_LARCH_FSTGT_S:
    case OPC_LARCH_FSTLE_S:
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();
            gen_load_fpr32(ctx, fp0, fs);
            tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FSTX_D:
    case OPC_LARCH_FSTGT_D:
    case OPC_LARCH_FSTLE_D:
        {
            TCGv_i64 fp0 = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp0, fs);
            tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
            tcg_temp_free_i64(fp0);
        }
        break;
    }
    tcg_temp_free(t0);
}
+
+static inline void clear_branch_hflags(DisasContext *ctx)
+{
+ ctx->hflags &= ~LARCH_HFLAG_BMASK;
+ if (ctx->base.is_jmp == DISAS_NEXT) {
+ save_cpu_state(ctx, 0);
+ } else {
+ /*
+ * It is not safe to save ctx->hflags as hflags may be changed
+ * in execution time.
+ */
+ tcg_gen_andi_i32(hflags, hflags, ~LARCH_HFLAG_BMASK);
+ }
+}
+
/*
 * Complete a pending branch recorded by gen_compute_branch(): emit the
 * unconditional jump, the conditional two-way jump on `bcond`, or the
 * indirect register jump, then end the TB.
 */
static void gen_branch(DisasContext *ctx, int insn_bytes)
{
    if (ctx->hflags & LARCH_HFLAG_BMASK) {
        int proc_hflags = ctx->hflags & LARCH_HFLAG_BMASK;
        /* Branches completion */
        clear_branch_hflags(ctx);
        ctx->base.is_jmp = DISAS_NORETURN;
        /* FIXME: Need to clear can_do_io. */
        switch (proc_hflags & LARCH_HFLAG_BMASK) {
        case LARCH_HFLAG_B:
            /* unconditional branch */
            gen_goto_tb(ctx, 0, ctx->btarget);
            break;
        case LARCH_HFLAG_BC:
            /* Conditional branch */
            {
                TCGLabel *l1 = gen_new_label();

                /* Fall through to the next insn when bcond == 0. */
                tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
                gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes);
                gen_set_label(l1);
                gen_goto_tb(ctx, 0, ctx->btarget);
            }
            break;
        case LARCH_HFLAG_BR:
            /* unconditional branch to register */
            tcg_gen_mov_tl(cpu_PC, btarget);
            if (ctx->base.singlestep_enabled) {
                save_cpu_state(ctx, 0);
                gen_helper_raise_exception_debug(cpu_env);
            }
            tcg_gen_lookup_and_goto_ptr();
            break;
        default:
            fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
            abort();
        }
    }
}
+
/* Signed immediate: extract `width` bits of `op` at `start`, sign-extended. */
#define SIMM(op, start, width)                                         \
    ((int32_t)(((op >> start) & ((~0U) >> (32 - width)))               \
               << (32 - width))                                        \
     >> (32 - width))
/* Zero-extended immediate: extract `width` bits of `op` at `start`. */
#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width)))
+
+static void gen_sync(int stype)
+{
+ TCGBar tcg_mo = TCG_BAR_SC;
+
+ switch (stype) {
+ case 0x4: /* SYNC_WMB */
+ tcg_mo |= TCG_MO_ST_ST;
+ break;
+ case 0x10: /* SYNC_MB */
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ case 0x11: /* SYNC_ACQUIRE */
+ tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST;
+ break;
+ case 0x12: /* SYNC_RELEASE */
+ tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST;
+ break;
+ case 0x13: /* SYNC_RMB */
+ tcg_mo |= TCG_MO_LD_LD;
+ break;
+ default:
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ }
+
+ tcg_gen_mb(tcg_mo);
+}
+
+static void gen_crc32(DisasContext *ctx, int rd, int rs, int rt, int sz,
+ int crc32c)
+{
+ TCGv t0;
+ TCGv t1;
+ TCGv_i32 tsz = tcg_const_i32(1 << sz);
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+
+ if (crc32c) {
+ gen_helper_crc32c(cpu_gpr[rd], t0, t1, tsz);
+ } else {
+ gen_helper_crc32(cpu_gpr[rd], t0, t1, tsz);
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(tsz);
+}
+
+#include "cpu-csr.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/*
+ * 64-bit CSR read
+ *
+ * @arg : GPR to store the value of CSR register
+ * @csr : CSR register number
+ */
+static void gen_csr_rdq(DisasContext *ctx, TCGv rd, int64_t a1)
+{
+ TCGv_i64 csr = tcg_const_i64(a1);
+ gen_helper_csr_rdq(rd, cpu_env, csr);
+}
+
+/*
+ * 64-bit CSR write.
+ * @val : GPR holding the new value; receives the old CSR value
+ * @a1 : CSR register number
+ */
+static void gen_csr_wrq(DisasContext *ctx, TCGv val, int64_t a1)
+{
+    TCGv_i64 csr = tcg_const_i64(a1);
+    gen_helper_csr_wrq(val, cpu_env, val, csr);
+    tcg_temp_free_i64(csr);
+}
+
+/*
+ * 64-bit CSR exchange under @mask.
+ * @val : GPR with the new value; receives the old CSR value
+ * @a1 : CSR register number
+ */
+static void gen_csr_xchgq(DisasContext *ctx, TCGv val, TCGv mask, int64_t a1)
+{
+    TCGv_i64 csr = tcg_const_i64(a1);
+    gen_helper_csr_xchgq(val, cpu_env, val, mask, csr);
+    tcg_temp_free_i64(csr);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
+ CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ CPULOONGARCHState *env = cs->env_ptr;
+
+ ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
+ ctx->saved_pc = -1;
+ ctx->insn_flags = env->insn_flags;
+ ctx->btarget = 0;
+ /* Restore state from the tb context. */
+ ctx->hflags = (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */
+ restore_cpu_state(env, ctx);
+#ifdef CONFIG_USER_ONLY
+ ctx->mem_idx = LARCH_HFLAG_UM;
+#else
+ ctx->mem_idx = hflags_mmu_index(ctx->hflags);
+#endif
+ ctx->default_tcg_memop_mask = MO_ALIGN;
+
+ LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
+ ctx->hflags);
+}
+
+static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+}
+
+static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ tcg_gen_insn_start(ctx->base.pc_next, ctx->hflags & LARCH_HFLAG_BMASK,
+ ctx->btarget);
+}
+#if 0
+static bool loongarch_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
+ const CPUBreakpoint *bp)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ save_cpu_state(ctx, 1);
+ ctx->base.is_jmp = DISAS_NORETURN;
+ gen_helper_raise_exception_debug(cpu_env);
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order to for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx->base.pc_next += 4;
+ return true;
+}
+#endif
+/* 128 and 256 lsx vector instructions are not supported yet */
+static bool decode_vector_lsx(uint32_t opcode)
+{
+ uint32_t value = (opcode & 0xff000000);
+
+ if ((opcode & 0xf0000000) == 0x70000000) {
+ return true;
+ } else if ((opcode & 0xfff00000) == 0x38400000) {
+ return true;
+ } else {
+ switch (value) {
+ case 0x09000000:
+ case 0x0a000000:
+ case 0x0e000000:
+ case 0x0f000000:
+ case 0x2c000000:
+ case 0x30000000:
+ case 0x31000000:
+ case 0x32000000:
+ case 0x33000000:
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool decode_insn(DisasContext *ctx, uint32_t insn);
+#include "decode-insn.c.inc"
+#include "trans.inc.c"
+
+static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    CPULOONGARCHState *env = cs->env_ptr;
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    int insn_bytes = 4;
+
+    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
+
+    if (!decode_insn(ctx, ctx->opcode)) {
+        if (decode_vector_lsx(ctx->opcode)) {
+            generate_exception_end(ctx, EXCP_RI);
+        } else {
+            /* TARGET_FMT_lx: pc_next is target_ulong, not long. */
+            fprintf(stderr, "Error: unknown opcode. 0x" TARGET_FMT_lx ": 0x%x\n",
+                    ctx->base.pc_next, ctx->opcode);
+            generate_exception_end(ctx, EXCP_RI);
+        }
+    }
+    if (ctx->hflags & LARCH_HFLAG_BMASK) {
+        gen_branch(ctx, insn_bytes);
+    }
+    ctx->base.pc_next += insn_bytes;
+}
+
+static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+ if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
+ save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
+ gen_helper_raise_exception_debug(cpu_env);
+ } else {
+ switch (ctx->base.is_jmp) {
+ case DISAS_STOP:
+ gen_save_pc(ctx->base.pc_next);
+ tcg_gen_lookup_and_goto_ptr();
+ break;
+ case DISAS_NEXT:
+ case DISAS_TOO_MANY:
+ save_cpu_state(ctx, 0);
+ gen_goto_tb(ctx, 0, ctx->base.pc_next);
+ break;
+ case DISAS_EXIT:
+ tcg_gen_exit_tb(NULL, 0);
+ break;
+ case DISAS_NORETURN:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+}
+
+static void loongarch_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
+{
+ qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+ log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps loongarch_tr_ops = {
+ .init_disas_context = loongarch_tr_init_disas_context,
+ .tb_start = loongarch_tr_tb_start,
+ .insn_start = loongarch_tr_insn_start,
+#if 0
+ .breakpoint_check = loongarch_tr_breakpoint_check,
+#endif
+ .translate_insn = loongarch_tr_translate_insn,
+ .tb_stop = loongarch_tr_tb_stop,
+ .disas_log = loongarch_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb, int max_insns)
+{
+ DisasContext ctx;
+
+ translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void loongarch_tcg_init(void)
+{
+ int i;
+
+ for (i = 0; i < 32; i++)
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPULOONGARCHState,
+ active_tc.gpr[i]),
+ regnames[i]);
+
+ for (i = 0; i < 32; i++) {
+ int off = offsetof(CPULOONGARCHState, active_fpu.fpr[i].d);
+ fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
+ }
+
+ cpu_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPULOONGARCHState, active_tc.PC), "PC");
+ bcond = tcg_global_mem_new(cpu_env,
+ offsetof(CPULOONGARCHState, bcond), "bcond");
+ btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPULOONGARCHState, btarget), "btarget");
+ hflags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPULOONGARCHState, hflags), "hflags");
+ fpu_fcsr0 = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPULOONGARCHState, active_fpu.fcsr0),
+ "fcsr0");
+ cpu_lladdr = tcg_global_mem_new(cpu_env,
+ offsetof(CPULOONGARCHState, lladdr),
+ "lladdr");
+ cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, llval),
+ "llval");
+}
+
+void restore_state_to_opc(CPULOONGARCHState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->active_tc.PC = data[0];
+ env->hflags &= ~LARCH_HFLAG_BMASK;
+ env->hflags |= data[1];
+ switch (env->hflags & LARCH_HFLAG_BMASK) {
+ case LARCH_HFLAG_BR:
+ break;
+ case LARCH_HFLAG_BC:
+ case LARCH_HFLAG_B:
+ env->btarget = data[2];
+ break;
+ }
+}
diff --git a/target/meson.build b/target/meson.build
index 2f6940255e6583e344ec0892fd8c4b55ee75766d..ac0ce618b7d43e8837275b163c7819ddfd4d7850 100644
--- a/target/meson.build
+++ b/target/meson.build
@@ -5,6 +5,7 @@ subdir('cris')
subdir('hexagon')
subdir('hppa')
subdir('i386')
+subdir('loongarch64')
subdir('m68k')
subdir('microblaze')
subdir('mips')
diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc
new file mode 100644
index 0000000000000000000000000000000000000000..d16257185661cc4c5a85acfa8e1d67500c4699a4
--- /dev/null
+++ b/tcg/loongarch64/tcg-insn-defs.c.inc
@@ -0,0 +1,979 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * LoongArch instruction formats, opcodes, and encoders for TCG use.
+ *
+ * This file is auto-generated by genqemutcgdefs from
+ * https://github.com/loongson-community/loongarch-opcodes,
+ * from commit 961f0c60f5b63e574d785995600c71ad5413fdc4.
+ * DO NOT EDIT.
+ */
+
+typedef enum {
+ OPC_CLZ_W = 0x00001400,
+ OPC_CTZ_W = 0x00001c00,
+ OPC_CLZ_D = 0x00002400,
+ OPC_CTZ_D = 0x00002c00,
+ OPC_REVB_2H = 0x00003000,
+ OPC_REVB_2W = 0x00003800,
+ OPC_REVB_D = 0x00003c00,
+ OPC_SEXT_H = 0x00005800,
+ OPC_SEXT_B = 0x00005c00,
+ OPC_ADD_W = 0x00100000,
+ OPC_ADD_D = 0x00108000,
+ OPC_SUB_W = 0x00110000,
+ OPC_SUB_D = 0x00118000,
+ OPC_SLT = 0x00120000,
+ OPC_SLTU = 0x00128000,
+ OPC_MASKEQZ = 0x00130000,
+ OPC_MASKNEZ = 0x00138000,
+ OPC_NOR = 0x00140000,
+ OPC_AND = 0x00148000,
+ OPC_OR = 0x00150000,
+ OPC_XOR = 0x00158000,
+ OPC_ORN = 0x00160000,
+ OPC_ANDN = 0x00168000,
+ OPC_SLL_W = 0x00170000,
+ OPC_SRL_W = 0x00178000,
+ OPC_SRA_W = 0x00180000,
+ OPC_SLL_D = 0x00188000,
+ OPC_SRL_D = 0x00190000,
+ OPC_SRA_D = 0x00198000,
+ OPC_ROTR_W = 0x001b0000,
+ OPC_ROTR_D = 0x001b8000,
+ OPC_MUL_W = 0x001c0000,
+ OPC_MULH_W = 0x001c8000,
+ OPC_MULH_WU = 0x001d0000,
+ OPC_MUL_D = 0x001d8000,
+ OPC_MULH_D = 0x001e0000,
+ OPC_MULH_DU = 0x001e8000,
+ OPC_DIV_W = 0x00200000,
+ OPC_MOD_W = 0x00208000,
+ OPC_DIV_WU = 0x00210000,
+ OPC_MOD_WU = 0x00218000,
+ OPC_DIV_D = 0x00220000,
+ OPC_MOD_D = 0x00228000,
+ OPC_DIV_DU = 0x00230000,
+ OPC_MOD_DU = 0x00238000,
+ OPC_SLLI_W = 0x00408000,
+ OPC_SLLI_D = 0x00410000,
+ OPC_SRLI_W = 0x00448000,
+ OPC_SRLI_D = 0x00450000,
+ OPC_SRAI_W = 0x00488000,
+ OPC_SRAI_D = 0x00490000,
+ OPC_ROTRI_W = 0x004c8000,
+ OPC_ROTRI_D = 0x004d0000,
+ OPC_BSTRINS_W = 0x00600000,
+ OPC_BSTRPICK_W = 0x00608000,
+ OPC_BSTRINS_D = 0x00800000,
+ OPC_BSTRPICK_D = 0x00c00000,
+ OPC_SLTI = 0x02000000,
+ OPC_SLTUI = 0x02400000,
+ OPC_ADDI_W = 0x02800000,
+ OPC_ADDI_D = 0x02c00000,
+ OPC_CU52I_D = 0x03000000,
+ OPC_ANDI = 0x03400000,
+ OPC_ORI = 0x03800000,
+ OPC_XORI = 0x03c00000,
+ OPC_LU12I_W = 0x14000000,
+ OPC_CU32I_D = 0x16000000,
+ OPC_PCADDU2I = 0x18000000,
+ OPC_PCALAU12I = 0x1a000000,
+ OPC_PCADDU12I = 0x1c000000,
+ OPC_PCADDU18I = 0x1e000000,
+ OPC_LD_B = 0x28000000,
+ OPC_LD_H = 0x28400000,
+ OPC_LD_W = 0x28800000,
+ OPC_LD_D = 0x28c00000,
+ OPC_ST_B = 0x29000000,
+ OPC_ST_H = 0x29400000,
+ OPC_ST_W = 0x29800000,
+ OPC_ST_D = 0x29c00000,
+ OPC_LD_BU = 0x2a000000,
+ OPC_LD_HU = 0x2a400000,
+ OPC_LD_WU = 0x2a800000,
+ OPC_LDX_B = 0x38000000,
+ OPC_LDX_H = 0x38040000,
+ OPC_LDX_W = 0x38080000,
+ OPC_LDX_D = 0x380c0000,
+ OPC_STX_B = 0x38100000,
+ OPC_STX_H = 0x38140000,
+ OPC_STX_W = 0x38180000,
+ OPC_STX_D = 0x381c0000,
+ OPC_LDX_BU = 0x38200000,
+ OPC_LDX_HU = 0x38240000,
+ OPC_LDX_WU = 0x38280000,
+ OPC_DBAR = 0x38720000,
+ OPC_JIRL = 0x4c000000,
+ OPC_B = 0x50000000,
+ OPC_BL = 0x54000000,
+ OPC_BEQ = 0x58000000,
+ OPC_BNE = 0x5c000000,
+ OPC_BGT = 0x60000000,
+ OPC_BLE = 0x64000000,
+ OPC_BGTU = 0x68000000,
+ OPC_BLEU = 0x6c000000,
+} LoongArchInsn;
+
+static int32_t __attribute__((unused))
+encode_d_slot(LoongArchInsn opc, uint32_t d)
+{
+ return opc | d;
+}
+
+static int32_t __attribute__((unused))
+encode_dj_slots(LoongArchInsn opc, uint32_t d, uint32_t j)
+{
+ return opc | d | j << 5;
+}
+
+static int32_t __attribute__((unused))
+encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k)
+{
+ return opc | d | j << 5 | k << 10;
+}
+
+static int32_t __attribute__((unused))
+encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k,
+ uint32_t m)
+{
+ return opc | d | j << 5 | k << 10 | m << 16;
+}
+
+static int32_t __attribute__((unused))
+encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k)
+{
+ return opc | d | k << 10;
+}
+
+static int32_t __attribute__((unused))
+encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ return encode_dj_slots(opc, d, j);
+}
+
+static int32_t __attribute__((unused))
+encode_djk_insn(LoongArchInsn opc, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(k >= 0 && k <= 0x1f);
+ return encode_djk_slots(opc, d, j, k);
+}
+
+static int32_t __attribute__((unused))
+encode_djsk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff);
+ return encode_djk_slots(opc, d, j, sk12 & 0xfff);
+}
+
+static int32_t __attribute__((unused))
+encode_djsk16_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(sk16 >= -0x8000 && sk16 <= 0x7fff);
+ return encode_djk_slots(opc, d, j, sk16 & 0xffff);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk12 <= 0xfff);
+ return encode_djk_slots(opc, d, j, uk12);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk5 <= 0x1f);
+ return encode_djk_slots(opc, d, j, uk5);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk5um5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk5 <= 0x1f);
+ tcg_debug_assert(um5 <= 0x1f);
+ return encode_djkm_slots(opc, d, j, uk5, um5);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk6 <= 0x3f);
+ return encode_djk_slots(opc, d, j, uk6);
+}
+
+static int32_t __attribute__((unused))
+encode_djuk6um6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(j >= 0 && j <= 0x1f);
+ tcg_debug_assert(uk6 <= 0x3f);
+ tcg_debug_assert(um6 <= 0x3f);
+ return encode_djkm_slots(opc, d, j, uk6, um6);
+}
+
+static int32_t __attribute__((unused))
+encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20)
+{
+ tcg_debug_assert(d >= 0 && d <= 0x1f);
+ tcg_debug_assert(sj20 >= -0x80000 && sj20 <= 0x7ffff);
+ return encode_dj_slots(opc, d, sj20 & 0xfffff);
+}
+
+static int32_t __attribute__((unused))
+encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16)
+{
+ tcg_debug_assert(sd10k16 >= -0x2000000 && sd10k16 <= 0x1ffffff);
+ return encode_dk_slots(opc, (sd10k16 >> 16) & 0x3ff, sd10k16 & 0xffff);
+}
+
+static int32_t __attribute__((unused))
+encode_ud15_insn(LoongArchInsn opc, uint32_t ud15)
+{
+ tcg_debug_assert(ud15 <= 0x7fff);
+ return encode_d_slot(opc, ud15);
+}
+
+/* Emits the `clz.w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CLZ_W, d, j));
+}
+
+/* Emits the `ctz.w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ctz_w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CTZ_W, d, j));
+}
+
+/* Emits the `clz.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_clz_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CLZ_D, d, j));
+}
+
+/* Emits the `ctz.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ctz_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_CTZ_D, d, j));
+}
+
+/* Emits the `revb.2h d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_2h(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_2H, d, j));
+}
+
+/* Emits the `revb.2w d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_2w(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_2W, d, j));
+}
+
+/* Emits the `revb.d d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_revb_d(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_REVB_D, d, j));
+}
+
+/* Emits the `sext.h d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sext_h(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_SEXT_H, d, j));
+}
+
+/* Emits the `sext.b d, j` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sext_b(TCGContext *s, TCGReg d, TCGReg j)
+{
+ tcg_out32(s, encode_dj_insn(OPC_SEXT_B, d, j));
+}
+
+/* Emits the `add.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_add_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ADD_W, d, j, k));
+}
+
+/* Emits the `add.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_add_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ADD_D, d, j, k));
+}
+
+/* Emits the `sub.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sub_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SUB_W, d, j, k));
+}
+
+/* Emits the `sub.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sub_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SUB_D, d, j, k));
+}
+
+/* Emits the `slt d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slt(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLT, d, j, k));
+}
+
+/* Emits the `sltu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sltu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLTU, d, j, k));
+}
+
+/* Emits the `maskeqz d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_maskeqz(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MASKEQZ, d, j, k));
+}
+
+/* Emits the `masknez d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_masknez(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MASKNEZ, d, j, k));
+}
+
+/* Emits the `nor d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_nor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_NOR, d, j, k));
+}
+
+/* Emits the `and d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_and(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_AND, d, j, k));
+}
+
+/* Emits the `or d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_or(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_OR, d, j, k));
+}
+
+/* Emits the `xor d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_XOR, d, j, k));
+}
+
+/* Emits the `orn d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_orn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ORN, d, j, k));
+}
+
+/* Emits the `andn d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_andn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ANDN, d, j, k));
+}
+
+/* Emits the `sll.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sll_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLL_W, d, j, k));
+}
+
+/* Emits the `srl.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srl_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRL_W, d, j, k));
+}
+
+/* Emits the `sra.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sra_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRA_W, d, j, k));
+}
+
+/* Emits the `sll.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sll_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SLL_D, d, j, k));
+}
+
+/* Emits the `srl.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srl_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRL_D, d, j, k));
+}
+
+/* Emits the `sra.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sra_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_SRA_D, d, j, k));
+}
+
+/* Emits the `rotr.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotr_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_W, d, j, k));
+}
+
+/* Emits the `rotr.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotr_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_ROTR_D, d, j, k));
+}
+
+/* Emits the `mul.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mul_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MUL_W, d, j, k));
+}
+
+/* Emits the `mulh.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_W, d, j, k));
+}
+
+/* Emits the `mulh.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_WU, d, j, k));
+}
+
+/* Emits the `mul.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mul_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MUL_D, d, j, k));
+}
+
+/* Emits the `mulh.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_D, d, j, k));
+}
+
+/* Emits the `mulh.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mulh_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MULH_DU, d, j, k));
+}
+
+/* Emits the `div.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_W, d, j, k));
+}
+
+/* Emits the `mod.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_W, d, j, k));
+}
+
+/* Emits the `div.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_WU, d, j, k));
+}
+
+/* Emits the `mod.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_WU, d, j, k));
+}
+
+/* Emits the `div.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_D, d, j, k));
+}
+
+/* Emits the `mod.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_D, d, j, k));
+}
+
+/* Emits the `div.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_div_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_DIV_DU, d, j, k));
+}
+
+/* Emits the `mod.du d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_mod_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_MOD_DU, d, j, k));
+}
+
+/* Emits the `slli.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SLLI_W, d, j, uk5));
+}
+
+/* Emits the `slli.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SLLI_D, d, j, uk6));
+}
+
+/* Emits the `srli.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SRLI_W, d, j, uk5));
+}
+
+/* Emits the `srli.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SRLI_D, d, j, uk6));
+}
+
+/* Emits the `srai.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srai_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_SRAI_W, d, j, uk5));
+}
+
+/* Emits the `srai.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_srai_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_SRAI_D, d, j, uk6));
+}
+
+/* Emits the `rotri.w d, j, uk5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotri_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5)
+{
+ tcg_out32(s, encode_djuk5_insn(OPC_ROTRI_W, d, j, uk5));
+}
+
+/* Emits the `rotri.d d, j, uk6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_rotri_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6)
+{
+ tcg_out32(s, encode_djuk6_insn(OPC_ROTRI_D, d, j, uk6));
+}
+
+/* Emits the `bstrins.w d, j, uk5, um5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrins_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRINS_W, d, j, uk5, um5));
+}
+
+/* Emits the `bstrpick.w d, j, uk5, um5` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrpick_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5,
+ uint32_t um5)
+{
+ tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRPICK_W, d, j, uk5, um5));
+}
+
+/* Emits the `bstrins.d d, j, uk6, um6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrins_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRINS_D, d, j, uk6, um6));
+}
+
+/* Emits the `bstrpick.d d, j, uk6, um6` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bstrpick_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6,
+ uint32_t um6)
+{
+ tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRPICK_D, d, j, uk6, um6));
+}
+
+/* Emits the `slti d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_slti(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_SLTI, d, j, sk12));
+}
+
+/* Emits the `sltui d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_sltui(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_SLTUI, d, j, sk12));
+}
+
+/* Emits the `addi.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_addi_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ADDI_W, d, j, sk12));
+}
+
+/* Emits the `addi.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_addi_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ADDI_D, d, j, sk12));
+}
+
+/* Emits the `cu52i.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_cu52i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_CU52I_D, d, j, sk12));
+}
+
+/* Emits the `andi d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_andi(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_ANDI, d, j, uk12));
+}
+
+/* Emits the `ori d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_ORI, d, j, uk12));
+}
+
+/* Emits the `xori d, j, uk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12)
+{
+ tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12));
+}
+
+/* Emits the `lu12i.w d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_lu12i_w(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_LU12I_W, d, sj20));
+}
+
+/* Emits the `cu32i.d d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_cu32i_d(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_CU32I_D, d, sj20));
+}
+
+/* Emits the `pcaddu2i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu2i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU2I, d, sj20));
+}
+
+/* Emits the `pcalau12i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcalau12i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCALAU12I, d, sj20));
+}
+
+/* Emits the `pcaddu12i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu12i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU12I, d, sj20));
+}
+
+/* Emits the `pcaddu18i d, sj20` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_pcaddu18i(TCGContext *s, TCGReg d, int32_t sj20)
+{
+ tcg_out32(s, encode_dsj20_insn(OPC_PCADDU18I, d, sj20));
+}
+
+/* Emits the `ld.b d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_B, d, j, sk12));
+}
+
+/* Emits the `ld.h d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_H, d, j, sk12));
+}
+
+/* Emits the `ld.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_W, d, j, sk12));
+}
+
+/* Emits the `ld.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_D, d, j, sk12));
+}
+
+/* Emits the `st.b d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_B, d, j, sk12));
+}
+
+/* Emits the `st.h d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_H, d, j, sk12));
+}
+
+/* Emits the `st.w d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_W, d, j, sk12));
+}
+
+/* Emits the `st.d d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_st_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_ST_D, d, j, sk12));
+}
+
+/* Emits the `ld.bu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_bu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_BU, d, j, sk12));
+}
+
+/* Emits the `ld.hu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_hu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_HU, d, j, sk12));
+}
+
+/* Emits the `ld.wu d, j, sk12` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12)
+{
+ tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12));
+}
+
+/* Emits the `ldx.b d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_B, d, j, k));
+}
+
+/* Emits the `ldx.h d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_H, d, j, k));
+}
+
+/* Emits the `ldx.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_W, d, j, k));
+}
+
+/* Emits the `ldx.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_D, d, j, k));
+}
+
+/* Emits the `stx.b d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_B, d, j, k));
+}
+
+/* Emits the `stx.h d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_H, d, j, k));
+}
+
+/* Emits the `stx.w d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_W, d, j, k));
+}
+
+/* Emits the `stx.d d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_stx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_STX_D, d, j, k));
+}
+
+/* Emits the `ldx.bu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_bu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_BU, d, j, k));
+}
+
+/* Emits the `ldx.hu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_hu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_HU, d, j, k));
+}
+
+/* Emits the `ldx.wu d, j, k` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k)
+{
+ tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k));
+}
+
+/* Emits the `dbar ud15` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_dbar(TCGContext *s, uint32_t ud15)
+{
+ tcg_out32(s, encode_ud15_insn(OPC_DBAR, ud15));
+}
+
+/* Emits the `jirl d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_jirl(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_JIRL, d, j, sk16));
+}
+
+/* Emits the `b sd10k16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_b(TCGContext *s, int32_t sd10k16)
+{
+ tcg_out32(s, encode_sd10k16_insn(OPC_B, sd10k16));
+}
+
+/* Emits the `bl sd10k16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bl(TCGContext *s, int32_t sd10k16)
+{
+ tcg_out32(s, encode_sd10k16_insn(OPC_BL, sd10k16));
+}
+
+/* Emits the `beq d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_beq(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BEQ, d, j, sk16));
+}
+
+/* Emits the `bne d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bne(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BNE, d, j, sk16));
+}
+
+/* Emits the `bgt d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bgt(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BGT, d, j, sk16));
+}
+
+/* Emits the `ble d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_ble(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BLE, d, j, sk16));
+}
+
+/* Emits the `bgtu d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bgtu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BGTU, d, j, sk16));
+}
+
+/* Emits the `bleu d, j, sk16` instruction. */
+static void __attribute__((unused))
+tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16)
+{
+ tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16));
+}
+
+/* End of generated code. */
diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
new file mode 100644
index 0000000000000000000000000000000000000000..349c672687386a5b1db3728b919de2536f68e9ec
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define LoongArch target-specific constraint sets.
+ *
+ * Copyright (c) 2021 WANG Xuerui
+ *
+ * Based on tcg/riscv/tcg-target-con-set.h
+ *
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * C_On_Im(...) defines a constraint set with outputs and inputs.
+ * Each operand should be a sequence of constraint letters as defined by
+ * tcg-target-con-str.h; the constraint combination is inclusive or.
+ */
+C_O0_I1(r)
+C_O0_I2(rZ, r)
+C_O0_I2(rZ, rZ)
+C_O0_I2(LZ, L)
+C_O1_I1(r, r)
+C_O1_I1(r, L)
+C_O1_I2(r, r, rC)
+C_O1_I2(r, r, ri)
+C_O1_I2(r, r, rI)
+C_O1_I2(r, r, rU)
+C_O1_I2(r, r, rW)
+C_O1_I2(r, r, rZ)
+C_O1_I2(r, 0, rZ)
+C_O1_I2(r, rZ, rN)
+C_O1_I2(r, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3986a4fd43e4550f337adc06413fe3820255392
--- /dev/null
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Define LoongArch target-specific operand constraints.
+ *
+ * Copyright (c) 2021 WANG Xuerui
+ *
+ * Based on tcg/riscv/tcg-target-con-str.h
+ *
+ * Copyright (c) 2021 Linaro
+ */
+
+/*
+ * Define constraint letters for register sets:
+ * REGS(letter, register_mask)
+ */
+REGS('r', ALL_GENERAL_REGS)
+REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
+
+/*
+ * Define constraint letters for constants:
+ * CONST(letter, TCG_CT_CONST_* bit set)
+ */
+CONST('I', TCG_CT_CONST_S12)
+CONST('N', TCG_CT_CONST_N12)
+CONST('U', TCG_CT_CONST_U12)
+CONST('Z', TCG_CT_CONST_ZERO)
+CONST('C', TCG_CT_CONST_C12)
+CONST('W', TCG_CT_CONST_WSZ)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
new file mode 100644
index 0000000000000000000000000000000000000000..9b53549edbf2f54483baf73e7b2217358dad0c8a
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -0,0 +1,1744 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2021 WANG Xuerui
+ *
+ * Based on tcg/riscv/tcg-target.c.inc
+ *
+ * Copyright (c) 2018 SiFive, Inc
+ * Copyright (c) 2008-2009 Arnaud Patard
+ * Copyright (c) 2009 Aurelien Jarno
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "../tcg-ldst.c.inc"
+
+#ifdef CONFIG_DEBUG_TCG
+static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
+ "zero",
+ "ra",
+ "tp",
+ "sp",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ "a5",
+ "a6",
+ "a7",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "t7",
+ "t8",
+ "r21", /* reserved in the LP64* ABI, hence no ABI name */
+ "s9",
+ "s0",
+ "s1",
+ "s2",
+ "s3",
+ "s4",
+ "s5",
+ "s6",
+ "s7",
+ "s8"
+};
+#endif
+
+static const int tcg_target_reg_alloc_order[] = {
+ /* Registers preserved across calls */
+ /* TCG_REG_S0 reserved for TCG_AREG0 */
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+ TCG_REG_S9,
+
+ /* Registers (potentially) clobbered across calls */
+ TCG_REG_T0,
+ TCG_REG_T1,
+ TCG_REG_T2,
+ TCG_REG_T3,
+ TCG_REG_T4,
+ TCG_REG_T5,
+ TCG_REG_T6,
+ TCG_REG_T7,
+ TCG_REG_T8,
+
+ /* Argument registers, opposite order of allocation. */
+ TCG_REG_A7,
+ TCG_REG_A6,
+ TCG_REG_A5,
+ TCG_REG_A4,
+ TCG_REG_A3,
+ TCG_REG_A2,
+ TCG_REG_A1,
+ TCG_REG_A0,
+};
+
+static const int tcg_target_call_iarg_regs[] = {
+ TCG_REG_A0,
+ TCG_REG_A1,
+ TCG_REG_A2,
+ TCG_REG_A3,
+ TCG_REG_A4,
+ TCG_REG_A5,
+ TCG_REG_A6,
+ TCG_REG_A7,
+};
+
+static const int tcg_target_call_oarg_regs[] = {
+ TCG_REG_A0,
+ TCG_REG_A1,
+};
+
+#ifndef CONFIG_SOFTMMU
+#define USE_GUEST_BASE (guest_base != 0)
+#define TCG_GUEST_BASE_REG TCG_REG_S1
+#endif
+
+#define TCG_CT_CONST_ZERO 0x100
+#define TCG_CT_CONST_S12 0x200
+#define TCG_CT_CONST_N12 0x400
+#define TCG_CT_CONST_U12 0x800
+#define TCG_CT_CONST_C12 0x1000
+#define TCG_CT_CONST_WSZ 0x2000
+
+#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
+/*
+ * For softmmu, we need to avoid conflicts with the first 5
+ * argument registers to call the helper. Some of these are
+ * also used for the tlb lookup.
+ */
+#ifdef CONFIG_SOFTMMU
+#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5)
+#else
+#define SOFTMMU_RESERVE_REGS 0
+#endif
+
+
/* Sign-extend the LEN-bit field of VAL that starts at bit POS. */
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}
+
+/* test if a constant matches the constraint */
+static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
+{
+ if (ct & TCG_CT_CONST) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Relocations
+ */
+
+/*
+ * Relocation records defined in LoongArch ELF psABI v1.00 is way too
+ * complicated; a whopping stack machine is needed to stuff the fields, at
+ * the very least one SOP_PUSH and one SOP_POP (of the correct format) are
+ * needed.
+ *
+ * Hence, define our own simpler relocation types. Numbers are chosen as to
+ * not collide with potential future additions to the true ELF relocation
+ * type enum.
+ */
+
+/* Field Sk16, shifted right by 2; suitable for conditional jumps */
+#define R_LOONGARCH_BR_SK16 256
+/* Field Sd10k16, shifted right by 2; suitable for B and BL */
+#define R_LOONGARCH_BR_SD10K16 257
+
/*
 * Patch the Sk16 (16-bit signed, 4-byte-scaled) offset field of the
 * conditional-branch instruction at SRC_RW so that it branches to TARGET.
 * The displacement is computed against the executable (rx) alias of the
 * code buffer, since that is where the CPU fetches from.
 * Returns false if TARGET is out of the representable +/-128KiB range.
 */
static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    /* Branch targets are always instruction (4-byte) aligned. */
    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        /* The k16 slot occupies bits [25:10] of the instruction. */
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}
+
/*
 * Patch the Sd10k16 (26-bit signed, 4-byte-scaled, split) offset field of
 * the B/BL instruction at SRC_RW so that it branches to TARGET.  The high
 * 10 bits and low 16 bits of the word offset live in separate instruction
 * slots.  Returns false if TARGET is out of the +/-128MiB range.
 */
static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
        return true;
    }

    return false;
}
+
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
+ intptr_t value, intptr_t addend)
+{
+ tcg_debug_assert(addend == 0);
+ switch (type) {
+ case R_LOONGARCH_BR_SK16:
+ return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
+ case R_LOONGARCH_BR_SD10K16:
+ return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+#include "tcg-insn-defs.c.inc"
+
+/*
+ * TCG intrinsics
+ */
+
/*
 * Emit a memory barrier.  The requested TCG barrier kind in A0 is
 * ignored: baseline LoongArch offers only the full barrier (dbar 0),
 * which is strong enough for every TCG_MO_* combination.
 */
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}
+
+static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
+{
+ if (ret == arg) {
+ return true;
+ }
+ switch (type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_I64:
+ /*
+ * Conventional register-register move used in LoongArch is
+ * `or dst, src, zero`.
+ */
+ tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return true;
+}
+
+static bool imm_part_needs_loading(bool high_bits_are_ones,
+ tcg_target_long part)
+{
+ if (high_bits_are_ones) {
+ return part != -1;
+ } else {
+ return part != 0;
+ }
+}
+
/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (lo == val) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (0x800 <= val && val <= 0xfff) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        /* lu12i.w leaves the low 12 bits zero; or in the remainder. */
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
+
/* Load immediate VAL of the given TYPE into RD. */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *           3                   2                   1
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belong to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi32, hi52;
    bool rd_high_bits_are_ones;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i: 4-byte-aligned offset within +/-2MiB of pc. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        /* Delta between the target's and pc's upper (4KiB-granular) bits. */
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case: the low 52 bits are all zero. */
    if (ctz64(val) >= 52) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path. Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);
    /* After movi_i32, bits 63..32 of rd are copies of bit 31. */
    rd_high_bits_are_ones = (int32_t)val < 0;

    if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
        rd_high_bits_are_ones = hi32 < 0;
    }

    if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
+
/* Zero-extend the low 8 bits of arg into ret. */
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}
+
/* Zero-extend the low 16 bits of arg into ret (pick bits [15:0]). */
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}
+
/* Zero-extend the low 32 bits of arg into ret (pick bits [31:0]). */
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}
+
/* Sign-extend the low 8 bits of arg into ret. */
static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}
+
/* Sign-extend the low 16 bits of arg into ret. */
static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}
+
/*
 * Sign-extend the low 32 bits of arg into ret; addi.w with immediate 0
 * sign-extends its 32-bit result to 64 bits.
 */
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}
+
/*
 * Emit a count-leading/trailing-zeros operation.  OPC is the DJ-format
 * count instruction (clz/ctz, .w/.d).  TCG semantics: a0 = count(a1),
 * except a0 = a2 when a1 == 0.  With C2 set, a2 is the constant equal
 * to the operation width (enforced by the constraint and asserted
 * below), which coincides with the insn's own zero-input result, so a
 * single instruction suffices.
 */
static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); /* a1 ? tmp0 : 0 */
    tcg_out_opc_masknez(s, a0, a2, a1);                     /* a1 ? 0 : a2 */
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
+
/*
 * Emit code computing ret = (arg1 COND arg2) as 0/1.
 *
 * With C2 set, arg2 is the constant 0 (asserted below); the EQ/NE cases
 * can then test arg1 directly instead of computing a difference first.
 * LE/GT (and LEU/GTU) are synthesized by swapping the operands of
 * GE/LT (GEU/LTU) respectively.
 */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2, bool c2)
{
    TCGReg tmp;

    if (c2) {
        tcg_debug_assert(arg2 == 0);
    }

    switch (cond) {
    case TCG_COND_EQ:
        if (c2) {
            tmp = arg1;
        } else {
            tcg_out_opc_sub_d(s, ret, arg1, arg2);
            tmp = ret;
        }
        /* ret = (tmp <u 1), i.e. tmp == 0 */
        tcg_out_opc_sltui(s, ret, tmp, 1);
        break;
    case TCG_COND_NE:
        if (c2) {
            tmp = arg1;
        } else {
            tcg_out_opc_sub_d(s, ret, arg1, arg2);
            tmp = ret;
        }
        /* ret = (0 <u tmp), i.e. tmp != 0 */
        tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
        break;
    case TCG_COND_LT:
        tcg_out_opc_slt(s, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        /* a >= b  <=>  !(a < b) */
        tcg_out_opc_slt(s, ret, arg1, arg2);
        tcg_out_opc_xori(s, ret, ret, 1);
        break;
    case TCG_COND_LE:
        /* a <= b  <=>  b >= a */
        tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false);
        break;
    case TCG_COND_GT:
        /* a > b  <=>  b < a */
        tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_sltu(s, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        /* a >=u b  <=>  !(a <u b) */
        tcg_out_opc_sltu(s, ret, arg1, arg2);
        tcg_out_opc_xori(s, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false);
        break;
    case TCG_COND_GTU:
        tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}
+
+/*
+ * Branch helpers
+ */
+
/*
 * Map a TCG branch condition to the branch opcode to emit and whether
 * the two operands must be swapped first (the available opcodes cover
 * only one direction of each ordered comparison).  Conditions absent
 * from the initializer map to op == 0, caught by the assert in
 * tcg_out_brcond().
 */
static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BGT,  true  },
    [TCG_COND_GE] =  { OPC_BLE,  true  },
    [TCG_COND_LE] =  { OPC_BLE,  false },
    [TCG_COND_GT] =  { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};
+
/*
 * Emit a conditional branch to label L, taken when (arg1 COND arg2).
 * The 16-bit displacement is left zero and resolved later through the
 * R_LOONGARCH_BR_SK16 relocation recorded here.
 */
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    /* op == 0 means COND has no entry in the mapping table. */
    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}
+
/*
 * Emit a call (or, with TAIL set, a tail call) to ARG, using the
 * shortest sequence the displacement allows.  A tail call links to the
 * zero register, so no return address is written.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        /* hi is a multiple of 2^18; lo is 4-byte aligned (asserted above). */
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
+
/* Emit a plain (non-tail) call to ARG. */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}
+
+/*
+ * Load/store helpers
+ */
+
+static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
+ TCGReg addr, intptr_t offset)
+{
+ intptr_t imm12 = sextreg(offset, 0, 12);
+
+ if (offset != imm12) {
+ intptr_t diff = offset - (uintptr_t)s->code_ptr;
+
+ if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
+ imm12 = sextreg(diff, 0, 12);
+ tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
+ if (addr != TCG_REG_ZERO) {
+ tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
+ }
+ }
+ addr = TCG_REG_TMP2;
+ }
+
+ switch (opc) {
+ case OPC_LD_B:
+ case OPC_LD_BU:
+ case OPC_LD_H:
+ case OPC_LD_HU:
+ case OPC_LD_W:
+ case OPC_LD_WU:
+ case OPC_LD_D:
+ case OPC_ST_B:
+ case OPC_ST_H:
+ case OPC_ST_W:
+ case OPC_ST_D:
+ tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
+ TCGReg arg1, intptr_t arg2)
+{
+ bool is_32bit = type == TCG_TYPE_I32;
+ tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
+}
+
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
+ TCGReg arg1, intptr_t arg2)
+{
+ bool is_32bit = type == TCG_TYPE_I32;
+ tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
+}
+
+static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
+ TCGReg base, intptr_t ofs)
+{
+ if (val == 0) {
+ tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Load/store helpers for SoftMMU, and qemu_ld/st implementations
+ */
+
+#if defined(CONFIG_SOFTMMU)
+/*
+ * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ * MemOpIdx oi, uintptr_t ra)
+ */
+static void * const qemu_ld_helpers[4] = {
+ [MO_8] = helper_ret_ldub_mmu,
+ [MO_16] = helper_le_lduw_mmu,
+ [MO_32] = helper_le_ldul_mmu,
+ [MO_64] = helper_le_ldq_mmu,
+};
+
+/*
+ * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ * uintxx_t val, MemOpIdx oi,
+ * uintptr_t ra)
+ */
+static void * const qemu_st_helpers[4] = {
+ [MO_8] = helper_ret_stb_mmu,
+ [MO_16] = helper_le_stw_mmu,
+ [MO_32] = helper_le_stl_mmu,
+ [MO_64] = helper_le_stq_mmu,
+};
+
+/* We expect to use a 12-bit negative offset from ENV. */
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
+
/*
 * Emit an unconditional branch to TARGET.  The zero displacement is
 * patched immediately in place; returns false if TARGET is outside the
 * B instruction's +/-128MiB range.
 */
static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}
+
+/*
+ * Emits common code for TLB addend lookup, that eventually loads the
+ * addend in TCG_REG_TMP2.
+ */
/*
 * Emits common code for TLB addend lookup, that eventually loads the
 * addend in TCG_REG_TMP2.
 *
 * ADDRL holds the guest virtual address; OI packs the MemOp and mmu
 * index.  On a hit, execution falls through with the host-address
 * addend in TCG_REG_TMP2; on a miss, the BNE recorded in LABEL_PTR[0]
 * (patched later) branches to the slow path.  Clobbers TMP0-TMP2.
 */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    /* TMP0 = TLB index mask, TMP1 = TLB table base. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    /* TMP2 = &table[(addr >> PAGE_BITS) % entries], via masked byte offset. */
    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
               : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
}
+
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
+ TCGType type,
+ TCGReg datalo, TCGReg addrlo,
+ void *raddr, tcg_insn_unit **label_ptr)
+{
+ TCGLabelQemuLdst *label = new_ldst_label(s);
+
+ label->is_ld = is_ld;
+ label->oi = oi;
+ label->type = type;
+ label->datalo_reg = datalo;
+ label->datahi_reg = 0; /* unused */
+ label->addrlo_reg = addrlo;
+ label->addrhi_reg = 0; /* unused */
+ label->raddr = tcg_splitwx_to_rx(raddr);
+ label->label_ptr[0] = label_ptr[0];
+}
+
/*
 * Out-of-line slow path for qemu_ld (softmmu): the fast-path TLB check
 * branched here.  Calls the size-appropriate load helper, extends the
 * helper's return value into the destination register, and jumps back
 * to the code following the fast path.
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;
    TCGType type = l->type;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper: (env, addr, oi, retaddr) */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[size]);

    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SL:
        tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I32) {
            /* MO_UL loads of i32 should be sign-extended too */
            tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
            break;
        }
        /* fallthrough */
    default:
        tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
        break;
    }

    return tcg_out_goto(s, l->raddr);
}
+
/*
 * Out-of-line slow path for qemu_st (softmmu): calls the
 * size-appropriate store helper with the data value narrowed to the
 * access size, then jumps back past the fast path.
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper: (env, addr, val, oi, retaddr) */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    switch (size) {
    case MO_8:
        tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_16:
        tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_32:
        tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_64:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
        break;
    default:
        g_assert_not_reached();
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[size]);

    return tcg_out_goto(s, l->raddr);
}
+#else
+
+/*
+ * Alignment helpers for user-mode emulation
+ */
+
/*
 * Emit an inline check that ADDR_REG is aligned to (1 << a_bits) bytes,
 * recording a slow-path fixup that is taken when any of the low a_bits
 * address bits are nonzero.
 */
static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /*
     * Without micro-architecture details, we don't know which of bstrpick or
     * andi is faster, so use bstrpick as it's not constrained by imm field
     * width. (Not to say alignments >= 2^12 are going to happen any time
     * soon, though)
     */
    tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

    /* Branch to the slow path when the extracted low bits are nonzero. */
    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
+
/*
 * Out-of-line path for a failed alignment check in user-mode emulation:
 * tail-call the unaligned-access helper with (env, addr), first placing
 * the fast path's return address in RA so the helper sees the faulting
 * guest context.
 */
static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                       : helper_unaligned_st), true);
    return true;
}
+
/* User-mode: the only qemu_ld slow path is an alignment fault. */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
+
/* User-mode: the only qemu_st slow path is an alignment fault. */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
+
+#endif /* CONFIG_SOFTMMU */
+
+/*
+ * `ext32u` the address register into the temp register given,
+ * if target is 32-bit, no-op otherwise.
+ *
+ * Returns the address register ready for use with TLB addend.
+ */
+static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
+ TCGReg addr, TCGReg tmp)
+{
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_ext32u(s, tmp, addr);
+ return tmp;
+ }
+ return addr;
+}
+
/*
 * Emit one indexed load rd <- [rj + rk], choosing memory width and
 * extension from OPC.  For MO_UL with an i64 result, ldx.wu performs
 * the requested zero-extension; with an i32 result it falls through to
 * the sign-extending ldx.w.
 */
static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
                                    TCGReg rk, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, rj, rk);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, rj, rk);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, rj, rk);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, rj, rk);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, rj, rk);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, rj, rk);
        break;
    case MO_Q:
        tcg_out_opc_ldx_d(s, rd, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}
+
/*
 * Emit a guest memory load.  ARGS is the TCG opcode argument list:
 * data register, address register, MemOpIdx.  With softmmu, the TLB
 * fast path supplies the addend in TCG_REG_TMP2 and a slow-path fixup
 * is recorded; in user mode an optional alignment check is emitted and
 * the (possibly zero) guest base register is used as the index.
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
    add_qemu_ldst_label(s, 1, oi, type,
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    /* With guest_base == 0 the zero register serves as the null index. */
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
#endif
}
+
/*
 * Emit one indexed store [rj + rk] <- data, choosing the store width
 * from OPC.
 */
static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
                                    TCGReg rj, TCGReg rk, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, data, rj, rk);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, data, rj, rk);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, data, rj, rk);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, data, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}
+
/*
 * Emit a guest memory store.  ARGS is the TCG opcode argument list:
 * data register, address register, MemOpIdx.  Mirrors tcg_out_qemu_ld:
 * TLB fast path + slow-path fixup with softmmu, alignment check +
 * guest-base indexing in user mode.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
    add_qemu_ldst_label(s, 0, oi,
                        0, /* type param is unused for stores */
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    /* With guest_base == 0 the zero register serves as the null index. */
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
#endif
}
+
+/*
+ * Entry-points
+ */
+
+static const tcg_insn_unit *tb_ret_addr;
+
+static void tcg_out_op(TCGContext *s, TCGOpcode opc,
+ const TCGArg args[TCG_MAX_OP_ARGS],
+ const int const_args[TCG_MAX_OP_ARGS])
+{
+ TCGArg a0 = args[0];
+ TCGArg a1 = args[1];
+ TCGArg a2 = args[2];
+ int c2 = const_args[2];
+
+ switch (opc) {
+ case INDEX_op_exit_tb:
+ /* Reuse the zeroing that exists for goto_ptr. */
+ if (a0 == 0) {
+ tcg_out_call_int(s, tcg_code_gen_epilogue, true);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
+ tcg_out_call_int(s, tb_ret_addr, true);
+ }
+ break;
+
+ case INDEX_op_goto_tb:
+ assert(s->tb_jmp_insn_offset == 0);
+ /* indirect jump method */
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
+ (uintptr_t)(s->tb_jmp_target_addr + a0));
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
+ set_jmp_reset_offset(s, a0);
+ break;
+
+ case INDEX_op_mb:
+ tcg_out_mb(s, a0);
+ break;
+
+ case INDEX_op_goto_ptr:
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
+ break;
+
+ case INDEX_op_br:
+ tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
+ 0);
+ tcg_out_opc_b(s, 0);
+ break;
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
+ break;
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ tcg_out_ext8s(s, a0, a1);
+ break;
+
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ tcg_out_ext8u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ tcg_out_ext16s(s, a0, a1);
+ break;
+
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ tcg_out_ext16u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ tcg_out_ext32u(s, a0, a1);
+ break;
+
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_ext_i32_i64:
+ tcg_out_ext32s(s, a0, a1);
+ break;
+
+ case INDEX_op_extrh_i64_i32:
+ tcg_out_opc_srai_d(s, a0, a1, 32);
+ break;
+
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
+ break;
+
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ if (c2) {
+ tcg_out_opc_ori(s, a0, a1, a2);
+ tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
+ } else {
+ tcg_out_opc_nor(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ if (c2) {
+ /* guaranteed to fit due to constraint */
+ tcg_out_opc_andi(s, a0, a1, ~a2);
+ } else {
+ tcg_out_opc_andn(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ if (c2) {
+ /* guaranteed to fit due to constraint */
+ tcg_out_opc_ori(s, a0, a1, ~a2);
+ } else {
+ tcg_out_opc_orn(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ if (c2) {
+ tcg_out_opc_andi(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_and(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ if (c2) {
+ tcg_out_opc_ori(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_or(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ if (c2) {
+ tcg_out_opc_xori(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_xor(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_extract_i32:
+ tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
+ break;
+ case INDEX_op_extract_i64:
+ tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
+ break;
+
+ case INDEX_op_deposit_i32:
+ tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
+ break;
+ case INDEX_op_deposit_i64:
+ tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
+ break;
+
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ tcg_out_opc_revb_2h(s, a0, a1);
+ if (a2 & TCG_BSWAP_OS) {
+ tcg_out_ext16s(s, a0, a0);
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext16u(s, a0, a0);
+ }
+ break;
+
+ case INDEX_op_bswap32_i32:
+ /* All 32-bit values are computed sign-extended in the register. */
+ a2 = TCG_BSWAP_OS;
+ /* fallthrough */
+ case INDEX_op_bswap32_i64:
+ tcg_out_opc_revb_2w(s, a0, a1);
+ if (a2 & TCG_BSWAP_OS) {
+ tcg_out_ext32s(s, a0, a0);
+ } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
+ tcg_out_ext32u(s, a0, a0);
+ }
+ break;
+
+ case INDEX_op_bswap64_i64:
+ tcg_out_opc_revb_d(s, a0, a1);
+ break;
+
+ case INDEX_op_clz_i32:
+ tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
+ break;
+ case INDEX_op_clz_i64:
+ tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
+ break;
+
+ case INDEX_op_ctz_i32:
+ tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
+ break;
+ case INDEX_op_ctz_i64:
+ tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
+ break;
+
+ case INDEX_op_shl_i32:
+ if (c2) {
+ tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_sll_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_shl_i64:
+ if (c2) {
+ tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_sll_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_shr_i32:
+ if (c2) {
+ tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_srl_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_shr_i64:
+ if (c2) {
+ tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_srl_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_sar_i32:
+ if (c2) {
+ tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_sra_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_sar_i64:
+ if (c2) {
+ tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_sra_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_rotl_i32:
+ /* transform into equivalent rotr/rotri */
+ if (c2) {
+ tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
+ } else {
+ tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
+ }
+ break;
+ case INDEX_op_rotl_i64:
+ /* transform into equivalent rotr/rotri */
+ if (c2) {
+ tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
+ } else {
+ tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
+ tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
+ }
+ break;
+
+ case INDEX_op_rotr_i32:
+ if (c2) {
+ tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
+ } else {
+ tcg_out_opc_rotr_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_rotr_i64:
+ if (c2) {
+ tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
+ } else {
+ tcg_out_opc_rotr_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_add_i32:
+ if (c2) {
+ tcg_out_opc_addi_w(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_add_i64:
+ if (c2) {
+ tcg_out_opc_addi_d(s, a0, a1, a2);
+ } else {
+ tcg_out_opc_add_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_sub_i32:
+ if (c2) {
+ tcg_out_opc_addi_w(s, a0, a1, -a2);
+ } else {
+ tcg_out_opc_sub_w(s, a0, a1, a2);
+ }
+ break;
+ case INDEX_op_sub_i64:
+ if (c2) {
+ tcg_out_opc_addi_d(s, a0, a1, -a2);
+ } else {
+ tcg_out_opc_sub_d(s, a0, a1, a2);
+ }
+ break;
+
+ case INDEX_op_mul_i32:
+ tcg_out_opc_mul_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_mul_i64:
+ tcg_out_opc_mul_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_mulsh_i32:
+ tcg_out_opc_mulh_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_mulsh_i64:
+ tcg_out_opc_mulh_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_muluh_i32:
+ tcg_out_opc_mulh_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_muluh_i64:
+ tcg_out_opc_mulh_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_div_i32:
+ tcg_out_opc_div_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_div_i64:
+ tcg_out_opc_div_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_divu_i32:
+ tcg_out_opc_div_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_divu_i64:
+ tcg_out_opc_div_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_rem_i32:
+ tcg_out_opc_mod_w(s, a0, a1, a2);
+ break;
+ case INDEX_op_rem_i64:
+ tcg_out_opc_mod_d(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_remu_i32:
+ tcg_out_opc_mod_wu(s, a0, a1, a2);
+ break;
+ case INDEX_op_remu_i64:
+ tcg_out_opc_mod_du(s, a0, a1, a2);
+ break;
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ tcg_out_setcond(s, args[3], a0, a1, a2, c2);
+ break;
+
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
+ break;
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
+ break;
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
+ break;
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld32s_i64:
+ tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
+ break;
+ case INDEX_op_ld32u_i64:
+ tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
+ break;
+ case INDEX_op_ld_i64:
+ tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
+ break;
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
+ break;
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
+ break;
+ case INDEX_op_st_i32:
+ case INDEX_op_st32_i64:
+ tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
+ break;
+ case INDEX_op_st_i64:
+ tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
+ break;
+
+ case INDEX_op_qemu_ld_i32:
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
+ break;
+ case INDEX_op_qemu_ld_i64:
+ tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
+ break;
+ case INDEX_op_qemu_st_i32:
+ tcg_out_qemu_st(s, args);
+ break;
+ case INDEX_op_qemu_st_i64:
+ tcg_out_qemu_st(s, args);
+ break;
+
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
+ case INDEX_op_mov_i64:
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
+{
+ switch (op) {
+ case INDEX_op_goto_ptr:
+ return C_O0_I1(r);
+
+ case INDEX_op_st8_i32:
+ case INDEX_op_st8_i64:
+ case INDEX_op_st16_i32:
+ case INDEX_op_st16_i64:
+ case INDEX_op_st32_i64:
+ case INDEX_op_st_i32:
+ case INDEX_op_st_i64:
+ return C_O0_I2(rZ, r);
+
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ return C_O0_I2(rZ, rZ);
+
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st_i64:
+ return C_O0_I2(LZ, L);
+
+ case INDEX_op_ext8s_i32:
+ case INDEX_op_ext8s_i64:
+ case INDEX_op_ext8u_i32:
+ case INDEX_op_ext8u_i64:
+ case INDEX_op_ext16s_i32:
+ case INDEX_op_ext16s_i64:
+ case INDEX_op_ext16u_i32:
+ case INDEX_op_ext16u_i64:
+ case INDEX_op_ext32s_i64:
+ case INDEX_op_ext32u_i64:
+ case INDEX_op_extu_i32_i64:
+ case INDEX_op_extrl_i64_i32:
+ case INDEX_op_extrh_i64_i32:
+ case INDEX_op_ext_i32_i64:
+ case INDEX_op_not_i32:
+ case INDEX_op_not_i64:
+ case INDEX_op_extract_i32:
+ case INDEX_op_extract_i64:
+ case INDEX_op_bswap16_i32:
+ case INDEX_op_bswap16_i64:
+ case INDEX_op_bswap32_i32:
+ case INDEX_op_bswap32_i64:
+ case INDEX_op_bswap64_i64:
+ case INDEX_op_ld8s_i32:
+ case INDEX_op_ld8s_i64:
+ case INDEX_op_ld8u_i32:
+ case INDEX_op_ld8u_i64:
+ case INDEX_op_ld16s_i32:
+ case INDEX_op_ld16s_i64:
+ case INDEX_op_ld16u_i32:
+ case INDEX_op_ld16u_i64:
+ case INDEX_op_ld32s_i64:
+ case INDEX_op_ld32u_i64:
+ case INDEX_op_ld_i32:
+ case INDEX_op_ld_i64:
+ return C_O1_I1(r, r);
+
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ return C_O1_I1(r, L);
+
+ case INDEX_op_andc_i32:
+ case INDEX_op_andc_i64:
+ case INDEX_op_orc_i32:
+ case INDEX_op_orc_i64:
+ /*
+ * LoongArch insns for these ops don't have reg-imm forms, but we
+ * can express using andi/ori if ~constant satisfies
+ * TCG_CT_CONST_U12.
+ */
+ return C_O1_I2(r, r, rC);
+
+ case INDEX_op_shl_i32:
+ case INDEX_op_shl_i64:
+ case INDEX_op_shr_i32:
+ case INDEX_op_shr_i64:
+ case INDEX_op_sar_i32:
+ case INDEX_op_sar_i64:
+ case INDEX_op_rotl_i32:
+ case INDEX_op_rotl_i64:
+ case INDEX_op_rotr_i32:
+ case INDEX_op_rotr_i64:
+ return C_O1_I2(r, r, ri);
+
+ case INDEX_op_add_i32:
+ case INDEX_op_add_i64:
+ return C_O1_I2(r, r, rI);
+
+ case INDEX_op_and_i32:
+ case INDEX_op_and_i64:
+ case INDEX_op_nor_i32:
+ case INDEX_op_nor_i64:
+ case INDEX_op_or_i32:
+ case INDEX_op_or_i64:
+ case INDEX_op_xor_i32:
+ case INDEX_op_xor_i64:
+ /* LoongArch reg-imm bitops have their imms ZERO-extended */
+ return C_O1_I2(r, r, rU);
+
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ return C_O1_I2(r, r, rW);
+
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ return C_O1_I2(r, r, rZ);
+
+ case INDEX_op_deposit_i32:
+ case INDEX_op_deposit_i64:
+        /* bstrins modifies its destination in place, so the output must alias input 0 */
+ return C_O1_I2(r, 0, rZ);
+
+ case INDEX_op_sub_i32:
+ case INDEX_op_sub_i64:
+ return C_O1_I2(r, rZ, rN);
+
+ case INDEX_op_mul_i32:
+ case INDEX_op_mul_i64:
+ case INDEX_op_mulsh_i32:
+ case INDEX_op_mulsh_i64:
+ case INDEX_op_muluh_i32:
+ case INDEX_op_muluh_i64:
+ case INDEX_op_div_i32:
+ case INDEX_op_div_i64:
+ case INDEX_op_divu_i32:
+ case INDEX_op_divu_i64:
+ case INDEX_op_rem_i32:
+ case INDEX_op_rem_i64:
+ case INDEX_op_remu_i32:
+ case INDEX_op_remu_i64:
+ return C_O1_I2(r, rZ, rZ);
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static const int tcg_target_callee_save_regs[] = {
+ TCG_REG_S0, /* used for the global env (TCG_AREG0) */
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+ TCG_REG_S9,
+ TCG_REG_RA, /* should be last for ABI compliance */
+};
+
+/* Stack frame parameters. */
+#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
+#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
+#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
+#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
+ + TCG_TARGET_STACK_ALIGN - 1) \
+ & -TCG_TARGET_STACK_ALIGN)
+#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
+
+/* FRAME_SIZE must fit in addi.d's signed 12-bit immediate for frame setup/teardown. */
+QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
+
+/* Generate global QEMU prologue and epilogue code */
+static void tcg_target_qemu_prologue(TCGContext *s)
+{
+ int i;
+
+ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
+
+ /* TB prologue */
+ tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
+ }
+
+#if !defined(CONFIG_SOFTMMU)
+ if (USE_GUEST_BASE) {
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
+ tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
+ }
+#endif
+
+ /* Call generated code */
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
+
+ /* Return path for goto_ptr. Set return value to 0 */
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
+ tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
+
+ /* TB epilogue */
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
+ for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
+ tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
+ TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
+ }
+
+ tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
+ tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
+}
+
+static void tcg_target_init(TCGContext *s)
+{
+ tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
+ tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
+
+ tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
+
+ s->reserved_regs = 0;
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
+}
+
+typedef struct {
+ DebugFrameHeader h;
+ uint8_t fde_def_cfa[4];
+ uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
+} DebugFrame;
+
+#define ELF_HOST_MACHINE EM_LOONGARCH
+
+static const DebugFrame debug_frame = {
+ .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
+ .h.cie.id = -1,
+ .h.cie.version = 1,
+ .h.cie.code_align = 1,
+ .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
+ .h.cie.return_column = TCG_REG_RA,
+
+ /* Total FDE size does not include the "len" member. */
+ .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
+
+ .fde_def_cfa = {
+ 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
+ (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
+ (FRAME_SIZE >> 7)
+ },
+ .fde_reg_ofs = {
+ 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
+ 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
+ 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
+ 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
+ 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
+ 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
+ 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
+ 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
+ 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
+ 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
+ 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */
+ }
+};
+
+void tcg_register_jit(const void *buf, size_t buf_size)
+{
+ tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
+}
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
new file mode 100644
index 0000000000000000000000000000000000000000..d58a6162f2248f2e7e221b8408b5b6656e647460
--- /dev/null
+++ b/tcg/loongarch64/tcg-target.h
@@ -0,0 +1,178 @@
+/*
+ * Tiny Code Generator for QEMU
+ *
+ * Copyright (c) 2021 WANG Xuerui
+ *
+ * Based on tcg/riscv/tcg-target.h
+ *
+ * Copyright (c) 2018 SiFive, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef LOONGARCH_TCG_TARGET_H
+#define LOONGARCH_TCG_TARGET_H
+
+/*
+ * Loongson removed the (incomplete) 32-bit support from kernel and toolchain
+ * for the initial upstreaming of this architecture, so don't bother and just
+ * support the LP64* ABI for now.
+ */
+#if defined(__loongarch64)
+# define TCG_TARGET_REG_BITS 64
+#else
+# error unsupported LoongArch register size
+#endif
+
+#define TCG_TARGET_INSN_UNIT_SIZE 4
+#define TCG_TARGET_NB_REGS 32
+#define MAX_CODE_GEN_BUFFER_SIZE SIZE_MAX
+
+typedef enum {
+ TCG_REG_ZERO,
+ TCG_REG_RA,
+ TCG_REG_TP,
+ TCG_REG_SP,
+ TCG_REG_A0,
+ TCG_REG_A1,
+ TCG_REG_A2,
+ TCG_REG_A3,
+ TCG_REG_A4,
+ TCG_REG_A5,
+ TCG_REG_A6,
+ TCG_REG_A7,
+ TCG_REG_T0,
+ TCG_REG_T1,
+ TCG_REG_T2,
+ TCG_REG_T3,
+ TCG_REG_T4,
+ TCG_REG_T5,
+ TCG_REG_T6,
+ TCG_REG_T7,
+ TCG_REG_T8,
+ TCG_REG_RESERVED,
+ TCG_REG_S9,
+ TCG_REG_S0,
+ TCG_REG_S1,
+ TCG_REG_S2,
+ TCG_REG_S3,
+ TCG_REG_S4,
+ TCG_REG_S5,
+ TCG_REG_S6,
+ TCG_REG_S7,
+ TCG_REG_S8,
+
+ /* aliases */
+ TCG_AREG0 = TCG_REG_S0,
+ TCG_REG_TMP0 = TCG_REG_T8,
+ TCG_REG_TMP1 = TCG_REG_T7,
+ TCG_REG_TMP2 = TCG_REG_T6,
+} TCGReg;
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+
+/* optional instructions */
+#define TCG_TARGET_HAS_movcond_i32 0
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_rem_i32 1
+#define TCG_TARGET_HAS_div2_i32 0
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_extract2_i32 0
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 1
+#define TCG_TARGET_HAS_mulsh_i32 1
+#define TCG_TARGET_HAS_ext8s_i32 1
+#define TCG_TARGET_HAS_ext16s_i32 1
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_bswap16_i32 1
+#define TCG_TARGET_HAS_bswap32_i32 1
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_neg_i32 0
+#define TCG_TARGET_HAS_andc_i32 1
+#define TCG_TARGET_HAS_orc_i32 1
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 1
+#define TCG_TARGET_HAS_clz_i32 1
+#define TCG_TARGET_HAS_ctz_i32 1
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_brcond2 0
+#define TCG_TARGET_HAS_setcond2 0
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+
+/* 64-bit operations */
+#define TCG_TARGET_HAS_movcond_i64 0
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 1
+#define TCG_TARGET_HAS_div2_i64 0
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_extrl_i64_i32 1
+#define TCG_TARGET_HAS_extrh_i64_i32 1
+#define TCG_TARGET_HAS_ext8s_i64 1
+#define TCG_TARGET_HAS_ext16s_i64 1
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_bswap16_i64 1
+#define TCG_TARGET_HAS_bswap32_i64 1
+#define TCG_TARGET_HAS_bswap64_i64 1
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_neg_i64 0
+#define TCG_TARGET_HAS_andc_i64 1
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 1
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
+
+/* not defined -- never called since TCG_TARGET_HAS_direct_jump is 0 */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+
+#define TCG_TARGET_DEFAULT_MO (0)
+
+#define TCG_TARGET_NEED_LDST_LABELS
+
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
+
+#endif /* LOONGARCH_TCG_TARGET_H */