From cc74177f236f9c3b70017a8dec6c765d9a774545 Mon Sep 17 00:00:00 2001
From: Song Gao
Date: Thu, 1 Feb 2024 16:39:14 +0800
Subject: [PATCH] LoongArch: Add patches from master

This patch adds the following patches from the master branch:

1. hw/loongarch/virt: Align high memory base address with super page size
2. target/loongarch: Add timer information dump support
3. target/loongarch/meson: move gdbstub.c to loongarch.ss
4. target/loongarch: move translate modules to tcg/
5. linux-headers: Update to Linux v6.7-rc5
6. linux-headers: Synchronize linux headers from linux v6.7.0-rc8
7. target/loongarch: Define some kvm_arch interfaces
8. target/loongarch: Supplement vcpu env initial when vcpu reset
9. target/loongarch: Implement kvm get/set registers
10. target/loongarch: Implement kvm_arch_init function
11. target/loongarch: Implement kvm_arch_init_vcpu
12. target/loongarch: Implement kvm_arch_handle_exit
13. target/loongarch: Restrict TCG-specific code
14. target/loongarch: Implement set vcpu intr for kvm
15. target/loongarch: Add loongarch kvm into meson build
16. hw/intc/loongarch_ipi: Use MemTxAttrs interface for ipi ops
17. hw/loongarch/virt: Set iocsr address space per-board rather than percpu
18. hw/intc/loongarch_extioi: Add dynamic cpu number support
19. hw/intc/loongarch_extioi: Add vmstate post_load support
20. configure: Add linux header compile support for LoongArch
21. target/loongarch: Set cpuid CSR register only once with kvm mode
22. target/loongarch/kvm: Enable LSX/LASX extension
23. target/loongarch: Fix qtest test-hmp error when KVM-only build

Signed-off-by: Song Gao
---
 configure                                    |   1 +
 hw/intc/loongarch_extioi.c                   | 232 +++--
 hw/intc/loongarch_ipi.c                      | 193 +++--
 hw/loongarch/virt.c                          |  94 +-
 include/hw/intc/loongarch_extioi.h           |  12 +-
 include/hw/intc/loongarch_ipi.h              |   3 +-
 include/hw/loongarch/virt.h                  |   5 +-
 include/standard-headers/drm/drm_fourcc.h    |   2 +
 include/standard-headers/linux/fuse.h        |  10 +-
 include/standard-headers/linux/pci_regs.h    |  24 +-
 include/standard-headers/linux/vhost_types.h |   7 +
 .../standard-headers/linux/virtio_config.h   |   5 +
 include/standard-headers/linux/virtio_pci.h  |  11 +
 linux-headers/asm-arm64/kvm.h                |  32 +
 linux-headers/asm-generic/unistd.h           |  14 +-
 linux-headers/asm-loongarch/bitsperlong.h    |   1 +
 linux-headers/asm-loongarch/kvm.h            | 109 +++
 linux-headers/asm-loongarch/mman.h           |   1 +
 linux-headers/asm-loongarch/unistd.h         |   5 +
 linux-headers/asm-mips/unistd_n32.h          |   4 +
 linux-headers/asm-mips/unistd_n64.h          |   4 +
 linux-headers/asm-mips/unistd_o32.h          |   4 +
 linux-headers/asm-powerpc/unistd_32.h        |   4 +
 linux-headers/asm-powerpc/unistd_64.h        |   4 +
 linux-headers/asm-riscv/kvm.h                |  12 +
 linux-headers/asm-s390/unistd_32.h           |   4 +
 linux-headers/asm-s390/unistd_64.h           |   4 +
 linux-headers/asm-x86/unistd_32.h            |   4 +
 linux-headers/asm-x86/unistd_64.h            |   3 +
 linux-headers/asm-x86/unistd_x32.h           |   3 +
 linux-headers/linux/iommufd.h                | 180 +++-
 linux-headers/linux/kvm.h                    |  11 +
 linux-headers/linux/psp-sev.h                |   1 +
 linux-headers/linux/stddef.h                 |   9 +-
 linux-headers/linux/userfaultfd.h            |   9 +-
 linux-headers/linux/vfio.h                   |  47 +-
 linux-headers/linux/vhost.h                  |   8 +
 meson.build                                  |   3 +
 target/loongarch/cpu.c                       |  90 +-
 target/loongarch/cpu.h                       |   9 +-
 target/loongarch/cpu_helper.c                | 231 +++++
 target/loongarch/internals.h                 |  23 +-
 target/loongarch/kvm/kvm.c                   | 802 ++++++++++++++++++
 target/loongarch/kvm/kvm_loongarch.h         |  16 +
 target/loongarch/kvm/meson.build             |   1 +
 target/loongarch/meson.build                 |  17 +-
 target/loongarch/{ => tcg}/constant_timer.c  |   0
 target/loongarch/{ => tcg}/csr_helper.c      |   0
 target/loongarch/{ =>
tcg}/fpu_helper.c | 0 .../{ => tcg}/insn_trans/trans_arith.c.inc | 0 .../{ => tcg}/insn_trans/trans_atomic.c.inc | 0 .../{ => tcg}/insn_trans/trans_bit.c.inc | 0 .../{ => tcg}/insn_trans/trans_branch.c.inc | 0 .../{ => tcg}/insn_trans/trans_extra.c.inc | 0 .../{ => tcg}/insn_trans/trans_farith.c.inc | 0 .../{ => tcg}/insn_trans/trans_fcmp.c.inc | 0 .../{ => tcg}/insn_trans/trans_fcnv.c.inc | 0 .../{ => tcg}/insn_trans/trans_fmemory.c.inc | 0 .../{ => tcg}/insn_trans/trans_fmov.c.inc | 0 .../{ => tcg}/insn_trans/trans_memory.c.inc | 0 .../insn_trans/trans_privileged.c.inc | 0 .../{ => tcg}/insn_trans/trans_shift.c.inc | 0 .../{ => tcg}/insn_trans/trans_vec.c.inc | 0 target/loongarch/{ => tcg}/iocsr_helper.c | 16 +- target/loongarch/tcg/meson.build | 19 + target/loongarch/{ => tcg}/op_helper.c | 0 target/loongarch/{ => tcg}/tlb_helper.c | 230 ----- target/loongarch/{ => tcg}/translate.c | 0 target/loongarch/{ => tcg}/vec_helper.c | 0 target/loongarch/trace-events | 15 + target/loongarch/trace.h | 1 + 71 files changed, 2016 insertions(+), 533 deletions(-) create mode 100644 linux-headers/asm-loongarch/bitsperlong.h create mode 100644 linux-headers/asm-loongarch/kvm.h create mode 100644 linux-headers/asm-loongarch/mman.h create mode 100644 linux-headers/asm-loongarch/unistd.h create mode 100644 target/loongarch/cpu_helper.c create mode 100644 target/loongarch/kvm/kvm.c create mode 100644 target/loongarch/kvm/kvm_loongarch.h create mode 100644 target/loongarch/kvm/meson.build rename target/loongarch/{ => tcg}/constant_timer.c (100%) rename target/loongarch/{ => tcg}/csr_helper.c (100%) rename target/loongarch/{ => tcg}/fpu_helper.c (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_arith.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_atomic.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_bit.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_branch.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_extra.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_farith.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_fcmp.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_fcnv.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_fmemory.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_fmov.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_memory.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_privileged.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_shift.c.inc (100%) rename target/loongarch/{ => tcg}/insn_trans/trans_vec.c.inc (100%) rename target/loongarch/{ => tcg}/iocsr_helper.c (76%) create mode 100644 target/loongarch/tcg/meson.build rename target/loongarch/{ => tcg}/op_helper.c (100%) rename target/loongarch/{ => tcg}/tlb_helper.c (71%) rename target/loongarch/{ => tcg}/translate.c (100%) rename target/loongarch/{ => tcg}/vec_helper.c (100%) create mode 100644 target/loongarch/trace-events create mode 100644 target/loongarch/trace.h diff --git a/configure b/configure index bdda912f36..6036de83a4 100755 --- a/configure +++ b/configure @@ -445,6 +445,7 @@ case "$cpu" in loongarch*) cpu=loongarch64 host_arch=loongarch64 + linux_arch=loongarch ;; mips64*) diff --git a/hw/intc/loongarch_extioi.c b/hw/intc/loongarch_extioi.c index 24fb3af8cc..bdfa3b481e 100644 --- a/hw/intc/loongarch_extioi.c +++ b/hw/intc/loongarch_extioi.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qemu/module.h" #include "qemu/log.h" +#include 
"qapi/error.h" #include "hw/irq.h" #include "hw/sysbus.h" #include "hw/loongarch/virt.h" @@ -32,23 +33,23 @@ static void extioi_update_irq(LoongArchExtIOI *s, int irq, int level) if (((s->enable[irq_index]) & irq_mask) == 0) { return; } - s->coreisr[cpu][irq_index] |= irq_mask; - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); - set_bit(irq, s->sw_isr[cpu][ipnum]); + s->cpu[cpu].coreisr[irq_index] |= irq_mask; + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); + set_bit(irq, s->cpu[cpu].sw_isr[ipnum]); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } else { - s->coreisr[cpu][irq_index] &= ~irq_mask; - clear_bit(irq, s->sw_isr[cpu][ipnum]); - found = find_first_bit(s->sw_isr[cpu][ipnum], EXTIOI_IRQS); + s->cpu[cpu].coreisr[irq_index] &= ~irq_mask; + clear_bit(irq, s->cpu[cpu].sw_isr[ipnum]); + found = find_first_bit(s->cpu[cpu].sw_isr[ipnum], EXTIOI_IRQS); if (found < EXTIOI_IRQS) { /* other irq is handling, need not update parent irq level */ return; } } - qemu_set_irq(s->parent_irq[cpu][ipnum], level); + qemu_set_irq(s->cpu[cpu].parent_irq[ipnum], level); } static void extioi_setirq(void *opaque, int irq, int level) @@ -96,7 +97,7 @@ static MemTxResult extioi_readw(void *opaque, hwaddr addr, uint64_t *data, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - *data = s->coreisr[cpu][index]; + *data = s->cpu[cpu].coreisr[index]; break; case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END - 1: index = (offset - EXTIOI_COREMAP_START) >> 2; @@ -129,12 +130,66 @@ static inline void extioi_enable_irq(LoongArchExtIOI *s, int index,\ } } +static inline void extioi_update_sw_coremap(LoongArchExtIOI *s, int irq, + uint64_t val, bool notify) +{ + int i, cpu; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + + for (i = 0; i < 4; i++) { + cpu = val & 0xff; + cpu = ctz32(cpu); + cpu = (cpu >= 4) ? 0 : cpu; + val = val >> 8; + + if (s->sw_coremap[irq + i] == cpu) { + continue; + } + + if (notify && test_bit(irq, (unsigned long *)s->isr)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else { + s->sw_coremap[irq + i] = cpu; + } + } +} + +static inline void extioi_update_sw_ipmap(LoongArchExtIOI *s, int index, + uint64_t val) +{ + int i; + uint8_t ipnum; + + /* + * loongarch only support little endian, + * so we paresd the value with little endian. + */ + val = cpu_to_le64(val); + for (i = 0; i < 4; i++) { + ipnum = val & 0xff; + ipnum = ctz32(ipnum); + ipnum = (ipnum >= 4) ? 0 : ipnum; + s->sw_ipmap[index * 4 + i] = ipnum; + val = val >> 8; + } +} + static MemTxResult extioi_writew(void *opaque, hwaddr addr, uint64_t val, unsigned size, MemTxAttrs attrs) { LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); - int i, cpu, index, old_data, irq; + int cpu, index, old_data, irq; uint32_t offset; trace_loongarch_extioi_writew(addr, val); @@ -152,20 +207,7 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, */ index = (offset - EXTIOI_IPMAP_START) >> 2; s->ipmap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - for (i = 0; i < 4; i++) { - uint8_t ipnum; - ipnum = val & 0xff; - ipnum = ctz32(ipnum); - ipnum = (ipnum >= 4) ? 
0 : ipnum; - s->sw_ipmap[index * 4 + i] = ipnum; - val = val >> 8; - } - + extioi_update_sw_ipmap(s, index, val); break; case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END - 1: index = (offset - EXTIOI_ENABLE_START) >> 2; @@ -189,8 +231,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, index = (offset - EXTIOI_COREISR_START) >> 2; /* using attrs to get current cpu index */ cpu = attrs.requester_id; - old_data = s->coreisr[cpu][index]; - s->coreisr[cpu][index] = old_data & ~val; + old_data = s->cpu[cpu].coreisr[index]; + s->cpu[cpu].coreisr[index] = old_data & ~val; /* write 1 to clear interrupt */ old_data &= val; irq = ctz32(old_data); @@ -204,33 +246,8 @@ static MemTxResult extioi_writew(void *opaque, hwaddr addr, irq = offset - EXTIOI_COREMAP_START; index = irq / 4; s->coremap[index] = val; - /* - * loongarch only support little endian, - * so we paresd the value with little endian. - */ - val = cpu_to_le64(val); - - for (i = 0; i < 4; i++) { - cpu = val & 0xff; - cpu = ctz32(cpu); - cpu = (cpu >= 4) ? 0 : cpu; - val = val >> 8; - - if (s->sw_coremap[irq + i] == cpu) { - continue; - } - - if (test_bit(irq, (unsigned long *)s->isr)) { - /* - * lower irq at old cpu and raise irq at new cpu - */ - extioi_update_irq(s, irq + i, 0); - s->sw_coremap[irq + i] = cpu; - extioi_update_irq(s, irq + i, 1); - } else { - s->sw_coremap[irq + i] = cpu; - } - } + + extioi_update_sw_coremap(s, irq, val, true); break; default: break; @@ -248,65 +265,112 @@ static const MemoryRegionOps extioi_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static const VMStateDescription vmstate_loongarch_extioi = { - .name = TYPE_LOONGARCH_EXTIOI, +static void loongarch_extioi_realize(DeviceState *dev, Error **errp) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i, pin; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } + + for (i = 0; i < EXTIOI_IRQS; i++) { + sysbus_init_irq(sbd, &s->irq[i]); + } + + qdev_init_gpio_in(dev, extioi_setirq, EXTIOI_IRQS); + memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, + s, "extioi_system_mem", 0x900); + sysbus_init_mmio(sbd, &s->extioi_system_mem); + s->cpu = g_new0(ExtIOICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + qdev_init_gpio_out(dev, &s->cpu[i].parent_irq[pin], 1); + } + } +} + +static void loongarch_extioi_finalize(Object *obj) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); + + g_free(s->cpu); +} + +static int vmstate_extioi_post_load(void *opaque, int version_id) +{ + LoongArchExtIOI *s = LOONGARCH_EXTIOI(opaque); + int i, start_irq; + + for (i = 0; i < (EXTIOI_IRQS / 4); i++) { + start_irq = i * 4; + extioi_update_sw_coremap(s, start_irq, s->coremap[i], false); + } + + for (i = 0; i < (EXTIOI_IRQS_IPMAP_SIZE / 4); i++) { + extioi_update_sw_ipmap(s, i, s->ipmap[i]); + } + + return 0; +} + +static const VMStateDescription vmstate_extioi_core = { + .name = "extioi-core", .version_id = 1, .minimum_version_id = 1, - .fields = (VMStateField[]) { + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(coreisr, ExtIOICore, EXTIOI_IRQS_GROUP_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_loongarch_extioi = { + .name = TYPE_LOONGARCH_EXTIOI, + .version_id = 2, + .minimum_version_id = 2, + .post_load = vmstate_extioi_post_load, + .fields = (const VMStateField[]) { 
VMSTATE_UINT32_ARRAY(bounce, LoongArchExtIOI, EXTIOI_IRQS_GROUP_COUNT), - VMSTATE_UINT32_2DARRAY(coreisr, LoongArchExtIOI, EXTIOI_CPUS, - EXTIOI_IRQS_GROUP_COUNT), VMSTATE_UINT32_ARRAY(nodetype, LoongArchExtIOI, EXTIOI_IRQS_NODETYPE_COUNT / 2), VMSTATE_UINT32_ARRAY(enable, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(isr, LoongArchExtIOI, EXTIOI_IRQS / 32), VMSTATE_UINT32_ARRAY(ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE / 4), VMSTATE_UINT32_ARRAY(coremap, LoongArchExtIOI, EXTIOI_IRQS / 4), - VMSTATE_UINT8_ARRAY(sw_ipmap, LoongArchExtIOI, EXTIOI_IRQS_IPMAP_SIZE), - VMSTATE_UINT8_ARRAY(sw_coremap, LoongArchExtIOI, EXTIOI_IRQS), + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchExtIOI, num_cpu, + vmstate_extioi_core, ExtIOICore), VMSTATE_END_OF_LIST() } }; -static void loongarch_extioi_instance_init(Object *obj) -{ - SysBusDevice *dev = SYS_BUS_DEVICE(obj); - LoongArchExtIOI *s = LOONGARCH_EXTIOI(obj); - int i, cpu, pin; - - for (i = 0; i < EXTIOI_IRQS; i++) { - sysbus_init_irq(dev, &s->irq[i]); - } - - qdev_init_gpio_in(DEVICE(obj), extioi_setirq, EXTIOI_IRQS); - - for (cpu = 0; cpu < EXTIOI_CPUS; cpu++) { - memory_region_init_io(&s->extioi_iocsr_mem[cpu], OBJECT(s), &extioi_ops, - s, "extioi_iocsr", 0x900); - sysbus_init_mmio(dev, &s->extioi_iocsr_mem[cpu]); - for (pin = 0; pin < LS3A_INTC_IP; pin++) { - qdev_init_gpio_out(DEVICE(obj), &s->parent_irq[cpu][pin], 1); - } - } - memory_region_init_io(&s->extioi_system_mem, OBJECT(s), &extioi_ops, - s, "extioi_system_mem", 0x900); - sysbus_init_mmio(dev, &s->extioi_system_mem); -} +static Property extioi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchExtIOI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; static void loongarch_extioi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_extioi_realize; + device_class_set_props(dc, extioi_properties); dc->vmsd = &vmstate_loongarch_extioi; } static const TypeInfo loongarch_extioi_info = { .name = TYPE_LOONGARCH_EXTIOI, .parent = TYPE_SYS_BUS_DEVICE, - .instance_init = loongarch_extioi_instance_init, .instance_size = sizeof(struct LoongArchExtIOI), .class_init = loongarch_extioi_class_init, + .instance_finalize = loongarch_extioi_finalize, }; static void loongarch_extioi_register_types(void) diff --git a/hw/intc/loongarch_ipi.c b/hw/intc/loongarch_ipi.c index 67858b521c..e228669aa5 100644 --- a/hw/intc/loongarch_ipi.c +++ b/hw/intc/loongarch_ipi.c @@ -9,6 +9,7 @@ #include "hw/sysbus.h" #include "hw/intc/loongarch_ipi.h" #include "hw/irq.h" +#include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/log.h" #include "exec/address-spaces.h" @@ -17,14 +18,16 @@ #include "target/loongarch/internals.h" #include "trace.h" -static void loongarch_ipi_writel(void *, hwaddr, uint64_t, unsigned); - -static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) +static MemTxResult loongarch_ipi_readl(void *opaque, hwaddr addr, + uint64_t *data, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + IPICore *s; + LoongArchIPI *ipi = opaque; uint64_t ret = 0; int index = 0; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; switch (addr) { case CORE_STATUS_OFF: @@ -49,10 +52,12 @@ static uint64_t loongarch_ipi_readl(void *opaque, hwaddr addr, unsigned size) } trace_loongarch_ipi_read(size, (uint64_t)addr, ret); - return ret; + *data = ret; + return MEMTX_OK; } -static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) +static void send_ipi_data(CPULoongArchState *env, uint64_t 
val, hwaddr addr, + MemTxAttrs attrs) { int i, mask = 0, data = 0; @@ -61,8 +66,8 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) * if the mask is 0, we need not to do anything. */ if ((val >> 27) & 0xf) { - data = address_space_ldl(&env->address_space_iocsr, addr, - MEMTXATTRS_UNSPECIFIED, NULL); + data = address_space_ldl(env->address_space_iocsr, addr, + attrs, NULL); for (i = 0; i < 4; i++) { /* get mask for byte writing */ if (val & (0x1 << (27 + i))) { @@ -73,8 +78,8 @@ static void send_ipi_data(CPULoongArchState *env, uint64_t val, hwaddr addr) data &= mask; data |= (val >> 32) & ~mask; - address_space_stl(&env->address_space_iocsr, addr, - data, MEMTXATTRS_UNSPECIFIED, NULL); + address_space_stl(env->address_space_iocsr, addr, + data, attrs, NULL); } static int archid_cmp(const void *a, const void *b) @@ -103,80 +108,72 @@ static CPUState *ipi_getcpu(int arch_id) CPUArchId *archid; archid = find_cpu_by_archid(machine, arch_id); - return CPU(archid->cpu); -} - -static void ipi_send(uint64_t val) -{ - uint32_t cpuid; - uint8_t vector; - CPUState *cs; - LoongArchCPU *cpu; - LoongArchIPI *s; - - cpuid = extract32(val, 16, 10); - if (cpuid >= LOONGARCH_MAX_CPUS) { - trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); - return; + if (archid) { + return CPU(archid->cpu); } - /* IPI status vector */ - vector = extract8(val, 0, 5); - - cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - s = LOONGARCH_IPI(cpu->env.ipistate); - loongarch_ipi_writel(&s->ipi_core, CORE_SET_OFF, BIT(vector), 4); + return NULL; } -static void mail_send(uint64_t val) +static MemTxResult mail_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_MAIL_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = 0x1020 + (val & 0x1c); cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = SMP_IPI_MAILBOX + CORE_BUF_20 + (val & 0x1c); + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void any_send(uint64_t val) +static MemTxResult any_send(uint64_t val, MemTxAttrs attrs) { uint32_t cpuid; hwaddr addr; - CPULoongArchState *env; CPUState *cs; - LoongArchCPU *cpu; cpuid = extract32(val, 16, 10); if (cpuid >= LOONGARCH_MAX_CPUS) { trace_loongarch_ipi_unsupported_cpuid("IOCSR_ANY_SEND", cpuid); - return; + return MEMTX_DECODE_ERROR; } - addr = val & 0xffff; cs = ipi_getcpu(cpuid); - cpu = LOONGARCH_CPU(cs); - env = &cpu->env; - send_ipi_data(env, val, addr); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + addr = val & 0xffff; + attrs.requester_id = cs->cpu_index; + send_ipi_data(&LOONGARCH_CPU(cs)->env, val, addr, attrs); + return MEMTX_OK; } -static void loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { - IPICore *s = opaque; + LoongArchIPI *ipi = opaque; + IPICore *s; int index = 0; + uint32_t cpuid; + uint8_t vector; + CPUState *cs; + s = &ipi->cpu[attrs.requester_id]; addr &= 0xff; trace_loongarch_ipi_write(size, (uint64_t)addr, val); switch (addr) { @@ -203,17 +200,34 @@ static void 
loongarch_ipi_writel(void *opaque, hwaddr addr, uint64_t val, s->buf[index] = val; break; case IOCSR_IPI_SEND: - ipi_send(val); + cpuid = extract32(val, 16, 10); + if (cpuid >= LOONGARCH_MAX_CPUS) { + trace_loongarch_ipi_unsupported_cpuid("IOCSR_IPI_SEND", cpuid); + return MEMTX_DECODE_ERROR; + } + + /* IPI status vector */ + vector = extract8(val, 0, 5); + cs = ipi_getcpu(cpuid); + if (cs == NULL) { + return MEMTX_DECODE_ERROR; + } + + /* override requester_id */ + attrs.requester_id = cs->cpu_index; + loongarch_ipi_writel(ipi, CORE_SET_OFF, BIT(vector), 4, attrs); break; default: qemu_log_mask(LOG_UNIMP, "invalid write: %x", (uint32_t)addr); break; } + + return MEMTX_OK; } static const MemoryRegionOps loongarch_ipi_ops = { - .read = loongarch_ipi_readl, - .write = loongarch_ipi_writel, + .read_with_attrs = loongarch_ipi_readl, + .write_with_attrs = loongarch_ipi_writel, .impl.min_access_size = 4, .impl.max_access_size = 4, .valid.min_access_size = 4, @@ -222,24 +236,28 @@ static const MemoryRegionOps loongarch_ipi_ops = { }; /* mail send and any send only support writeq */ -static void loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, - unsigned size) +static MemTxResult loongarch_ipi_writeq(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { + MemTxResult ret = MEMTX_OK; + addr &= 0xfff; switch (addr) { case MAIL_SEND_OFFSET: - mail_send(val); + ret = mail_send(val, attrs); break; case ANY_SEND_OFFSET: - any_send(val); + ret = any_send(val, attrs); break; default: break; } + + return ret; } static const MemoryRegionOps loongarch_ipi64_ops = { - .write = loongarch_ipi_writeq, + .write_with_attrs = loongarch_ipi_writeq, .impl.min_access_size = 8, .impl.max_access_size = 8, .valid.min_access_size = 8, @@ -247,23 +265,39 @@ static const MemoryRegionOps loongarch_ipi64_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; -static void loongarch_ipi_init(Object *obj) +static void loongarch_ipi_realize(DeviceState *dev, Error **errp) { - LoongArchIPI *s = LOONGARCH_IPI(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + LoongArchIPI *s = LOONGARCH_IPI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + int i; + + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } - memory_region_init_io(&s->ipi_iocsr_mem, obj, &loongarch_ipi_ops, - &s->ipi_core, "loongarch_ipi_iocsr", 0x48); + memory_region_init_io(&s->ipi_iocsr_mem, OBJECT(dev), &loongarch_ipi_ops, + s, "loongarch_ipi_iocsr", 0x48); /* loongarch_ipi_iocsr performs re-entrant IO through ipi_send */ s->ipi_iocsr_mem.disable_reentrancy_guard = true; sysbus_init_mmio(sbd, &s->ipi_iocsr_mem); - memory_region_init_io(&s->ipi64_iocsr_mem, obj, &loongarch_ipi64_ops, - &s->ipi_core, "loongarch_ipi64_iocsr", 0x118); + memory_region_init_io(&s->ipi64_iocsr_mem, OBJECT(dev), + &loongarch_ipi64_ops, + s, "loongarch_ipi64_iocsr", 0x118); sysbus_init_mmio(sbd, &s->ipi64_iocsr_mem); - qdev_init_gpio_out(DEVICE(obj), &s->ipi_core.irq, 1); + + s->cpu = g_new0(IPICore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for ExtIOICore faile"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + qdev_init_gpio_out(dev, &s->cpu[i].irq, 1); + } } static const VMStateDescription vmstate_ipi_core = { @@ -282,27 +316,42 @@ static const VMStateDescription vmstate_ipi_core = { static const VMStateDescription vmstate_loongarch_ipi = { .name = TYPE_LOONGARCH_IPI, - .version_id = 1, - .minimum_version_id = 1, - .fields = (VMStateField[]) { - VMSTATE_STRUCT(ipi_core, LoongArchIPI, 0, 
vmstate_ipi_core, IPICore), + .version_id = 2, + .minimum_version_id = 2, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, LoongArchIPI, num_cpu, + vmstate_ipi_core, IPICore), VMSTATE_END_OF_LIST() } }; +static Property ipi_properties[] = { + DEFINE_PROP_UINT32("num-cpu", LoongArchIPI, num_cpu, 1), + DEFINE_PROP_END_OF_LIST(), +}; + static void loongarch_ipi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = loongarch_ipi_realize; + device_class_set_props(dc, ipi_properties); dc->vmsd = &vmstate_loongarch_ipi; } +static void loongarch_ipi_finalize(Object *obj) +{ + LoongArchIPI *s = LOONGARCH_IPI(obj); + + g_free(s->cpu); +} + static const TypeInfo loongarch_ipi_info = { .name = TYPE_LOONGARCH_IPI, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(LoongArchIPI), - .instance_init = loongarch_ipi_init, .class_init = loongarch_ipi_class_init, + .instance_finalize = loongarch_ipi_finalize, }; static void loongarch_ipi_register_types(void) diff --git a/hw/loongarch/virt.c b/hw/loongarch/virt.c index 4b7dc67a2d..c9a680e61a 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -535,9 +535,6 @@ static void loongarch_irq_init(LoongArchMachineState *lams) CPUState *cpu_state; int cpu, pin, i, start, num; - extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); - /* * The connection of interrupts: * +-----+ +---------+ +-------+ @@ -559,41 +556,42 @@ static void loongarch_irq_init(LoongArchMachineState *lams) * | UARTs | | Devices | | Devices | * +--------+ +---------+ +---------+ */ + + /* Create IPI device */ + ipi = qdev_new(TYPE_LOONGARCH_IPI); + qdev_prop_set_uint32(ipi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + + /* IPI iocsr memory region */ + memory_region_add_subregion(&lams->system_iocsr, SMP_IPI_MAILBOX, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 0)); + memory_region_add_subregion(&lams->system_iocsr, MAIL_SEND_ADDR, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), 1)); + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpu_state = qemu_get_cpu(cpu); cpudev = DEVICE(cpu_state); lacpu = LOONGARCH_CPU(cpu_state); env = &(lacpu->env); - - ipi = qdev_new(TYPE_LOONGARCH_IPI); - sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + env->address_space_iocsr = &lams->as_iocsr; /* connect ipi irq to cpu irq */ - qdev_connect_gpio_out(ipi, 0, qdev_get_gpio_in(cpudev, IRQ_IPI)); - /* IPI iocsr memory region */ - memory_region_add_subregion(&env->system_iocsr, SMP_IPI_MAILBOX, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 0)); - memory_region_add_subregion(&env->system_iocsr, MAIL_SEND_ADDR, - sysbus_mmio_get_region(SYS_BUS_DEVICE(ipi), - 1)); - /* - * extioi iocsr memory region - * only one extioi is added on loongarch virt machine - * external device interrupt can only be routed to cpu 0-3 - */ - if (cpu < EXTIOI_CPUS) - memory_region_add_subregion(&env->system_iocsr, APIC_BASE, - sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), - cpu)); + qdev_connect_gpio_out(ipi, cpu, qdev_get_gpio_in(cpudev, IRQ_IPI)); env->ipistate = ipi; } + /* Create EXTIOI device */ + extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); + qdev_prop_set_uint32(extioi, "num-cpu", ms->smp.cpus); + sysbus_realize_and_unref(SYS_BUS_DEVICE(extioi), &error_fatal); + memory_region_add_subregion(&lams->system_iocsr, APIC_BASE, + sysbus_mmio_get_region(SYS_BUS_DEVICE(extioi), 0)); + /* * connect ext irq to the cpu irq * cpu_pin[9:2] <= intc_pin[7:0] 
*/ - for (cpu = 0; cpu < MIN(ms->smp.cpus, EXTIOI_CPUS); cpu++) { + for (cpu = 0; cpu < ms->smp.cpus; cpu++) { cpudev = DEVICE(qemu_get_cpu(cpu)); for (pin = 0; pin < LS3A_INTC_IP; pin++) { qdev_connect_gpio_out(extioi, (cpu * 8 + pin), @@ -733,6 +731,43 @@ static void loongarch_direct_kernel_boot(LoongArchMachineState *lams, } } +static void loongarch_qemu_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ +} + +static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) +{ + switch (addr) { + case VERSION_REG: + return 0x11ULL; + case FEATURE_REG: + return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | + 1ULL << IOCSRF_CSRIPI; + case VENDOR_REG: + return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ + case CPUNAME_REG: + return 0x303030354133ULL; /* "3A5000" */ + case MISC_FUNC_REG: + return 1ULL << IOCSRM_EXTIOI_EN; + } + return 0ULL; +} + +static const MemoryRegionOps loongarch_qemu_ops = { + .read = loongarch_qemu_read, + .write = loongarch_qemu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + static void loongarch_init(MachineState *machine) { LoongArchCPU *lacpu; @@ -761,8 +796,17 @@ static void loongarch_init(MachineState *machine) exit(1); } create_fdt(lams); - /* Init CPUs */ + /* Create IOCSR space */ + memory_region_init_io(&lams->system_iocsr, OBJECT(machine), NULL, + machine, "iocsr", UINT64_MAX); + address_space_init(&lams->as_iocsr, &lams->system_iocsr, "IOCSR"); + memory_region_init_io(&lams->iocsr_mem, OBJECT(machine), + &loongarch_qemu_ops, + machine, "iocsr_misc", 0x428); + memory_region_add_subregion(&lams->system_iocsr, 0, &lams->iocsr_mem); + + /* Init CPUs */ possible_cpus = mc->possible_cpu_arch_ids(machine); for (i = 0; i < possible_cpus->len; i++) { cpu = cpu_create(machine->cpu_type); diff --git a/include/hw/intc/loongarch_extioi.h b/include/hw/intc/loongarch_extioi.h index fbdef9a7b3..a0a46b888c 100644 --- a/include/hw/intc/loongarch_extioi.h +++ b/include/hw/intc/loongarch_extioi.h @@ -40,25 +40,29 @@ #define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) #define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) +typedef struct ExtIOICore { + uint32_t coreisr[EXTIOI_IRQS_GROUP_COUNT]; + DECLARE_BITMAP(sw_isr[LS3A_INTC_IP], EXTIOI_IRQS); + qemu_irq parent_irq[LS3A_INTC_IP]; +} ExtIOICore; + #define TYPE_LOONGARCH_EXTIOI "loongarch.extioi" OBJECT_DECLARE_SIMPLE_TYPE(LoongArchExtIOI, LOONGARCH_EXTIOI) struct LoongArchExtIOI { SysBusDevice parent_obj; + uint32_t num_cpu; /* hardware state */ uint32_t nodetype[EXTIOI_IRQS_NODETYPE_COUNT / 2]; uint32_t bounce[EXTIOI_IRQS_GROUP_COUNT]; uint32_t isr[EXTIOI_IRQS / 32]; - uint32_t coreisr[EXTIOI_CPUS][EXTIOI_IRQS_GROUP_COUNT]; uint32_t enable[EXTIOI_IRQS / 32]; uint32_t ipmap[EXTIOI_IRQS_IPMAP_SIZE / 4]; uint32_t coremap[EXTIOI_IRQS / 4]; uint32_t sw_pending[EXTIOI_IRQS / 32]; - DECLARE_BITMAP(sw_isr[EXTIOI_CPUS][LS3A_INTC_IP], EXTIOI_IRQS); uint8_t sw_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; uint8_t sw_coremap[EXTIOI_IRQS]; - qemu_irq parent_irq[EXTIOI_CPUS][LS3A_INTC_IP]; qemu_irq irq[EXTIOI_IRQS]; - MemoryRegion extioi_iocsr_mem[EXTIOI_CPUS]; + ExtIOICore *cpu; MemoryRegion extioi_system_mem; }; #endif /* LOONGARCH_EXTIOI_H */ diff --git a/include/hw/intc/loongarch_ipi.h b/include/hw/intc/loongarch_ipi.h index 6c6194786e..1c1e834849 100644 --- a/include/hw/intc/loongarch_ipi.h +++ b/include/hw/intc/loongarch_ipi.h @@ -47,7 +47,8 @@ struct LoongArchIPI { SysBusDevice parent_obj; 
MemoryRegion ipi_iocsr_mem; MemoryRegion ipi64_iocsr_mem; - IPICore ipi_core; + uint32_t num_cpu; + IPICore *cpu; }; #endif diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 674f4655e0..6ef9a92394 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -25,7 +25,7 @@ #define VIRT_LOWMEM_BASE 0 #define VIRT_LOWMEM_SIZE 0x10000000 -#define VIRT_HIGHMEM_BASE 0x90000000 +#define VIRT_HIGHMEM_BASE 0x80000000 #define VIRT_GED_EVT_ADDR 0x100e0000 #define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) #define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) @@ -50,6 +50,9 @@ struct LoongArchMachineState { DeviceState *platform_bus_dev; PCIBus *pci_bus; PFlashCFI01 *flash; + MemoryRegion system_iocsr; + MemoryRegion iocsr_mem; + AddressSpace as_iocsr; }; #define TYPE_LOONGARCH_MACHINE MACHINE_TYPE_NAME("virt") diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index 72279f4d25..3afb70160f 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -322,6 +322,8 @@ extern "C" { * index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian */ #define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */ +#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */ /* * 2 plane YCbCr MSB aligned diff --git a/include/standard-headers/linux/fuse.h b/include/standard-headers/linux/fuse.h index 6b9793842c..fc0dcd10ae 100644 --- a/include/standard-headers/linux/fuse.h +++ b/include/standard-headers/linux/fuse.h @@ -209,7 +209,7 @@ * - add FUSE_HAS_EXPIRE_ONLY * * 7.39 - * - add FUSE_DIRECT_IO_RELAX + * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures */ @@ -405,8 +405,7 @@ struct fuse_file_lock { * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation - * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now - * allow shared mmap + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
*/ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -445,7 +444,10 @@ struct fuse_file_lock { #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) -#define FUSE_DIRECT_IO_RELAX (1ULL << 36) +#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) + +/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ +#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP /** * CUSE INIT request/reply flags diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index e5f558d964..a39193213f 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -80,6 +80,7 @@ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 +#define PCI_HEADER_TYPE_MFD 0x80 /* Multi-Function Device (possible) */ #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ @@ -637,6 +638,7 @@ #define PCI_EXP_RTCAP 0x1e /* Root Capabilities */ #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ #define PCI_EXP_RTSTA 0x20 /* Root Status */ +#define PCI_EXP_RTSTA_PME_RQ_ID 0x0000ffff /* PME Requester ID */ #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ /* @@ -930,12 +932,13 @@ /* Process Address Space ID */ #define PCI_PASID_CAP 0x04 /* PASID feature register */ -#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */ -#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_EXEC 0x0002 /* Exec permissions Supported */ +#define PCI_PASID_CAP_PRIV 0x0004 /* Privilege Mode Supported */ +#define PCI_PASID_CAP_WIDTH 0x1f00 #define PCI_PASID_CTRL 0x06 /* PASID control register */ -#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */ -#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */ -#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */ +#define PCI_PASID_CTRL_ENABLE 0x0001 /* Enable bit */ +#define PCI_PASID_CTRL_EXEC 0x0002 /* Exec permissions Enable */ +#define PCI_PASID_CTRL_PRIV 0x0004 /* Privilege Mode Enable */ #define PCI_EXT_CAP_PASID_SIZEOF 8 /* Single Root I/O Virtualization */ @@ -975,6 +978,8 @@ #define PCI_LTR_VALUE_MASK 0x000003ff #define PCI_LTR_SCALE_MASK 0x00001c00 #define PCI_LTR_SCALE_SHIFT 10 +#define PCI_LTR_NOSNOOP_VALUE 0x03ff0000 /* Max No-Snoop Latency Value */ +#define PCI_LTR_NOSNOOP_SCALE 0x1c000000 /* Scale for Max Value */ #define PCI_EXT_CAP_LTR_SIZEOF 8 /* Access Control Service */ @@ -1042,9 +1047,16 @@ #define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */ #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR 0x0000 /* Uncorrectable error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE 0x0002 /* Rcvd ERR_NONFATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE 0x0004 /* Rcvd ERR_FATAL */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT 0x0006 /* Reason in Trig Reason Extension field */ #define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */ #define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */ #define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO 0x0000 /* RP PIO error */ +#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER 0x0020 /* DPC SW Trigger bit */ +#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */ #define 
PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */ @@ -1088,6 +1100,8 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_VALUE 0x03ff0000 /* LTR_L1.2_THRESHOLD_Value */ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +#define PCI_L1SS_CTL2_T_PWR_ON_SCALE 0x00000003 /* T_POWER_ON Scale */ +#define PCI_L1SS_CTL2_T_PWR_ON_VALUE 0x000000f8 /* T_POWER_ON Value */ /* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */ #define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */ diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index 5ad07e134a..fd54044936 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range { * DRIVER_OK */ #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6 +/* Device may expose the virtqueue's descriptor area, driver area and + * device area to a different group for ASID binding than where its + * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID. + */ +#define VHOST_BACKEND_F_DESC_ASID 0x7 +/* IOTLB don't flush memory mapping across device reset */ +#define VHOST_BACKEND_F_IOTLB_PERSIST 0x8 #endif diff --git a/include/standard-headers/linux/virtio_config.h b/include/standard-headers/linux/virtio_config.h index 8a7d0dc8b0..bfd1ca643e 100644 --- a/include/standard-headers/linux/virtio_config.h +++ b/include/standard-headers/linux/virtio_config.h @@ -103,6 +103,11 @@ */ #define VIRTIO_F_NOTIFICATION_DATA 38 +/* This feature indicates that the driver uses the data provided by the device + * as a virtqueue identifier in available buffer notifications. + */ +#define VIRTIO_F_NOTIF_CONFIG_DATA 39 + /* * This feature indicates that the driver can reset a queue individually. */ diff --git a/include/standard-headers/linux/virtio_pci.h b/include/standard-headers/linux/virtio_pci.h index be912cfc95..b7fdfd0668 100644 --- a/include/standard-headers/linux/virtio_pci.h +++ b/include/standard-headers/linux/virtio_pci.h @@ -166,6 +166,17 @@ struct virtio_pci_common_cfg { uint32_t queue_used_hi; /* read-write */ }; +/* + * Warning: do not use sizeof on this: use offsetofend for + * specific fields you need. + */ +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + uint16_t queue_notify_data; /* read-write */ + uint16_t queue_reset; /* read-write */ +}; + /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 38e5957526..c59ea55cd8 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -491,6 +491,38 @@ struct kvm_smccc_filter { #define KVM_HYPERCALL_EXIT_SMC (1U << 0) #define KVM_HYPERCALL_EXIT_16BIT (1U << 1) +/* + * Get feature ID registers userspace writable mask. + * + * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model + * Feature Register 2"): + * + * "The Feature ID space is defined as the System register space in + * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, + * op2=={0-7}." + * + * This covers all currently known R/O registers that indicate + * anything useful feature wise, including the ID registers. + * + * If we ever need to introduce a new range, it will be described as + * such in the range field. 
+ */ +#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \ + ({ \ + __u64 __op1 = (op1) & 3; \ + __op1 -= (__op1 == 3); \ + (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \ + }) + +#define KVM_ARM_FEATURE_ID_RANGE 0 +#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8) + +struct reg_mask_range { + __u64 addr; /* Pointer to mask array */ + __u32 range; /* Requested range */ + __u32 reserved[13]; +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index abe087c53b..756b013fb8 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -71,7 +71,7 @@ __SYSCALL(__NR_fremovexattr, sys_fremovexattr) #define __NR_getcwd 17 __SYSCALL(__NR_getcwd, sys_getcwd) #define __NR_lookup_dcookie 18 -__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie) +__SYSCALL(__NR_lookup_dcookie, sys_ni_syscall) #define __NR_eventfd2 19 __SYSCALL(__NR_eventfd2, sys_eventfd2) #define __NR_epoll_create1 20 @@ -816,15 +816,21 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease) __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) - #define __NR_cachestat 451 __SYSCALL(__NR_cachestat, sys_cachestat) - #define __NR_fchmodat2 452 __SYSCALL(__NR_fchmodat2, sys_fchmodat2) +#define __NR_map_shadow_stack 453 +__SYSCALL(__NR_map_shadow_stack, sys_map_shadow_stack) +#define __NR_futex_wake 454 +__SYSCALL(__NR_futex_wake, sys_futex_wake) +#define __NR_futex_wait 455 +__SYSCALL(__NR_futex_wait, sys_futex_wait) +#define __NR_futex_requeue 456 +__SYSCALL(__NR_futex_requeue, sys_futex_requeue) #undef __NR_syscalls -#define __NR_syscalls 453 +#define __NR_syscalls 457 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-loongarch/bitsperlong.h b/linux-headers/asm-loongarch/bitsperlong.h new file mode 100644 index 0000000000..6dc0bb0c13 --- /dev/null +++ b/linux-headers/asm-loongarch/bitsperlong.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/kvm.h b/linux-headers/asm-loongarch/kvm.h new file mode 100644 index 0000000000..923d0bd382 --- /dev/null +++ b/linux-headers/asm-loongarch/kvm.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. + * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). 
+ * + * Register set = 3: FPU / SIMD registers (see definitions below). + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +#define KVM_LOONGARCH_VCPU_CPUCFG 0 + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/linux-headers/asm-loongarch/mman.h b/linux-headers/asm-loongarch/mman.h new file mode 100644 index 0000000000..8eebf89f5a --- /dev/null +++ b/linux-headers/asm-loongarch/mman.h @@ -0,0 +1 @@ +#include diff --git a/linux-headers/asm-loongarch/unistd.h b/linux-headers/asm-loongarch/unistd.h new file mode 100644 index 0000000000..fcb668984f --- /dev/null +++ b/linux-headers/asm-loongarch/unistd.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_CLONE3 + +#include diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index 46d8500654..994b6f008f 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ -381,5 +381,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index c2f7ac673b..41dcf5877a 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -357,5 +357,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index 757c68f2ad..ae9d334d96 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ 
b/linux-headers/asm-mips/unistd_o32.h @@ -427,5 +427,9 @@ #define __NR_set_mempolicy_home_node (__NR_Linux + 450) #define __NR_cachestat (__NR_Linux + 451) #define __NR_fchmodat2 (__NR_Linux + 452) +#define __NR_map_shadow_stack (__NR_Linux + 453) +#define __NR_futex_wake (__NR_Linux + 454) +#define __NR_futex_wait (__NR_Linux + 455) +#define __NR_futex_requeue (__NR_Linux + 456) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h index 8ef94bbac1..b9b23d66d7 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -434,6 +434,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 0e7ee43e88..cbb4b3e8f7 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -406,6 +406,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h index 992c5e4071..60d3b21dea 100644 --- a/linux-headers/asm-riscv/kvm.h +++ b/linux-headers/asm-riscv/kvm.h @@ -80,6 +80,7 @@ struct kvm_riscv_csr { unsigned long sip; unsigned long satp; unsigned long scounteren; + unsigned long senvcfg; }; /* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ @@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr { unsigned long iprio2h; }; +/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_smstateen_csr { + unsigned long sstateen0; +}; + /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ struct kvm_riscv_timer { __u64 frequency; @@ -131,6 +137,8 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZICSR, KVM_RISCV_ISA_EXT_ZIFENCEI, KVM_RISCV_ISA_EXT_ZIHPM, + KVM_RISCV_ISA_EXT_SMSTATEEN, + KVM_RISCV_ISA_EXT_ZICOND, KVM_RISCV_ISA_EXT_MAX, }; @@ -148,6 +156,7 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_PMU, KVM_RISCV_SBI_EXT_EXPERIMENTAL, KVM_RISCV_SBI_EXT_VENDOR, + KVM_RISCV_SBI_EXT_DBCN, KVM_RISCV_SBI_EXT_MAX, }; @@ -178,10 +187,13 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) #define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) #define KVM_REG_RISCV_CSR_REG(name) \ (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) #define KVM_REG_RISCV_CSR_AIA_REG(name) \ (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long)) +#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \ + (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long)) /* Timer registers are mapped as type 4 */ #define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 716fa368ca..c093e6d5f9 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -425,5 +425,9 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define 
__NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index b2a11b1d13..114c0569a4 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -373,5 +373,9 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index d749ad1c24..329649c377 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -443,6 +443,10 @@ #define __NR_set_mempolicy_home_node 450 #define __NR_cachestat 451 #define __NR_fchmodat2 452 +#define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index cea67282eb..4583606ce6 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -366,6 +366,9 @@ #define __NR_cachestat 451 #define __NR_fchmodat2 452 #define __NR_map_shadow_stack 453 +#define __NR_futex_wake 454 +#define __NR_futex_wait 455 +#define __NR_futex_requeue 456 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 5b2e79bf4c..146d74d8e4 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -318,6 +318,9 @@ #define __NR_set_mempolicy_home_node (__X32_SYSCALL_BIT + 450) #define __NR_cachestat (__X32_SYSCALL_BIT + 451) #define __NR_fchmodat2 (__X32_SYSCALL_BIT + 452) +#define __NR_futex_wake (__X32_SYSCALL_BIT + 454) +#define __NR_futex_wait (__X32_SYSCALL_BIT + 455) +#define __NR_futex_requeue (__X32_SYSCALL_BIT + 456) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h index 218bf7ac98..806d98d09c 100644 --- a/linux-headers/linux/iommufd.h +++ b/linux-headers/linux/iommufd.h @@ -47,6 +47,8 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -347,20 +349,86 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. 
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, +}; + +/** + * enum iommu_hwpt_vtd_s1_flags - Intel VT-d stage-1 page table + * entry attributes + * @IOMMU_VTD_S1_SRE: Supervisor request + * @IOMMU_VTD_S1_EAFE: Extended access enable + * @IOMMU_VTD_S1_WPE: Write protect enable + */ +enum iommu_hwpt_vtd_s1_flags { + IOMMU_VTD_S1_SRE = 1 << 0, + IOMMU_VTD_S1_EAFE = 1 << 1, + IOMMU_VTD_S1_WPE = 1 << 2, +}; + +/** + * struct iommu_hwpt_vtd_s1 - Intel VT-d stage-1 page table + * info (IOMMU_HWPT_DATA_VTD_S1) + * @flags: Combination of enum iommu_hwpt_vtd_s1_flags + * @pgtbl_addr: The base address of the stage-1 page table. + * @addr_width: The address width of the stage-1 page table + * @__reserved: Must be 0 + */ +struct iommu_hwpt_vtd_s1 { + __aligned_u64 flags; + __aligned_u64 pgtbl_addr; + __u32 addr_width; + __u32 __reserved; +}; + +/** + * enum iommu_hwpt_data_type - IOMMU HWPT Data Type + * @IOMMU_HWPT_DATA_NONE: no data + * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + */ +enum iommu_hwpt_data_type { + IOMMU_HWPT_DATA_NONE, + IOMMU_HWPT_DATA_VTD_S1, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS to connect this HWPT to + * @pt_id: The IOAS or HWPT to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 + * @data_type: One of enum iommu_hwpt_data_type + * @data_len: Length of the type specific data + * @data_uptr: User pointer to the type specific data * * Explicitly allocate a hardware page table object. This is the same object * type that is returned by iommufd_device_attach() and represents the * underlying iommu driver's iommu_domain kernel object. * - * A HWPT will be created with the IOVA mappings from the given IOAS. + * A kernel-managed HWPT will be created with the mappings from the given + * IOAS via the @pt_id. The @data_type for this allocation must be set to + * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a + * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. + * + * A user-managed nested HWPT will be created from a given parent HWPT via + * @pt_id, in which the parent HWPT must be allocated previously via the + * same ioctl from a given IOAS (@pt_id). In this case, the @data_type + * must be set to a pre-defined type corresponding to an I/O page table + * type supported by the underlying IOMMU hardware. + * + * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and + * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr + * must be given. */ struct iommu_hwpt_alloc { __u32 size; @@ -369,13 +437,26 @@ struct iommu_hwpt_alloc { __u32 pt_id; __u32 out_hwpt_id; __u32 __reserved; + __u32 data_type; + __u32 data_len; + __aligned_u64 data_uptr; }; #define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC) +/** + * enum iommu_hw_info_vtd_flags - Flags for VT-d hw_info + * @IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17: If set, disallow read-only mappings + * on a nested_parent domain. 
+ * https://www.intel.com/content/www/us/en/content-details/772415/content-details.html + */ +enum iommu_hw_info_vtd_flags { + IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17 = 1 << 0, +}; + /** * struct iommu_hw_info_vtd - Intel VT-d hardware information * - * @flags: Must be 0 + * @flags: Combination of enum iommu_hw_info_vtd_flags * @__reserved: Must be 0 * * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec @@ -404,6 +485,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -415,6 +510,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -439,6 +536,81 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. + * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. 
+ * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 0d74ee999a..549fea3a97 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1188,6 +1196,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 #ifdef KVM_CAP_IRQ_ROUTING @@ -1358,6 +1367,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1558,6 +1568,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/linux-headers/linux/psp-sev.h b/linux-headers/linux/psp-sev.h index 12ccb70099..bcb21339ee 100644 --- a/linux-headers/linux/psp-sev.h +++ b/linux-headers/linux/psp-sev.h @@ -68,6 +68,7 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, + SEV_RET_INVALID_KEY = 0x27, SEV_RET_MAX, } sev_ret_code; diff --git a/linux-headers/linux/stddef.h b/linux-headers/linux/stddef.h index 9bb07083ac..bf9749dd14 100644 --- a/linux-headers/linux/stddef.h +++ b/linux-headers/linux/stddef.h @@ -27,8 +27,13 @@ union { \ struct { MEMBERS } ATTRS; \ struct TAG { MEMBERS } ATTRS NAME; \ - } + } ATTRS +#ifdef __cplusplus +/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. 
*/ +#define __DECLARE_FLEX_ARRAY(T, member) \ + T member[0] +#else /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union * @@ -49,3 +54,5 @@ #ifndef __counted_by #define __counted_by(m) #endif + +#endif /* _LINUX_STDDEF_H */ diff --git a/linux-headers/linux/userfaultfd.h b/linux-headers/linux/userfaultfd.h index 59978fbaae..953c75feda 100644 --- a/linux-headers/linux/userfaultfd.h +++ b/linux-headers/linux/userfaultfd.h @@ -40,7 +40,8 @@ UFFD_FEATURE_EXACT_ADDRESS | \ UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \ UFFD_FEATURE_WP_UNPOPULATED | \ - UFFD_FEATURE_POISON) + UFFD_FEATURE_POISON | \ + UFFD_FEATURE_WP_ASYNC) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -216,6 +217,11 @@ struct uffdio_api { * (i.e. empty ptes). This will be the default behavior for shmem * & hugetlbfs, so this flag only affects anonymous memory behavior * when userfault write-protection mode is registered. + * + * UFFD_FEATURE_WP_ASYNC indicates that userfaultfd write-protection + * asynchronous mode is supported in which the write fault is + * automatically resolved and write-protection is un-set. + * It implies UFFD_FEATURE_WP_UNPOPULATED. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -232,6 +238,7 @@ struct uffdio_api { #define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12) #define UFFD_FEATURE_WP_UNPOPULATED (1<<13) #define UFFD_FEATURE_POISON (1<<14) +#define UFFD_FEATURE_WP_ASYNC (1<<15) __u64 features; __u64 ioctls; diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index acf72b4999..8e175ece31 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -277,8 +277,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */ __u32 index; /* Region index */ __u32 cap_offset; /* Offset within info struct of first cap */ - __u64 size; /* Region size (bytes) */ - __u64 offset; /* Region offset from start of device fd */ + __aligned_u64 size; /* Region size (bytes) */ + __aligned_u64 offset; /* Region offset from start of device fd */ }; #define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8) @@ -294,8 +294,8 @@ struct vfio_region_info { #define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1 struct vfio_region_sparse_mmap_area { - __u64 offset; /* Offset of mmap'able area within region */ - __u64 size; /* Size of mmap'able area */ + __aligned_u64 offset; /* Offset of mmap'able area within region */ + __aligned_u64 size; /* Size of mmap'able area */ }; struct vfio_region_info_cap_sparse_mmap { @@ -450,9 +450,9 @@ struct vfio_device_migration_info { VFIO_DEVICE_STATE_V1_RESUMING) __u32 reserved; - __u64 pending_bytes; - __u64 data_offset; - __u64 data_size; + __aligned_u64 pending_bytes; + __aligned_u64 data_offset; + __aligned_u64 data_size; }; /* @@ -476,7 +476,7 @@ struct vfio_device_migration_info { struct vfio_region_info_cap_nvlink2_ssatgt { struct vfio_info_cap_header header; - __u64 tgt; + __aligned_u64 tgt; }; /* @@ -816,7 +816,7 @@ struct vfio_device_gfx_plane_info { __u32 drm_plane_type; /* type of plane: DRM_PLANE_TYPE_* */ /* out */ __u32 drm_format; /* drm format of plane */ - __u64 drm_format_mod; /* tiled mode */ + __aligned_u64 drm_format_mod; /* tiled mode */ __u32 width; /* width of plane */ __u32 height; /* height of plane */ __u32 stride; /* stride of plane */ @@ -829,6 +829,7 @@ struct vfio_device_gfx_plane_info { __u32 region_index; /* region index */ __u32 dmabuf_id; /* dma-buf id */ }; + __u32 reserved; }; #define 
VFIO_DEVICE_QUERY_GFX_PLANE _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -863,9 +864,10 @@ struct vfio_device_ioeventfd { #define VFIO_DEVICE_IOEVENTFD_32 (1 << 2) /* 4-byte write */ #define VFIO_DEVICE_IOEVENTFD_64 (1 << 3) /* 8-byte write */ #define VFIO_DEVICE_IOEVENTFD_SIZE_MASK (0xf) - __u64 offset; /* device fd offset of write */ - __u64 data; /* data to be written */ + __aligned_u64 offset; /* device fd offset of write */ + __aligned_u64 data; /* data to be written */ __s32 fd; /* -1 for de-assignment */ + __u32 reserved; }; #define VFIO_DEVICE_IOEVENTFD _IO(VFIO_TYPE, VFIO_BASE + 16) @@ -1434,6 +1436,27 @@ struct vfio_device_feature_mig_data_size { #define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 +/** + * Upon VFIO_DEVICE_FEATURE_SET, set or clear the BUS mastering for the device + * based on the operation specified in op flag. + * + * The functionality is incorporated for devices that needs bus master control, + * but the in-band device interface lacks the support. Consequently, it is not + * applicable to PCI devices, as bus master control for PCI devices is managed + * in-band through the configuration space. At present, this feature is supported + * only for CDX devices. + * When the device's BUS MASTER setting is configured as CLEAR, it will result in + * blocking all incoming DMA requests from the device. On the other hand, configuring + * the device's BUS MASTER setting as SET (enable) will grant the device the + * capability to perform DMA to the host memory. + */ +struct vfio_device_feature_bus_master { + __u32 op; +#define VFIO_DEVICE_FEATURE_CLEAR_MASTER 0 /* Clear Bus Master */ +#define VFIO_DEVICE_FEATURE_SET_MASTER 1 /* Set Bus Master */ +}; +#define VFIO_DEVICE_FEATURE_BUS_MASTER 10 + /* -------- API for Type1 VFIO IOMMU -------- */ /** @@ -1449,7 +1472,7 @@ struct vfio_iommu_type1_info { __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ #define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __aligned_u64 iova_pgsizes; /* Bitmap of supported page sizes */ __u32 cap_offset; /* Offset within info struct of first cap */ __u32 pad; }; diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index f5c48b61ab..649560c685 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -219,4 +219,12 @@ */ #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E) +/* Get the group for the descriptor table including driver & device areas + * of a virtqueue: read index, write group in num. + * The virtqueue index is stored in the index field of vhost_vring_state. + * The group ID of the descriptor table for this specific virtqueue + * is returned via num field of vhost_vring_state. 
+ */ +#define VHOST_VDPA_GET_VRING_DESC_GROUP _IOWR(VHOST_VIRTIO, 0x7F, \ + struct vhost_vring_state) #endif diff --git a/meson.build b/meson.build index 6c77d9687d..0c62b4156d 100644 --- a/meson.build +++ b/meson.build @@ -114,6 +114,8 @@ elif cpu in ['riscv32'] kvm_targets = ['riscv32-softmmu'] elif cpu in ['riscv64'] kvm_targets = ['riscv64-softmmu'] +elif cpu in ['loongarch64'] + kvm_targets = ['loongarch64-softmmu'] else kvm_targets = [] endif @@ -3358,6 +3360,7 @@ if have_system or have_user 'target/hppa', 'target/i386', 'target/i386/kvm', + 'target/loongarch', 'target/mips/tcg', 'target/nios2', 'target/ppc', diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index fc075952e6..b098b1c6f3 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -11,15 +11,23 @@ #include "qapi/error.h" #include "qemu/module.h" #include "sysemu/qtest.h" -#include "exec/cpu_ldst.h" +#include "sysemu/tcg.h" +#include "sysemu/kvm.h" +#include "kvm/kvm_loongarch.h" #include "exec/exec-all.h" #include "cpu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "cpu-csr.h" #include "sysemu/reset.h" -#include "tcg/tcg.h" +#include "vec.h" +#ifdef CONFIG_KVM +#include <linux/kvm.h> +#endif +#ifdef CONFIG_TCG +#include "exec/cpu_ldst.h" +#include "tcg/tcg.h" +#endif const char * const regnames[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -108,12 +116,15 @@ void loongarch_cpu_set_irq(void *opaque, int irq, int level) return; } - env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); - - if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { - cpu_interrupt(cs, CPU_INTERRUPT_HARD); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + if (kvm_enabled()) { + kvm_loongarch_set_interrupt(cpu, irq, level); + } else if (tcg_enabled()) { + env->CSR_ESTAT = deposit64(env->CSR_ESTAT, irq, 1, level != 0); + if (FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS)) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } } } @@ -138,7 +149,10 @@ static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) return (pending & status) != 0; } +#endif +#ifdef CONFIG_TCG +#ifndef CONFIG_USER_ONLY static void loongarch_cpu_do_interrupt(CPUState *cs) { LoongArchCPU *cpu = LOONGARCH_CPU(cs); @@ -320,7 +334,6 @@ static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) } #endif -#ifdef CONFIG_TCG static void loongarch_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb) { @@ -531,10 +544,12 @@ static void loongarch_cpu_reset_hold(Object *obj) env->CSR_ESTAT = env->CSR_ESTAT & (~MAKE_64BIT_MASK(0, 2)); env->CSR_RVACFG = FIELD_DP64(env->CSR_RVACFG, CSR_RVACFG, RBITS, 0); + env->CSR_CPUID = cs->cpu_index; env->CSR_TCFG = FIELD_DP64(env->CSR_TCFG, CSR_TCFG, EN, 0); env->CSR_LLBCTL = FIELD_DP64(env->CSR_LLBCTL, CSR_LLBCTL, KLO, 0); env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 0); env->CSR_MERRCTL = FIELD_DP64(env->CSR_MERRCTL, CSR_MERRCTL, ISMERR, 0); + env->CSR_TID = cs->cpu_index; env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, TLB_TYPE, 2); env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, MTLB_ENTRY, 63); @@ -551,9 +566,14 @@ static void loongarch_cpu_reset_hold(Object *obj) #ifndef CONFIG_USER_ONLY env->pc = 0x1c000000; memset(env->tlb, 0, sizeof(env->tlb)); + if (kvm_enabled()) { + kvm_arch_reset_vcpu(env); + } #endif +#ifdef CONFIG_TCG restore_fp_status(env); +#endif cs->exception_index = -1; } @@ -582,47 +602,6 @@ static void 
loongarch_cpu_realizefn(DeviceState *dev, Error **errp) lacc->parent_realize(dev, errp); } -#ifndef CONFIG_USER_ONLY -static void loongarch_qemu_write(void *opaque, hwaddr addr, - uint64_t val, unsigned size) -{ - qemu_log_mask(LOG_UNIMP, "[%s]: Unimplemented reg 0x%" HWADDR_PRIx "\n", - __func__, addr); -} - -static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) -{ - switch (addr) { - case VERSION_REG: - return 0x11ULL; - case FEATURE_REG: - return 1ULL << IOCSRF_MSI | 1ULL << IOCSRF_EXTIOI | - 1ULL << IOCSRF_CSRIPI; - case VENDOR_REG: - return 0x6e6f73676e6f6f4cULL; /* "Loongson" */ - case CPUNAME_REG: - return 0x303030354133ULL; /* "3A5000" */ - case MISC_FUNC_REG: - return 1ULL << IOCSRM_EXTIOI_EN; - } - return 0ULL; -} - -static const MemoryRegionOps loongarch_qemu_ops = { - .read = loongarch_qemu_read, - .write = loongarch_qemu_write, - .endianness = DEVICE_LITTLE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 8, - }, - .impl = { - .min_access_size = 8, - .max_access_size = 8, - }, -}; -#endif - static bool loongarch_get_lsx(Object *obj, Error **errp) { LoongArchCPU *cpu = LOONGARCH_CPU(obj); @@ -693,17 +672,12 @@ static void loongarch_cpu_init(Object *obj) { #ifndef CONFIG_USER_ONLY LoongArchCPU *cpu = LOONGARCH_CPU(obj); - CPULoongArchState *env = &cpu->env; qdev_init_gpio_in(DEVICE(cpu), loongarch_cpu_set_irq, N_IRQS); +#ifdef CONFIG_TCG timer_init_ns(&cpu->timer, QEMU_CLOCK_VIRTUAL, &loongarch_constant_timer_cb, cpu); - memory_region_init_io(&env->system_iocsr, OBJECT(cpu), NULL, - env, "iocsr", UINT64_MAX); - address_space_init(&env->address_space_iocsr, &env->system_iocsr, "IOCSR"); - memory_region_init_io(&env->iocsr_mem, OBJECT(cpu), &loongarch_qemu_ops, - NULL, "iocsr_misc", 0x428); - memory_region_add_subregion(&env->system_iocsr, 0, &env->iocsr_mem); +#endif #endif } @@ -762,6 +736,8 @@ void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) qemu_fprintf(f, "TLBRENTRY=%016" PRIx64 "\n", env->CSR_TLBRENTRY); qemu_fprintf(f, "TLBRBADV=%016" PRIx64 "\n", env->CSR_TLBRBADV); qemu_fprintf(f, "TLBRERA=%016" PRIx64 "\n", env->CSR_TLBRERA); + qemu_fprintf(f, "TCFG=%016" PRIx64 "\n", env->CSR_TCFG); + qemu_fprintf(f, "TVAL=%016" PRIx64 "\n", env->CSR_TVAL); /* fpr */ if (flags & CPU_DUMP_FPU) { diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 00d1fba597..4aba8aba4c 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -319,6 +319,7 @@ typedef struct CPUArchState { uint64_t CSR_PWCH; uint64_t CSR_STLBPS; uint64_t CSR_RVACFG; + uint64_t CSR_CPUID; uint64_t CSR_PRCFG1; uint64_t CSR_PRCFG2; uint64_t CSR_PRCFG3; @@ -350,16 +351,14 @@ typedef struct CPUArchState { uint64_t CSR_DBG; uint64_t CSR_DERA; uint64_t CSR_DSAVE; - uint64_t CSR_CPUID; #ifndef CONFIG_USER_ONLY LoongArchTLB tlb[LOONGARCH_TLB_MAX]; - AddressSpace address_space_iocsr; - MemoryRegion system_iocsr; - MemoryRegion iocsr_mem; + AddressSpace *address_space_iocsr; bool load_elf; uint64_t elf_address; + uint32_t mp_state; /* Store ipistate to access from this struct */ DeviceState *ipistate; #endif @@ -380,6 +379,8 @@ struct ArchCPU { /* 'compatible' string for this CPU for Linux device trees */ const char *dtb_compatible; + /* used by KVM_REG_LOONGARCH_COUNTER ioctl to access guest time counters */ + uint64_t kvm_state_counter; }; /** diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c new file mode 100644 index 0000000000..f68d63f466 --- /dev/null +++ b/target/loongarch/cpu_helper.c @@ -0,0 +1,231 @@ +/* 
SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch CPU helpers for qemu + * + * Copyright (c) 2024 Loongson Technology Corporation Limited + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" +#include "cpu-csr.h" + +static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + int access_type, int index, int mmu_idx) +{ + LoongArchTLB *tlb = &env->tlb[index]; + uint64_t plv = mmu_idx; + uint64_t tlb_entry, tlb_ppn; + uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; + + if (index >= LOONGARCH_STLB) { + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + } else { + tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + } + n = (address >> tlb_ps) & 0x1;/* Odd or even */ + + tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0; + tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); + tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); + tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); + if (is_la64(env)) { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); + tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); + tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); + tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); + } else { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); + tlb_nx = 0; + tlb_nr = 0; + tlb_rplv = 0; + } + + /* Remove sw bit between bit12 -- bit PS*/ + tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1)); + + /* Check access rights */ + if (!tlb_v) { + return TLBRET_INVALID; + } + + if (access_type == MMU_INST_FETCH && tlb_nx) { + return TLBRET_XI; + } + + if (access_type == MMU_DATA_LOAD && tlb_nr) { + return TLBRET_RI; + } + + if (((tlb_rplv == 0) && (plv > tlb_plv)) || + ((tlb_rplv == 1) && (plv != tlb_plv))) { + return TLBRET_PE; + } + + if ((access_type == MMU_DATA_STORE) && !tlb_d) { + return TLBRET_DIRTY; + } + + *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | + (address & MAKE_64BIT_MASK(0, tlb_ps)); + *prot = PAGE_READ; + if (tlb_d) { + *prot |= PAGE_WRITE; + } + if (!tlb_nx) { + *prot |= PAGE_EXEC; + } + return TLBRET_MATCH; +} + +/* + * One tlb entry holds an adjacent odd/even pair, the vpn is the + * content of the virtual page number divided by 2. So the + * compare vpn is bit[47:15] for 16KiB page. while the vppn + * field in tlb entry contains bit[47:13], so need adjust. 
+ * virt_vpn = vaddr[47:13] + */ +bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index) +{ + LoongArchTLB *tlb; + uint16_t csr_asid, tlb_asid, stlb_idx; + uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; + int i, compare_shift; + uint64_t vpn, tlb_vppn; + + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); + stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ + compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + + /* Search STLB */ + for (i = 0; i < 8; ++i) { + tlb = &env->tlb[i * 256 + stlb_idx]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i * 256 + stlb_idx; + return true; + } + } + } + + /* Search MTLB */ + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { + tlb = &env->tlb[i]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (tlb_e) { + tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); + if ((tlb_g == 1 || tlb_asid == csr_asid) && + (vpn == (tlb_vppn >> compare_shift))) { + *index = i; + return true; + } + } + } + return false; +} + +static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int index, match; + + match = loongarch_tlb_search(env, address, &index); + if (match) { + return loongarch_map_tlb_entry(env, physical, prot, + address, access_type, index, mmu_idx); + } + + return TLBRET_NOMATCH; +} + +static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, + target_ulong dmw) +{ + if (is_la64(env)) { + return va & TARGET_VIRT_MASK; + } else { + uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG); + return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \ + (pseg << R_CSR_DMW_32_VSEG_SHIFT); + } +} + +int get_physical_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int user_mode = mmu_idx == MMU_IDX_USER; + int kernel_mode = mmu_idx == MMU_IDX_KERNEL; + uint32_t plv, base_c, base_v; + int64_t addr_high; + uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); + uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); + + /* Check PG and DA */ + if (da & !pg) { + *physical = address & TARGET_PHYS_MASK; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + + plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); + if (is_la64(env)) { + base_v = address >> R_CSR_DMW_64_VSEG_SHIFT; + } else { + base_v = address >> R_CSR_DMW_32_VSEG_SHIFT; + } + /* Check direct map window */ + for (int i = 0; i < 4; i++) { + if (is_la64(env)) { + base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG); + } else { + base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG); + } + if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { + *physical = dmw_va2pa(env, address, env->CSR_DMW[i]); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + } + + /* Check valid 
extension */ + addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); + if (!(addr_high == 0 || addr_high == -1)) { + return TLBRET_BADADDR; + } + + /* Mapped address */ + return loongarch_map_address(env, physical, prot, address, + access_type, mmu_idx); +} + +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, + cpu_mmu_index(env, false)) != 0) { + return -1; + } + return phys_addr; +} diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index c492863cc5..a2fc54c8a7 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -31,10 +31,23 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, const char *loongarch_exception_name(int32_t exception); +#ifdef CONFIG_TCG int ieee_ex_to_loongarch(int xcpt); void restore_fp_status(CPULoongArchState *env); +#endif #ifndef CONFIG_USER_ONLY +enum { + TLBRET_MATCH = 0, + TLBRET_BADADDR = 1, + TLBRET_NOMATCH = 2, + TLBRET_INVALID = 3, + TLBRET_DIRTY = 4, + TLBRET_RI = 5, + TLBRET_XI = 6, + TLBRET_PE = 7, +}; + extern const VMStateDescription vmstate_loongarch_cpu; void loongarch_cpu_set_irq(void *opaque, int irq, int level); @@ -44,12 +57,18 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, uint64_t value); +bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, + int *index); +int get_physical_address(CPULoongArchState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx); +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#ifdef CONFIG_TCG bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); - -hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +#endif #endif /* !CONFIG_USER_ONLY */ uint64_t read_fcc(CPULoongArchState *env); diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c new file mode 100644 index 0000000000..c19978a970 --- /dev/null +++ b/target/loongarch/kvm/kvm.c @@ -0,0 +1,802 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch KVM + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include <sys/ioctl.h> +#include <linux/kvm.h> + +#include "qemu/timer.h" +#include "qemu/error-report.h" +#include "qemu/main-loop.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/kvm_int.h" +#include "hw/pci/pci.h" +#include "exec/memattrs.h" +#include "exec/address-spaces.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "qemu/log.h" +#include "hw/loader.h" +#include "migration/migration.h" +#include "sysemu/runstate.h" +#include "cpu-csr.h" +#include "kvm_loongarch.h" +#include "trace.h" + +static bool cap_has_mp_state; +const KVMCapabilityInfo kvm_arch_required_capabilities[] = { + KVM_CAP_LAST_INFO +}; + +static int kvm_loongarch_get_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + /* Get the current register set as KVM seems it */ + ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); + if (ret < 0) { + trace_kvm_failed_get_regs_core(strerror(errno)); + return ret; + } + 
/* gpr[0] value is always 0 */ + env->gpr[0] = 0; + for (i = 1; i < 32; i++) { + env->gpr[i] = regs.gpr[i]; + } + + env->pc = regs.pc; + return ret; +} + +static int kvm_loongarch_put_regs_core(CPUState *cs) +{ + int ret = 0; + int i; + struct kvm_regs regs; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + /* Set the registers based on QEMU's view of things */ + for (i = 0; i < 32; i++) { + regs.gpr[i] = env->gpr[i]; + } + + regs.pc = env->pc; + ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); + if (ret < 0) { + trace_kvm_failed_put_regs_core(strerror(errno)); + } + + return ret; +} + +static int kvm_loongarch_get_csr(CPUState *cs) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + &env->CSR_PGDL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + 
&env->CSR_SAVE[5]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + &env->CSR_DMW[1]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_get_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + + return ret; +} + +static int kvm_loongarch_put_csr(CPUState *cs, int level) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CRMD), + &env->CSR_CRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRMD), + &env->CSR_PRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EUEN), + &env->CSR_EUEN); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_MISC), + &env->CSR_MISC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ECFG), + &env->CSR_ECFG); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ESTAT), + &env->CSR_ESTAT); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ERA), + &env->CSR_ERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADV), + &env->CSR_BADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_BADI), + &env->CSR_BADI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_EENTRY), + &env->CSR_EENTRY); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBIDX), + &env->CSR_TLBIDX); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBEHI), + &env->CSR_TLBEHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO0), + &env->CSR_TLBELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBELO1), + &env->CSR_TLBELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_ASID), + &env->CSR_ASID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDL), + 
&env->CSR_PGDL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGDH), + &env->CSR_PGDH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PGD), + &env->CSR_PGD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCL), + &env->CSR_PWCL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PWCH), + &env->CSR_PWCH); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_STLBPS), + &env->CSR_STLBPS); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_RVACFG), + &env->CSR_RVACFG); + + /* CPUID is constant after poweron, it should be set only once */ + if (level >= KVM_PUT_FULL_STATE) { + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CPUID), + &env->CSR_CPUID); + } + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG1), + &env->CSR_PRCFG1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG2), + &env->CSR_PRCFG2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_PRCFG3), + &env->CSR_PRCFG3); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(0)), + &env->CSR_SAVE[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(1)), + &env->CSR_SAVE[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(2)), + &env->CSR_SAVE[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(3)), + &env->CSR_SAVE[3]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(4)), + &env->CSR_SAVE[4]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(5)), + &env->CSR_SAVE[5]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(6)), + &env->CSR_SAVE[6]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_SAVE(7)), + &env->CSR_SAVE[7]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TID), + &env->CSR_TID); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_CNTC), + &env->CSR_CNTC); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TICLR), + &env->CSR_TICLR); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_LLBCTL), + &env->CSR_LLBCTL); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL1), + &env->CSR_IMPCTL1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_IMPCTL2), + &env->CSR_IMPCTL2); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRENTRY), + &env->CSR_TLBRENTRY); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRBADV), + &env->CSR_TLBRBADV); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRERA), + &env->CSR_TLBRERA); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRSAVE), + &env->CSR_TLBRSAVE); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO0), + &env->CSR_TLBRELO0); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRELO1), + &env->CSR_TLBRELO1); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBREHI), + &env->CSR_TLBREHI); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TLBRPRMD), + &env->CSR_TLBRPRMD); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(0)), + &env->CSR_DMW[0]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(1)), + &env->CSR_DMW[1]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(2)), + &env->CSR_DMW[2]); + + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_DMW(3)), + &env->CSR_DMW[3]); + /* + * timer cfg must be put at last since it is used to enable + * guest timer + */ + ret |= kvm_set_one_reg(cs, KVM_IOC_CSRID(LOONGARCH_CSR_TVAL), + &env->CSR_TVAL); + + ret |= kvm_set_one_reg(cs, 
KVM_IOC_CSRID(LOONGARCH_CSR_TCFG), + &env->CSR_TCFG); + return ret; +} + +static int kvm_loongarch_get_regs_fp(CPUState *cs) +{ + int ret, i; + struct kvm_fpu fpu; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_get_fpu(strerror(errno)); + return ret; + } + + env->fcsr0 = fpu.fcsr; + for (i = 0; i < 32; i++) { + env->fpr[i].vreg.UD[0] = fpu.fpr[i].val64[0]; + } + for (i = 0; i < 8; i++) { + env->cf[i] = fpu.fcc & 0xFF; + fpu.fcc = fpu.fcc >> 8; + } + + return ret; +} + +static int kvm_loongarch_put_regs_fp(CPUState *cs) +{ + int ret, i; + struct kvm_fpu fpu; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + fpu.fcsr = env->fcsr0; + fpu.fcc = 0; + for (i = 0; i < 32; i++) { + fpu.fpr[i].val64[0] = env->fpr[i].vreg.UD[0]; + } + + for (i = 0; i < 8; i++) { + fpu.fcc |= env->cf[i] << (8 * i); + } + + ret = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); + if (ret < 0) { + trace_kvm_failed_put_fpu(strerror(errno)); + } + + return ret; +} + +void kvm_arch_reset_vcpu(CPULoongArchState *env) +{ + env->mp_state = KVM_MP_STATE_RUNNABLE; +} + +static int kvm_loongarch_get_mpstate(CPUState *cs) +{ + int ret = 0; + struct kvm_mp_state mp_state; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_get_mpstate(strerror(errno)); + return ret; + } + env->mp_state = mp_state.mp_state; + } + + return ret; +} + +static int kvm_loongarch_put_mpstate(CPUState *cs) +{ + int ret = 0; + + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + struct kvm_mp_state mp_state = { + .mp_state = env->mp_state + }; + + if (cap_has_mp_state) { + ret = kvm_vcpu_ioctl(cs, KVM_SET_MP_STATE, &mp_state); + if (ret) { + trace_kvm_failed_put_mpstate(strerror(errno)); + } + } + + return ret; +} + +static int kvm_loongarch_get_cpucfg(CPUState *cs) +{ + int i, ret = 0; + uint64_t val; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + for (i = 0; i < 21; i++) { + ret = kvm_get_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_get_cpucfg(strerror(errno)); + } + env->cpucfg[i] = (uint32_t)val; + } + return ret; +} + +static int kvm_check_cpucfg2(CPUState *cs) +{ + int ret; + uint64_t val; + struct kvm_device_attr attr = { + .group = KVM_LOONGARCH_VCPU_CPUCFG, + .attr = 2, + .addr = (uint64_t)&val, + }; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + + ret = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, &attr); + + if (!ret) { + kvm_vcpu_ioctl(cs, KVM_GET_DEVICE_ATTR, &attr); + env->cpucfg[2] &= val; + + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, FP)) { + /* The FP minimal version is 1. */ + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, FP_VER, 1); + } + + if (FIELD_EX32(env->cpucfg[2], CPUCFG2, LLFTP)) { + /* The LLFTP minimal version is 1. 
*/ + env->cpucfg[2] = FIELD_DP32(env->cpucfg[2], CPUCFG2, LLFTP_VER, 1); + } + } + + return ret; +} + +static int kvm_loongarch_put_cpucfg(CPUState *cs) +{ + int i, ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + uint64_t val; + + for (i = 0; i < 21; i++) { + if (i == 2) { + ret = kvm_check_cpucfg2(cs); + if (ret) { + return ret; + } + } + val = env->cpucfg[i]; + ret = kvm_set_one_reg(cs, KVM_IOC_CPUCFG(i), &val); + if (ret < 0) { + trace_kvm_failed_put_cpucfg(strerror(errno)); + } + } + return ret; +} + +int kvm_arch_get_registers(CPUState *cs) +{ + int ret; + + ret = kvm_loongarch_get_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_csr(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_get_cpucfg(cs); + return ret; +} + +int kvm_arch_put_registers(CPUState *cs, int level) +{ + int ret; + + ret = kvm_loongarch_put_regs_core(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_csr(cs, level); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_regs_fp(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_mpstate(cs); + if (ret) { + return ret; + } + + ret = kvm_loongarch_put_cpucfg(cs); + return ret; +} + +static void kvm_loongarch_vm_stage_change(void *opaque, bool running, + RunState state) +{ + int ret; + CPUState *cs = opaque; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + + if (running) { + ret = kvm_set_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_put_counter(strerror(errno)); + } + } else { + ret = kvm_get_one_reg(cs, KVM_REG_LOONGARCH_COUNTER, + &cpu->kvm_state_counter); + if (ret < 0) { + trace_kvm_failed_get_counter(strerror(errno)); + } + } +} + +int kvm_arch_init_vcpu(CPUState *cs) +{ + qemu_add_vm_change_state_handler(kvm_loongarch_vm_stage_change, cs); + return 0; +} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + return 0; +} + +unsigned long kvm_arch_vcpu_id(CPUState *cs) +{ + return cs->cpu_index; +} + +int kvm_arch_release_virq_post(int virq) +{ + return 0; +} + +int kvm_arch_msi_data_to_gsi(uint32_t data) +{ + abort(); +} + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) +{ + return 0; +} + +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev) +{ + return 0; +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ +} + +int kvm_arch_get_default_type(MachineState *ms) +{ + return 0; +} + +int kvm_arch_init(MachineState *ms, KVMState *s) +{ + cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE); + return 0; +} + +int kvm_arch_irqchip_create(KVMState *s) +{ + return 0; +} + +void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) +{ +} + +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +{ + return MEMTXATTRS_UNSPECIFIED; +} + +int kvm_arch_process_async_events(CPUState *cs) +{ + return cs->halted; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + return true; +} + +bool kvm_arch_cpu_check_are_resettable(void) +{ + return true; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + CPULoongArchState *env = &cpu->env; + MemTxAttrs attrs = {}; + + attrs.requester_id = env_cpu(env)->cpu_index; + + trace_kvm_arch_handle_exit(run->exit_reason); + switch 
(run->exit_reason) { + case KVM_EXIT_LOONGARCH_IOCSR: + address_space_rw(env->address_space_iocsr, + run->iocsr_io.phys_addr, + attrs, + run->iocsr_io.data, + run->iocsr_io.len, + run->iocsr_io.is_write); + break; + default: + ret = -1; + warn_report("KVM: unknown exit reason %d", run->exit_reason); + break; + } + return ret; +} + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level) +{ + struct kvm_interrupt intr; + CPUState *cs = CPU(cpu); + + if (level) { + intr.irq = irq; + } else { + intr.irq = -irq; + } + + trace_kvm_set_intr(irq, level); + return kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h new file mode 100644 index 0000000000..d945b6bb82 --- /dev/null +++ b/target/loongarch/kvm/kvm_loongarch.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch kvm interface + * + * Copyright (c) 2023 Loongson Technology Corporation Limited + */ + +#include "cpu.h" + +#ifndef QEMU_KVM_LOONGARCH_H +#define QEMU_KVM_LOONGARCH_H + +int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level); +void kvm_arch_reset_vcpu(CPULoongArchState *env); + +#endif diff --git a/target/loongarch/kvm/meson.build b/target/loongarch/kvm/meson.build new file mode 100644 index 0000000000..2266de6ca9 --- /dev/null +++ b/target/loongarch/kvm/meson.build @@ -0,0 +1 @@ +loongarch_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) diff --git a/target/loongarch/meson.build b/target/loongarch/meson.build index 18e8191e2b..e002e9aaf6 100644 --- a/target/loongarch/meson.build +++ b/target/loongarch/meson.build @@ -3,31 +3,20 @@ gen = decodetree.process('insns.decode') loongarch_ss = ss.source_set() loongarch_ss.add(files( 'cpu.c', -)) -loongarch_tcg_ss = ss.source_set() -loongarch_tcg_ss.add(gen) -loongarch_tcg_ss.add(files( - 'fpu_helper.c', - 'op_helper.c', - 'translate.c', 'gdbstub.c', - 'vec_helper.c', )) -loongarch_tcg_ss.add(zlib) loongarch_system_ss = ss.source_set() loongarch_system_ss.add(files( + 'cpu_helper.c', 'loongarch-qmp-cmds.c', 'machine.c', - 'tlb_helper.c', - 'constant_timer.c', - 'csr_helper.c', - 'iocsr_helper.c', )) common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: [files('disas.c'), gen]) -loongarch_ss.add_all(when: 'CONFIG_TCG', if_true: [loongarch_tcg_ss]) +subdir('tcg') target_arch += {'loongarch': loongarch_ss} target_system_arch += {'loongarch': loongarch_system_ss} +subdir('kvm') diff --git a/target/loongarch/constant_timer.c b/target/loongarch/tcg/constant_timer.c similarity index 100% rename from target/loongarch/constant_timer.c rename to target/loongarch/tcg/constant_timer.c diff --git a/target/loongarch/csr_helper.c b/target/loongarch/tcg/csr_helper.c similarity index 100% rename from target/loongarch/csr_helper.c rename to target/loongarch/tcg/csr_helper.c diff --git a/target/loongarch/fpu_helper.c b/target/loongarch/tcg/fpu_helper.c similarity index 100% rename from target/loongarch/fpu_helper.c rename to target/loongarch/tcg/fpu_helper.c diff --git a/target/loongarch/insn_trans/trans_arith.c.inc b/target/loongarch/tcg/insn_trans/trans_arith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_arith.c.inc rename to target/loongarch/tcg/insn_trans/trans_arith.c.inc diff --git a/target/loongarch/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_atomic.c.inc rename 
to target/loongarch/tcg/insn_trans/trans_atomic.c.inc diff --git a/target/loongarch/insn_trans/trans_bit.c.inc b/target/loongarch/tcg/insn_trans/trans_bit.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_bit.c.inc rename to target/loongarch/tcg/insn_trans/trans_bit.c.inc diff --git a/target/loongarch/insn_trans/trans_branch.c.inc b/target/loongarch/tcg/insn_trans/trans_branch.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_branch.c.inc rename to target/loongarch/tcg/insn_trans/trans_branch.c.inc diff --git a/target/loongarch/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_extra.c.inc rename to target/loongarch/tcg/insn_trans/trans_extra.c.inc diff --git a/target/loongarch/insn_trans/trans_farith.c.inc b/target/loongarch/tcg/insn_trans/trans_farith.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_farith.c.inc rename to target/loongarch/tcg/insn_trans/trans_farith.c.inc diff --git a/target/loongarch/insn_trans/trans_fcmp.c.inc b/target/loongarch/tcg/insn_trans/trans_fcmp.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcmp.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcmp.c.inc diff --git a/target/loongarch/insn_trans/trans_fcnv.c.inc b/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fcnv.c.inc rename to target/loongarch/tcg/insn_trans/trans_fcnv.c.inc diff --git a/target/loongarch/insn_trans/trans_fmemory.c.inc b/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmemory.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmemory.c.inc diff --git a/target/loongarch/insn_trans/trans_fmov.c.inc b/target/loongarch/tcg/insn_trans/trans_fmov.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_fmov.c.inc rename to target/loongarch/tcg/insn_trans/trans_fmov.c.inc diff --git a/target/loongarch/insn_trans/trans_memory.c.inc b/target/loongarch/tcg/insn_trans/trans_memory.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_memory.c.inc rename to target/loongarch/tcg/insn_trans/trans_memory.c.inc diff --git a/target/loongarch/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_privileged.c.inc rename to target/loongarch/tcg/insn_trans/trans_privileged.c.inc diff --git a/target/loongarch/insn_trans/trans_shift.c.inc b/target/loongarch/tcg/insn_trans/trans_shift.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_shift.c.inc rename to target/loongarch/tcg/insn_trans/trans_shift.c.inc diff --git a/target/loongarch/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc similarity index 100% rename from target/loongarch/insn_trans/trans_vec.c.inc rename to target/loongarch/tcg/insn_trans/trans_vec.c.inc diff --git a/target/loongarch/iocsr_helper.c b/target/loongarch/tcg/iocsr_helper.c similarity index 76% rename from target/loongarch/iocsr_helper.c rename to target/loongarch/tcg/iocsr_helper.c index 6cd01d5f09..b6916f53d2 100644 --- a/target/loongarch/iocsr_helper.c +++ b/target/loongarch/tcg/iocsr_helper.c @@ -17,52 +17,52 @@ uint64_t helper_iocsrrd_b(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldub(&env->address_space_iocsr, r_addr, + return 
address_space_ldub(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_h(CPULoongArchState *env, target_ulong r_addr) { - return address_space_lduw(&env->address_space_iocsr, r_addr, + return address_space_lduw(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_w(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldl(&env->address_space_iocsr, r_addr, + return address_space_ldl(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } uint64_t helper_iocsrrd_d(CPULoongArchState *env, target_ulong r_addr) { - return address_space_ldq(&env->address_space_iocsr, r_addr, + return address_space_ldq(env->address_space_iocsr, r_addr, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_b(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stb(&env->address_space_iocsr, w_addr, + address_space_stb(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_h(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stw(&env->address_space_iocsr, w_addr, + address_space_stw(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_w(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stl(&env->address_space_iocsr, w_addr, + address_space_stl(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } void helper_iocsrwr_d(CPULoongArchState *env, target_ulong w_addr, target_ulong val) { - address_space_stq(&env->address_space_iocsr, w_addr, + address_space_stq(env->address_space_iocsr, w_addr, val, GET_MEMTXATTRS(env), NULL); } diff --git a/target/loongarch/tcg/meson.build b/target/loongarch/tcg/meson.build new file mode 100644 index 0000000000..1a3cd589fb --- /dev/null +++ b/target/loongarch/tcg/meson.build @@ -0,0 +1,19 @@ +if 'CONFIG_TCG' not in config_all + subdir_done() +endif + +loongarch_ss.add([zlib, gen]) + +loongarch_ss.add(files( + 'fpu_helper.c', + 'op_helper.c', + 'translate.c', + 'vec_helper.c', +)) + +loongarch_system_ss.add(files( + 'constant_timer.c', + 'csr_helper.c', + 'iocsr_helper.c', + 'tlb_helper.c', +)) diff --git a/target/loongarch/op_helper.c b/target/loongarch/tcg/op_helper.c similarity index 100% rename from target/loongarch/op_helper.c rename to target/loongarch/tcg/op_helper.c diff --git a/target/loongarch/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c similarity index 71% rename from target/loongarch/tlb_helper.c rename to target/loongarch/tcg/tlb_helper.c index 449043c68b..804ab7a263 100644 --- a/target/loongarch/tlb_helper.c +++ b/target/loongarch/tcg/tlb_helper.c @@ -17,236 +17,6 @@ #include "exec/log.h" #include "cpu-csr.h" -enum { - TLBRET_MATCH = 0, - TLBRET_BADADDR = 1, - TLBRET_NOMATCH = 2, - TLBRET_INVALID = 3, - TLBRET_DIRTY = 4, - TLBRET_RI = 5, - TLBRET_XI = 6, - TLBRET_PE = 7, -}; - -static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - int access_type, int index, int mmu_idx) -{ - LoongArchTLB *tlb = &env->tlb[index]; - uint64_t plv = mmu_idx; - uint64_t tlb_entry, tlb_ppn; - uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; - - if (index >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } - n = (address >> tlb_ps) & 0x1;/* Odd or even */ - - tlb_entry = n ? 
tlb->tlb_entry1 : tlb->tlb_entry0; - tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); - tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); - tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); - if (is_la64(env)) { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); - tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); - tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); - tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); - } else { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); - tlb_nx = 0; - tlb_nr = 0; - tlb_rplv = 0; - } - - /* Remove sw bit between bit12 -- bit PS*/ - tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) -1)); - - /* Check access rights */ - if (!tlb_v) { - return TLBRET_INVALID; - } - - if (access_type == MMU_INST_FETCH && tlb_nx) { - return TLBRET_XI; - } - - if (access_type == MMU_DATA_LOAD && tlb_nr) { - return TLBRET_RI; - } - - if (((tlb_rplv == 0) && (plv > tlb_plv)) || - ((tlb_rplv == 1) && (plv != tlb_plv))) { - return TLBRET_PE; - } - - if ((access_type == MMU_DATA_STORE) && !tlb_d) { - return TLBRET_DIRTY; - } - - *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | - (address & MAKE_64BIT_MASK(0, tlb_ps)); - *prot = PAGE_READ; - if (tlb_d) { - *prot |= PAGE_WRITE; - } - if (!tlb_nx) { - *prot |= PAGE_EXEC; - } - return TLBRET_MATCH; -} - -/* - * One tlb entry holds an adjacent odd/even pair, the vpn is the - * content of the virtual page number divided by 2. So the - * compare vpn is bit[47:15] for 16KiB page. while the vppn - * field in tlb entry contains bit[47:13], so need adjust. - * virt_vpn = vaddr[47:13] - */ -static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, - int *index) -{ - LoongArchTLB *tlb; - uint16_t csr_asid, tlb_asid, stlb_idx; - uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; - int i, compare_shift; - uint64_t vpn, tlb_vppn; - - csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); - stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); - stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ - compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - - /* Search STLB */ - for (i = 0; i < 8; ++i) { - tlb = &env->tlb[i * 256 + stlb_idx]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i * 256 + stlb_idx; - return true; - } - } - } - - /* Search MTLB */ - for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) { - tlb = &env->tlb[i]; - tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (tlb_e) { - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; - vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - if ((tlb_g == 1 || tlb_asid == csr_asid) && - (vpn == (tlb_vppn >> compare_shift))) { - *index = i; - return true; - } - } - } - return false; -} - -static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int index, match; - - match = loongarch_tlb_search(env, address, &index); - if (match) { - return loongarch_map_tlb_entry(env, physical, prot, - address, access_type, index, mmu_idx); - } - 
- return TLBRET_NOMATCH; -} - -static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, - target_ulong dmw) -{ - if (is_la64(env)) { - return va & TARGET_VIRT_MASK; - } else { - uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG); - return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) | \ - (pseg << R_CSR_DMW_32_VSEG_SHIFT); - } -} - -static int get_physical_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int user_mode = mmu_idx == MMU_IDX_USER; - int kernel_mode = mmu_idx == MMU_IDX_KERNEL; - uint32_t plv, base_c, base_v; - int64_t addr_high; - uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); - uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); - - /* Check PG and DA */ - if (da & !pg) { - *physical = address & TARGET_PHYS_MASK; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return TLBRET_MATCH; - } - - plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT); - if (is_la64(env)) { - base_v = address >> R_CSR_DMW_64_VSEG_SHIFT; - } else { - base_v = address >> R_CSR_DMW_32_VSEG_SHIFT; - } - /* Check direct map window */ - for (int i = 0; i < 4; i++) { - if (is_la64(env)) { - base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG); - } else { - base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG); - } - if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { - *physical = dmw_va2pa(env, address, env->CSR_DMW[i]); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - return TLBRET_MATCH; - } - } - - /* Check valid extension */ - addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); - if (!(addr_high == 0 || addr_high == -1)) { - return TLBRET_BADADDR; - } - - /* Mapped address */ - return loongarch_map_address(env, physical, prot, address, - access_type, mmu_idx); -} - -hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(cs); - CPULoongArchState *env = &cpu->env; - hwaddr phys_addr; - int prot; - - if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(env, false)) != 0) { - return -1; - } - return phys_addr; -} - static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, MMUAccessType access_type, int tlb_error) { diff --git a/target/loongarch/translate.c b/target/loongarch/tcg/translate.c similarity index 100% rename from target/loongarch/translate.c rename to target/loongarch/tcg/translate.c diff --git a/target/loongarch/vec_helper.c b/target/loongarch/tcg/vec_helper.c similarity index 100% rename from target/loongarch/vec_helper.c rename to target/loongarch/tcg/vec_helper.c diff --git a/target/loongarch/trace-events b/target/loongarch/trace-events new file mode 100644 index 0000000000..dea11edc0f --- /dev/null +++ b/target/loongarch/trace-events @@ -0,0 +1,15 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+ +#kvm.c +kvm_failed_get_regs_core(const char *msg) "Failed to get core regs from KVM: %s" +kvm_failed_put_regs_core(const char *msg) "Failed to put core regs into KVM: %s" +kvm_failed_get_fpu(const char *msg) "Failed to get fpu from KVM: %s" +kvm_failed_put_fpu(const char *msg) "Failed to put fpu into KVM: %s" +kvm_failed_get_mpstate(const char *msg) "Failed to get mp_state from KVM: %s" +kvm_failed_put_mpstate(const char *msg) "Failed to put mp_state into KVM: %s" +kvm_failed_get_counter(const char *msg) "Failed to get counter from KVM: %s" +kvm_failed_put_counter(const char *msg) "Failed to put counter into KVM: %s" +kvm_failed_get_cpucfg(const char *msg) "Failed to get cpucfg from KVM: %s" +kvm_failed_put_cpucfg(const char *msg) "Failed to put cpucfg into KVM: %s" +kvm_arch_handle_exit(int num) "kvm arch handle exit, the reason number: %d" +kvm_set_intr(int irq, int level) "kvm set interrupt, irq num: %d, level: %d" diff --git a/target/loongarch/trace.h b/target/loongarch/trace.h new file mode 100644 index 0000000000..c2ecb78f08 --- /dev/null +++ b/target/loongarch/trace.h @@ -0,0 +1 @@ +#include "trace/trace-target_loongarch.h" -- Gitee
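
For context, a minimal sketch of how the trace events added above are typically consumed: QEMU's tracetool generates a trace_<event_name>() helper for every line in trace-events, and the new target/loongarch/trace.h simply re-exports the generated trace/trace-target_loongarch.h so that code under target/loongarch (e.g. kvm/kvm.c) can call those helpers. The function below is illustrative only; its name and error path are assumptions for the sketch, not code taken from this patch.

/* Illustrative sketch, assuming standard QEMU tracing and KVM vCPU ioctl APIs. */
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "trace.h"   /* pulls in the generated trace/trace-target_loongarch.h */

static int example_get_regs_core(CPUState *cs)
{
    struct kvm_regs regs;
    int ret;

    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
    if (ret < 0) {
        /* Emits the "kvm_failed_get_regs_core" event declared above. */
        trace_kvm_failed_get_regs_core(strerror(errno));
    }
    return ret;
}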