From dc2118f8645f1ff9d77fa2b396f631af9cf92d0c Mon Sep 17 00:00:00 2001 From: xianglai li Date: Tue, 5 Dec 2023 11:28:56 +0800 Subject: [PATCH] loongarch/kvm: Modify the kvm code style LoongArch inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8ONTF ------------------------------------------ Modify the kvm code style so that script/checkpatch.pl checks can pass. Signed-off-by: xianglai li (cherry picked from commit 89997c51a089e957f54ad6d313c3d4dd5f127c04) --- arch/loongarch/include/asm/inst.h | 2 +- arch/loongarch/include/asm/kvm_host.h | 4 +- arch/loongarch/include/asm/kvm_para.h | 2 +- arch/loongarch/kvm/Makefile | 2 +- arch/loongarch/kvm/csr.c | 744 ++++++++++++++++++------- arch/loongarch/kvm/emulate.c | 9 +- arch/loongarch/kvm/entry.S | 11 +- arch/loongarch/kvm/exit.c | 21 +- arch/loongarch/kvm/fpu.c | 1 - arch/loongarch/kvm/hypcall.c | 2 +- arch/loongarch/kvm/intc/ls3a_ext_irq.c | 78 ++- arch/loongarch/kvm/intc/ls3a_ext_irq.h | 4 +- arch/loongarch/kvm/intc/ls3a_ipi.c | 31 +- arch/loongarch/kvm/intc/ls3a_ipi.h | 18 +- arch/loongarch/kvm/intc/ls7a_irq.c | 28 +- arch/loongarch/kvm/intc/ls7a_irq.h | 4 +- arch/loongarch/kvm/kvm_compat.c | 48 +- arch/loongarch/kvm/kvm_compat.h | 48 +- arch/loongarch/kvm/kvmcpu.h | 5 +- arch/loongarch/kvm/kvmcsr.h | 51 +- arch/loongarch/kvm/loongarch.c | 50 +- arch/loongarch/kvm/mmu.c | 4 +- 22 files changed, 700 insertions(+), 467 deletions(-) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index b7ebf2aa217a..22bf70375cf5 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -149,7 +149,7 @@ struct reg2i14_format { }; struct reg0i15_format { - unsigned int simmediate : 15; + unsigned int immediate : 15; unsigned int opcode : 17; }; diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index a5ef721a9011..fb5f67e89a16 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ 
b/arch/loongarch/include/asm/kvm_host.h @@ -251,7 +251,7 @@ static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) return csr->csrs[reg]; } -static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, \ +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) { csr->csrs[reg] = val; @@ -317,7 +317,7 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) {} + struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 9ec29edcdb17..889a0cb4194e 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -9,7 +9,7 @@ /* * Hypcall code field */ -#define KVM_HC_CODE_SERIVCE 0x0 +#define KVM_HC_CODE_SERVICE 0x0 #define KVM_HC_CODE_SWDBG 0x5 /* * function id diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 29dc154609d7..679d69dc7b53 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -10,7 +10,7 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \ KVM := ../../../virt/kvm common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o -EXTRA_CFLAGS += -Ivirt/kvm -Iarch/loongarch/kvm +ccflags-y += -Ivirt/kvm -Iarch/loongarch/kvm kvm-objs := $(common-objs-y) loongarch.o emulate.o interrupt.o kvm-objs += hypcall.o diff --git a/arch/loongarch/kvm/csr.c b/arch/loongarch/kvm/csr.c index 0c42cbb9c739..82e21312c37c 100644 --- a/arch/loongarch/kvm/csr.c +++ b/arch/loongarch/kvm/csr.c @@ -13,33 +13,44 @@ #include 
"kvmcsr.h" #include "irq.h" -#define CASE_READ_SW_GCSR(csr, regid, csrid) \ - do { \ - if (regid == csrid) { \ - return kvm_read_sw_gcsr(csr, csrid); \ - } \ - } while (0) - unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) { struct loongarch_csrs *csr = vcpu->arch.csr; unsigned long val = 0; - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRERA); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE); + switch (csrid) { + case KVM_CSR_ERRCTL: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRCTL); + case KVM_CSR_ERRINFO1: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO1); + case KVM_CSR_ERRINFO2: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO2); + case KVM_CSR_MERRENTRY: + return kvm_read_sw_gcsr(csr, KVM_CSR_MERRENTRY); + case KVM_CSR_MERRERA: + return kvm_read_sw_gcsr(csr, KVM_CSR_MERRERA); + case KVM_CSR_ERRSAVE: + return kvm_read_sw_gcsr(csr, KVM_CSR_ERRSAVE); /* read sw csr when not config pmu to guest */ - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL0); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL3); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2); - CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3); + case KVM_CSR_PERFCTRL0: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL0); + case KVM_CSR_PERFCTRL1: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL1); + case KVM_CSR_PERFCTRL2: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL2); + case KVM_CSR_PERFCTRL3: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL3); + case KVM_CSR_PERFCNTR0: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR0); + case KVM_CSR_PERFCNTR1: + return kvm_read_sw_gcsr(csr, 
KVM_CSR_PERFCNTR1); + case KVM_CSR_PERFCNTR2: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR2); + case KVM_CSR_PERFCNTR3: + return kvm_read_sw_gcsr(csr, KVM_CSR_PERFCNTR3); + default: + break; + } val = 0; if (csrid < 4096) @@ -50,37 +61,83 @@ unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) return val; } -#define CASE_WRITE_SW_GCSR(csr, regid, csrid, val) \ - do { \ - if (regid == csrid) { \ - kvm_write_sw_gcsr(csr, csrid, val); \ - return ; \ - } \ - } while (0) - void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) { struct loongarch_csrs *csr = vcpu->arch.csr; - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE, val); + switch (csrid) { + case KVM_CSR_ERRCTL: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRCTL, val); + case KVM_CSR_ERRINFO1: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO1, val); + case KVM_CSR_ERRINFO2: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO2, val); + case KVM_CSR_MERRENTRY: + return kvm_write_sw_gcsr(csr, KVM_CSR_MERRENTRY, val); + case KVM_CSR_MERRERA: + return kvm_write_sw_gcsr(csr, KVM_CSR_MERRERA, val); + case KVM_CSR_ERRSAVE: + return kvm_write_sw_gcsr(csr, KVM_CSR_ERRSAVE, val); + default: + break; + } /* give pmu register to guest when config perfctrl */ - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL0, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL1, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL2, val); - CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL3, val); - /* write sw pmu csr if not config ctrl */ - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1, val); - CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2, val); 
- CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3, val); + switch (csrid) { + case KVM_CSR_PERFCTRL0: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL0, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL0, val); + } + return; + case KVM_CSR_PERFCTRL1: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL1, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL1, val); + } + return; + case KVM_CSR_PERFCTRL2: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL2, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL2, val); + } + return; + case KVM_CSR_PERFCTRL3: + if (val & KVM_PMU_PLV_ENABLE) { + kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); + kvm_write_hw_gcsr(csr, KVM_CSR_PERFCTRL3, val | KVM_PERFCTRL_GMOD); + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + } else { + kvm_write_sw_gcsr(csr, KVM_CSR_PERFCTRL3, val); + } + return; + default: + break; + } + /* write sw pmu csr if not config ctrl */ + switch (csrid) { + case KVM_CSR_PERFCNTR0: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR0, val); + case KVM_CSR_PERFCNTR1: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR1, val); + case KVM_CSR_PERFCNTR2: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR2, val); + case KVM_CSR_PERFCNTR3: + return kvm_write_sw_gcsr(csr, KVM_CSR_PERFCNTR3, val); + default: + break; + } if (csrid < 4096) kvm_write_sw_gcsr(csr, csrid, val); @@ -89,26 +146,29 @@ void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, csrid, vcpu->arch.pc); } -#define CASE_CHANGE_SW_GCSR(csr, regid, csrid, mask, val) \ - do { \ - if (regid == csrid) { 
\ - kvm_change_sw_gcsr(csr, csrid, mask, val); \ - return ; \ - } \ - } while (0) - void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long csr_mask, unsigned long val) { struct loongarch_csrs *csr = vcpu->arch.csr; - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_IMPCTL1, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRCTL, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO1, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRINFO2, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, csr_mask, val); - CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_ERRSAVE, csr_mask, val); + switch (csrid) { + case KVM_CSR_IMPCTL1: + return kvm_change_sw_gcsr(csr, KVM_CSR_IMPCTL1, csr_mask, val); + case KVM_CSR_ERRCTL: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRCTL, csr_mask, val); + case KVM_CSR_ERRINFO1: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRINFO1, csr_mask, val); + case KVM_CSR_ERRINFO2: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRINFO2, csr_mask, val); + case KVM_CSR_MERRENTRY: + return kvm_change_sw_gcsr(csr, KVM_CSR_MERRENTRY, csr_mask, val); + case KVM_CSR_MERRERA: + return kvm_change_sw_gcsr(csr, KVM_CSR_MERRERA, csr_mask, val); + case KVM_CSR_ERRSAVE: + return kvm_change_sw_gcsr(csr, KVM_CSR_ERRSAVE, csr_mask, val); + default: + break; + } if (csrid < 4096) { unsigned long orig; @@ -126,73 +186,208 @@ int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force) { struct loongarch_csrs *csr = vcpu->arch.csr; - GET_HW_GCSR(id, KVM_CSR_CRMD, v); - GET_HW_GCSR(id, KVM_CSR_PRMD, v); - GET_HW_GCSR(id, KVM_CSR_EUEN, v); - GET_HW_GCSR(id, KVM_CSR_MISC, v); - GET_HW_GCSR(id, KVM_CSR_ECFG, v); - GET_HW_GCSR(id, KVM_CSR_ESTAT, v); - GET_HW_GCSR(id, KVM_CSR_ERA, v); - GET_HW_GCSR(id, KVM_CSR_BADV, v); - GET_HW_GCSR(id, KVM_CSR_BADI, v); - GET_HW_GCSR(id, KVM_CSR_EENTRY, v); - GET_HW_GCSR(id, KVM_CSR_TLBIDX, v); - GET_HW_GCSR(id, 
KVM_CSR_TLBEHI, v); - GET_HW_GCSR(id, KVM_CSR_TLBELO0, v); - GET_HW_GCSR(id, KVM_CSR_TLBELO1, v); - GET_HW_GCSR(id, KVM_CSR_ASID, v); - GET_HW_GCSR(id, KVM_CSR_PGDL, v); - GET_HW_GCSR(id, KVM_CSR_PGDH, v); - GET_HW_GCSR(id, KVM_CSR_PWCTL0, v); - GET_HW_GCSR(id, KVM_CSR_PWCTL1, v); - GET_HW_GCSR(id, KVM_CSR_STLBPGSIZE, v); - GET_HW_GCSR(id, KVM_CSR_RVACFG, v); - GET_HW_GCSR(id, KVM_CSR_CPUID, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG1, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG2, v); - GET_HW_GCSR(id, KVM_CSR_PRCFG3, v); - GET_HW_GCSR(id, KVM_CSR_KS0, v); - GET_HW_GCSR(id, KVM_CSR_KS1, v); - GET_HW_GCSR(id, KVM_CSR_KS2, v); - GET_HW_GCSR(id, KVM_CSR_KS3, v); - GET_HW_GCSR(id, KVM_CSR_KS4, v); - GET_HW_GCSR(id, KVM_CSR_KS5, v); - GET_HW_GCSR(id, KVM_CSR_KS6, v); - GET_HW_GCSR(id, KVM_CSR_KS7, v); - GET_HW_GCSR(id, KVM_CSR_TMID, v); - GET_HW_GCSR(id, KVM_CSR_TCFG, v); - GET_HW_GCSR(id, KVM_CSR_TVAL, v); - GET_HW_GCSR(id, KVM_CSR_CNTC, v); - GET_HW_GCSR(id, KVM_CSR_LLBCTL, v); - GET_HW_GCSR(id, KVM_CSR_TLBRENTRY, v); - GET_HW_GCSR(id, KVM_CSR_TLBRBADV, v); - GET_HW_GCSR(id, KVM_CSR_TLBRERA, v); - GET_HW_GCSR(id, KVM_CSR_TLBRSAVE, v); - GET_HW_GCSR(id, KVM_CSR_TLBRELO0, v); - GET_HW_GCSR(id, KVM_CSR_TLBRELO1, v); - GET_HW_GCSR(id, KVM_CSR_TLBREHI, v); - GET_HW_GCSR(id, KVM_CSR_TLBRPRMD, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN0, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN1, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN2, v); - GET_HW_GCSR(id, KVM_CSR_DMWIN3, v); - GET_HW_GCSR(id, KVM_CSR_MWPS, v); - GET_HW_GCSR(id, KVM_CSR_FWPS, v); - - GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v); - GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRCTL, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRINFO1, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRINFO2, v); - GET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v); - GET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v); - GET_SW_GCSR(csr, id, KVM_CSR_ERRSAVE, v); - GET_SW_GCSR(csr, id, KVM_CSR_CTAG, v); - GET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v); - GET_SW_GCSR(csr, id, KVM_CSR_DERA, v); 
- GET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v); - - GET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v); + switch (id) { + case KVM_CSR_CRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CRMD); + return 0; + case KVM_CSR_PRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRMD); + return 0; + case KVM_CSR_EUEN: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_EUEN); + return 0; + case KVM_CSR_MISC: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_MISC); + return 0; + case KVM_CSR_ECFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ECFG); + return 0; + case KVM_CSR_ESTAT: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ESTAT); + return 0; + case KVM_CSR_ERA: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ERA); + return 0; + case KVM_CSR_BADV: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_BADV); + return 0; + case KVM_CSR_BADI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_BADI); + return 0; + case KVM_CSR_EENTRY: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_EENTRY); + return 0; + case KVM_CSR_TLBIDX: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBIDX); + return 0; + case KVM_CSR_TLBEHI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBEHI); + return 0; + case KVM_CSR_TLBELO0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBELO0); + return 0; + case KVM_CSR_TLBELO1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBELO1); + return 0; + case KVM_CSR_ASID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_ASID); + return 0; + case KVM_CSR_PGDL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PGDL); + return 0; + case KVM_CSR_PGDH: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PGDH); + return 0; + case KVM_CSR_PWCTL0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PWCTL0); + return 0; + case KVM_CSR_PWCTL1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PWCTL1); + return 0; + case KVM_CSR_STLBPGSIZE: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_STLBPGSIZE); + return 0; + case KVM_CSR_RVACFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_RVACFG); + return 0; + case KVM_CSR_CPUID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CPUID); + return 0; + case KVM_CSR_PRCFG1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRCFG1); + return 0; + case KVM_CSR_PRCFG2: + *v = 
(long)kvm_read_hw_gcsr(KVM_CSR_PRCFG2); + return 0; + case KVM_CSR_PRCFG3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_PRCFG3); + return 0; + case KVM_CSR_KS0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS0); + return 0; + case KVM_CSR_KS1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS1); + return 0; + case KVM_CSR_KS2: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS2); + return 0; + case KVM_CSR_KS3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS3); + return 0; + case KVM_CSR_KS4: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS4); + return 0; + case KVM_CSR_KS5: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS5); + return 0; + case KVM_CSR_KS6: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS6); + return 0; + case KVM_CSR_KS7: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_KS7); + return 0; + case KVM_CSR_TMID: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TMID); + return 0; + case KVM_CSR_TCFG: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TCFG); + return 0; + case KVM_CSR_TVAL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TVAL); + return 0; + case KVM_CSR_CNTC: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_CNTC); + return 0; + case KVM_CSR_LLBCTL: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_LLBCTL); + return 0; + case KVM_CSR_TLBRENTRY: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRENTRY); + return 0; + case KVM_CSR_TLBRBADV: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRBADV); + return 0; + case KVM_CSR_TLBRERA: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRERA); + return 0; + case KVM_CSR_TLBRSAVE: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRSAVE); + return 0; + case KVM_CSR_TLBRELO0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRELO0); + return 0; + case KVM_CSR_TLBRELO1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRELO1); + return 0; + case KVM_CSR_TLBREHI: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBREHI); + return 0; + case KVM_CSR_TLBRPRMD: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_TLBRPRMD); + return 0; + case KVM_CSR_DMWIN0: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN0); + return 0; + case KVM_CSR_DMWIN1: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN1); + return 0; + case 
KVM_CSR_DMWIN2: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN2); + return 0; + case KVM_CSR_DMWIN3: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_DMWIN3); + return 0; + case KVM_CSR_MWPS: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_MWPS); + return 0; + case KVM_CSR_FWPS: + *v = (long)kvm_read_hw_gcsr(KVM_CSR_FWPS); + return 0; + default: + break; + } + + switch (id) { + case KVM_CSR_IMPCTL1: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_IMPCTL1); + return 0; + case KVM_CSR_IMPCTL2: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_IMPCTL2); + return 0; + case KVM_CSR_ERRCTL: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRCTL); + return 0; + case KVM_CSR_ERRINFO1: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO1); + return 0; + case KVM_CSR_ERRINFO2: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRINFO2); + return 0; + case KVM_CSR_MERRENTRY: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_MERRENTRY); + return 0; + case KVM_CSR_MERRERA: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_MERRERA); + return 0; + case KVM_CSR_ERRSAVE: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_ERRSAVE); + return 0; + case KVM_CSR_CTAG: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_CTAG); + return 0; + case KVM_CSR_DEBUG: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_DEBUG); + return 0; + case KVM_CSR_DERA: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_DERA); + return 0; + case KVM_CSR_DESAVE: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_DESAVE); + return 0; + case KVM_CSR_TINTCLR: + *v = kvm_read_sw_gcsr(csr, KVM_CSR_TINTCLR); + return 0; + } if (force && (id < CSR_ALL_SIZE)) { *v = kvm_read_sw_gcsr(csr, id); @@ -207,73 +402,222 @@ int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force) struct loongarch_csrs *csr = vcpu->arch.csr; int ret; - SET_HW_GCSR(csr, id, KVM_CSR_CRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_PRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_EUEN, v); - SET_HW_GCSR(csr, id, KVM_CSR_MISC, v); - SET_HW_GCSR(csr, id, KVM_CSR_ECFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_ERA, v); - SET_HW_GCSR(csr, id, KVM_CSR_BADV, v); - SET_HW_GCSR(csr, id, KVM_CSR_BADI, v); - SET_HW_GCSR(csr, id, 
KVM_CSR_EENTRY, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBIDX, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBEHI, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBELO0, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBELO1, v); - SET_HW_GCSR(csr, id, KVM_CSR_ASID, v); - SET_HW_GCSR(csr, id, KVM_CSR_PGDL, v); - SET_HW_GCSR(csr, id, KVM_CSR_PGDH, v); - SET_HW_GCSR(csr, id, KVM_CSR_PWCTL0, v); - SET_HW_GCSR(csr, id, KVM_CSR_PWCTL1, v); - SET_HW_GCSR(csr, id, KVM_CSR_STLBPGSIZE, v); - SET_HW_GCSR(csr, id, KVM_CSR_RVACFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_CPUID, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS0, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS1, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS2, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS3, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS4, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS5, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS6, v); - SET_HW_GCSR(csr, id, KVM_CSR_KS7, v); - SET_HW_GCSR(csr, id, KVM_CSR_TMID, v); - SET_HW_GCSR(csr, id, KVM_CSR_TCFG, v); - SET_HW_GCSR(csr, id, KVM_CSR_TVAL, v); - SET_HW_GCSR(csr, id, KVM_CSR_CNTC, v); - SET_HW_GCSR(csr, id, KVM_CSR_LLBCTL, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRENTRY, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRBADV, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRERA, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRSAVE, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO0, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO1, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBREHI, v); - SET_HW_GCSR(csr, id, KVM_CSR_TLBRPRMD, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN0, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN1, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN2, v); - SET_HW_GCSR(csr, id, KVM_CSR_DMWIN3, v); - SET_HW_GCSR(csr, id, KVM_CSR_MWPS, v); - SET_HW_GCSR(csr, id, KVM_CSR_FWPS, v); - - SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v); - SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRCTL, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRINFO1, v); - SET_SW_GCSR(csr, id, KVM_CSR_ERRINFO2, v); - SET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v); - SET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v); - 
SET_SW_GCSR(csr, id, KVM_CSR_ERRSAVE, v); - SET_SW_GCSR(csr, id, KVM_CSR_CTAG, v); - SET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v); - SET_SW_GCSR(csr, id, KVM_CSR_DERA, v); - SET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG1, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG2, v); - SET_SW_GCSR(csr, id, KVM_CSR_PRCFG3, v); - - SET_SW_GCSR(csr, id, KVM_CSR_PGD, v); - SET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v); + switch (id) { + case KVM_CSR_CRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_CRMD, *v); + return 0; + case KVM_CSR_PRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_PRMD, *v); + return 0; + case KVM_CSR_EUEN: + kvm_write_hw_gcsr(csr, KVM_CSR_EUEN, *v); + return 0; + case KVM_CSR_MISC: + kvm_write_hw_gcsr(csr, KVM_CSR_MISC, *v); + return 0; + case KVM_CSR_ECFG: + kvm_write_hw_gcsr(csr, KVM_CSR_ECFG, *v); + return 0; + case KVM_CSR_ESTAT: + kvm_write_hw_gcsr(csr, KVM_CSR_ESTAT, *v); + return 0; + case KVM_CSR_ERA: + kvm_write_hw_gcsr(csr, KVM_CSR_ERA, *v); + return 0; + case KVM_CSR_BADV: + kvm_write_hw_gcsr(csr, KVM_CSR_BADV, *v); + return 0; + case KVM_CSR_BADI: + kvm_write_hw_gcsr(csr, KVM_CSR_BADI, *v); + return 0; + case KVM_CSR_EENTRY: + kvm_write_hw_gcsr(csr, KVM_CSR_EENTRY, *v); + return 0; + case KVM_CSR_TLBIDX: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBIDX, *v); + return 0; + case KVM_CSR_TLBEHI: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBEHI, *v); + return 0; + case KVM_CSR_TLBELO0: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBELO0, *v); + return 0; + case KVM_CSR_TLBELO1: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBELO1, *v); + return 0; + case KVM_CSR_ASID: + kvm_write_hw_gcsr(csr, KVM_CSR_ASID, *v); + return 0; + case KVM_CSR_PGDL: + kvm_write_hw_gcsr(csr, KVM_CSR_PGDL, *v); + return 0; + case KVM_CSR_PGDH: + kvm_write_hw_gcsr(csr, KVM_CSR_PGDH, *v); + return 0; + case KVM_CSR_PWCTL0: + kvm_write_hw_gcsr(csr, KVM_CSR_PWCTL0, *v); + return 0; + case KVM_CSR_PWCTL1: + kvm_write_hw_gcsr(csr, KVM_CSR_PWCTL1, *v); + return 0; + case KVM_CSR_STLBPGSIZE: + kvm_write_hw_gcsr(csr, 
KVM_CSR_STLBPGSIZE, *v); + return 0; + case KVM_CSR_RVACFG: + kvm_write_hw_gcsr(csr, KVM_CSR_RVACFG, *v); + return 0; + case KVM_CSR_CPUID: + kvm_write_hw_gcsr(csr, KVM_CSR_CPUID, *v); + return 0; + case KVM_CSR_PRCFG1: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG1, *v); + return 0; + case KVM_CSR_PRCFG2: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG2, *v); + return 0; + case KVM_CSR_PRCFG3: + kvm_write_hw_gcsr(csr, KVM_CSR_PRCFG3, *v); + return 0; + case KVM_CSR_KS0: + kvm_write_hw_gcsr(csr, KVM_CSR_KS0, *v); + return 0; + case KVM_CSR_KS1: + kvm_write_hw_gcsr(csr, KVM_CSR_KS1, *v); + return 0; + case KVM_CSR_KS2: + kvm_write_hw_gcsr(csr, KVM_CSR_KS2, *v); + return 0; + case KVM_CSR_KS3: + kvm_write_hw_gcsr(csr, KVM_CSR_KS3, *v); + return 0; + case KVM_CSR_KS4: + kvm_write_hw_gcsr(csr, KVM_CSR_KS4, *v); + return 0; + case KVM_CSR_KS5: + kvm_write_hw_gcsr(csr, KVM_CSR_KS5, *v); + return 0; + case KVM_CSR_KS6: + kvm_write_hw_gcsr(csr, KVM_CSR_KS6, *v); + return 0; + case KVM_CSR_KS7: + kvm_write_hw_gcsr(csr, KVM_CSR_KS7, *v); + return 0; + case KVM_CSR_TMID: + kvm_write_hw_gcsr(csr, KVM_CSR_TMID, *v); + return 0; + case KVM_CSR_TCFG: + kvm_write_hw_gcsr(csr, KVM_CSR_TCFG, *v); + return 0; + case KVM_CSR_TVAL: + kvm_write_hw_gcsr(csr, KVM_CSR_TVAL, *v); + return 0; + case KVM_CSR_CNTC: + kvm_write_hw_gcsr(csr, KVM_CSR_CNTC, *v); + return 0; + case KVM_CSR_LLBCTL: + kvm_write_hw_gcsr(csr, KVM_CSR_LLBCTL, *v); + return 0; + case KVM_CSR_TLBRENTRY: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRENTRY, *v); + return 0; + case KVM_CSR_TLBRBADV: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRBADV, *v); + return 0; + case KVM_CSR_TLBRERA: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRERA, *v); + return 0; + case KVM_CSR_TLBRSAVE: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRSAVE, *v); + return 0; + case KVM_CSR_TLBRELO0: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRELO0, *v); + return 0; + case KVM_CSR_TLBRELO1: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRELO1, *v); + return 0; + case KVM_CSR_TLBREHI: + kvm_write_hw_gcsr(csr, 
KVM_CSR_TLBREHI, *v); + return 0; + case KVM_CSR_TLBRPRMD: + kvm_write_hw_gcsr(csr, KVM_CSR_TLBRPRMD, *v); + return 0; + case KVM_CSR_DMWIN0: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN0, *v); + return 0; + case KVM_CSR_DMWIN1: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN1, *v); + return 0; + case KVM_CSR_DMWIN2: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN2, *v); + return 0; + case KVM_CSR_DMWIN3: + kvm_write_hw_gcsr(csr, KVM_CSR_DMWIN3, *v); + return 0; + case KVM_CSR_MWPS: + kvm_write_hw_gcsr(csr, KVM_CSR_MWPS, *v); + return 0; + case KVM_CSR_FWPS: + kvm_write_hw_gcsr(csr, KVM_CSR_FWPS, *v); + return 0; + default: + break; + } + + switch (id) { + case KVM_CSR_IMPCTL1: + kvm_write_sw_gcsr(csr, KVM_CSR_IMPCTL1, *v); + return 0; + case KVM_CSR_IMPCTL2: + kvm_write_sw_gcsr(csr, KVM_CSR_IMPCTL2, *v); + return 0; + case KVM_CSR_ERRCTL: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRCTL, *v); + return 0; + case KVM_CSR_ERRINFO1: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO1, *v); + return 0; + case KVM_CSR_ERRINFO2: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRINFO2, *v); + return 0; + case KVM_CSR_MERRENTRY: + kvm_write_sw_gcsr(csr, KVM_CSR_MERRENTRY, *v); + return 0; + case KVM_CSR_MERRERA: + kvm_write_sw_gcsr(csr, KVM_CSR_MERRERA, *v); + return 0; + case KVM_CSR_ERRSAVE: + kvm_write_sw_gcsr(csr, KVM_CSR_ERRSAVE, *v); + return 0; + case KVM_CSR_CTAG: + kvm_write_sw_gcsr(csr, KVM_CSR_CTAG, *v); + return 0; + case KVM_CSR_DEBUG: + kvm_write_sw_gcsr(csr, KVM_CSR_DEBUG, *v); + return 0; + case KVM_CSR_DERA: + kvm_write_sw_gcsr(csr, KVM_CSR_DERA, *v); + return 0; + case KVM_CSR_DESAVE: + kvm_write_sw_gcsr(csr, KVM_CSR_DESAVE, *v); + return 0; + case KVM_CSR_PRCFG1: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG1, *v); + return 0; + case KVM_CSR_PRCFG2: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG2, *v); + return 0; + case KVM_CSR_PRCFG3: + kvm_write_sw_gcsr(csr, KVM_CSR_PRCFG3, *v); + return 0; + case KVM_CSR_PGD: + kvm_write_sw_gcsr(csr, KVM_CSR_PGD, *v); + return 0; + case KVM_CSR_TINTCLR: + kvm_write_sw_gcsr(csr, 
KVM_CSR_TINTCLR, *v); + return 0; + default: + break; + } ret = -1; switch (id) { @@ -296,8 +640,8 @@ int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force) struct kvm_iocsr { u32 start, end; - int (*get) (struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 *res); - int (*set) (struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 val); + int (*get)(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 *res); + int (*set)(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 val); }; static struct kvm_iocsr_entry *_kvm_find_iocsr(struct kvm *kvm, u32 addr) @@ -409,11 +753,10 @@ static int kvm_extioi_set(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, { int ret; - if ((addr & 0x1f00) == KVM_IOCSR_EXTIOI_ISR_BASE) { + if ((addr & 0x1f00) == KVM_IOCSR_EXTIOI_ISR_BASE) run->mmio.phys_addr = EXTIOI_PERCORE_ADDR(vcpu->vcpu_id, (addr & 0xff)); - } else { + else run->mmio.phys_addr = EXTIOI_ADDR((addr & 0x1fff)); - } ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, run->mmio.len, &val); @@ -520,7 +863,7 @@ static int _kvm_emu_iocsr_write(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* all iocsr operation should in kvm, no mmio */ -int _kvm_emu_iocsr(larch_inst inst, +int _kvm_emu_iocsr(union loongarch_instruction inst, struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 rd, rj, opcode; @@ -579,9 +922,8 @@ int _kvm_emu_iocsr(larch_inst inst, break; } - if (ret == EMULATE_DO_IOCSR) { + if (ret == EMULATE_DO_IOCSR) vcpu->arch.io_gpr = rd; - } return ret; } diff --git a/arch/loongarch/kvm/emulate.c b/arch/loongarch/kvm/emulate.c index 6a68dd57ecd1..4a85019ec0ef 100644 --- a/arch/loongarch/kvm/emulate.c +++ b/arch/loongarch/kvm/emulate.c @@ -30,7 +30,7 @@ int _kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { struct kvm_run *run = vcpu->run; unsigned int rd, op8, 
opcode; @@ -152,8 +152,7 @@ int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) return EMULATE_FAIL; } - -int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { unsigned int op8, opcode, rd; int ret = 0; @@ -290,9 +289,9 @@ int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) break; case 4: - if (vcpu->mmio_needed == 2) { + if (vcpu->mmio_needed == 2) *gpr = *(int *)run->mmio.data; - } else + else *gpr = *(unsigned int *)run->mmio.data; break; diff --git a/arch/loongarch/kvm/entry.S b/arch/loongarch/kvm/entry.S index cc0856af60d9..be53cf896a1d 100644 --- a/arch/loongarch/kvm/entry.S +++ b/arch/loongarch/kvm/entry.S @@ -51,7 +51,7 @@ .macro kvm_switch_to_guest KVM_ARCH GPRNUM tmp tmp1 /* set host excfg.VS=0, all exceptions share one exception entry */ csrrd \tmp, KVM_CSR_ECFG - bstrins.w \tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), KVM_ECFG_VS_SHIFT + bstrins.w \tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), KVM_ECFG_VS_SHIFT csrwr \tmp, KVM_CSR_ECFG /* Load up the new EENTRY */ @@ -77,9 +77,9 @@ /* Mix GID and RID */ csrrd \tmp1, KVM_CSR_GSTAT - bstrpick.w \tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), KVM_GSTAT_GID_SHIFT + bstrpick.w \tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), KVM_GSTAT_GID_SHIFT csrrd \tmp, KVM_CSR_GTLBC - bstrins.w \tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT + bstrins.w \tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT csrwr \tmp, KVM_CSR_GTLBC /* @@ -196,11 +196,11 @@ SYM_FUNC_START(kvm_exit_entry) /* Clear GTLBC.TGID field */ csrrd t0, KVM_CSR_GTLBC - bstrins.w t0, zero, KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1, KVM_GTLBC_TGID_SHIFT + bstrins.w t0, zero, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT csrwr t0, KVM_CSR_GTLBC /* Enable Address Map mode */ - ori t0, zero, (1 << KVM_CRMD_DACM_SHIFT)|(1 << KVM_CRMD_DACF_SHIFT) | KVM_CRMD_PG |PLV_KERN + ori t0, zero, (1 << KVM_CRMD_DACM_SHIFT) | (1 << KVM_CRMD_DACF_SHIFT) | KVM_CRMD_PG | PLV_KERN csrwr t0, KVM_CSR_CRMD KVM_LONG_L tp, a2, 
KVM_ARCH_HGP @@ -325,4 +325,3 @@ SYM_FUNC_START(__kvm_restore_lasx) jirl zero, ra, 0 SYM_FUNC_END(__kvm_restore_lasx) #endif - diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 5653e082d43e..c0d56bdcd00e 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -47,7 +47,7 @@ static int _kvm_fault_ni(struct kvm_vcpu *vcpu) return RESUME_HOST; } -static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +static int _kvm_handle_csr(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { enum emulation_result er = EMULATE_DONE; unsigned int rd, rj, csrid; @@ -84,7 +84,7 @@ static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) return er; } -static int _kvm_emu_cache(struct kvm_vcpu *vcpu, larch_inst inst) +static int _kvm_emu_cache(struct kvm_vcpu *vcpu, union loongarch_instruction inst) { return EMULATE_DONE; } @@ -93,7 +93,7 @@ static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; struct kvm_run *run = vcpu->run; - larch_inst inst; + union loongarch_instruction inst; unsigned long curr_pc; int rd, rj; unsigned int index; @@ -180,7 +180,7 @@ static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) static int _kvm_check_hypcall(struct kvm_vcpu *vcpu) { enum emulation_result ret; - larch_inst inst; + union loongarch_instruction inst; unsigned long curr_pc; unsigned int code; @@ -189,13 +189,13 @@ static int _kvm_check_hypcall(struct kvm_vcpu *vcpu) * an error and we want to rollback the PC */ inst.word = vcpu->arch.badi; - code = inst.reg0i15_format.simmediate; + code = inst.reg0i15_format.immediate; curr_pc = vcpu->arch.pc; update_pc(&vcpu->arch); ret = EMULATE_DONE; switch (code) { - case KVM_HC_CODE_SERIVCE: + case KVM_HC_CODE_SERVICE: ret = EMULATE_PV_HYPERCALL; break; case KVM_HC_CODE_SWDBG: @@ -220,7 +220,8 @@ static int _kvm_check_hypcall(struct kvm_vcpu *vcpu) * Also the access to unimplemented csrs 0x15 * 0x16, 0x50~0x53, 0x80, 0x81, 0x90~0x95, 0x98 * 
0xc0~0xff, 0x100~0x109, 0x500~0x502, - * cache_op, idle_op iocsr ops the same */ + * cache_op, idle_op iocsr ops the same + */ static int _kvm_handle_gspr(struct kvm_vcpu *vcpu) { enum emulation_result er = EMULATE_DONE; @@ -342,7 +343,7 @@ static int _kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu) { - return cpu_has_lasx && vcpu->arch.lsx_enabled && vcpu->kvm->arch.cpucfg_lasx; + return cpu_has_lasx && vcpu->arch.lsx_enabled && vcpu->kvm->arch.cpucfg_lasx; } /** @@ -383,7 +384,7 @@ static int _kvm_handle_read_fault(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; ulong badv = vcpu->arch.badv; - larch_inst inst; + union loongarch_instruction inst; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; @@ -421,7 +422,7 @@ static int _kvm_handle_write_fault(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; ulong badv = vcpu->arch.badv; - larch_inst inst; + union loongarch_instruction inst; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; diff --git a/arch/loongarch/kvm/fpu.c b/arch/loongarch/kvm/fpu.c index 180e874f04fe..ad3d942d75ec 100644 --- a/arch/loongarch/kvm/fpu.c +++ b/arch/loongarch/kvm/fpu.c @@ -82,4 +82,3 @@ EXPORT_SYMBOL_GPL(kvm_restore_lasx_upper); EXPORT_SYMBOL_GPL(kvm_enter_guest); EXPORT_SYMBOL_GPL(kvm_exception_entry); - diff --git a/arch/loongarch/kvm/hypcall.c b/arch/loongarch/kvm/hypcall.c index aaf3a07f23f0..7632c8f4d659 100644 --- a/arch/loongarch/kvm/hypcall.c +++ b/arch/loongarch/kvm/hypcall.c @@ -10,7 +10,6 @@ #include "intc/ls3a_ipi.h" #include "kvm_compat.h" - int kvm_virt_ipi(struct kvm_vcpu *vcpu) { int ret = 0; @@ -62,6 +61,7 @@ static int _kvm_pv_feature(struct kvm_vcpu *vcpu) { int feature = vcpu->arch.gprs[KVM_REG_A1]; int ret = KVM_RET_NOT_SUPPORTED; + switch (feature) { case KVM_FEATURE_STEAL_TIME: if (sched_info_on()) diff --git a/arch/loongarch/kvm/intc/ls3a_ext_irq.c b/arch/loongarch/kvm/intc/ls3a_ext_irq.c index ad612661d36b..a007967e836b 
100644 --- a/arch/loongarch/kvm/intc/ls3a_ext_irq.c +++ b/arch/loongarch/kvm/intc/ls3a_ext_irq.c @@ -30,7 +30,8 @@ void ext_deactive_core_isr(struct kvm *kvm, int irq_num, int vcpu_id) bitmap_clear((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1); found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0); - kvm_debug("vcpu_id %d irqnum %d found:0x%lx ipnum %d down\n", vcpu_id, irq_num, found1, ipnum); + kvm_debug("vcpu_id %d irqnum %d found:0x%lx ipnum %d down\n", + vcpu_id, irq_num, found1, ipnum); if (found1 == EXTIOI_IRQS) { irq.cpu = vcpu_id; irq.irq = -(ipnum + 2); /* IP2~IP5 */ @@ -66,23 +67,22 @@ void ext_irq_update_core(struct kvm *kvm, int irq_num, int level) } if (level == 1) { - if (test_bit(irq_num, (void *)state->ext_en.reg_u8) == false) { + if (test_bit(irq_num, (void *)state->ext_en.reg_u8) == false) return; - } - if (test_bit(irq_num, (void *)state->ext_isr.reg_u8) == false) { + if (test_bit(irq_num, (void *)state->ext_isr.reg_u8) == false) return; - } bitmap_set((void *)state->ext_core_isr.reg_u8[vcpu_id], irq_num, 1); - found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0); + found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], + EXTIOI_IRQS, 0); bitmap_set((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1); kvm_debug("%s:%d --- vcpu_id %d irqnum %d found1 0x%lx ipnum %d\n", - __FUNCTION__, __LINE__, vcpu_id, irq_num, found1, ipnum); + __func__, __LINE__, vcpu_id, irq_num, found1, ipnum); if (found1 == EXTIOI_IRQS) { irq.cpu = vcpu_id; irq.irq = ipnum + 2; /* IP2~IP5 */ kvm_debug("%s:%d --- vcpu_id %d ipnum %d raise\n", - __FUNCTION__, __LINE__, vcpu_id, ipnum); + __func__, __LINE__, vcpu_id, ipnum); if (likely(kvm->vcpus[vcpu_id])) kvm_vcpu_ioctl_interrupt(kvm->vcpus[vcpu_id], &irq); kvm->stat.trigger_ls3a_ext_irq++; @@ -92,7 +92,8 @@ void ext_irq_update_core(struct kvm *kvm, int irq_num, int level) bitmap_clear((void 
*)state->ext_core_isr.reg_u8[vcpu_id], irq_num, 1); bitmap_clear((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1); - found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0); + found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], + EXTIOI_IRQS, 0); if (found1 == EXTIOI_IRQS) { irq.cpu = vcpu_id; irq.irq = -(ipnum + 2); /* IP2~IP5 */ @@ -168,7 +169,7 @@ static int ls3a_ext_intctl_readb(struct kvm_vcpu *vcpu, *(uint8_t *)val = state->node_type.reg_u8[reg_count]; } kvm_debug("%s: addr=0x%llx,val=0x%x\n", - __FUNCTION__, addr, *(uint8_t *)val); + __func__, addr, *(uint8_t *)val); return 0; } @@ -212,7 +213,7 @@ static int ls3a_ext_intctl_readw(struct kvm_vcpu *vcpu, *(uint32_t *)val = state->node_type.reg_u32[reg_count]; } kvm_debug("%s: addr=0x%llx,val=0x%x\n", - __FUNCTION__, addr, *(uint32_t *)val); + __func__, addr, *(uint32_t *)val); return 0; } @@ -257,7 +258,7 @@ static int ls3a_ext_intctl_readl(struct kvm_vcpu *vcpu, *(uint64_t *)val = state->node_type.reg_u64[reg_count]; } kvm_debug("%s: addr=0x%llx,val=0x%llx\n", - __FUNCTION__, addr, *(uint64_t *)val); + __func__, addr, *(uint64_t *)val); return 0; } /** @@ -282,8 +283,8 @@ static int ls3a_ext_intctl_read(struct kvm_vcpu *vcpu, offset = addr & 0xfffff; if (offset & (size - 1)) { - printk("%s:unaligned address access %llx size %d\n", - __FUNCTION__, addr, size); + pr_info("%s:unaligned address access %llx size %d\n", + __func__, addr, size); return 0; } addr = (addr & 0xfffff) - EXTIOI_ADDR_OFF; @@ -301,11 +302,11 @@ static int ls3a_ext_intctl_read(struct kvm_vcpu *vcpu, break; default: WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx, size %d\n", - __FUNCTION__, addr, size); + __func__, addr, size); } ls3a_ext_irq_unlock(s, flags); kvm_debug("%s(%d):address access %llx size %d\n", - __FUNCTION__, __LINE__, offset, size); + __func__, __LINE__, offset, size); return 0; } @@ -332,7 +333,7 @@ static int ls3a_ext_intctl_writeb(struct kvm_vcpu 
*vcpu, offset = addr & 0xfffff; val_data_u8 = val & 0xffUL; - kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val); + kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __func__, addr, val); if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) { reg_count = (offset - EXTIOI_ENABLE_START); @@ -361,9 +362,8 @@ static int ls3a_ext_intctl_writeb(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8; i++) { - if ((old_data_u8 & mask) && (val_data_u8 & mask)) { + if ((old_data_u8 & mask) && (val_data_u8 & mask)) ext_irq_update_core(kvm, i + reg_count * 8, 0); - } mask = mask << 1; } @@ -416,7 +416,7 @@ static int ls3a_ext_intctl_writeb(struct kvm_vcpu *vcpu, state->node_type.reg_u8[reg_count] = val_data_u8; } else { WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx\n", - __FUNCTION__, addr); + __func__, addr); } return 0; @@ -445,7 +445,7 @@ static int ls3a_ext_intctl_writew(struct kvm_vcpu *vcpu, offset = addr & 0xfffff; val_data_u32 = val & 0xffffffffUL; - kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val); + kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __func__, addr, val); if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) { reg_count = (offset - EXTIOI_ENABLE_START) / 4; @@ -475,9 +475,8 @@ static int ls3a_ext_intctl_writew(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8 * sizeof(old_data_u32); i++) { - if ((old_data_u32 & mask) && (val_data_u32 & mask)) { + if ((old_data_u32 & mask) && (val_data_u32 & mask)) ext_irq_update_core(kvm, i + reg_count * 32, 0); - } mask = mask << 1; } } else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) { @@ -515,14 +514,14 @@ static int ls3a_ext_intctl_writew(struct kvm_vcpu *vcpu, tmp_data_u8 = (val_data_u32 >> 24) & 0xff; ls3a_ext_intctl_writeb(vcpu, dev, addr + 3, &tmp_data_u8); kvm_debug("%s:id:%d addr=0x%llx, offset 0x%llx val 0x%x\n", - __FUNCTION__, vcpu->vcpu_id, addr, offset, val_data_u32); + __func__, vcpu->vcpu_id, addr, offset, val_data_u32); } 
else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) { reg_count = (offset - EXTIOI_NODETYPE_START) / 4; state->node_type.reg_u32[reg_count] = val_data_u32; } else { WARN_ONCE(1, "%s:%d Abnormal address access:addr 0x%llx\n", - __FUNCTION__, __LINE__, addr); + __func__, __LINE__, addr); } return 0; @@ -549,7 +548,7 @@ static int ls3a_ext_intctl_writel(struct kvm_vcpu *vcpu, offset = addr & 0xfffff; val_data_u64 = val; - kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val); + kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __func__, addr, val); if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) { reg_count = (offset - EXTIOI_ENABLE_START) / 8; @@ -579,9 +578,8 @@ static int ls3a_ext_intctl_writel(struct kvm_vcpu *vcpu, mask = 0x1; for (i = 0; i < 8 * sizeof(old_data_u64); i++) { - if ((old_data_u64 & mask) && (val_data_u64 & mask)) { + if ((old_data_u64 & mask) && (val_data_u64 & mask)) ext_irq_update_core(kvm, i + reg_count * 64, 0); - } mask = mask << 1; } } else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) { @@ -640,7 +638,7 @@ static int ls3a_ext_intctl_writel(struct kvm_vcpu *vcpu, state->node_type.reg_u64[reg_count] = val_data_u64; } else { WARN_ONCE(1, "%s:%d Abnormal address access:addr 0x%llx\n", - __FUNCTION__, __LINE__, addr); + __func__, __LINE__, addr); } return 0; } @@ -666,8 +664,8 @@ static int ls3a_ext_intctl_write(struct kvm_vcpu *vcpu, offset = addr & 0xfffff; if (offset & (size - 1)) { - printk("%s(%d):unaligned address access %llx size %d\n", - __FUNCTION__, __LINE__, addr, size); + pr_info("%s(%d):unaligned address access %llx size %d\n", + __func__, __LINE__, addr, size); return 0; } @@ -686,13 +684,13 @@ static int ls3a_ext_intctl_write(struct kvm_vcpu *vcpu, break; default: WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n", - __FUNCTION__, addr, size); + __func__, addr, size); } ls3a_ext_irq_unlock(s, flags); kvm_debug("%s(%d):address access %llx size 
%d\n", - __FUNCTION__, __LINE__, offset, size); + __func__, __LINE__, offset, size); return 0; } @@ -739,7 +737,7 @@ int kvm_create_ls3a_ext_irq(struct kvm *kvm) EXTIOI_REG_BASE, EXTIOI_ADDR_SIZE, &s->device); mutex_unlock(&kvm->slots_lock); if (ret < 0) { - printk("%s dev_ls3a_ext_irq register error ret %d\n", __FUNCTION__, ret); + pr_info("%s dev_ls3a_ext_irq register error ret %d\n", __func__, ret); goto err_register; } @@ -766,7 +764,7 @@ static int kvm_set_ext_sw_ipmap(struct kvm_ls3a_extirq_state *state) break; } } - kvm_debug("%s:%d ipnum:%d i:%d val_data_u8:0x%x\n", __FUNCTION__, __LINE__, + kvm_debug("%s:%d ipnum:%d i:%d val_data_u8:0x%x\n", __func__, __LINE__, ipnum, i, val_data_u8); if (val_data_u8) { @@ -793,7 +791,7 @@ static int kvm_set_ext_sw_coremap(struct kvm *kvm, struct kvm_ls3a_extirq_state state->ext_sw_coremap[reg_count] = state->core_map.reg_u8[reg_count]; kvm_debug("%s:%d -- reg_count:%d vcpu %d\n", - __FUNCTION__, __LINE__, reg_count, state->core_map.reg_u8[reg_count]); + __func__, __LINE__, reg_count, state->core_map.reg_u8[reg_count]); } return 0; @@ -807,12 +805,10 @@ static int kvm_set_ext_sw_ipisr(struct kvm *kvm, struct kvm_ls3a_extirq_state *s core = state->ext_sw_coremap[irq_num]; ipnum = state->ext_sw_ipmap[irq_num]; - if (test_bit(irq_num, (void *)state->ext_core_isr.reg_u8[core]) == false) { + if (test_bit(irq_num, (void *)state->ext_core_isr.reg_u8[core]) == false) bitmap_clear((void *)state->ext_sw_ipisr[core][ipnum + 2], irq_num, 1); - } else { + else bitmap_set((void *)state->ext_sw_ipisr[core][ipnum + 2], irq_num, 1); - } - } return 0; } @@ -875,7 +871,7 @@ void kvm_dump_ls3a_extirq_state(struct seq_file *s, struct ls3a_kvm_extirq *irqchip) { struct kvm_ls3a_extirq_state *extirq; - int i, j = 0; + int i = 0, j = 0; unsigned long flags; seq_puts(s, "LS3A ext irqchip state:\n"); diff --git a/arch/loongarch/kvm/intc/ls3a_ext_irq.h b/arch/loongarch/kvm/intc/ls3a_ext_irq.h index f834527b1370..c45960d1c637 100644 --- 
a/arch/loongarch/kvm/intc/ls3a_ext_irq.h +++ b/arch/loongarch/kvm/intc/ls3a_ext_irq.h @@ -51,7 +51,7 @@ #define EXTIOI_IRQS_COREMAP_SIZE (EXTIOI_IRQS) #define EXTIOI_IRQS_NODETYPE_SIZE KVM_EXTIOI_IRQS_NODETYPE_SIZE -typedef struct kvm_ls3a_extirq_state { +struct kvm_ls3a_extirq_state { union ext_en { uint64_t reg_u64[EXTIOI_IRQS_BITMAP_SIZE / 8]; uint32_t reg_u32[EXTIOI_IRQS_BITMAP_SIZE / 4]; @@ -93,7 +93,7 @@ typedef struct kvm_ls3a_extirq_state { uint8_t ext_sw_ipmap[EXTIOI_IRQS]; uint8_t ext_sw_coremap[EXTIOI_IRQS]; uint8_t ext_sw_ipisr[KVM_MAX_VCPUS][LS3A_INTC_IP][EXTIOI_IRQS_BITMAP_SIZE]; -} LS3AExtirqState; +}; struct ls3a_kvm_extirq { spinlock_t lock; diff --git a/arch/loongarch/kvm/intc/ls3a_ipi.c b/arch/loongarch/kvm/intc/ls3a_ipi.c index 6c05eaad9f80..c194bf45de5f 100644 --- a/arch/loongarch/kvm/intc/ls3a_ipi.c +++ b/arch/loongarch/kvm/intc/ls3a_ipi.c @@ -17,7 +17,7 @@ int kvm_helper_send_ipi(struct kvm_vcpu *vcpu, unsigned int cpu, unsigned int ac { struct kvm *kvm = vcpu->kvm; struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); unsigned long flags; struct kvm_loongarch_interrupt irq; @@ -41,7 +41,7 @@ static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr, { uint64_t data, offset; struct kvm_loongarch_interrupt irq; - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); uint32_t cpu, action_data; struct kvm *kvm; void *pbuf; @@ -57,7 +57,7 @@ static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr, switch (offset) { case CORE0_STATUS_OFF: - printk("CORE0_SET_OFF Can't be write\n"); + pr_info("CORE0_SET_OFF Can't be write\n"); break; case CORE0_EN_OFF: @@ -73,9 +73,8 @@ static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr, irq.cpu = cpu; irq.irq = LARCH_INT_IPI; - if (likely(kvm->vcpus[cpu])) { + if (likely(kvm->vcpus[cpu])) kvm_vcpu_ioctl_interrupt(kvm->vcpus[cpu], &irq); - } } s->core[cpu].status |= 
action_data; break; @@ -114,7 +113,7 @@ static int ls3a_gipi_writel(struct ls3a_kvm_ipi *ipi, gpa_t addr, break; default: - printk("ls3a_gipi_writel with unknown addr %llx \n", addr); + pr_info("%s with unknown addr %llx\n", __func__, addr); break; } return 0; @@ -126,7 +125,7 @@ static uint64_t ls3a_gipi_readl(struct ls3a_kvm_ipi *ipi, uint64_t offset; uint64_t ret = 0; - gipiState *s = &(ipi->ls3a_gipistate); + struct gipiState *s = &(ipi->ls3a_gipistate); uint32_t cpu; void *pbuf; @@ -160,7 +159,7 @@ static uint64_t ls3a_gipi_readl(struct ls3a_kvm_ipi *ipi, ret = *(unsigned long *)pbuf; break; default: - printk("ls3a_gipi_readl with unknown addr %llx \n", addr); + pr_info("%s with unknown addr %llx\n", __func__, addr); break; } @@ -174,10 +173,10 @@ static int kvm_ls3a_ipi_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) { struct ls3a_kvm_ipi *ipi; - ipi_io_device *ipi_device; + struct ipi_io_device *ipi_device; unsigned long flags; - ipi_device = container_of(dev, ipi_io_device, device); + ipi_device = container_of(dev, struct ipi_io_device, device); ipi = ipi_device->ipi; ipi->kvm->stat.pip_write_exits++; @@ -193,10 +192,10 @@ static int kvm_ls3a_ipi_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) { struct ls3a_kvm_ipi *ipi; - ipi_io_device *ipi_device; + struct ipi_io_device *ipi_device; unsigned long flags; - ipi_device = container_of(dev, ipi_io_device, device); + ipi_device = container_of(dev, struct ipi_io_device, device); ipi = ipi_device->ipi; ipi->kvm->stat.pip_read_exits++; @@ -265,11 +264,11 @@ int kvm_create_ls3a_ipi(struct kvm *kvm) int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) { struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *ipi_state = &(ipi->ls3a_gipistate); + struct gipiState *ipi_state = &(ipi->ls3a_gipistate); unsigned long flags; ls3a_gipi_lock(ipi, flags); - memcpy(state, ipi_state, sizeof(gipiState)); + memcpy(state, ipi_state, sizeof(struct gipiState)); 
ls3a_gipi_unlock(ipi, flags); return 0; } @@ -277,14 +276,14 @@ int kvm_get_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) int kvm_set_ls3a_ipi(struct kvm *kvm, struct loongarch_gipiState *state) { struct ls3a_kvm_ipi *ipi = ls3a_ipi_irqchip(kvm); - gipiState *ipi_state = &(ipi->ls3a_gipistate); + struct gipiState *ipi_state = &(ipi->ls3a_gipistate); unsigned long flags; if (!ipi) return -EINVAL; ls3a_gipi_lock(ipi, flags); - memcpy(ipi_state, state, sizeof(gipiState)); + memcpy(ipi_state, state, sizeof(struct gipiState)); ls3a_gipi_unlock(ipi, flags); return 0; } diff --git a/arch/loongarch/kvm/intc/ls3a_ipi.h b/arch/loongarch/kvm/intc/ls3a_ipi.h index 3d40487177a7..75fe821defc2 100644 --- a/arch/loongarch/kvm/intc/ls3a_ipi.h +++ b/arch/loongarch/kvm/intc/ls3a_ipi.h @@ -12,32 +12,32 @@ #include #include -typedef struct gipi_single { +struct gipi_single { uint32_t status; uint32_t en; uint32_t set; uint32_t clear; uint64_t buf[4]; -} gipi_single; +}; -typedef struct gipiState { - gipi_single core[KVM_MAX_VCPUS]; -} gipiState; +struct gipiState { + struct gipi_single core[KVM_MAX_VCPUS]; +}; struct ls3a_kvm_ipi; -typedef struct ipi_io_device { +struct ipi_io_device { struct ls3a_kvm_ipi *ipi; struct kvm_io_device device; int nodeNum; -} ipi_io_device; +}; struct ls3a_kvm_ipi { spinlock_t lock; struct kvm *kvm; - gipiState ls3a_gipistate; + struct gipiState ls3a_gipistate; int nodeNum; - ipi_io_device dev_ls3a_ipi; + struct ipi_io_device dev_ls3a_ipi; }; #define SMP_MAILBOX (LOONGSON_VIRT_REG_BASE + 0x0000) diff --git a/arch/loongarch/kvm/intc/ls7a_irq.c b/arch/loongarch/kvm/intc/ls7a_irq.c index 18593aa7587d..5155de694f4e 100644 --- a/arch/loongarch/kvm/intc/ls7a_irq.c +++ b/arch/loongarch/kvm/intc/ls7a_irq.c @@ -139,7 +139,7 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, struct kvm_ls7a_ioapic_state *state; int64_t offset_tmp; uint64_t offset; - uint64_t data, old, himask, lowmask;; + uint64_t data, old, himask, lowmask; offset = addr 
& 0xfff; kvm = s->kvm; @@ -148,12 +148,12 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, himask = lowmask << 32; if (offset & (len - 1)) { - printk("%s(%d):unaligned address access %llx size %d \n", - __FUNCTION__, __LINE__, addr, len); + pr_info("%s(%d):unaligned address access %llx size %d\n", + __func__, __LINE__, addr, len); return 0; } - if (8 == len) { + if (len == 8) { data = *(uint64_t *)val; switch (offset) { case LS7A_INT_MASK_OFFSET: @@ -193,7 +193,7 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); break; } - } else if (4 == len) { + } else if (len == 4) { data = *(uint32_t *)val; switch (offset) { case LS7A_INT_MASK_OFFSET: @@ -267,7 +267,7 @@ static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s, WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); break; } - } else if (1 == len) { + } else if (len == 1) { data = *(unsigned char *)val; if (offset >= LS7A_HTMSI_VEC_OFFSET) { offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET; @@ -324,12 +324,12 @@ static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s, lowmask = 0xFFFFFFFFUL; himask = lowmask << 32; if (offset & (len - 1)) { - printk("%s(%d):unaligned address access %llx size %d \n", - __FUNCTION__, __LINE__, addr, len); + pr_info("%s(%d):unaligned address access %llx size %d\n", + __func__, __LINE__, addr, len); return 0; } - if (8 == len) { + if (len == 8) { switch (offset) { case LS7A_INT_MASK_OFFSET: result = state->int_mask; @@ -359,7 +359,7 @@ static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s, } if (val != NULL) *(uint64_t *)val = result; - } else if (4 == len) { + } else if (len == 4) { switch (offset) { case LS7A_INT_MASK_OFFSET: result = state->int_mask & lowmask; @@ -413,17 +413,15 @@ static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s, } if (val != NULL) *(uint32_t *)val = result; - } else if (1 == len) { + } else if (len == 1) { if (offset >= 
LS7A_HTMSI_VEC_OFFSET) { offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { + if (offset_tmp >= 0 && offset_tmp < 64) result = state->htmsi_vector[offset_tmp]; - } } else if (offset >= LS7A_ROUTE_ENTRY_OFFSET) { offset_tmp = offset - LS7A_ROUTE_ENTRY_OFFSET; - if (offset_tmp >= 0 && offset_tmp < 64) { + if (offset_tmp >= 0 && offset_tmp < 64) result = state->route_entry[offset_tmp]; - } } else { WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n", addr, len); } diff --git a/arch/loongarch/kvm/intc/ls7a_irq.h b/arch/loongarch/kvm/intc/ls7a_irq.h index 0c91b63bf88f..50fa2fd340a9 100644 --- a/arch/loongarch/kvm/intc/ls7a_irq.h +++ b/arch/loongarch/kvm/intc/ls7a_irq.h @@ -36,7 +36,7 @@ #define LS7A_IOAPIC_NUM_PINS 32 -typedef struct kvm_ls7a_ioapic_state { +struct kvm_ls7a_ioapic_state { u64 int_id; /* 0x020 interrupt mask register */ u64 int_mask; @@ -68,7 +68,7 @@ typedef struct kvm_ls7a_ioapic_state { * 0 for high level tirgger */ u64 int_polarity; -} LS7AApicState; +}; struct ls7a_kvm_ioapic { spinlock_t lock; diff --git a/arch/loongarch/kvm/kvm_compat.c b/arch/loongarch/kvm/kvm_compat.c index 277d7760aa11..f404a497902f 100644 --- a/arch/loongarch/kvm/kvm_compat.c +++ b/arch/loongarch/kvm/kvm_compat.c @@ -9,52 +9,7 @@ extern int _kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) -void kvm_arch_check_processor_compat(void *rtn) -{ - *(int *)rtn = 0; -} - -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - _kvm_set_spte_hva(kvm, hva, pte); - return; -} - -#elif (LINUX_VERSION_CODE == KERNEL_VERSION(5, 4, 0)) -int kvm_arch_check_processor_compat(void) -{ - return 0; -} - -int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - bool is_dirty = false; - int r; - mutex_lock(&kvm->slots_lock); - - r = kvm_clear_dirty_log_protect(kvm, log, 
&is_dirty); - - if (is_dirty) { - slots = kvm_memslots(kvm); - memslot = id_to_memslot(slots, log->slot); - - /* Let implementation handle TLB/GVA invalidation */ - kvm_flush_remote_tlbs(kvm); - } - - mutex_unlock(&kvm->slots_lock); - return r; -} - -int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - return _kvm_set_spte_hva(kvm, hva, pte); -} -#else int kvm_arch_check_processor_compat(void *opaque) { return 0; @@ -79,7 +34,6 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) { #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL - kvm_flush_remote_tlbs (kvm); + kvm_flush_remote_tlbs(kvm); #endif } -#endif diff --git a/arch/loongarch/kvm/kvm_compat.h b/arch/loongarch/kvm/kvm_compat.h index 1da54b2d80ec..5c724b3758dd 100644 --- a/arch/loongarch/kvm/kvm_compat.h +++ b/arch/loongarch/kvm/kvm_compat.h @@ -6,12 +6,8 @@ #else #define _ULCAST_ (unsigned long) #include -#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)) -#include -#else #include #endif -#endif #define KVM_REG_A0 0x4 #define KVM_REG_A1 0x5 @@ -122,6 +118,12 @@ #define KVM_ESTAT_IS_WIDTH 15 #define KVM_ESTAT_IS (_ULCAST_(0x7fff) << KVM_ESTAT_IS_SHIFT) +#define KEVS_KEVW (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1) +#define KGGS_KGGW (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1) +#define KGTS_KGTW (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1) +#define KCP_PK (KVM_CRMD_PG | PLV_KERN) +#define KCDS_PK ((1 << KVM_CRMD_DACM_SHIFT) | (1 << KVM_CRMD_DACF_SHIFT) | KCP_PK) + #define KVM_CSR_ERA 0x6 /* ERA */ #define KVM_CSR_BADV 0x7 /* Bad virtual address */ #define KVM_CSR_BADI 0x8 /* Bad instruction */ @@ -312,7 +314,7 @@ #define KVM_IOCSR_MISC_FUNC 0x420 #define KVM_IOCSRF_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) -/* PerCore CSR, only accessable by local cores */ +/* PerCore CSR, only accessible by local cores */ #define KVM_IOCSR_IPI_STATUS 0x1000 #define KVM_IOCSR_IPI_SEND 0x1040 #define KVM_IOCSR_MBUF_SEND 0x1048 @@ -331,7 +333,7 @@ static inline u32 
kvm_csr_readl(u32 reg) u32 val; asm volatile ( - "csrrd %[val], %[reg] \n" + "csrrd %[val], %[reg]\n" : [val] "=r" (val) : [reg] "i" (reg) : "memory"); @@ -343,7 +345,7 @@ static inline u64 kvm_csr_readq(u32 reg) u64 val; asm volatile ( - "csrrd %[val], %[reg] \n" + "csrrd %[val], %[reg]\n" : [val] "=r" (val) : [reg] "i" (reg) : "memory"); @@ -353,7 +355,7 @@ static inline u64 kvm_csr_readq(u32 reg) static inline void kvm_csr_writel(u32 val, u32 reg) { asm volatile ( - "csrwr %[val], %[reg] \n" + "csrwr %[val], %[reg]\n" : [val] "+r" (val) : [reg] "i" (reg) : "memory"); @@ -362,7 +364,7 @@ static inline void kvm_csr_writel(u32 val, u32 reg) static inline void kvm_csr_writeq(u64 val, u32 reg) { asm volatile ( - "csrwr %[val], %[reg] \n" + "csrwr %[val], %[reg]\n" : [val] "+r" (val) : [reg] "i" (reg) : "memory"); @@ -371,7 +373,7 @@ static inline void kvm_csr_writeq(u64 val, u32 reg) static inline u32 kvm_csr_xchgl(u32 val, u32 mask, u32 reg) { asm volatile ( - "csrxchg %[val], %[mask], %[reg] \n" + "csrxchg %[val], %[mask], %[reg]\n" : [val] "+r" (val) : [mask] "r" (mask), [reg] "i" (reg) : "memory"); @@ -381,7 +383,7 @@ static inline u32 kvm_csr_xchgl(u32 val, u32 mask, u32 reg) static inline u64 kvm_csr_xchgq(u64 val, u64 mask, u32 reg) { asm volatile ( - "csrxchg %[val], %[mask], %[reg] \n" + "csrxchg %[val], %[mask], %[reg]\n" : [val] "+r" (val) : [mask] "r" (mask), [reg] "i" (reg) : "memory"); @@ -395,7 +397,7 @@ static inline u32 kvm_iocsr_readl(u32 reg) u32 val; asm volatile ( - "iocsrrd.w %[val], %[reg] \n" + "iocsrrd.w %[val], %[reg]\n" : [val] "=r" (val) : [reg] "r" (reg) : "memory"); @@ -407,7 +409,7 @@ static inline u64 kvm_iocsr_readq(u32 reg) u64 val; asm volatile ( - "iocsrrd.d %[val], %[reg] \n" + "iocsrrd.d %[val], %[reg]\n" : [val] "=r" (val) : [reg] "r" (reg) : "memory"); @@ -417,7 +419,7 @@ static inline u64 kvm_iocsr_readq(u32 reg) static inline void kvm_iocsr_writeb(u8 val, u32 reg) { asm volatile ( - "iocsrwr.b %[val], %[reg] \n" + "iocsrwr.b 
%[val], %[reg]\n" : : [val] "r" (val), [reg] "r" (reg) : "memory"); @@ -426,7 +428,7 @@ static inline void kvm_iocsr_writeb(u8 val, u32 reg) static inline void kvm_iocsr_writel(u32 val, u32 reg) { asm volatile ( - "iocsrwr.w %[val], %[reg] \n" + "iocsrwr.w %[val], %[reg]\n" : : [val] "r" (val), [reg] "r" (reg) : "memory"); @@ -435,7 +437,7 @@ static inline void kvm_iocsr_writel(u32 val, u32 reg) static inline void kvm_iocsr_writeq(u64 val, u32 reg) { asm volatile ( - "iocsrwr.d %[val], %[reg] \n" + "iocsrwr.d %[val], %[reg]\n" : : [val] "r" (val), [reg] "r" (reg) : "memory"); @@ -448,8 +450,8 @@ static inline u64 kvm_gcsr_read(u32 reg) u64 val = 0; asm volatile ( - "parse_r __reg, %[val] \n" - ".word 0x5 << 24 | %[reg] << 10 | 0 << 5 | __reg \n" + "parse_r __reg, %[val]\n" + ".word 0x5 << 24 | %[reg] << 10 | 0 << 5 | __reg\n" : [val] "+r" (val) : [reg] "i" (reg) : "memory"); @@ -459,8 +461,8 @@ static inline u64 kvm_gcsr_read(u32 reg) static inline void kvm_gcsr_write(u64 val, u32 reg) { asm volatile ( - "parse_r __reg, %[val] \n" - ".word 0x5 << 24 | %[reg] << 10 | 1 << 5 | __reg \n" + "parse_r __reg, %[val]\n" + ".word 0x5 << 24 | %[reg] << 10 | 1 << 5 | __reg\n" : [val] "+r" (val) : [reg] "i" (reg) : "memory"); @@ -469,9 +471,9 @@ static inline void kvm_gcsr_write(u64 val, u32 reg) static inline u64 kvm_gcsr_xchg(u64 val, u64 mask, u32 reg) { asm volatile ( - "parse_r __rd, %[val] \n" - "parse_r __rj, %[mask] \n" - ".word 0x5 << 24 | %[reg] << 10 | __rj << 5 | __rd \n" + "parse_r __rd, %[val]\n" + "parse_r __rj, %[mask]\n" + ".word 0x5 << 24 | %[reg] << 10 | __rj << 5 | __rd\n" : [val] "+r" (val) : [mask] "r" (mask), [reg] "i" (reg) : "memory"); diff --git a/arch/loongarch/kvm/kvmcpu.h b/arch/loongarch/kvm/kvmcpu.h index 7bcaaa254d16..8b9fefd38cb1 100644 --- a/arch/loongarch/kvm/kvmcpu.h +++ b/arch/loongarch/kvm/kvmcpu.h @@ -67,11 +67,10 @@ #define KVM_LOONGSON_IRQ_CPU_FIQ 1 #define KVM_LOONGSON_CPU_IP_NUM 8 -typedef union loongarch_instruction larch_inst; 
typedef int (*exit_handle_fn)(struct kvm_vcpu *); -int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); -int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); +int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, union loongarch_instruction inst); +int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, union loongarch_instruction inst); int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); int _kvm_emu_idle(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/kvmcsr.h b/arch/loongarch/kvm/kvmcsr.h index 24a84a3f72cd..4c92cf22a695 100644 --- a/arch/loongarch/kvm/kvmcsr.h +++ b/arch/loongarch/kvm/kvmcsr.h @@ -19,7 +19,7 @@ unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid); void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val); void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long csr_mask, unsigned long val); -int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); +int _kvm_emu_iocsr(union loongarch_instruction inst, struct kvm_run *run, struct kvm_vcpu *vcpu); static inline void kvm_save_hw_gcsr(struct loongarch_csrs *csr, u32 gid) { @@ -54,39 +54,6 @@ static inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, u32 gid, unsig csr->csrs[gid] |= val & _mask; } - -#define GET_HW_GCSR(id, csrid, v) \ - do { \ - if (csrid == id) { \ - *v = (long)kvm_read_hw_gcsr(csrid); \ - return 0; \ - } \ - } while (0) - -#define GET_SW_GCSR(csr, id, csrid, v) \ - do { \ - if (csrid == id) { \ - *v = kvm_read_sw_gcsr(csr, id); \ - return 0; \ - } \ - } while (0) - -#define SET_HW_GCSR(csr, id, csrid, v) \ - do { \ - if (csrid == id) { \ - kvm_write_hw_gcsr(csr, csrid, *v); \ - return 0; \ - } \ - } while (0) - -#define SET_SW_GCSR(csr, id, csrid, v) \ - do { \ - if (csrid == id) { \ - kvm_write_sw_gcsr(csr, csrid, *v); \ - return 0; \ - } \ - } while (0) - int _kvm_init_iocsr(struct kvm *kvm); int 
_kvm_set_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); @@ -95,20 +62,4 @@ int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp); KVM_PERFCTRL_PLV1 | \ KVM_PERFCTRL_PLV2 | \ KVM_PERFCTRL_PLV3) - -#define CASE_WRITE_HW_PMU(vcpu, csr, id, csrid, v) \ - do { \ - if (csrid == id) { \ - if (v & KVM_PMU_PLV_ENABLE) { \ - kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF); \ - kvm_write_hw_gcsr(csr, csrid, v | KVM_PERFCTRL_GMOD); \ - vcpu->arch.aux_inuse |= KVM_LARCH_PERF; \ - return ; \ - } else { \ - kvm_write_sw_gcsr(csr, csrid, v); \ - return; \ - } \ - } \ - } while (0) - #endif /* __LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/kvm/loongarch.c b/arch/loongarch/kvm/loongarch.c index 7a4c7fcf5bef..e164bac96549 100644 --- a/arch/loongarch/kvm/loongarch.c +++ b/arch/loongarch/kvm/loongarch.c @@ -62,18 +62,18 @@ struct kvm_stats_debugfs_item vcpu_debugfs_entries[] = { VCPU_STAT("halt_wakeup", halt_wakeup), VCPU_STAT("tlbmiss_ld", excep_exits[KVM_EXCCODE_TLBL]), VCPU_STAT("tlbmiss_st", excep_exits[KVM_EXCCODE_TLBS]), - VCPU_STAT("tlb_ifetch", excep_exits[KVM_EXCCODE_TLBI]), - VCPU_STAT("tlbmod", excep_exits[KVM_EXCCODE_TLBM]), - VCPU_STAT("tlbri", excep_exits[KVM_EXCCODE_TLBRI]), - VCPU_STAT("tlbxi", excep_exits[KVM_EXCCODE_TLBXI]), + VCPU_STAT("tlb_ifetch", excep_exits[KVM_EXCCODE_TLBI]), + VCPU_STAT("tlbmod", excep_exits[KVM_EXCCODE_TLBM]), + VCPU_STAT("tlbri", excep_exits[KVM_EXCCODE_TLBRI]), + VCPU_STAT("tlbxi", excep_exits[KVM_EXCCODE_TLBXI]), VCPU_STAT("fp_dis", excep_exits[KVM_EXCCODE_FPDIS]), VCPU_STAT("lsx_dis", excep_exits[KVM_EXCCODE_LSXDIS]), VCPU_STAT("lasx_dis", excep_exits[KVM_EXCCODE_LASXDIS]), - VCPU_STAT("fpe", excep_exits[KVM_EXCCODE_FPE]), - VCPU_STAT("watch", excep_exits[KVM_EXCCODE_WATCH]), - VCPU_STAT("gspr", excep_exits[KVM_EXCCODE_GSPR]), - VCPU_STAT("gcm", excep_exits[KVM_EXCCODE_GCM]), - VCPU_STAT("hc", 
excep_exits[KVM_EXCCODE_HYP]), + VCPU_STAT("fpe", excep_exits[KVM_EXCCODE_FPE]), + VCPU_STAT("watch", excep_exits[KVM_EXCCODE_WATCH]), + VCPU_STAT("gspr", excep_exits[KVM_EXCCODE_GSPR]), + VCPU_STAT("gcm", excep_exits[KVM_EXCCODE_GCM]), + VCPU_STAT("hc", excep_exits[KVM_EXCCODE_HYP]), {NULL} }; @@ -539,9 +539,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) * also switch excfg.VS field, keep host excfg.VS info here */ vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); - if (!vcpu->arch.csr) { + if (!vcpu->arch.csr) return -ENOMEM; - } /* Init */ vcpu->arch.last_sched_cpu = -1; @@ -1114,7 +1113,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchi r = kvm_get_ls7a_ioapic(kvm, (void *)chip->data); break; case KVM_IRQCHIP_LS3A_GIPI: - if (dlen != sizeof(gipiState)) { + if (dlen != sizeof(struct gipiState)) { kvm_err("get gipi state err dlen:%d\n", dlen); goto dlen_err; } @@ -1157,7 +1156,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchi r = kvm_set_ls7a_ioapic(kvm, (void *)chip->data); break; case KVM_IRQCHIP_LS3A_GIPI: - if (dlen != sizeof(gipiState)) { + if (dlen != sizeof(struct gipiState)) { kvm_err("set gipi state err dlen:%d\n", dlen); goto dlen_err; } @@ -1214,7 +1213,7 @@ static int kvm_csr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, unsigned size; r = -EFAULT; - if (copy_from_user(&msrs, user_msrs, sizeof msrs)) + if (copy_from_user(&msrs, user_msrs, sizeof(msrs))) goto out; r = -E2BIG; @@ -1383,7 +1382,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, vcpu->kvm->arch.online_vcpus = vcpu_state.online_vcpus; vcpu->kvm->arch.is_migrate = vcpu_state.is_migrate; for (i = 0; i < 4; i++) - vcpu->arch.core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i]; + vcpu->arch.core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i]; vcpu->arch.irq_pending = vcpu_state.irq_pending; vcpu->arch.irq_clear = vcpu_state.irq_clear; @@ -1533,7 +1532,7 @@ long 
kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { r = 0; if (copy_to_user(argp, &kvm->arch.cpucfgs, sizeof(struct kvm_cpucfg))) - r = -EFAULT; + r = -EFAULT; break; } default: @@ -1817,10 +1816,8 @@ void kvm_own_lsx(struct kvm_vcpu *vcpu) * Enable FP if enabled in guest, since we're restoring FP context * anyway. */ - if (_kvm_guest_has_fpu(&vcpu->arch)) { - + if (_kvm_guest_has_fpu(&vcpu->arch)) kvm_set_csr_euen(KVM_EUEN_FPEN); - } /* Enable LSX for guest */ kvm_set_csr_euen(KVM_EUEN_LSXEN); @@ -1867,18 +1864,15 @@ void kvm_own_lasx(struct kvm_vcpu *vcpu) * Enable FP if enabled in guest, since we're restoring FP context * anyway. */ - if (_kvm_guest_has_lsx(&vcpu->arch)) { - /* Enable LSX for guest */ + if (_kvm_guest_has_lsx(&vcpu->arch)) kvm_set_csr_euen(KVM_EUEN_LSXEN); - } /* * Enable FPU if enabled in guest, since we're restoring FPU context * anyway. We set FR and FRE according to guest context. */ - if (_kvm_guest_has_fpu(&vcpu->arch)) { + if (_kvm_guest_has_fpu(&vcpu->arch)) kvm_set_csr_euen(KVM_EUEN_FPEN); - } /* Enable LASX for guest */ kvm_set_csr_euen(KVM_EUEN_LASXEN); @@ -1940,9 +1934,9 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) disable_lsx(); #endif - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) kvm_clear_csr_euen(KVM_EUEN_FPEN); - } + vcpu->arch.aux_inuse &= ~(KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX); } else if (cpu_has_lsx && vcpu->arch.aux_inuse & KVM_LARCH_LSX) { @@ -1955,9 +1949,9 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) disable_lsx(); #endif - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) kvm_clear_csr_euen(KVM_EUEN_FPEN); - } + vcpu->arch.aux_inuse &= ~(KVM_LARCH_FPU | KVM_LARCH_LSX); } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 7b39290e5020..9e8e27851daf 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -54,7 +54,7 @@ 
static inline int kvm_pmd_huge(pmd_t pmd) return (pmd_val(pmd) & _PAGE_HUGE) != 0; #else return 0; -#endif +#endif } static inline int kvm_pud_huge(pud_t pud) @@ -63,7 +63,7 @@ static inline int kvm_pud_huge(pud_t pud) return (pud_val(pud) & _PAGE_HUGE) != 0; #else return 0; -#endif +#endif } static inline pmd_t kvm_pmd_mkhuge(pmd_t pmd) -- Gitee