From abf55e75986503b2b36fae2c9b69e4a8319b7f5e Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 4 Jun 2025 10:52:58 +0800 Subject: [PATCH 01/31] KVM: arm64: Probe Hisi CPU TYPE from ACPI/DTB Parse ACPI/DTB to get where the hypervisor is running. Signed-off-by: Zenghui Yu Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/hisi_cpu_model.h | 19 ++++++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/Makefile | 1 + arch/arm64/kvm/hisi_cpu_model.c | 83 +++++++++++++++++++++++++ virt/kvm/arm/arm.c | 6 ++ 5 files changed, 110 insertions(+) create mode 100644 arch/arm64/include/asm/hisi_cpu_model.h create mode 100644 arch/arm64/kvm/hisi_cpu_model.c diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h new file mode 100644 index 000000000000..003a3a53cf33 --- /dev/null +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#ifndef __HISI_CPU_MODEL_H__ +#define __HISI_CPU_MODEL_H__ + +enum hisi_cpu_type { + HI_1612, + HI_1616, + HI_1620, + UNKNOWN_HI_TYPE +}; + +extern enum hisi_cpu_type hi_cpu_type; + +void probe_hisi_cpu_type(void); +#endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 01886b83d120..3d9d4665aa69 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,6 +26,7 @@ #include #include #include +#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 410c084984d0..4a609b5683ed 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -19,6 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o +kvm-$(CONFIG_KVM_ARM_HOST) += hisi_cpu_model.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c new file mode 100644 index 000000000000..4d5a099bc27a --- /dev/null +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2019 Huawei Technologies Co., Ltd + */ + +#include +#include +#include +#include + +#ifdef CONFIG_ACPI + +/* ACPI Hisi oem table id str */ +const char *oem_str[] = { + "HIP06", /* Hisi 1612 */ + "HIP07", /* Hisi 1616 */ + "HIP08" /* Hisi 1620 */ +}; + +/* + * Get Hisi oem table id. 
+ */ +static void acpi_get_hw_cpu_type(void) +{ + struct acpi_table_header *table; + acpi_status status; + int i, str_size = ARRAY_SIZE(oem_str); + + /* Get oem table id from ACPI table header */ + status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); + if (ACPI_FAILURE(status)) { + pr_err("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return; + } + + for (i = 0; i < str_size; ++i) { + if (!strncmp(oem_str[i], table->oem_table_id, 5)) { + hi_cpu_type = i; + return; + } + } +} + +#else +static void acpi_get_hw_cpu_type(void) {} +#endif + +/* of Hisi cpu model str */ +const char *of_model_str[] = { + "Hi1612", + "Hi1616" +}; + +static void of_get_hw_cpu_type(void) +{ + const char *cpu_type; + int ret, i, str_size = ARRAY_SIZE(of_model_str); + + ret = of_property_read_string(of_root, "model", &cpu_type); + if (ret < 0) { + pr_err("Failed to get Hisi cpu model by OF.\n"); + return; + } + + for (i = 0; i < str_size; ++i) { + if (strstr(cpu_type, of_model_str[i])) { + hi_cpu_type = i; + return; + } + } +} + +void probe_hisi_cpu_type(void) +{ + if (!acpi_disabled) + acpi_get_hw_cpu_type(); + else + of_get_hw_cpu_type(); + + if (hi_cpu_type == UNKNOWN_HI_TYPE) + pr_warn("UNKNOWN Hisi cpu type.\n"); +} diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 28cdd2f4d1ef..465631a8f4a4 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -57,6 +57,9 @@ static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; +/* Hisi cpu type enum */ +enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; + static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) @@ -1769,6 +1772,9 @@ int kvm_arch_init(void *opaque) return -ENODEV; } + /* Probe the Hisi CPU type */ + probe_hisi_cpu_type(); + in_hyp_mode = is_kernel_in_hyp_mode(); if (!in_hyp_mode && kvm_arch_requires_vhe()) { -- Gitee From e6d27a1dc873e27f1a2d884822cf4638367c7de0 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sat, 7 Jun 2025 16:06:27 +0800 Subject: [PATCH 02/31] KVM: arm64: Add support for probing Hisi ncsnp capability Kunpeng 920 offers the HHA ncsnp capability, with which hypervisor doesn't need to perform a lot of cache maintenance like before (in case the guest has some non-cacheable Stage-1 mappings). Currently we apply this hardware capability when - vCPU switching MMU+caches on/off - creating Stage-2 mappings for Daborts Signed-off-by: Zenghui Yu Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/hisi_cpu_model.h | 2 ++ arch/arm64/include/asm/kvm_mmu.h | 2 +- arch/arm64/kvm/hisi_cpu_model.c | 34 +++++++++++++++++++++++++ virt/kvm/arm/arm.c | 2 ++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/include/asm/hisi_cpu_model.h index 003a3a53cf33..67008d17416e 100644 --- a/arch/arm64/include/asm/hisi_cpu_model.h +++ b/arch/arm64/include/asm/hisi_cpu_model.h @@ -14,6 +14,8 @@ enum hisi_cpu_type { }; extern enum hisi_cpu_type hi_cpu_type; +extern bool kvm_ncsnp_support; void probe_hisi_cpu_type(void); +void probe_hisi_ncsnp_support(void); #endif /* __HISI_CPU_MODEL_H__ */ diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index ffe0aad96b17..ad78b0047b2c 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -317,7 +317,7 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size) * faulting in pages. 
Furthermore, FWB implies IDC, so cleaning to * PoU is not required either in this case. */ - if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) + if (kvm_ncsnp_support || cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) return; kvm_flush_dcache_to_poc(va, size); diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisi_cpu_model.c index 4d5a099bc27a..52eecf1ba1cf 100644 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ b/arch/arm64/kvm/hisi_cpu_model.c @@ -81,3 +81,37 @@ void probe_hisi_cpu_type(void) if (hi_cpu_type == UNKNOWN_HI_TYPE) pr_warn("UNKNOWN Hisi cpu type.\n"); } + +#define NCSNP_MMIO_BASE 0x20107E238 + +/* + * We have the fantastic HHA ncsnp capability on Kunpeng 920, + * with which hypervisor doesn't need to perform a lot of cache + * maintenance like before (in case the guest has non-cacheable + * Stage-1 mappings). + */ +void probe_hisi_ncsnp_support(void) +{ + void __iomem *base; + unsigned int high; + + kvm_ncsnp_support = false; + + if (hi_cpu_type != HI_1620) + goto out; + + base = ioremap(NCSNP_MMIO_BASE, 4); + if (!base) { + pr_err("Unable to map MMIO region when probing ncsnp!\n"); + goto out; + } + + high = readl_relaxed(base) >> 28; + iounmap(base); + if (high != 0x1) + kvm_ncsnp_support = true; + +out: + kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? "enabled" : + "disabled"); +} diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 465631a8f4a4..fcbf0eaa35aa 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -59,6 +59,7 @@ static bool vgic_present; /* Hisi cpu type enum */ enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +bool kvm_ncsnp_support; static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); @@ -1774,6 +1775,7 @@ int kvm_arch_init(void *opaque) /* Probe the Hisi CPU type */ probe_hisi_cpu_type(); + probe_hisi_ncsnp_support(); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From 89312c776f9263c4777a0b51e74cf6e2d962f32b Mon Sep 17 00:00:00 2001 From: Yanan Wang Date: Sat, 7 Jun 2025 16:28:20 +0800 Subject: [PATCH 03/31] KVM: arm64: Only probe Hisi ncsnp feature on Hisi CPUs The "ncsnp" is an implementation specific CPU virtualization feature on Hisi 1620 series CPUs. This feature works just like ARM standard S2FWB to reduce some cache management operations in virtualization. Given that it's Hisi specific feature, let's restrict the detection only to Hisi CPUs. To realize this: 1) Add a sub-directory `hisilicon/` within arch/arm64/kvm to hold code for Hisi specific virtualization features. 2) Add a new kconfig option `CONFIG_KVM_HISI_VIRT` for users to select the whole Hisi specific virtualization features. 3) Add a generic global KVM variable `kvm_ncsnp_support` which is `false` by default. Only re-initialize it when we have `CONFIG_KVM_HISI_VIRT` enabled. 
Signed-off-by: Yanan Wang Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/configs/tencent.config | 1 + arch/arm64/include/asm/kvm_host.h | 3 +- arch/arm64/kvm/Kconfig | 1 + arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/hisilicon/Kconfig | 7 ++ arch/arm64/kvm/hisilicon/Makefile | 2 + .../hisi_virt.c} | 93 ++++++++++--------- .../hisilicon/hisi_virt.h} | 14 ++- virt/kvm/arm/arm.c | 13 ++- 9 files changed, 79 insertions(+), 57 deletions(-) create mode 100644 arch/arm64/kvm/hisilicon/Kconfig create mode 100644 arch/arm64/kvm/hisilicon/Makefile rename arch/arm64/kvm/{hisi_cpu_model.c => hisilicon/hisi_virt.c} (44%) rename arch/arm64/{include/asm/hisi_cpu_model.h => kvm/hisilicon/hisi_virt.h} (39%) diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index ef4fb41bfb01..eba92e00871b 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1510,3 +1510,4 @@ CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y CONFIG_HISI_L3T_PMU=m CONFIG_HISI_LPDDRC_PMU=m CONFIG_HISILICON_HW_METRIC=y +CONFIG_KVM_HISI_VIRT=y diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3d9d4665aa69..73c260b417c6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -26,7 +26,6 @@ #include #include #include -#include #define __KVM_HAVE_ARCH_INTC_INITIALIZED @@ -693,4 +692,6 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) return arm64_get_spectre_bhb_state(); } +extern bool kvm_ncsnp_support; + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1d0a3791c017..b065318df925 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -5,6 +5,7 @@ source "virt/kvm/Kconfig" source "virt/lib/Kconfig" +source "arch/arm64/kvm/hisilicon/Kconfig" menuconfig VIRTUALIZATION bool "Virtualization" diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 4a609b5683ed..58c568dc52d0 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -19,7 +19,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o -kvm-$(CONFIG_KVM_ARM_HOST) += hisi_cpu_model.o +obj-$(CONFIG_KVM_ARM_HOST) += hisilicon/ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o diff --git a/arch/arm64/kvm/hisilicon/Kconfig b/arch/arm64/kvm/hisilicon/Kconfig new file mode 100644 index 000000000000..6536f897a32e --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Kconfig @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +config KVM_HISI_VIRT + bool "HiSilicon SoC specific virtualization features" + depends on ARCH_HISI + help + Support for HiSilicon SoC specific virtualization features. + On non-HiSilicon platforms, say N here. 
diff --git a/arch/arm64/kvm/hisilicon/Makefile b/arch/arm64/kvm/hisilicon/Makefile new file mode 100644 index 000000000000..849f99d1526d --- /dev/null +++ b/arch/arm64/kvm/hisilicon/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_KVM_HISI_VIRT) += hisi_virt.o diff --git a/arch/arm64/kvm/hisi_cpu_model.c b/arch/arm64/kvm/hisilicon/hisi_virt.c similarity index 44% rename from arch/arm64/kvm/hisi_cpu_model.c rename to arch/arm64/kvm/hisilicon/hisi_virt.c index 52eecf1ba1cf..9587f9508a79 100644 --- a/arch/arm64/kvm/hisi_cpu_model.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -1,26 +1,34 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Copyright(c) 2022 Huawei Technologies Co., Ltd */ #include #include #include #include +#include "hisi_virt.h" -#ifdef CONFIG_ACPI +static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; + +static const char * const hisi_cpu_type_str[] = { + "Hisi1612", + "Hisi1616", + "Hisi1620", + "Unknown" +}; /* ACPI Hisi oem table id str */ -const char *oem_str[] = { +static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ "HIP08" /* Hisi 1620 */ }; /* - * Get Hisi oem table id. + * Probe Hisi CPU type form ACPI. */ -static void acpi_get_hw_cpu_type(void) +static enum hisi_cpu_type acpi_get_hisi_cpu_type(void) { struct acpi_table_header *table; acpi_status status; @@ -29,89 +37,88 @@ static void acpi_get_hw_cpu_type(void) /* Get oem table id from ACPI table header */ status = acpi_get_table(ACPI_SIG_DSDT, 0, &table); if (ACPI_FAILURE(status)) { - pr_err("Failed to get ACPI table: %s\n", - acpi_format_exception(status)); - return; + pr_warn("Failed to get ACPI table: %s\n", + acpi_format_exception(status)); + return UNKNOWN_HI_TYPE; } for (i = 0; i < str_size; ++i) { - if (!strncmp(oem_str[i], table->oem_table_id, 5)) { - hi_cpu_type = i; - return; - } + if (!strncmp(oem_str[i], table->oem_table_id, 5)) + return i; } -} -#else -static void acpi_get_hw_cpu_type(void) {} -#endif + return UNKNOWN_HI_TYPE; +} /* of Hisi cpu model str */ -const char *of_model_str[] = { +static const char * const of_model_str[] = { "Hi1612", "Hi1616" }; -static void of_get_hw_cpu_type(void) +/* + * Probe Hisi CPU type from DT. + */ +static enum hisi_cpu_type of_get_hisi_cpu_type(void) { - const char *cpu_type; + const char *model; int ret, i, str_size = ARRAY_SIZE(of_model_str); - ret = of_property_read_string(of_root, "model", &cpu_type); + /* + * Note: There may not be a "model" node in FDT, which + * is provided by the vendor. In this case, we are not + * able to get CPU type information through this way. 
+ */ + ret = of_property_read_string(of_root, "model", &model); if (ret < 0) { - pr_err("Failed to get Hisi cpu model by OF.\n"); - return; + pr_warn("Failed to get Hisi cpu model by OF.\n"); + return UNKNOWN_HI_TYPE; } for (i = 0; i < str_size; ++i) { - if (strstr(cpu_type, of_model_str[i])) { - hi_cpu_type = i; - return; - } + if (strstr(model, of_model_str[i])) + return i; } + + return UNKNOWN_HI_TYPE; } void probe_hisi_cpu_type(void) { if (!acpi_disabled) - acpi_get_hw_cpu_type(); + cpu_type = acpi_get_hisi_cpu_type(); else - of_get_hw_cpu_type(); + cpu_type = of_get_hisi_cpu_type(); - if (hi_cpu_type == UNKNOWN_HI_TYPE) - pr_warn("UNKNOWN Hisi cpu type.\n"); + kvm_info("detected: Hisi CPU type '%s'\n", hisi_cpu_type_str[cpu_type]); } -#define NCSNP_MMIO_BASE 0x20107E238 - /* * We have the fantastic HHA ncsnp capability on Kunpeng 920, * with which hypervisor doesn't need to perform a lot of cache * maintenance like before (in case the guest has non-cacheable * Stage-1 mappings). */ -void probe_hisi_ncsnp_support(void) +#define NCSNP_MMIO_BASE 0x20107E238 +bool hisi_ncsnp_supported(void) { void __iomem *base; unsigned int high; + bool supported = false; - kvm_ncsnp_support = false; - - if (hi_cpu_type != HI_1620) - goto out; + if (cpu_type != HI_1620) + return supported; base = ioremap(NCSNP_MMIO_BASE, 4); if (!base) { - pr_err("Unable to map MMIO region when probing ncsnp!\n"); - goto out; + pr_warn("Unable to map MMIO region when probing ncsnp!\n"); + return supported; } high = readl_relaxed(base) >> 28; iounmap(base); if (high != 0x1) - kvm_ncsnp_support = true; + supported = true; -out: - kvm_info("Hisi ncsnp: %s\n", kvm_ncsnp_support ? "enabled" : - "disabled"); + return supported; } diff --git a/arch/arm64/include/asm/hisi_cpu_model.h b/arch/arm64/kvm/hisilicon/hisi_virt.h similarity index 39% rename from arch/arm64/include/asm/hisi_cpu_model.h rename to arch/arm64/kvm/hisilicon/hisi_virt.h index 67008d17416e..c4b5acc93fec 100644 --- a/arch/arm64/include/asm/hisi_cpu_model.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -1,10 +1,10 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ /* - * Copyright(c) 2019 Huawei Technologies Co., Ltd + * Copyright(c) 2022 Huawei Technologies Co., Ltd */ -#ifndef __HISI_CPU_MODEL_H__ -#define __HISI_CPU_MODEL_H__ +#ifndef __HISI_VIRT_H__ +#define __HISI_VIRT_H__ enum hisi_cpu_type { HI_1612, @@ -13,9 +13,7 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; -extern enum hisi_cpu_type hi_cpu_type; -extern bool kvm_ncsnp_support; - void probe_hisi_cpu_type(void); -void probe_hisi_ncsnp_support(void); -#endif /* __HISI_CPU_MODEL_H__ */ +bool hisi_ncsnp_supported(void); + +#endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index fcbf0eaa35aa..cae8fb6d3de9 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -44,6 +44,10 @@ __asm__(".arch_extension virt"); #endif +#ifdef CONFIG_KVM_HISI_VIRT +#include "hisilicon/hisi_virt.h" +#endif + DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -57,8 +61,7 @@ static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; -/* Hisi cpu type enum */ -enum hisi_cpu_type hi_cpu_type = UNKNOWN_HI_TYPE; +/* Capability of non-cacheable snooping */ bool kvm_ncsnp_support; static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); @@ -1773,9 +1776,11 @@ int kvm_arch_init(void *opaque) return -ENODEV; } - /* Probe the Hisi CPU type */ +#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); - probe_hisi_ncsnp_support(); + 
kvm_ncsnp_support = hisi_ncsnp_supported(); +#endif + kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From 16482dee874ae0d5d581c29048a9aa35ecd5a488 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 16:30:20 +0800 Subject: [PATCH 04/31] KVM: arm64: Support a new HiSi CPU type Add a new entry ("HIP09") in oem_str[] to support detection of the new HiSi CPU type. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 4 +++- arch/arm64/kvm/hisilicon/hisi_virt.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9587f9508a79..90c363ed642e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -15,6 +15,7 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", "Hisi1620", + "HIP09", "Unknown" }; @@ -22,7 +23,8 @@ static const char * const hisi_cpu_type_str[] = { static const char * const oem_str[] = { "HIP06", /* Hisi 1612 */ "HIP07", /* Hisi 1616 */ - "HIP08" /* Hisi 1620 */ + "HIP08", /* Hisi 1620 */ + "HIP09" /* HIP09 */ }; /* diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c4b5acc93fec..9231b1dca7f2 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -10,6 +10,7 @@ enum hisi_cpu_type { HI_1612, HI_1616, HI_1620, + HI_IP09, UNKNOWN_HI_TYPE }; -- Gitee From 52682822ec0f4d87241b487c05a4578e75ecedc5 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:12:13 +0800 Subject: [PATCH 05/31] KVM: arm64: Probe and configure DVMBM capability on HiSi CPUs DVMBM is an virtualization extension since HIP09, which allows TLBI executed at NS EL1 to be broadcast in a configurable range of physical CPUs (even with HCR_EL2.FB set). It will bring TLBI broadcast optimization. Introduce the method to detect and enable this feature. Also add a kernel command parameter "kvm-arm.dvmbm_enabled" (=0 on default) so that users can {en,dis}able DVMBM on need. The parameter description is added under Documentation/. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- .../admin-guide/kernel-parameters.txt | 4 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/hisilicon/hisi_virt.c | 49 +++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 6 +++ virt/kvm/arm/arm.c | 6 +++ 5 files changed, 66 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 810b9f5cbf9b..828c42bd3de6 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2172,6 +2172,10 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs. + kvm-arm.dvmbm_enabled= + [KVM,ARM] Allow use of HiSilicon DVMBM capability. + Default: 0 + kvm-intel.ept= [KVM,Intel] Disable extended page tables (virtualized MMU) support on capable Intel chips. 
Default is 1 (enabled) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 73c260b417c6..4a17c971f4f0 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -693,5 +693,6 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) } extern bool kvm_ncsnp_support; +extern bool kvm_dvmbm_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 90c363ed642e..b81488cd663b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -11,6 +11,8 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; +static bool dvmbm_enabled; + static const char * const hisi_cpu_type_str[] = { "Hisi1612", "Hisi1616", @@ -124,3 +126,50 @@ bool hisi_ncsnp_supported(void) return supported; } + +static int __init early_dvmbm_enable(char *buf) +{ + return strtobool(buf, &dvmbm_enabled); +} +early_param("kvm-arm.dvmbm_enabled", early_dvmbm_enable); + +static void hardware_enable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val |= LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +static void hardware_disable_dvmbm(void *data) +{ + u64 val; + + val = read_sysreg_s(SYS_LSUDVM_CTRL_EL2); + val &= ~LSUDVM_CTLR_EL2_MASK; + write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); +} + +bool hisi_dvmbm_supported(void) +{ + if (cpu_type != HI_IP09) + return false; + + /* Determine whether DVMBM is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) + return false; + + /* User provided kernel command-line parameter */ + if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + on_each_cpu(hardware_disable_dvmbm, NULL, 1); + return false; + } + + /* + * Enable TLBI Broadcast optimization by setting + * LSUDVM_CTRL_EL2's bit[0]. + */ + on_each_cpu(hardware_enable_dvmbm, NULL, 1); + return true; +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 9231b1dca7f2..f505d44e386f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,7 +14,13 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +/* HIP09 */ +#define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) +#define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) +#define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); +bool hisi_dvmbm_supported(void); #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index cae8fb6d3de9..c0ab4a66a023 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -49,6 +49,10 @@ __asm__(".arch_extension virt"); #endif DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); + +/* Capability of DVMBM */ +bool kvm_dvmbm_support; + static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* Per-CPU variable containing the currently running vcpu. */ @@ -1779,8 +1783,10 @@ int kvm_arch_init(void *opaque) #ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); + kvm_dvmbm_support = hisi_dvmbm_supported(); #endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); + kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? 
"enabled" : "disabled"); in_hyp_mode = is_kernel_in_hyp_mode(); -- Gitee From 0671f843976fac9e15b9eb82b66be6e4981a4ac4 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:29:49 +0800 Subject: [PATCH 06/31] KVM: arm64: Add kvm_vcpu_arch::sched_cpus and pre_sched_cpus We already have cpus_ptr in current thread struct now, through which we can know the pcpu range the thread is allowed to run on. So in kvm_arch_vcpu_{load,put}, we can also know the pcpu range the vcpu thread is allowed to be scheduled on, and that is the range we want to configure for TLBI broadcast. Introduce two variables sched_cpus and pre_sched_cpus in struct kvm_vcpu_arch. @sched_cpus always comes from current->cpus_ptr and @pre_sched_cpus always comes from @sched_cpus. Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 8 ++++++ arch/arm64/kvm/hisilicon/hisi_virt.c | 37 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 25 +++++++++++++++++++ virt/kvm/arm/arm.c | 14 ++++++++--- 4 files changed, 80 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4a17c971f4f0..cab28c1db15c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -341,6 +341,14 @@ struct kvm_vcpu_arch { /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; + +#ifdef CONFIG_KVM_HISI_VIRT + /* pCPUs this vCPU can be scheduled on. Pure copy of + * current->cpus_ptr + */ + cpumask_var_t sched_cpus; + cpumask_var_t pre_sched_cpus; +#endif }; /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index b81488cd663b..ac12fc54a6b4 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -173,3 +173,40 @@ bool hisi_dvmbm_supported(void) on_each_cpu(hardware_enable_dvmbm, NULL, 1); return true; } + +int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return 0; + + if (!zalloc_cpumask_var(&vcpu->arch.sched_cpus, GFP_ATOMIC) || + !zalloc_cpumask_var(&vcpu->arch.pre_sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(vcpu->arch.sched_cpus); + free_cpumask_var(vcpu->arch.pre_sched_cpus); +} + +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); +} + +void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) +{ + if (!kvm_dvmbm_support) + return; + + cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index f505d44e386f..8d8ef6aa165a 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -6,6 +6,7 @@ #ifndef __HISI_VIRT_H__ #define __HISI_VIRT_H__ +#ifdef CONFIG_KVM_HISI_VIRT enum hisi_cpu_type { HI_1612, HI_1616, @@ -23,4 +24,28 @@ void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); +void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); +void 
kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +#else +static inline void probe_hisi_cpu_type(void) {} +static inline bool hisi_ncsnp_supported(void) +{ + return false; +} +static inline bool hisi_dvmbm_supported(void) +{ + return false; +} + +static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} +static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +#endif /* CONFIG_KVM_HISI_VIRT */ + #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c0ab4a66a023..8e2427ac39a7 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -44,9 +44,7 @@ __asm__(".arch_extension virt"); #endif -#ifdef CONFIG_KVM_HISI_VIRT #include "hisilicon/hisi_virt.h" -#endif DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); @@ -294,6 +292,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) if (err) goto free_vcpu; + err = kvm_sched_affinity_vcpu_init(vcpu); + if (err) + goto free_vcpu; + err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP); if (err) goto vcpu_uninit; @@ -326,6 +328,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_free(vcpu); + + kvm_sched_affinity_vcpu_destroy(vcpu); } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -433,6 +437,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu_ptrauth_disable(vcpu); } + + kvm_tlbi_dvmbm_vcpu_load(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) @@ -446,6 +452,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) vcpu->cpu = -1; kvm_arm_set_running_vcpu(NULL); + + kvm_tlbi_dvmbm_vcpu_put(vcpu); } static void vcpu_power_off(struct kvm_vcpu *vcpu) @@ -1780,11 +1788,9 @@ int kvm_arch_init(void *opaque) return -ENODEV; } -#ifdef CONFIG_KVM_HISI_VIRT probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); -#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); -- Gitee From 82999d347b3f2592a2926de7eb3625a56369da57 Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 17:49:31 +0800 Subject: [PATCH 07/31] KVM: arm64: Add kvm_arch::sched_cpus and sched_lock Introduce sched_cpus and sched_lock in struct kvm_arch. sched_cpus will store the union of all vcpus' cpus_ptr in a VM and will be used for the TLBI broadcast range for this VM. sched_lock ensures a exclusive manipulation of sched_cpus. In vcpu_load, we should decide whether to perform the subsequent update operation by checking whether sched_cpus has changed. 
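For illustration only: the VM-wide value is just the bitwise union of every
vCPU's allowed-CPU mask. A minimal userspace sketch of that idea, with a plain
64-bit mask standing in for cpumask_t (toy names invented for the example, not
the kernel code):

  #include <stdint.h>
  #include <stdio.h>

  /* Toy stand-in for cpumask_t: one bit per possible pCPU (up to 64 here). */
  typedef uint64_t toy_cpumask;

  /* Union of all vCPUs' allowed-CPU masks gives the VM-wide TLBI range. */
  static toy_cpumask vm_sched_cpus(const toy_cpumask *vcpu_sched_cpus, int nr_vcpus)
  {
          toy_cpumask mask = 0;
          int i;

          for (i = 0; i < nr_vcpus; i++)
                  mask |= vcpu_sched_cpus[i];
          return mask;
  }

  int main(void)
  {
          /* vCPU0 may run on pCPUs 0-3, vCPU1 on pCPUs 4-7. */
          toy_cpumask vcpus[] = { 0x0fULL, 0xf0ULL };

          printf("VM sched_cpus = 0x%llx\n",
                 (unsigned long long)vm_sched_cpus(vcpus, 2));
          return 0;
  }

In the kernel this union is rebuilt with cpumask_or() under sched_lock, as the
diff below shows.
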
Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 5 +++ arch/arm64/kvm/hisilicon/hisi_virt.c | 52 ++++++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 7 ++++ virt/kvm/arm/arm.c | 5 +++ 4 files changed, 69 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cab28c1db15c..1cd8575b365f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -84,6 +84,11 @@ struct kvm_arch { /* Mandated version of PSCI */ u32 psci_version; + +#ifdef CONFIG_KVM_HISI_VIRT + spinlock_t sched_lock; + cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ +#endif }; #define KVM_NR_MEM_OBJS 40 diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index ac12fc54a6b4..fe0515c20989 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -197,10 +197,42 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *tmp; + cpumask_t mask; + unsigned long i; + + /* Don't bother on old hardware */ if (!kvm_dvmbm_support) return; cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); + + if (likely(cpumask_equal(vcpu->arch.sched_cpus, + vcpu->arch.pre_sched_cpus))) + return; + + /* Re-calculate sched_cpus for this VM */ + spin_lock(&kvm->arch.sched_lock); + + cpumask_clear(&mask); + kvm_for_each_vcpu(i, tmp, kvm) { + /* + * We may get the stale sched_cpus if another thread + * is concurrently changing its affinity. It'll + * eventually go through vcpu_load() and we rely on + * the last sched_lock holder to make things correct. 
+ */ + cpumask_or(&mask, &mask, tmp->arch.sched_cpus); + } + + if (cpumask_equal(kvm->arch.sched_cpus, &mask)) + goto out_unlock; + + cpumask_copy(kvm->arch.sched_cpus, &mask); + +out_unlock: + spin_unlock(&kvm->arch.sched_lock); } void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) @@ -210,3 +242,23 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); } + +int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return 0; + + spin_lock_init(&kvm->arch.sched_lock); + if (!zalloc_cpumask_var(&kvm->arch.sched_cpus, GFP_ATOMIC)) + return -ENOMEM; + + return 0; +} + +void kvm_sched_affinity_vm_destroy(struct kvm *kvm) +{ + if (!kvm_dvmbm_support) + return; + + free_cpumask_var(kvm->arch.sched_cpus); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 8d8ef6aa165a..3de270ad2da5 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -26,6 +26,8 @@ bool hisi_dvmbm_supported(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); +int kvm_sched_affinity_vm_init(struct kvm *kvm); +void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); #else @@ -44,6 +46,11 @@ static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) return 0; } static inline void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) {} +static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) +{ + return 0; +} +static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} #endif /* CONFIG_KVM_HISI_VIRT */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 8e2427ac39a7..fe1cef08098f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -116,6 +116,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret, cpu; + ret = kvm_sched_affinity_vm_init(kvm); + if (ret) + return ret; + ret = kvm_arm_setup_stage2(kvm, type); if (ret) return ret; @@ -172,6 +176,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { int i; + kvm_sched_affinity_vm_destroy(kvm); kvm_vgic_destroy(kvm); free_percpu(kvm->arch.last_vcpu_ran); -- Gitee From 962295c4a5a98718358e061e7751e8f3e270d88a Mon Sep 17 00:00:00 2001 From: lishusen Date: Sat, 7 Jun 2025 18:08:20 +0800 Subject: [PATCH 08/31] KVM: arm64: Implement the capability of DVMBM Implement the capability of DVMBM. Before each vcpu is loaded, we re-calculate the VM-wide sched_cpus, and if it's changed we will kick all other vcpus out to reload the latest LSUDVMBM value to the register, and a new request KVM_REQ_RELOAD_TLBI_DVMBM is added to implement this. Otherwise if the sched_cpus is not changed by this single vcpu, in order to ensure the correctness of the contents in the register, we reload the LSUDVMBM value to the register and nothing else will be done. 
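The overall per-load flow can be summarised by the following toy sketch (plain
C with invented toy_* types standing in for the kernel structures; the real
LSUDVMBM encoding, the sched_lock and the cross-vCPU reload request are only
hinted at in comments, this is not the kernel code):

  #include <stdint.h>
  #include <stdio.h>

  /* Toy model of the per-load flow; 64-bit masks stand in for cpumask_t. */
  struct toy_vm   { uint64_t sched_cpus; uint64_t tlbi_dvmbm; };
  struct toy_vcpu { struct toy_vm *vm; uint64_t sched_cpus, pre_sched_cpus; };

  static void write_lsudvmbm(uint64_t val)
  {
          printf("LSUDVMBM_EL2 <- 0x%llx\n", (unsigned long long)val);
  }

  /* Placeholder for the real per-die/per-cluster encoding of sched_cpus. */
  static uint64_t encode_lsudvmbm(uint64_t sched_cpus)
  {
          return sched_cpus;
  }

  static void toy_vcpu_load(struct toy_vcpu *vcpu, uint64_t cpus_ptr)
  {
          struct toy_vm *vm = vcpu->vm;

          vcpu->sched_cpus = cpus_ptr;
          if (vcpu->sched_cpus == vcpu->pre_sched_cpus) {
                  write_lsudvmbm(vm->tlbi_dvmbm);     /* fast path: refresh only */
                  return;
          }

          /*
           * Slow path (sched_lock held in the real code): recompute the
           * VM-wide union and, if it changed, re-encode and ask every vCPU
           * to reload (KVM_REQ_RELOAD_TLBI_DVMBM in the real code).
           */
          if (vcpu->sched_cpus != vm->sched_cpus) {
                  vm->sched_cpus = vcpu->sched_cpus;  /* union over all vCPUs in reality */
                  vm->tlbi_dvmbm = encode_lsudvmbm(vm->sched_cpus);
          }
          write_lsudvmbm(vm->tlbi_dvmbm);
  }

  int main(void)
  {
          struct toy_vm vm = { 0, 0 };
          struct toy_vcpu vcpu = { &vm, 0, 0 };

          toy_vcpu_load(&vcpu, 0x0f);             /* changed: recompute + reload */
          vcpu.pre_sched_cpus = vcpu.sched_cpus;
          toy_vcpu_load(&vcpu, 0x0f);             /* unchanged: register refresh only */
          return 0;
  }
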
Signed-off-by: Quan Zhou Signed-off-by: lishusen Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/kvm/hisilicon/hisi_virt.c | 114 ++++++++++++++++++++++++++- arch/arm64/kvm/hisilicon/hisi_virt.h | 29 +++++++ virt/kvm/arm/arm.c | 3 + 4 files changed, 147 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 1cd8575b365f..cc892cfeec5d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -45,6 +45,7 @@ #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4) +#define KVM_REQ_RELOAD_TLBI_DVMBM KVM_ARCH_REQ(8) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -88,6 +89,7 @@ struct kvm_arch { #ifdef CONFIG_KVM_HISI_VIRT spinlock_t sched_lock; cpumask_var_t sched_cpus; /* Union of all vcpu's cpus_ptr */ + u64 tlbi_dvmbm; #endif }; diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index fe0515c20989..662ddf5b124b 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -195,6 +195,96 @@ void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu) free_cpumask_var(vcpu->arch.pre_sched_cpus); } +static void __kvm_write_lsudvmbm(struct kvm *kvm) +{ + write_sysreg_s(kvm->arch.tlbi_dvmbm, SYS_LSUDVMBM_EL2); +} + +static void kvm_write_lsudvmbm(struct kvm *kvm) +{ + spin_lock(&kvm->arch.sched_lock); + __kvm_write_lsudvmbm(kvm); + spin_unlock(&kvm->arch.sched_lock); +} + +static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) +{ + int num = 0, cpu; + + for_each_cpu(cpu, kvm->arch.sched_cpus) { + bool found = false; + u64 aff3; + int i; + + if (num >= size) + break; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + for (i = 0; i < num; i++) { + if (vm_aff3s[i] == aff3) { + found = true; + break; + } + } + + if (!found) + vm_aff3s[num++] = aff3; + } + + return num; +} + +static void kvm_update_vm_lsudvmbm(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2, aff1; + u64 vm_aff3s[DVMBM_MAX_DIES]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT; + + /* fulfill bits [52:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + val |= 1ULL << (aff2 * 4 + aff1); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_SHIFT | + vm_aff3s[1] << DVMBM_DIE2_SHIFT; + + /* and fulfill bits [43:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } + +out_update: + kvm->arch.tlbi_dvmbm = val; +} + void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -209,8 +299,10 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.sched_cpus, current->cpus_ptr); if (likely(cpumask_equal(vcpu->arch.sched_cpus, - 
vcpu->arch.pre_sched_cpus))) + vcpu->arch.pre_sched_cpus))) { + kvm_write_lsudvmbm(kvm); return; + } /* Re-calculate sched_cpus for this VM */ spin_lock(&kvm->arch.sched_lock); @@ -231,7 +323,17 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) cpumask_copy(kvm->arch.sched_cpus, &mask); + kvm_flush_remote_tlbs(kvm); + + /* + * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus + * out to reload the LSUDVMBM configuration. + */ + kvm_update_vm_lsudvmbm(kvm); + kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_TLBI_DVMBM); + out_unlock: + __kvm_write_lsudvmbm(kvm); spin_unlock(&kvm->arch.sched_lock); } @@ -262,3 +364,13 @@ void kvm_sched_affinity_vm_destroy(struct kvm *kvm) free_cpumask_var(kvm->arch.sched_cpus); } + +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) +{ + if (WARN_ON_ONCE(!kvm_dvmbm_support)) + return; + + preempt_disable(); + kvm_write_lsudvmbm(kvm); + preempt_enable(); +} diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 3de270ad2da5..4e162b7f6688 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,33 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) +/* + * MPIDR_EL1 layout on HIP09 + * + * Aff3[7:3] - socket ID [0-15] + * Aff3[2:0] - die ID [1,3] + * Aff2 - cluster ID [0-9] + * Aff1 - core ID [0-3] + * Aff0 - thread ID [0,1] + */ + +#define SYS_LSUDVMBM_EL2 sys_reg(3, 4, 15, 7, 5) +#define DVMBM_RANGE_SHIFT 62 +#define DVMBM_RANGE_ONE_DIE 0ULL +#define DVMBM_RANGE_TWO_DIES 1ULL +#define DVMBM_RANGE_ALL_DIES 3ULL + +#define DVMBM_GRAN_SHIFT 61 +#define DVMBM_GRAN_CLUSTER 0ULL +#define DVMBM_GRAN_DIE 1ULL + +#define DVMBM_DIE1_SHIFT 53 +#define DVMBM_DIE2_SHIFT 45 +#define DVMBM_DIE1_CLUSTER_SHIFT 22 +#define DVMBM_DIE2_CLUSTER_SHIFT 0 + +#define DVMBM_MAX_DIES 32 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); @@ -30,6 +57,7 @@ int kvm_sched_affinity_vm_init(struct kvm *kvm); void kvm_sched_affinity_vm_destroy(struct kvm *kvm); void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu); void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu); +void kvm_hisi_reload_lsudvmbm(struct kvm *kvm); #else static inline void probe_hisi_cpu_type(void) {} static inline bool hisi_ncsnp_supported(void) @@ -53,6 +81,7 @@ static inline int kvm_sched_affinity_vm_init(struct kvm *kvm) static inline void kvm_sched_affinity_vm_destroy(struct kvm *kvm) {} static inline void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) {} static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} +static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ #endif /* __HISI_VIRT_H__ */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index fe1cef08098f..c4aad0845dc6 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -720,6 +720,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) vgic_v4_load(vcpu); preempt_enable(); } + + if (kvm_check_request(KVM_REQ_RELOAD_TLBI_DVMBM, vcpu)) + kvm_hisi_reload_lsudvmbm(vcpu->kvm); } } -- Gitee From 94150d6c49ddc137ee56358904222c26440cc405 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Thu, 12 Jun 2025 10:30:44 +0800 Subject: [PATCH 09/31] KVM: arm64: Translate logic cluster id to physical cluster id when updating lsudvmbm For dvmbm feature, MN requires physical cluster id while it is filled with logic cluster id right now. 
In some situations which physical cluster id is not equal to logic cluster id such as in PG boards, it will cause issues when enabling dvmbm. To avoid the issue, translate logic cluster id to physical cluster id when updating lsudvmbm. Signed-off-by: Xiang Chen Signed-off-by: Yanan Wang Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 153 +++++++++++++++++++++++++-- arch/arm64/kvm/hisilicon/hisi_virt.h | 17 +++ virt/kvm/arm/arm.c | 3 + 3 files changed, 166 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 662ddf5b124b..a925c183aed6 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -234,12 +234,99 @@ static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size) return num; } +static u32 socket_num, die_num; + +static u32 kvm_get_socket_num(void) +{ + int socket_id[MAX_PG_CFG_SOCKETS], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, socket; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[7:3]: socket ID */ + socket = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + for (i = 0; i < num; i++) { + if (socket_id[i] == socket) { + found = true; + break; + } + } + if (!found) + socket_id[num++] = socket; + } + return num; +} + +static u32 kvm_get_die_num(void) +{ + int die_id[MAX_DIES_PER_SOCKET], cpu; + u32 num = 0; + + for_each_cpu(cpu, cpu_possible_mask) { + bool found = false; + u64 aff3, die; + int i; + + aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3); + /* aff3[2:0]: die ID */ + die = aff3 & DIE_ID_MASK; + for (i = 0; i < num; i++) { + if (die_id[i] == die) { + found = true; + break; + } + } + if (!found) + die_id[num++] = die; + } + return num; +} + +static u32 g_die_pg[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET] + [MAX_CLUSTERS_PER_DIE]; + +static void kvm_get_die_pg(unsigned long pg_cfg, int socket_id, int die_id) +{ + u32 pg_num = 0, i, j; + u32 pg_flag[MAX_CLUSTERS_PER_DIE]; + u32 die_tmp = socket_id * die_num + die_id; + + for (i = 0; i < MAX_CLUSTERS_PER_DIE; i++) { + if (test_bit(i, &pg_cfg)) + pg_num++; + g_die_pg[die_tmp][i] = i; + pg_flag[i] = 0; + } + + for (i = 0; i < MAX_CLUSTERS_PER_DIE - pg_num; i++) { + if (test_bit(i, &pg_cfg)) { + for (j = 0; j < pg_num; j++) { + u32 cluster_bak = MAX_CLUSTERS_PER_DIE + - pg_num + j; + + if (!test_bit(cluster_bak, &pg_cfg) && + !pg_flag[cluster_bak]) { + pg_flag[cluster_bak] = 1; + g_die_pg[die_tmp][i] = cluster_bak; + g_die_pg[die_tmp][cluster_bak] = i; + break; + } + } + } + } +} + static void kvm_update_vm_lsudvmbm(struct kvm *kvm) { - u64 mpidr, aff3, aff2, aff1; + u64 mpidr, aff3, aff2, aff1, phy_aff2; u64 vm_aff3s[DVMBM_MAX_DIES]; u64 val; int cpu, nr_dies; + u32 socket_id, die_id; nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES); if (nr_dies > 2) { @@ -254,10 +341,18 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) /* fulfill bits [52:0] */ for_each_cpu(cpu, kvm->arch.sched_cpus) { mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); - - val |= 1ULL << (aff2 * 4 + aff1); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; + else + die_id = 1; + + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 * 4 + aff1); } goto 
out_update; @@ -274,11 +369,20 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) mpidr = cpu_logical_map(cpu); aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (aff3 == vm_aff3s[0]) - val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + socket_id = (aff3 & SOCKET_ID_MASK) >> SOCKET_ID_SHIFT; + die_id = (aff3 & DIE_ID_MASK) >> DIE_ID_SHIFT; + if (die_id == TOTEM_B_ID) + die_id = 0; else - val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + die_id = 1; + + if (aff3 == vm_aff3s[0]) { + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE1_CLUSTER_SHIFT); + } else { + phy_aff2 = g_die_pg[socket_id * die_num + die_id][aff2]; + val |= 1ULL << (phy_aff2 + DVMBM_DIE2_CLUSTER_SHIFT); + } } out_update: @@ -345,6 +449,41 @@ void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) cpumask_copy(vcpu->arch.pre_sched_cpus, vcpu->arch.sched_cpus); } +void kvm_get_pg_cfg(void) +{ + void __iomem *mn_base; + u32 i, j; + u32 pg_cfgs[MAX_PG_CFG_SOCKETS * MAX_DIES_PER_SOCKET]; + u64 mn_phy_base; + u32 val; + + socket_num = kvm_get_socket_num(); + die_num = kvm_get_die_num(); + + for (i = 0; i < socket_num; i++) { + for (j = 0; j < die_num; j++) { + + /* + * totem B means the first CPU DIE within a SOCKET, + * totem A means the second one. + */ + mn_phy_base = (j == 0) ? TB_MN_BASE : TA_MN_BASE; + mn_phy_base += CHIP_ADDR_OFFSET(i); + mn_phy_base += MN_ECO0_OFFSET; + + mn_base = ioremap(mn_phy_base, 4); + if (!mn_base) { + kvm_info("MN base addr ioremap failed\n"); + return; + } + val = readl_relaxed(mn_base); + pg_cfgs[j + i * die_num] = val & 0xff; + kvm_get_die_pg(pg_cfgs[j + i * die_num], i, j); + iounmap(mn_base); + } + } +} + int kvm_sched_affinity_vm_init(struct kvm *kvm) { if (!kvm_dvmbm_support) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 4e162b7f6688..6d43ad13dff7 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -20,6 +20,21 @@ enum hisi_cpu_type { #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) +#define MAX_CLUSTERS_PER_DIE 8 +#define TB_MN_BASE 0x00C6067f0000 +#define TA_MN_BASE 0x0046067F0000 +#define CHIP_ADDR_OFFSET(_chip) (((((_chip) >> 3) & 0x1) * 0x80000000000) + \ + ((((_chip) >> 2) & 0x1) * (0x100000000000)) + \ + (((_chip) & 0x3) * 0x200000000000)) +#define MAX_PG_CFG_SOCKETS 4 +#define MAX_DIES_PER_SOCKET 2 +#define MN_ECO0_OFFSET 0xc00 +#define SOCKET_ID_MASK 0xf8 +#define SOCKET_ID_SHIFT 3 +#define DIE_ID_MASK 0x7 +#define DIE_ID_SHIFT 0 +#define TOTEM_B_ID 3 + /* * MPIDR_EL1 layout on HIP09 * @@ -50,6 +65,7 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +void kvm_get_pg_cfg(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -68,6 +84,7 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +static inline void kvm_get_pg_cfg(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index c4aad0845dc6..379dad09eefa 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -1802,6 +1802,9 @@ int kvm_arch_init(void *opaque) kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? 
"enabled" : "disabled"); + if (kvm_dvmbm_support) + kvm_get_pg_cfg(); + in_hyp_mode = is_kernel_in_hyp_mode(); if (!in_hyp_mode && kvm_arch_requires_vhe()) { -- Gitee From 456dfd174320d01f023fa6db2fe20200bc7e792a Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Tue, 8 Oct 2024 19:21:36 +0800 Subject: [PATCH 10/31] KVM: arm64: Add new HiSi CPU type for supporting DVMBM Add new HiSi CPU type for supporting DVMBM, and expand ACPI hisi oem table id string to 8 bit. Signed-off-by: Zhou Wang Signed-off-by: caijian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 16 ++++++++++------ arch/arm64/kvm/hisilicon/hisi_virt.h | 6 ++++-- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index a925c183aed6..00ee4fd20996 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -18,15 +18,19 @@ static const char * const hisi_cpu_type_str[] = { "Hisi1616", "Hisi1620", "HIP09", + "HIP10", + "HIP10C", "Unknown" }; /* ACPI Hisi oem table id str */ static const char * const oem_str[] = { - "HIP06", /* Hisi 1612 */ - "HIP07", /* Hisi 1616 */ - "HIP08", /* Hisi 1620 */ - "HIP09" /* HIP09 */ + "HIP06 ", /* Hisi 1612 */ + "HIP07 ", /* Hisi 1616 */ + "HIP08 ", /* Hisi 1620 */ + "HIP09 ", /* HIP09 */ + "HIP10 ", /* HIP10 */ + "HIP10C " /* HIP10C */ }; /* @@ -47,7 +51,7 @@ static enum hisi_cpu_type acpi_get_hisi_cpu_type(void) } for (i = 0; i < str_size; ++i) { - if (!strncmp(oem_str[i], table->oem_table_id, 5)) + if (!strncmp(oem_str[i], table->oem_table_id, 8)) return i; } @@ -153,7 +157,7 @@ static void hardware_disable_dvmbm(void *data) bool hisi_dvmbm_supported(void) { - if (cpu_type != HI_IP09) + if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) return false; /* Determine whether DVMBM is supported by the hardware */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index 6d43ad13dff7..a718156a8f4e 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -12,10 +12,12 @@ enum hisi_cpu_type { HI_1616, HI_1620, HI_IP09, + HI_IP10, + HI_IP10C, UNKNOWN_HI_TYPE }; -/* HIP09 */ +/* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) #define LSUDVM_CTLR_EL2_MASK BIT_ULL(0) @@ -36,7 +38,7 @@ enum hisi_cpu_type { #define TOTEM_B_ID 3 /* - * MPIDR_EL1 layout on HIP09 + * MPIDR_EL1 layout on HIP10 * * Aff3[7:3] - socket ID [0-15] * Aff3[2:0] - die ID [1,3] -- Gitee From 14c45b4756f8d49ac75b37a09c982d45c1c02238 Mon Sep 17 00:00:00 2001 From: yangjinqian Date: Thu, 27 Mar 2025 15:55:53 +0800 Subject: [PATCH 11/31] kvm: hisi_virt: fix kernel panic when enable DVMBM in nVHE When the kernel is in nvhe mode and is in EL1, the original judgment logic causes the hardware_disable_dvmbm function to read the EL2 register in EL1, causing a panic during kernel startup. 
Signed-off-by: yangjinqian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 00ee4fd20996..c02963588343 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -160,12 +160,17 @@ bool hisi_dvmbm_supported(void) if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) return false; + if (!is_kernel_in_hyp_mode()) { + kvm_info("Hisi dvmbm not supported by KVM nVHE mode\n"); + return false; + } + /* Determine whether DVMBM is supported by the hardware */ if (!(read_sysreg(aidr_el1) & AIDR_EL1_DVMBM_MASK)) return false; /* User provided kernel command-line parameter */ - if (!dvmbm_enabled || !is_kernel_in_hyp_mode()) { + if (!dvmbm_enabled) { on_each_cpu(hardware_disable_dvmbm, NULL, 1); return false; } -- Gitee From 9daf31b72332faa7b7976c628a0476fcc91f5e32 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Thu, 27 Mar 2025 15:55:54 +0800 Subject: [PATCH 12/31] kvm: hisi_virt: Update TLBI broadcast feature for hip12 Compared with hip09, there are some differences on TLBI broadcast feature for hip12 including: - No need to translate logical cluster id to physical cluster id; - The minimum granularity of TLBI broadcast is cluster; - Some fields of register LSUDVMBM changes; So update for corresponding changes. MPIDR_EL1 layout on HIP12: Aff3[3:2] - socket ID [0-3] Aff3[1:0] - die ID [0,1] Aff2 - cluster ID [0-5] Aff1 - core ID [0-15] Aff0 - thread ID [0,1] Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 59 +++++++++++++++++++++++++++- arch/arm64/kvm/hisilicon/hisi_virt.h | 8 ++++ 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index c02963588343..23c97598513f 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -20,6 +20,7 @@ static const char * const hisi_cpu_type_str[] = { "HIP09", "HIP10", "HIP10C", + "HIP12", "Unknown" }; @@ -30,7 +31,8 @@ static const char * const oem_str[] = { "HIP08 ", /* Hisi 1620 */ "HIP09 ", /* HIP09 */ "HIP10 ", /* HIP10 */ - "HIP10C " /* HIP10C */ + "HIP10C ", /* HIP10C */ + "HIP12 " /* HIP12 */ }; /* @@ -398,6 +400,56 @@ static void kvm_update_vm_lsudvmbm(struct kvm *kvm) kvm->arch.tlbi_dvmbm = val; } +static void kvm_update_vm_lsudvmbm_hip12(struct kvm *kvm) +{ + u64 mpidr, aff3, aff2; + u64 vm_aff3s[DVMBM_MAX_DIES_HIP12]; + u64 val; + int cpu, nr_dies; + + nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES_HIP12); + if (nr_dies > 2) { + val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT; + goto out_update; + } + + if (nr_dies == 1) { + val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_VDIE_SHIFT_HIP12; + + /* fulfill bits [11:6] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT_HIP12); + } + + goto out_update; + } + + /* nr_dies == 2 */ + val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT | + DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT | + vm_aff3s[0] << DVMBM_DIE1_VDIE_SHIFT_HIP12 | + vm_aff3s[1] << DVMBM_DIE2_VDIE_SHIFT_HIP12; + + /* and fulfill bits [11:0] */ + for_each_cpu(cpu, kvm->arch.sched_cpus) { + mpidr = cpu_logical_map(cpu); + aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 
3); + aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + + if (aff3 == vm_aff3s[0]) + val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT_HIP12); + else + val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT_HIP12); + } + +out_update: + kvm->arch.tlbi_dvmbm = val; +} + void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; @@ -442,7 +494,10 @@ void kvm_tlbi_dvmbm_vcpu_load(struct kvm_vcpu *vcpu) * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus * out to reload the LSUDVMBM configuration. */ - kvm_update_vm_lsudvmbm(kvm); + if (cpu_type == HI_IP12) + kvm_update_vm_lsudvmbm_hip12(kvm); + else + kvm_update_vm_lsudvmbm(kvm); kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_TLBI_DVMBM); out_unlock: diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index a718156a8f4e..ace161dc2935 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -14,6 +14,7 @@ enum hisi_cpu_type { HI_IP09, HI_IP10, HI_IP10C, + HI_IP12, UNKNOWN_HI_TYPE }; @@ -64,6 +65,13 @@ enum hisi_cpu_type { #define DVMBM_MAX_DIES 32 +/* HIP12 */ +#define DVMBM_DIE1_VDIE_SHIFT_HIP12 57 +#define DVMBM_DIE2_VDIE_SHIFT_HIP12 53 +#define DVMBM_DIE1_CLUSTER_SHIFT_HIP12 6 +#define DVMBM_DIE2_CLUSTER_SHIFT_HIP12 0 +#define DVMBM_MAX_DIES_HIP12 8 + void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); -- Gitee From 96978fdfd314013881c628730dc5b6386995d02a Mon Sep 17 00:00:00 2001 From: yangjinqian Date: Thu, 27 Mar 2025 15:55:55 +0800 Subject: [PATCH 13/31] KVM: arm64: Add new HiSi CPU type to support DVMBM Add new HiSi CPU type HIP12 for supporting DVMBM. Function kvm_get_pg_cfg() is used to get configuration for translating logic cluster id to physical cluster id which is not needed by hip12, so skip it for hip12. Signed-off-by: yangjinqian Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 23c97598513f..64b8b9f70140 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -159,7 +159,8 @@ static void hardware_disable_dvmbm(void *data) bool hisi_dvmbm_supported(void) { - if (cpu_type != HI_IP10 && cpu_type != HI_IP10C) + if (cpu_type != HI_IP10 && cpu_type != HI_IP10C && + cpu_type != HI_IP12) return false; if (!is_kernel_in_hyp_mode()) { @@ -521,6 +522,9 @@ void kvm_get_pg_cfg(void) u64 mn_phy_base; u32 val; + if (cpu_type == HI_IP12) + return; + socket_num = kvm_get_socket_num(); die_num = kvm_get_die_num(); -- Gitee From 494cf39a103701f956647032a8a21513de692f4b Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:23 +0800 Subject: [PATCH 14/31] kvm: hisi_virt: Allocate VM table and save vpeid in it When guest access register ICC_SGI1R_EL1, GIC will access VM table to get the vpeid of vcpu for IPIV feature. So when IPIV feature is enabled, allocate VM table and save vpeid in it. The index of the entries in VM table is vcpu id. 
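For reference, the table described here is assumed to be a flat array of
16-bit vPE IDs indexed by vcpu_id. A minimal userspace sketch under that
assumption (toy helper, not the ITS code):

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* One 2-byte entry per vCPU, indexed by vcpu_id. */
  static uint16_t *build_vpeid_table(const uint16_t *vpe_ids, int nr_vcpus)
  {
          uint16_t *table = calloc(nr_vcpus, sizeof(*table));
          int i;

          if (!table)
                  return NULL;
          for (i = 0; i < nr_vcpus; i++)
                  table[i] = vpe_ids[i];  /* table[vcpu_id] = vpe_id */
          return table;
  }

  int main(void)
  {
          uint16_t vpe_ids[] = { 8, 9, 10, 11 };
          uint16_t *table = build_vpeid_table(vpe_ids, 4);

          if (table)
                  printf("vcpu 2 -> vpeid %u\n", table[2]);
          free(table);
          return 0;
  }
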
Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- drivers/irqchip/irq-gic-v3-its.c | 32 ++++++++++++++++++++++++++++-- drivers/irqchip/irq-gic-v3.c | 3 +++ include/linux/irqchip/arm-gic-v4.h | 1 + 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9e421c7f1c07..e2451087d962 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -447,6 +447,7 @@ void build_devid_pools(void) pr_info("ITS: reserved device id pools enabled\n"); } #endif +extern struct static_key_false ipiv_enable; /* * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we @@ -4844,6 +4845,10 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); + if (static_branch_unlikely(&ipiv_enable)) { + free_pages((unsigned long)page_address(vm->vpeid_page), + get_order(nr_irqs * 2)); + } } } @@ -4853,8 +4858,10 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page; + struct page *vprop_page, *vpeid_page; int base, nr_ids, i, err = 0; + void *vpeid_table_va; + u16 *vpeid_entry; BUG_ON(!vm); @@ -4878,14 +4885,35 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->nr_db_lpis = nr_ids; vm->vprop_page = vprop_page; - if (gic_rdists->has_rvpeid) + if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; + if (static_branch_unlikely(&ipiv_enable)) { + /* + * The vpeid's size is 2 bytes, so we need to + * allocate 2 * (num of vcpus). nr_irqs is + * equal to the number of vCPUs. 
+ */ + vpeid_page = alloc_pages(GFP_KERNEL, + get_order(nr_irqs * 2)); + if (!vpeid_page) { + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + return -ENOMEM; + } + vm->vpeid_page = vpeid_page; + vpeid_table_va = page_address(vpeid_page); + } + } for (i = 0; i < nr_irqs; i++) { vm->vpes[i]->vpe_db_lpi = base + i; err = its_vpe_init(vm->vpes[i]); if (err) break; + if (static_branch_unlikely(&ipiv_enable)) { + vpeid_entry = (u16 *)vpeid_table_va + i; + *vpeid_entry = vm->vpes[i]->vpe_id; + } err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index e83b24e2fad8..084a635da4f4 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -88,6 +88,9 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); +DEFINE_STATIC_KEY_FALSE(ipiv_enable); +EXPORT_SYMBOL(ipiv_enable); + /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ static refcount_t *ppi_nmi_refs; diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 07e8db2aa449..5fabbee10754 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -20,6 +20,7 @@ struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; + struct page *vpeid_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; -- Gitee From 536147f503a4617f7f72fb4cfbd1295e607344a6 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Wed, 23 Apr 2025 17:16:24 +0800 Subject: [PATCH 15/31] kvm: arm64: avoid sending multi-SGIs in IPIV IPIV optimizes unicast scenarios and does not support multicast or broadcast. To prevent GuestOS from generating multicast and broadcast, the MPIDR of vCPUs is modified to ensure that the [aff3, aff2, aff1] fields are uniquely assigned for each vCPU within a virtual machine, while all aff0 fields are set to zero. This configuration guarantees the uniqueness of vCPU affinity identifiers at the architecture level, thereby suppressing the generation of SGI multicast and broadcast signals by the GuestOS. Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/sys_regs.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 4fad598eb3dd..d00542454ba5 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -361,14 +361,14 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, /* * We want to avoid world-switching all the DBG registers all the * time: - * + * * - If we've touched any debug register, it is likely that we're * going to touch more of them. It then makes sense to disable the * traps and start doing the save/restore dance * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is * then mandatory to save/restore the registers, as the guest * depends on them. 
- * + * * For this, we use a DIRTY bit, indicating the guest has modified the * debug registers, used as follow: * @@ -610,6 +610,8 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); } +extern struct static_key_false ipiv_enable; + static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; @@ -624,6 +626,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); + + if (static_branch_unlikely(&ipiv_enable)) { + /* + * To avoid sending multi-SGIs in guest OS, + * make aff1/aff2 unique + */ + mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); + mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); + } + vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); } -- Gitee From 5d2ca95b494fca394d81b74cb9f62da6b2753605 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:25 +0800 Subject: [PATCH 16/31] irqchip: gicv3-its: Set base address of vm table and targe ITS when vpe schedule and deschedule Set the base address of vm table and target ITS when vpe schedule and deschedule. Also need to make sure IPIV is not busy before setting them. Disable ICC_SGI1R_EL1 trap when vpe schedule, and enable the trap when vpe deschedule. Only disable ICC_SGI1R_EL1 trap when enabled ipiv and set GICD_CTLR_nASSGIreq for register GICD_CTLR of virtual machine. Signed-off-by: Xiang Chen Signed-off-by: Nianyao Tang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- drivers/irqchip/irq-gic-v3-its.c | 73 ++++++++++++++++++++++++++++++ include/linux/irqchip/arm-gic-v3.h | 15 ++++++ include/linux/irqchip/arm-gic-v4.h | 1 + virt/kvm/arm/vgic/vgic-mmio-v3.c | 1 + 4 files changed, 90 insertions(+) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e2451087d962..eb160b3af096 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -4395,11 +4395,71 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +/* IPIV private register */ +#define CPU_SYS_TRAP_EL2 sys_reg(3, 4, 15, 7, 2) +#define CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT 0 +#define CPU_SYS_TRAP_EL2_IPIV_ENABLE \ + (1ULL << CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT) + +/* + * ipiv_disable_vsgi_trap and ipiv_enable_vsgi_trap run only + * in VHE mode and in EL2. 
+ */ +static void ipiv_disable_vsgi_trap(void) +{ +#ifdef CONFIG_ARM64 + u64 val; + + /* disable guest access ICC_SGI1R_EL1 trap, enable ipiv */ + val = read_sysreg_s(CPU_SYS_TRAP_EL2); + val |= CPU_SYS_TRAP_EL2_IPIV_ENABLE; + write_sysreg_s(val, CPU_SYS_TRAP_EL2); +#endif +} + +static void ipiv_enable_vsgi_trap(void) +{ +#ifdef CONFIG_ARM64 + u64 val; + + /* enable guest access ICC_SGI1R_EL1 trap, disable ipiv */ + val = read_sysreg_s(CPU_SYS_TRAP_EL2); + val &= ~CPU_SYS_TRAP_EL2_IPIV_ENABLE; + write_sysreg_s(val, CPU_SYS_TRAP_EL2); +#endif +} + static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; + unsigned long vpeid_page_addr; + u64 ipiv_val = 0; u64 val = 0; + u32 nr_vpes; + + if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + + GICR_IPIV_ST, ipiv_val, !(ipiv_val + & GICR_IPIV_ST_IPIV_BUSY), 1, 500)); + vpeid_page_addr = virt_to_phys(page_address(vm->vpeid_page)); + writel_relaxed(lower_32_bits(vpeid_page_addr), + vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed(upper_32_bits(vpeid_page_addr), + vlpi_base + GICR_VM_TABLE_BAR_H); + + /* setup gicr_vcpu_entry_num_max and gicr_ipiv_its_ta_sel */ + nr_vpes = vpe->its_vm->nr_vpes; + ipiv_val = ((nr_vpes - 1) << + GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT) | + (0 << GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT); + writel_relaxed(ipiv_val, vlpi_base + GICR_IPIV_CTRL); + + ipiv_disable_vsgi_trap(); + } /* Schedule the VPE */ val |= GICR_VPENDBASER_Valid; @@ -4414,6 +4474,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + struct its_vm *vm = vpe->its_vm; u64 val; if (info->req_db) { @@ -4445,6 +4506,18 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, GICR_VPENDBASER_PendingLast); vpe->pending_last = true; } + + if (static_branch_unlikely(&ipiv_enable) && + vm->nassgireq) { + /* wait gicr_ipiv_busy */ + WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + + GICR_IPIV_ST, val, !(val & GICR_IPIV_ST_IPIV_BUSY), + 1, 500)); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_L); + writel_relaxed(0, vlpi_base + GICR_VM_TABLE_BAR_H); + + ipiv_enable_vsgi_trap(); + } } static void its_vpe_4_1_invall(struct its_vpe *vpe) diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 9bf8c0c8b5d5..b642cf7c560b 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -362,6 +362,21 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +/* IPIV VM table address */ +#define GICR_VM_TABLE_BAR_L 0x140 +#define GICR_VM_TABLE_BAR_H 0x144 + +#define GICR_IPIV_CTRL 0x148 +#define GICR_IPIV_CTRL_VCPU_ENTRY_NUM_MAX_SHIFT 8 +/* + * Select ITS to determine the ITS through which the IPI is sent. 
+ */ +#define GICR_IPIV_CTRL_IPIV_ITS_TA_SEL_SHIFT 4 + +#define GICR_IPIV_ST 0x14c +#define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 +#define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) + /* * ITS registers, offsets from ITS_base */ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 5fabbee10754..435d2a0ce7ba 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -27,6 +27,7 @@ struct its_vm { unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; + bool nassgireq; }; /* Embedded in kvm_vcpu.arch */ diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 9f58d383dbc3..29991d2a831d 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -129,6 +129,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; + dist->its_vm.nassgireq = dist->nassgireq; if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); -- Gitee From 0e70d84ff1c4cc2eab6534bd075d80a5b111f3c5 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:26 +0800 Subject: [PATCH 17/31] kvm: hisi_virt: Register ipiv exception interrupt When one of the following conditions occurs: 1. The index of VM table exceeds the supported range. 2. Guest sends SGI with IRM=1. 3. Guest sends multicast. it triggers a exception interrupt (PPI interrupt). Just printk exception info in interrupt handler. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- virt/kvm/arm/vgic/vgic-init.c | 37 ++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 3d04b3c53984..03019ae2b05c 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015, 2016 ARM Ltd. 
*/ - +#include #include #include #include @@ -459,11 +459,16 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } +extern struct static_key_false ipiv_enable; +static int ipiv_irq; + /* GENERIC PROBE */ static int vgic_init_cpu_starting(unsigned int cpu) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); + if (static_branch_unlikely(&ipiv_enable)) + enable_percpu_irq(ipiv_irq, 0); return 0; } @@ -471,9 +476,17 @@ static int vgic_init_cpu_starting(unsigned int cpu) static int vgic_init_cpu_dying(unsigned int cpu) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); + if (static_branch_unlikely(&ipiv_enable)) + disable_percpu_irq(ipiv_irq); return 0; } +static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) +{ + kvm_info("IPIV irq handler!\n"); + return IRQ_HANDLED; +} + static irqreturn_t vgic_maintenance_handler(int irq, void *data) { /* @@ -561,6 +574,28 @@ int kvm_vgic_hyp_init(void) } kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); + + if (static_branch_unlikely(&ipiv_enable)) { + ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, + ACPI_ACTIVE_HIGH); + if (ipiv_irq < 0) { + kvm_err("No ipiv exception irq\n"); + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); + return -ENXIO; + } + + ret = request_percpu_irq(ipiv_irq, vgic_ipiv_irq_handler, + "ipiv exception", kvm_get_running_vcpus()); + if (ret) { + kvm_err("Cannot register interrupt %d\n", ipiv_irq); + free_percpu_irq(kvm_vgic_global_state.maint_irq, + kvm_get_running_vcpus()); + acpi_unregister_gsi(18); + return ret; + } + } + return 0; out_free_irq: -- Gitee From 4953e9839ff91e9a57c24b12d8cde0214fb9b09f Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:27 +0800 Subject: [PATCH 18/31] kvm: arm64: Add interface KVM_CAP_ARM_IPIV_MODE Before IPIV feature, it gets mpidr from vcpu id, but after the feature, we need to know if IPIV mode is enabled. And new IPIV modes may be added later. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- include/uapi/linux/kvm.h | 1 + virt/kvm/arm/arm.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ebb362be8c95..248e534379e8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1008,6 +1008,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 #define KVM_CAP_ARM_VIRT_MSI_BYPASS 799 +#define KVM_CAP_ARM_IPIV_MODE 503 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 379dad09eefa..537b4553aaa6 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -191,6 +191,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) atomic_set(&kvm->online_vcpus, 0); } +extern struct static_key_false ipiv_enable; + int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; @@ -243,6 +245,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif + case KVM_CAP_ARM_IPIV_MODE: + if (static_branch_unlikely(&ipiv_enable)) + r = 1; + else + r = 0; + break; default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; -- Gitee From f323f6663f7dbba6dec41b7b8fa49145a07e9011 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Wed, 23 Apr 2025 17:16:28 +0800 Subject: [PATCH 19/31] kvm: hisi_virt: Probe and configure IPIV capacity on HIP12 IPIV is an virtualization extension on HIP12, which allows IPIs on guest directly sending by hardware to other vcpu stead of trapping to EL2. 
It will bring IPI interrupt optimization on guest. Introduce the method to detect and enable the feature, and also add a kernel command parameter "kvm-arm.ipiv_enabled" (default is 0) so that users can disable or enable the feature. The feature is based on GICv4p1. Signed-off-by: Xiang Chen Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- .../admin-guide/kernel-parameters.txt | 3 ++ arch/arm64/include/asm/kvm_host.h | 1 + arch/arm64/kvm/hisilicon/hisi_virt.c | 37 +++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 11 ++++++ drivers/irqchip/irq-gic-v3.c | 30 +++++++++++++++ include/linux/irqchip/arm-gic-v3.h | 12 ++++++ virt/kvm/arm/arm.c | 9 +++++ 7 files changed, 103 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 828c42bd3de6..a6c32dd847d4 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2172,6 +2172,9 @@ [KVM,ARM] Allow use of GICv4 for direct injection of LPIs. + kvm-arm.ipiv_enabled= + [KVM,ARM] Allow use of HiSilicon ipiv on GICv4.1 + kvm-arm.dvmbm_enabled= [KVM,ARM] Allow use of HiSilicon DVMBM capability. Default: 0 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cc892cfeec5d..8bcf3f4893b6 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -709,5 +709,6 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; +extern bool kvm_ipiv_support; #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 64b8b9f70140..9f9177491844 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -12,6 +12,7 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; +static bool ipiv_enabled; static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -157,6 +158,42 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +static int __init early_ipiv_enable(char *buf) +{ + return strtobool(buf, &ipiv_enabled); +} +early_param("kvm-arm.ipiv_enabled", early_ipiv_enable); + +bool hisi_ipiv_supported(void) +{ + if (cpu_type != HI_IP12) + return false; + + /* Determine whether IPIV is supported by the hardware */ + if (!(read_sysreg(aidr_el1) & AIDR_EL1_IPIV_MASK)) { + kvm_info("Hisi ipiv not supported by the hardware\n"); + return false; + } + + /* User provided kernel command-line parameter */ + if (!ipiv_enabled || !is_kernel_in_hyp_mode()) + return false; + + /* Enable IPIV feature if necessary */ + if (!is_gicv4p1()) { + kvm_info("Hisi ipiv needs to enable GICv4p1!\n"); + return false; + } + + kvm_info("Enable Hisi ipiv, do not support vSGI broadcast\n"); + return true; +} + +void ipiv_gicd_init(void) +{ + gic_dist_enable_ipiv(); +} + bool hisi_dvmbm_supported(void) { if (cpu_type != HI_IP10 && cpu_type != HI_IP10C && diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index ace161dc2935..e956f6f9db36 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -18,6 +18,8 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +/* HIP12 */ +#define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -75,7 +77,9 @@ enum 
hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +bool hisi_ipiv_supported(void); void kvm_get_pg_cfg(void); +void ipiv_gicd_init(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -94,7 +98,12 @@ static inline bool hisi_dvmbm_supported(void) { return false; } +static inline bool hisi_ipiv_supported(void) +{ + return false; +} static inline void kvm_get_pg_cfg(void) {} +static inline void ipiv_gicd_init(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { @@ -111,4 +120,6 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +extern bool gic_dist_enable_ipiv(void); +extern bool is_gicv4p1(void); #endif /* __HISI_VIRT_H__ */ diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 084a635da4f4..38d3d013fda2 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1234,6 +1234,36 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } +bool is_gicv4p1(void) +{ + if (!gic_data.rdists.has_rvpeid) + return false; + + return true; +} +EXPORT_SYMBOL(is_gicv4p1); + +void gic_dist_enable_ipiv(void) +{ + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL); + val |= GICD_MISC_CTRL_CFG_IPIV_EN; + writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL); + static_branch_enable(&ipiv_enable); + + val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | + (0 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | + (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); + writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); + + /* Set target ITS address of IPIV feature */ + writel_relaxed(0x4880, gic_data.dist_base + GICD_IPIV_ITS_TA_BASE); +} +EXPORT_SYMBOL(gic_dist_enable_ipiv); + static void gic_cpu_init(void) { void __iomem *rbase; diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index b642cf7c560b..ff08cae37ad2 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -112,6 +112,18 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#define GICD_MISC_CTRL 0x2084 +#define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) + +/* IPIV private register */ +#define GICD_IPIV_CTRL 0xc05c +#define GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT 4 +#define GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT 8 +#define GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT 12 +#define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 +#define GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 +#define GICD_IPIV_ITS_TA_BASE 0xc010 + /* * Re-Distributor registers, offsets from RD_base */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 537b4553aaa6..93f9906e2d3e 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -51,6 +51,10 @@ DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); /* Capability of DVMBM */ bool kvm_dvmbm_support; +/* Capability of IPIV */ +bool kvm_ipiv_support; + + static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); /* Per-CPU variable containing the currently running vcpu. 
*/ @@ -1807,12 +1811,17 @@ int kvm_arch_init(void *opaque) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); + kvm_ipiv_support = hisi_ipiv_supported(); kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); + kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? "enabled" : "disabled"); if (kvm_dvmbm_support) kvm_get_pg_cfg(); + if (kvm_ipiv_support) + ipiv_gicd_init(); + in_hyp_mode = is_kernel_in_hyp_mode(); if (!in_hyp_mode && kvm_arch_requires_vhe()) { -- Gitee From 1cebe54c9f9d0b2ed333b994c766e4d783c00846 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Wed, 23 Apr 2025 17:16:29 +0800 Subject: [PATCH 20/31] kabi: Use KABI_EXTEND to perform kabi repair for IPIV Follow the kabi repair method of openeuler and use KABI_EXTEND. Its essence is to use GENKSYMS shielding. Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- include/linux/irqchip/arm-gic-v4.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 435d2a0ce7ba..aae19c59ca6f 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -20,14 +20,14 @@ struct its_vm { struct fwnode_handle *fwnode; struct irq_domain *domain; struct page *vprop_page; - struct page *vpeid_page; struct its_vpe **vpes; int nr_vpes; irq_hw_number_t db_lpi_base; unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; - bool nassgireq; + struct page *vpeid_page; +bool nassgireq; }; /* Embedded in kvm_vcpu.arch */ -- Gitee From adbea77583dbe00a1def6fc335231dd76f669e01 Mon Sep 17 00:00:00 2001 From: Jinqian Yang Date: Fri, 23 May 2025 20:45:25 +0800 Subject: [PATCH 21/31] arm64/config: add config to control whether enable IPIV feature Add ARM64_HISI_IPIV config to control whether enable the IPIV feature. The IPIV feature optimizes vSGI performance based on GICv4.1 and is a feature of HIP12. Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/Kconfig | 13 +++++++++++ arch/arm64/configs/tencent.config | 1 + arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/hisilicon/hisi_virt.c | 4 ++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 15 +++++++++++-- arch/arm64/kvm/sys_regs.c | 4 ++++ drivers/irqchip/irq-gic-v3-its.c | 32 ++++++++++++++++++++++------ drivers/irqchip/irq-gic-v3.c | 4 ++++ include/linux/irqchip/arm-gic-v3.h | 4 ++++ include/linux/irqchip/arm-gic-v4.h | 2 ++ virt/kvm/arm/arm.c | 19 +++++++++++++++++ virt/kvm/arm/vgic/vgic-init.c | 10 +++++++++ virt/kvm/arm/vgic/vgic-mmio-v3.c | 2 ++ 13 files changed, 103 insertions(+), 9 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 93e47a234ba9..f987c6ebc151 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1684,6 +1684,19 @@ config ARM64_HAFT endmenu # "ARMv8.8 architectural features" +config ARM64_HISI_IPIV + bool "Enable support for IPIV" + default y + depends on ACPI + depends on ARM64 + help + IPIV optimizes vSGI on the basis of GICv4.1. The vCPU on the sending + side of vSGI needs to trap to Hypervisor. IPIv sends vSGI without + traping, improving performance. + + The feature will only be enabled if CPU in the system and Guest OS + support this feature. If unsure, say Y. 
+ config ARM64_SVE bool "ARM Scalable Vector Extension support" default y diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config index eba92e00871b..04bac1f57949 100644 --- a/arch/arm64/configs/tencent.config +++ b/arch/arm64/configs/tencent.config @@ -1511,3 +1511,4 @@ CONFIG_HISI_L3T_PMU=m CONFIG_HISI_LPDDRC_PMU=m CONFIG_HISILICON_HW_METRIC=y CONFIG_KVM_HISI_VIRT=y +CONFIG_ARM64_HISI_IPIV=y diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 8bcf3f4893b6..e70aa600faa8 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -709,6 +709,8 @@ static inline enum mitigation_state kvm_arm_get_spectre_bhb_state(void) extern bool kvm_ncsnp_support; extern bool kvm_dvmbm_support; +#ifdef CONFIG_ARM64_HISI_IPIV extern bool kvm_ipiv_support; +#endif #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 9f9177491844..57c94376a957 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -12,7 +12,9 @@ static enum hisi_cpu_type cpu_type = UNKNOWN_HI_TYPE; static bool dvmbm_enabled; +#ifdef CONFIG_ARM64_HISI_IPIV static bool ipiv_enabled; +#endif static const char * const hisi_cpu_type_str[] = { "Hisi1612", @@ -158,6 +160,7 @@ static void hardware_disable_dvmbm(void *data) write_sysreg_s(val, SYS_LSUDVM_CTRL_EL2); } +#ifdef CONFIG_ARM64_HISI_IPIV static int __init early_ipiv_enable(char *buf) { return strtobool(buf, &ipiv_enabled); @@ -193,6 +196,7 @@ void ipiv_gicd_init(void) { gic_dist_enable_ipiv(); } +#endif /* CONFIG_ARM64_HISI_IPIV */ bool hisi_dvmbm_supported(void) { diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index e956f6f9db36..c9c198363717 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -18,8 +18,10 @@ enum hisi_cpu_type { UNKNOWN_HI_TYPE }; +#ifdef CONFIG_ARM64_HISI_IPIV /* HIP12 */ #define AIDR_EL1_IPIV_MASK GENMASK_ULL(17, 16) +#endif /* HIP10 */ #define AIDR_EL1_DVMBM_MASK GENMASK_ULL(13, 12) #define SYS_LSUDVM_CTRL_EL2 sys_reg(3, 4, 15, 7, 4) @@ -77,9 +79,11 @@ enum hisi_cpu_type { void probe_hisi_cpu_type(void); bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); +#ifdef CONFIG_ARM64_HISI_IPIV bool hisi_ipiv_supported(void); -void kvm_get_pg_cfg(void); void ipiv_gicd_init(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ +void kvm_get_pg_cfg(void); int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu); void kvm_sched_affinity_vcpu_destroy(struct kvm_vcpu *vcpu); @@ -98,12 +102,15 @@ static inline bool hisi_dvmbm_supported(void) { return false; } + +#ifdef CONFIG_ARM64_HISI_IPIV static inline bool hisi_ipiv_supported(void) { return false; } -static inline void kvm_get_pg_cfg(void) {} static inline void ipiv_gicd_init(void) {} +#endif /* CONFIG_ARM64_HISI_IPIV */ +static inline void kvm_get_pg_cfg(void) {} static inline int kvm_sched_affinity_vcpu_init(struct kvm_vcpu *vcpu) { @@ -120,6 +127,10 @@ static inline void kvm_tlbi_dvmbm_vcpu_put(struct kvm_vcpu *vcpu) {} static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {} #endif /* CONFIG_KVM_HISI_VIRT */ +#ifdef CONFIG_ARM64_HISI_IPIV extern bool gic_dist_enable_ipiv(void); extern bool is_gicv4p1(void); +#endif /* CONFIG_ARM64_HISI_IPIV */ + #endif /* __HISI_VIRT_H__ */ + diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d00542454ba5..ebfcb2c7e6bf 100644 --- a/arch/arm64/kvm/sys_regs.c +++ 
b/arch/arm64/kvm/sys_regs.c @@ -610,7 +610,9 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -627,6 +629,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { /* * To avoid sending multi-SGIs in guest OS, @@ -635,6 +638,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); } +#endif vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index eb160b3af096..70a8cc5ea00e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -447,7 +447,10 @@ void build_devid_pools(void) pr_info("ITS: reserved device id pools enabled\n"); } #endif + +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif /* * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we @@ -4395,6 +4398,7 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) its_vpe_4_1_send_inv(d); } +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV private register */ #define CPU_SYS_TRAP_EL2 sys_reg(3, 4, 15, 7, 2) #define CPU_SYS_TRAP_EL2_IPIV_ENABLE_SHIFT 0 @@ -4407,36 +4411,35 @@ static void its_vpe_4_1_unmask_irq(struct irq_data *d) */ static void ipiv_disable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* disable guest access ICC_SGI1R_EL1 trap, enable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val |= CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } static void ipiv_enable_vsgi_trap(void) { -#ifdef CONFIG_ARM64 u64 val; /* enable guest access ICC_SGI1R_EL1 trap, disable ipiv */ val = read_sysreg_s(CPU_SYS_TRAP_EL2); val &= ~CPU_SYS_TRAP_EL2_IPIV_ENABLE; write_sysreg_s(val, CPU_SYS_TRAP_EL2); -#endif } +#endif /* CONFIG_ARM64_HISI_IPIV */ static void its_vpe_4_1_schedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + +#ifdef CONFIG_ARM64_HISI_IPIV struct its_vm *vm = vpe->its_vm; unsigned long vpeid_page_addr; u64 ipiv_val = 0; - u64 val = 0; u32 nr_vpes; if (static_branch_unlikely(&ipiv_enable) && @@ -4460,6 +4463,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, ipiv_disable_vsgi_trap(); } +#endif /* CONFIG_ARM64_HISI_IPIV */ /* Schedule the VPE */ val |= GICR_VPENDBASER_Valid; @@ -4474,9 +4478,12 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, struct its_cmd_info *info) { void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); - struct its_vm *vm = vpe->its_vm; u64 val; +#ifdef CONFIG_ARM64_HISI_IPIV + struct its_vm *vm = vpe->its_vm; +#endif + if (info->req_db) { unsigned long flags; @@ -4507,6 +4514,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, vpe->pending_last = true; } +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable) && vm->nassgireq) { /* wait gicr_ipiv_busy */ @@ -4518,6 +4526,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, ipiv_enable_vsgi_trap(); } +#endif } static void 
its_vpe_4_1_invall(struct its_vpe *vpe) @@ -4918,10 +4927,12 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { free_pages((unsigned long)page_address(vm->vpeid_page), get_order(nr_irqs * 2)); } +#endif } } @@ -4931,10 +4942,13 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq struct irq_chip *irqchip = &its_vpe_irq_chip; struct its_vm *vm = args; unsigned long *bitmap; - struct page *vprop_page, *vpeid_page; + struct page *vprop_page; int base, nr_ids, i, err = 0; +#ifdef CONFIG_ARM64_HISI_IPIV + struct page *vpeid_page; void *vpeid_table_va; u16 *vpeid_entry; +#endif BUG_ON(!vm); @@ -4960,6 +4974,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { /* * The vpeid's size is 2 bytes, so we need to @@ -4976,6 +4991,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq vm->vpeid_page = vpeid_page; vpeid_table_va = page_address(vpeid_page); } +#endif } for (i = 0; i < nr_irqs; i++) { @@ -4983,10 +4999,12 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq err = its_vpe_init(vm->vpes[i]); if (err) break; +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { vpeid_entry = (u16 *)vpeid_table_va + i; *vpeid_entry = vm->vpes[i]->vpe_id; } +#endif err = its_irq_gic_domain_alloc(domain, virq + i, vm->vpes[i]->vpe_db_lpi); if (err) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 38d3d013fda2..fab64393994d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -88,8 +88,10 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); */ static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); +#ifdef CONFIG_ARM64_HISI_IPIV DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); +#endif /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ static refcount_t *ppi_nmi_refs; @@ -1234,6 +1236,7 @@ static int gic_dist_supports_lpis(void) !gicv3_nolpi); } +#ifdef CONFIG_ARM64_HISI_IPIV bool is_gicv4p1(void) { if (!gic_data.rdists.has_rvpeid) @@ -1263,6 +1266,7 @@ void gic_dist_enable_ipiv(void) writel_relaxed(0x4880, gic_data.dist_base + GICD_IPIV_ITS_TA_BASE); } EXPORT_SYMBOL(gic_dist_enable_ipiv); +#endif /* CONFIG_ARM64_HISI_IPIV */ static void gic_cpu_init(void) { diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index ff08cae37ad2..6c0380953fbe 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -112,6 +112,7 @@ #define GIC_PAGE_SIZE_64K 2ULL #define GIC_PAGE_SIZE_MASK 3ULL +#ifdef CONFIG_ARM64_HISI_IPIV #define GICD_MISC_CTRL 0x2084 #define GICD_MISC_CTRL_CFG_IPIV_EN (1U << 19) @@ -123,6 +124,7 @@ #define GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT 16 #define GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT 19 #define GICD_IPIV_ITS_TA_BASE 0xc010 +#endif /* * Re-Distributor registers, offsets from RD_base @@ -374,6 +376,7 @@ #define GICR_VSGIPENDR_BUSY (1U << 31) #define GICR_VSGIPENDR_PENDING GENMASK(15, 0) +#ifdef CONFIG_ARM64_HISI_IPIV /* IPIV VM table address */ #define GICR_VM_TABLE_BAR_L 0x140 #define GICR_VM_TABLE_BAR_H 0x144 @@ -388,6 
+391,7 @@ #define GICR_IPIV_ST 0x14c #define GICR_IPIV_ST_IPIV_BUSY_SHIFT 0 #define GICR_IPIV_ST_IPIV_BUSY (1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT) +#endif /* CONFIG_ARM64_HISI_IPIV */ /* * ITS registers, offsets from ITS_base diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index aae19c59ca6f..15e3d04f8aa1 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -26,7 +26,9 @@ struct its_vm { unsigned long *db_bitmap; int nr_db_lpis; u32 vlpi_count[GICv4_ITS_LIST_MAX]; +#ifdef CONFIG_ARM64_HISI_IPIV struct page *vpeid_page; +#endif bool nassgireq; }; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 93f9906e2d3e..504002bfb10b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -51,8 +51,10 @@ DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); /* Capability of DVMBM */ bool kvm_dvmbm_support; +#ifdef CONFIG_ARM64_HISI_IPIV /* Capability of IPIV */ bool kvm_ipiv_support; +#endif /* CONFIG_ARM64_HISI_IPIV */ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); @@ -67,9 +69,15 @@ static DEFINE_SPINLOCK(kvm_vmid_lock); static bool vgic_present; + /* Capability of non-cacheable snooping */ bool kvm_ncsnp_support; +#ifdef CONFIG_ARM64_HISI_IPIV +/* Capability of IPIV */ +bool kvm_ipiv_support; +#endif /* CONFIG_ARM64_HISI_IPIV */ + static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled); static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) @@ -195,7 +203,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) atomic_set(&kvm->online_vcpus, 0); } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; +#endif int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { @@ -249,12 +259,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = sdev_enable; break; #endif +#ifdef CONFIG_ARM64_HISI_IPIV case KVM_CAP_ARM_IPIV_MODE: if (static_branch_unlikely(&ipiv_enable)) r = 1; else r = 0; break; +#endif default: r = kvm_arch_vm_ioctl_check_extension(kvm, ext); break; @@ -1811,16 +1823,23 @@ int kvm_arch_init(void *opaque) probe_hisi_cpu_type(); kvm_ncsnp_support = hisi_ncsnp_supported(); kvm_dvmbm_support = hisi_dvmbm_supported(); + +#ifdef CONFIG_ARM64_HISI_IPIV kvm_ipiv_support = hisi_ipiv_supported(); +#endif kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled"); kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled"); +#ifdef CONFIG_ARM64_HISI_IPIV kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? 
"enabled" : "disabled"); +#endif if (kvm_dvmbm_support) kvm_get_pg_cfg(); +#ifdef CONFIG_ARM64_HISI_IPIV if (kvm_ipiv_support) ipiv_gicd_init(); +#endif in_hyp_mode = is_kernel_in_hyp_mode(); diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index 03019ae2b05c..7075875f48bf 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -459,16 +459,20 @@ int kvm_vgic_map_resources(struct kvm *kvm) return ret; } +#ifdef CONFIG_ARM64_HISI_IPIV extern struct static_key_false ipiv_enable; static int ipiv_irq; +#endif /* GENERIC PROBE */ static int vgic_init_cpu_starting(unsigned int cpu) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) enable_percpu_irq(ipiv_irq, 0); +#endif return 0; } @@ -476,16 +480,20 @@ static int vgic_init_cpu_starting(unsigned int cpu) static int vgic_init_cpu_dying(unsigned int cpu) { disable_percpu_irq(kvm_vgic_global_state.maint_irq); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) disable_percpu_irq(ipiv_irq); +#endif return 0; } +#ifdef CONFIG_ARM64_HISI_IPIV static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data) { kvm_info("IPIV irq handler!\n"); return IRQ_HANDLED; } +#endif static irqreturn_t vgic_maintenance_handler(int irq, void *data) { @@ -575,6 +583,7 @@ int kvm_vgic_hyp_init(void) kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); +#ifdef CONFIG_ARM64_HISI_IPIV if (static_branch_unlikely(&ipiv_enable)) { ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH); @@ -595,6 +604,7 @@ int kvm_vgic_hyp_init(void) return ret; } } +#endif return 0; diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 29991d2a831d..60a0f7ed997e 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -129,7 +129,9 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; +#ifdef CONFIG_ARM64_HISI_IPIV dist->its_vm.nassgireq = dist->nassgireq; +#endif if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); -- Gitee From 9b7a94770dc27e8ae39afa1de0ece183609455fb Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:26 +0800 Subject: [PATCH 22/31] KVM: arm64: Introduce ipiv enable ioctl IPIV uses ioctl to be enabled. Users (such as qemu) can invoke the ioctl to enable IPIV. 
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- include/linux/irqchip/arm-gic-v4.h | 3 ++- include/uapi/linux/kvm.h | 4 ++-- virt/kvm/arm/arm.c | 2 +- virt/kvm/kvm_main.c | 16 ++++++++++++++-- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index 15e3d04f8aa1..f9f235ecc638 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -29,7 +29,8 @@ struct its_vm { #ifdef CONFIG_ARM64_HISI_IPIV struct page *vpeid_page; #endif -bool nassgireq; + bool nassgireq; + bool enable_ipiv_from_vmm; }; /* Embedded in kvm_vcpu.arch */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 248e534379e8..0d3eca4bd2c6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1008,7 +1008,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 #define KVM_CAP_ARM_VIRT_MSI_BYPASS 799 -#define KVM_CAP_ARM_IPIV_MODE 503 +#define KVM_CAP_ARM_HISI_IPIV 798 #ifdef KVM_CAP_IRQ_ROUTING @@ -1249,7 +1249,7 @@ struct kvm_master_dev_info { __u32 nvectors; struct kvm_msi msi[]; }; - + /* * ioctls for VM fds */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 504002bfb10b..9a3ae40c6740 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -260,7 +260,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) break; #endif #ifdef CONFIG_ARM64_HISI_IPIV - case KVM_CAP_ARM_IPIV_MODE: + case KVM_CAP_ARM_HISI_IPIV: if (static_branch_unlikely(&ipiv_enable)) r = 1; else diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ac74b31efb81..3346cca1e44e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1420,7 +1420,7 @@ int kvm_clear_dirty_log_protect(struct kvm *kvm, } } spin_unlock(&kvm->mmu_lock); - + if(flush) kvm_flush_remote_tlbs_memslot(kvm, memslot); @@ -1716,7 +1716,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * tail pages of non-compound higher order allocations, which * would then underflow the refcount when the caller does the * required put_page. Don't allow those pages here. - */ + */ if (!kvm_try_get_pfn(pfn)) r = -EFAULT; @@ -3424,6 +3424,14 @@ int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, return -EINVAL; } +#ifdef CONFIG_ARM64_HISI_IPIV +static int kvm_hisi_ipiv_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + kvm->arch.vgic.its_vm.enable_ipiv_from_vmm = true; + return 0; +} +#endif + static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, struct kvm_enable_cap *cap) { @@ -3434,6 +3442,10 @@ static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, return -EINVAL; kvm->manual_dirty_log_protect = cap->args[0]; return 0; +#endif +#ifdef CONFIG_ARM64_HISI_IPIV + case KVM_CAP_ARM_HISI_IPIV: + return kvm_hisi_ipiv_enable_cap(kvm, cap); #endif default: return kvm_vm_ioctl_enable_cap(kvm, cap); -- Gitee From b139f53e4e4e4d9bbcb024a2b8029bcc71a6f63f Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:27 +0800 Subject: [PATCH 23/31] KVM: arm64: Document PV-sgi interface Introduce a paravirtualization interface for KVM/arm64 to PV-sgi. A hypercall interface is provided for the guest to interrogate the hypervisor's support for IPIV. In the previous IPIV implementation, the MPIDR value of the vCPU needs to be changed to prevent guests from sending multicast and broadcast. 
This series of bugfix patches provide a method: Add the SMCCC interface to the kernel so that the guest OS can control the enabling of IPIV. When IPIV is enabled, the guest OS uses multiple unicast to implement multicast. (Broadcasting cannot be implemented due to hardware limitations. If a guest sends a broadcast, an exception message is displayed on the host.) In this way, do not need to modify the MPIDR. Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- Documentation/virt/kvm/arm/pvsgi.rst | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 Documentation/virt/kvm/arm/pvsgi.rst diff --git a/Documentation/virt/kvm/arm/pvsgi.rst b/Documentation/virt/kvm/arm/pvsgi.rst new file mode 100644 index 000000000000..5f12a3aaccd2 --- /dev/null +++ b/Documentation/virt/kvm/arm/pvsgi.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Paravirtualized SGI support for HiSilicon +========================================== + +KVM/arm64 provides some hypervisor service calls to support a paravirtualized +SGI(software generated interrupt) in HiSilicon Hip12 SoC. + +Some SMCCC compatible hypercalls are defined: + +* PV_SGI_FEATURES: 0xC6000090 +* PV_SGI_ENABLE: 0xC6000091 + +The existence of the PV_SGI hypercall should be probed using the SMCCC 1.1 +ARCH_FEATURES mechanism before calling it. + +PV_SGI_FEATURES + + ============= ======== ========== + Function ID: (uint32) 0xC6000090 + PV_call_id: (uint32) The function to query for support. + Currently only PV_SGI_ENABLE is supported. + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant + PV-sgi feature is supported by the hypervisor. + ============= ======== ========== + +PV_SGI_ENABLE + + ============= ======== ========== + Function ID: (uint32) 0xC6000091 + Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if this feature + has been enabled. + ============= ======== ========== -- Gitee From 765dfc24baad30809faf686cb06e6cc9f85e0a2f Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:28 +0800 Subject: [PATCH 24/31] KVM: arm64: Implement PV_SGI related calls This provides a mechanism for querying whether IPIV are available in this hypervisor. Add some SMCCC compatible hypercalls for PV SGI: PV_SGI_FEATURES: 0xC6000090 PV_SGI_ENABLE: 0xC6000091 ipiv_enable is a global variable, indicating whether the hardware supports IPIV. enable_ipiv_from_vmm indicates whether the VMM (such as QEMU) enables IPIV through ioctl. enable_ipiv_from_guest indicates whether the guest OS enables IPIV through the SMCCC interface. 
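From the guest's point of view the call sequence boils down to the
following sketch (pv_sgi_probe_and_enable() is an assumed name; the
SMCCC macros are the ones added by this patch, and the query argument
follows the PV_SGI_FEATURES ABI documented in pvsgi.rst):

    #include <linux/arm-smccc.h>
    #include <linux/types.h>

    static bool pv_sgi_probe_and_enable(void)
    {
            struct arm_smccc_res res;

            /* Ask the hypervisor whether PV_SGI_ENABLE is implemented */
            arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_FEATURES,
                                 ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res);
            if (res.a0 != SMCCC_RET_SUCCESS)
                    return false;

            /* Opt in; afterwards the guest must only send unicast SGIs */
            arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res);
            return res.a0 == SMCCC_RET_SUCCESS;
    }
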
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/hisilicon/hisi_virt.c | 25 +++++++++++++++++++++++++ arch/arm64/kvm/hisilicon/hisi_virt.h | 7 +++++++ drivers/irqchip/irq-gic-v3-its.c | 16 +++++----------- include/linux/arm-smccc.h | 16 ++++++++++++++++ include/linux/irqchip/arm-gic-v4.h | 2 +- virt/kvm/arm/psci.c | 20 ++++++++++++++++++++ virt/kvm/arm/vgic/vgic-its.c | 9 +++++++++ virt/kvm/arm/vgic/vgic-mmio-v3.c | 3 --- 8 files changed, 83 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c index 57c94376a957..9522cb5b9a58 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.c +++ b/arch/arm64/kvm/hisilicon/hisi_virt.c @@ -192,6 +192,31 @@ bool hisi_ipiv_supported(void) return true; } +extern struct static_key_false ipiv_enable; + +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + /* IPIV is supported by the hardware */ + if (!static_branch_unlikely(&ipiv_enable)) + return false; + + /* vSGI passthrough is configured */ + if (!vcpu->kvm->arch.vgic.nassgireq) + return false; + + /* IPIV is enabled by the user */ + if (!vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) + return false; + + return true; +} + +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) +{ + /* Enable IPIV feature */ + vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true; +} + void ipiv_gicd_init(void) { gic_dist_enable_ipiv(); diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h index c9c198363717..4013a0c1702c 100644 --- a/arch/arm64/kvm/hisilicon/hisi_virt.h +++ b/arch/arm64/kvm/hisilicon/hisi_virt.h @@ -81,6 +81,8 @@ bool hisi_ncsnp_supported(void); bool hisi_dvmbm_supported(void); #ifdef CONFIG_ARM64_HISI_IPIV bool hisi_ipiv_supported(void); +bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu); +void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu); void ipiv_gicd_init(void); #endif /* CONFIG_ARM64_HISI_IPIV */ void kvm_get_pg_cfg(void); @@ -108,6 +110,11 @@ static inline bool hisi_ipiv_supported(void) { return false; } +static bool hisi_ipiv_supported_per_vm(struct kvm_vcpu *vcpu) +{ + return false; +} +static void hisi_ipiv_enable_per_vm(struct kvm_vcpu *vcpu) {} static inline void ipiv_gicd_init(void) {} #endif /* CONFIG_ARM64_HISI_IPIV */ static inline void kvm_get_pg_cfg(void) {} diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 70a8cc5ea00e..46b6214556fa 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -448,10 +448,6 @@ void build_devid_pools(void) } #endif -#ifdef CONFIG_ARM64_HISI_IPIV -extern struct static_key_false ipiv_enable; -#endif - /* * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we * always have vSGIs mapped. 
@@ -4442,8 +4438,7 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe, u64 ipiv_val = 0; u32 nr_vpes; - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, ipiv_val, !(ipiv_val @@ -4515,8 +4510,7 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe, } #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable) && - vm->nassgireq) { + if (vm->enable_ipiv_from_guest) { /* wait gicr_ipiv_busy */ WARN_ON_ONCE(readl_relaxed_poll_timeout_atomic(vlpi_base + GICR_IPIV_ST, val, !(val & GICR_IPIV_ST_IPIV_BUSY), @@ -4928,7 +4922,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain, its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); its_free_prop_table(vm->vprop_page); #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { free_pages((unsigned long)page_address(vm->vpeid_page), get_order(nr_irqs * 2)); } @@ -4975,7 +4969,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (gic_rdists->has_rvpeid) { irqchip = &its_vpe_4_1_irq_chip; #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { /* * The vpeid's size is 2 bytes, so we need to * allocate 2 * (num of vcpus). nr_irqs is @@ -5000,7 +4994,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq if (err) break; #ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { + if (vm->enable_ipiv_from_vmm) { vpeid_entry = (u16 *)vpeid_table_va + i; *vpeid_entry = vm->vpes[i]->vpe_id; } diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 3e6ef64e74d3..89442c0d477a 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -45,6 +45,7 @@ #define ARM_SMCCC_OWNER_SIP 2 #define ARM_SMCCC_OWNER_OEM 3 #define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_VENDOR_HYP 6 #define ARM_SMCCC_OWNER_TRUSTED_APP 48 #define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 #define ARM_SMCCC_OWNER_TRUSTED_OS 50 @@ -383,5 +384,20 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, ARM_SMCCC_OWNER_STANDARD_HYP, \ 0x21) +#ifdef CONFIG_ARM64_HISI_IPIV +/* HiSilicon paravirtualised sgi calls */ +#define ARM_SMCCC_VENDOR_PV_SGI_FEATURES \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x90) + +#define ARM_SMCCC_VENDOR_PV_SGI_ENABLE \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + 0x91) +#endif /* CONFIG_ARM64_HISI_IPIV */ + #endif /*__ASSEMBLY__*/ #endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h index f9f235ecc638..b745a4fc8b4f 100644 --- a/include/linux/irqchip/arm-gic-v4.h +++ b/include/linux/irqchip/arm-gic-v4.h @@ -29,8 +29,8 @@ struct its_vm { #ifdef CONFIG_ARM64_HISI_IPIV struct page *vpeid_page; #endif - bool nassgireq; bool enable_ipiv_from_vmm; + bool enable_ipiv_from_guest; }; /* Embedded in kvm_vcpu.arch */ diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 2f5dc7fb437b..824f8c2e3c32 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c @@ -16,6 +16,10 @@ #include +#ifdef CONFIG_ARM64_HISI_IPIV +#include "hisilicon/hisi_virt.h" +#endif + /* * This is an implementation of the Power State Coordination Interface * as described in ARM document number ARM DEN 0022A. 
@@ -426,6 +430,22 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) break; } break; +#ifdef CONFIG_ARM64_HISI_IPIV + case ARM_SMCCC_VENDOR_PV_SGI_FEATURES: + if (hisi_ipiv_supported_per_vm(vcpu)) + val = SMCCC_RET_SUCCESS; + else + val = SMCCC_RET_NOT_SUPPORTED; + break; + case ARM_SMCCC_VENDOR_PV_SGI_ENABLE: + if (hisi_ipiv_supported_per_vm(vcpu)) { + hisi_ipiv_enable_per_vm(vcpu); + val = SMCCC_RET_SUCCESS; + } else { + val = SMCCC_RET_NOT_SUPPORTED; + } + break; +#endif case ARM_SMCCC_ARCH_WORKAROUND_3: switch (kvm_arm_get_spectre_bhb_state()) { case SPECTRE_VULNERABLE: diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index a72f584254d0..a403c5f1851c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -2631,6 +2631,15 @@ static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its) its->enabled = 0; vgic_its_free_device_list(kvm, its); vgic_its_free_collection_list(kvm, its); + +#ifdef CONFIG_ARM64_HISI_IPIV + /* + * For the para-virtualization feature IPIV, ensure that + * the flag of the guest OS is reset when the guest OS is + * reset. + */ + kvm->arch.vgic.its_vm.enable_ipiv_from_guest = false; +#endif } static int vgic_its_has_attr(struct kvm_device *dev, diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 60a0f7ed997e..9f58d383dbc3 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -129,9 +129,6 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu, /* Switching HW SGIs? */ dist->nassgireq = val & GICD_CTLR_nASSGIreq; -#ifdef CONFIG_ARM64_HISI_IPIV - dist->its_vm.nassgireq = dist->nassgireq; -#endif if (is_hwsgi != dist->nassgireq) vgic_v4_configure_vsgis(vcpu->kvm); -- Gitee From afd87e7294b30cd41c11bfb0c35f40cfe6790152 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Fri, 23 May 2025 20:45:29 +0800 Subject: [PATCH 25/31] irqchip/gic: Add HiSilicon PV SGI support Use the smccc interface to enable ipiv for the guest OS, modify the guest kernel, and use multiple unicasts to implement group boradcast. In this way, do not need to modify the MPIDR. In addition, the MPIDR modification is deleted, and the GICD configuration is modified. The hardware uses the mpidr to calculate the corresponding vCPU ID to lookup vpeid table. 
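Conceptually, the change to gic_raise_softirq() below amounts to the
following sketch (a paraphrase of the diff that reuses the existing
GICv3 driver helpers, not new code):

    /* With PV SGI enabled, each CPU in the mask gets its own
     * ICC_SGI1R_EL1 write whose target list carries a single bit,
     * so only unicast vSGIs ever reach the hardware. */
    for_each_cpu(cpu, mask) {
            u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
            u16 tlist = 1 << (gic_mpidr_to_affinity(cpu) & 0xf);

            gic_send_sgi(cluster_id, tlist, irq);
    }
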
Signed-off-by: Zhou Wang Signed-off-by: Jinqian Yang Signed-off-by: Xie Xiaodong <624338359@qq.com> --- arch/arm64/kvm/sys_regs.c | 15 ------------ drivers/irqchip/irq-gic-v3.c | 44 ++++++++++++++++++++++++++++++++++-- 2 files changed, 42 insertions(+), 17 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index ebfcb2c7e6bf..2deb85a8e532 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -610,10 +610,6 @@ static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); } -#ifdef CONFIG_ARM64_HISI_IPIV -extern struct static_key_false ipiv_enable; -#endif - static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 mpidr; @@ -629,17 +625,6 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); -#ifdef CONFIG_ARM64_HISI_IPIV - if (static_branch_unlikely(&ipiv_enable)) { - /* - * To avoid sending multi-SGIs in guest OS, - * make aff1/aff2 unique - */ - mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(1); - mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(2); - } -#endif - vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fab64393994d..c81153d8cd60 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -31,6 +31,10 @@ #include "irq-gic-common.h" +#ifdef CONFIG_ARM64_HISI_IPIV +#include +#endif + #define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) @@ -89,8 +93,11 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); #ifdef CONFIG_ARM64_HISI_IPIV +/* indicate if host supports IPIv */ DEFINE_STATIC_KEY_FALSE(ipiv_enable); EXPORT_SYMBOL(ipiv_enable); +/* indicate if guest is using IPIv */ +static bool hisi_pv_sgi_enabled; #endif /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ @@ -1256,8 +1263,8 @@ void gic_dist_enable_ipiv(void) static_branch_enable(&ipiv_enable); val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) | - (0 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | - (4 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | + (4 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) | + (12 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) | (7 << GICD_IPIV_CTRL_VM_TABLE_INNERCACHE_SHIFT) | (2 << GICD_IPIV_CTRL_VM_TABLE_SHAREABILITY_SHIFT); writel_relaxed(val, gic_data.dist_base + GICD_IPIV_CTRL); @@ -1374,7 +1381,14 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); u16 tlist; +#ifdef CONFIG_ARM64_HISI_IPIV + if (!hisi_pv_sgi_enabled) + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + else + tlist = 1 << (gic_mpidr_to_affinity(cpu) & 0xf); +#else tlist = gic_compute_target_list(&cpu, mask, cluster_id); +#endif gic_send_sgi(cluster_id, tlist, irq); } @@ -2329,6 +2343,28 @@ static void __init gic_acpi_setup_kvm_info(void) gic_set_kvm_info(&gic_v3_kvm_info); } +#ifdef CONFIG_ARM64_HISI_IPIV +static void hisi_pv_sgi_init(void) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_FEATURES, &res); + if (res.a0 != SMCCC_RET_SUCCESS) { + pr_info("Not Support HiSilicon PV SGI!\n"); + return; + } + + arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res); + if 
+		pr_info("Disable HiSilicon PV SGI!\n");
+		return;
+	}
+
+	hisi_pv_sgi_enabled = true;
+	pr_info("Enable HiSilicon PV SGI!\n");
+}
+#endif
+
 static int __init
 gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
 {
@@ -2380,6 +2416,10 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
 	if (static_branch_likely(&supports_deactivate_key))
 		gic_acpi_setup_kvm_info();
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+	hisi_pv_sgi_init();
+#endif
+
 	return 0;
 
 out_fwhandle_free:
-- 
Gitee

From b0a65b1935ff5762cb366916616d653081b04ae1 Mon Sep 17 00:00:00 2001
From: Jinqian Yang
Date: Fri, 23 May 2025 20:45:30 +0800
Subject: [PATCH 26/31] KVM: arm64: fix live migration bug of IPIv

GITS_IIDR bit7 is used to store whether IPIV is enabled for the guest
OS, to ensure that enable_ipiv_from_guest is the same before and after
live migration.

Signed-off-by: Zhou Wang
Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 virt/kvm/arm/vgic/vgic-its.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index a403c5f1851c..7b93407552b8 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -495,6 +495,14 @@ static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
 	return extract_bytes(reg, addr & 7, len);
 }
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+/*
+ * Use bit7 not used by GITS_IIDR to indicate whether IPIV is
+ * enabled for guest OS.
+ */
+#define HISI_GUEST_ENABLE_IPIV_SHIFT 7
+#endif
+
 static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
 					     struct vgic_its *its,
 					     gpa_t addr, unsigned int len)
@@ -515,6 +523,12 @@ static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
 
 	if (rev >= NR_ITS_ABIS)
 		return -EINVAL;
+
+#ifdef CONFIG_ARM64_HISI_IPIV
+	if (val & (1UL << HISI_GUEST_ENABLE_IPIV_SHIFT))
+		kvm->arch.vgic.its_vm.enable_ipiv_from_guest = true;
+#endif
+
 	return vgic_its_set_abi(its, rev);
 }
 
@@ -2023,6 +2037,11 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
 		region->its_write(dev->kvm, its, addr, len, *reg);
 	} else {
 		*reg = region->its_read(dev->kvm, its, addr, len);
+#ifdef CONFIG_ARM64_HISI_IPIV
+		if (dev->kvm->arch.vgic.its_vm.enable_ipiv_from_guest &&
+		    offset == GITS_IIDR)
+			*reg |= 1UL << HISI_GUEST_ENABLE_IPIV_SHIFT;
+#endif
 	}
 	unlock_all_vcpus(dev->kvm);
 out:
-- 
Gitee

From 35fe68a3481eede0623639a7d7daea94b5f2b7cb Mon Sep 17 00:00:00 2001
From: Zhou Wang
Date: Fri, 23 May 2025 20:45:31 +0800
Subject: [PATCH 27/31] kvm: hisi: make sure vcpu_id and vcpu_idx have same value in IPIv

When the VM's vgic is initialized, vpeids are written to the vpeid
table in vcpu_idx order. However, the hardware looks up the vpeid
table by vcpu_id. Therefore, ensure that vcpu_idx and vcpu_id are the
same when IPIV is used.
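
For illustration, a hypothetical VMM-side sketch (not part of this
patch) of how userspace keeps the two values identical: vcpu_idx is
assigned in creation order, so creating vCPUs in ascending order with
the KVM_CREATE_VCPU argument equal to the creation index keeps
vcpu_id == vcpu_idx.

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Illustrative helper, not an existing API: create vCPU i with id i. */
  static int create_vcpus_in_order(int vm_fd, int nr_vcpus, int *vcpu_fds)
  {
          for (int i = 0; i < nr_vcpus; i++) {
                  vcpu_fds[i] = ioctl(vm_fd, KVM_CREATE_VCPU, i);
                  if (vcpu_fds[i] < 0)
                          return -1;
          }
          return 0;
  }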
Signed-off-by: Zhou Wang
Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 virt/kvm/arm/arm.c | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 9a3ae40c6740..ae570fc62d68 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1084,6 +1084,16 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
 		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
 	}
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+	if (static_branch_unlikely(&ipiv_enable) &&
+	    vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm &&
+	    vcpu->vcpu_id != vcpu->vcpu_idx) {
+		kvm_err("IPIV ERROR: vcpu_id %d != vcpu_idx %d\n",
+			vcpu->vcpu_id, vcpu->vcpu_idx);
+		return -EINVAL;
+	}
+#endif
+
 	return ret;
 }
 
-- 
Gitee

From 81d8fa4d5bfa09541f22b4440330fc3a812d9c3f Mon Sep 17 00:00:00 2001
From: Zhou Wang
Date: Fri, 23 May 2025 20:45:32 +0800
Subject: [PATCH 28/31] kvm: hisi: Don't allow to change mpidr in IPIv

IPIV uses the MPIDR value to convey the vpeid to hardware, so don't
allow it to be changed.

Signed-off-by: Zhou Wang
Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 arch/arm64/kvm/sys_regs.c | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2deb85a8e532..8f9af179c03d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1350,6 +1350,34 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	.set_user = set_raz_id_reg,		\
 }
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+extern struct static_key_false ipiv_enable;
+static int set_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+		     const struct kvm_one_reg *reg, void __user *uaddr)
+{
+	const u64 id = sys_reg_to_index(rd);
+	int err;
+	u64 val;
+
+	err = reg_from_user(&val, uaddr, id);
+	if (err)
+		return err;
+
+	if (static_branch_unlikely(&ipiv_enable) &&
+	    vcpu->kvm->arch.vgic.its_vm.enable_ipiv_from_vmm) {
+		if (val != __vcpu_sys_reg(vcpu, rd->reg)) {
+			kvm_err("IPIV ERROR: MPIDR changed\n");
+			return -EINVAL;
+		}
+	}
+
+	__vcpu_sys_reg(vcpu, rd->reg) = val;
+
+	return 0;
+
+}
+#endif
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -1401,7 +1429,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 
 	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+	{ SYS_DESC(SYS_MPIDR_EL1),
+	  .reset = reset_mpidr, .reg = MPIDR_EL1, .set_user = set_mpidr},
+#else
 	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
+#endif
 
 	/*
 	 * ID regs: all ID_SANITISED() entries here must have corresponding
-- 
Gitee

From 2570ff2487c9d485aae796379348a204c07f2c22 Mon Sep 17 00:00:00 2001
From: Jinqian Yang
Date: Fri, 23 May 2025 20:45:33 +0800
Subject: [PATCH 29/31] KVM: arm64: using kvm_vgic_global_state for ipiv

When kvm-arm.vgic_v4_enable=0 and kvm-arm.ipiv_enabled=1 are configured
on the cmdline, the host KVM displays "ipiv enabled", but ipiv is not
actually enabled. gic_data.rdists.has_rvpeid is hardware-level
information and does not indicate that GICv4.1 is enabled for KVM. So
whether the host supports IPIV depends on whether KVM has enabled
GICv4.1, not on whether the hardware supports GICv4.1.
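
A condensed sketch of the intended gating, simplified from the
hisi_ipiv_supported() change below (the helper name ipiv_usable() is
illustrative, not literal kernel code):

  /* IPIV is only usable when KVM itself has GICv4.1 enabled, not merely
   * when the GIC hardware advertises RVPEID.
   */
  static bool ipiv_usable(void)
  {
          /* kvm-arm.ipiv_enabled=1 and a VHE host are still required */
          if (!ipiv_enabled || !is_kernel_in_hyp_mode())
                  return false;

          /* false when e.g. kvm-arm.vgic_v4_enable=0, even on GICv4.1 HW */
          return kvm_vgic_global_state.has_gicv4_1;
  }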
Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 arch/arm64/kvm/hisilicon/hisi_virt.c |  2 +-
 arch/arm64/kvm/hisilicon/hisi_virt.h |  1 -
 drivers/irqchip/irq-gic-v3.c         |  9 ---------
 virt/kvm/arm/arm.c                   | 11 -----------
 virt/kvm/arm/vgic/vgic-init.c        | 10 ++++++++++
 5 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c
index 9522cb5b9a58..765bc6b52e00 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.c
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.c
@@ -183,7 +183,7 @@ bool hisi_ipiv_supported(void)
 		return false;
 
 	/* Enable IPIV feature if necessary */
-	if (!is_gicv4p1()) {
+	if (!kvm_vgic_global_state.has_gicv4_1) {
 		kvm_info("Hisi ipiv needs to enable GICv4p1!\n");
 		return false;
 	}
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h
index 4013a0c1702c..e8780e48123b 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.h
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.h
@@ -136,7 +136,6 @@ static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {}
 
 #ifdef CONFIG_ARM64_HISI_IPIV
 extern bool gic_dist_enable_ipiv(void);
-extern bool is_gicv4p1(void);
 #endif /* CONFIG_ARM64_HISI_IPIV */
 
 #endif /* __HISI_VIRT_H__ */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c81153d8cd60..fb9c96ad03e1 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1244,15 +1244,6 @@ static int gic_dist_supports_lpis(void)
 }
 
 #ifdef CONFIG_ARM64_HISI_IPIV
-bool is_gicv4p1(void)
-{
-	if (!gic_data.rdists.has_rvpeid)
-		return false;
-
-	return true;
-}
-EXPORT_SYMBOL(is_gicv4p1);
-
 void gic_dist_enable_ipiv(void)
 {
 	u32 val;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index ae570fc62d68..7f402a711286 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1834,23 +1834,12 @@ int kvm_arch_init(void *opaque)
 
 	kvm_ncsnp_support = hisi_ncsnp_supported();
 	kvm_dvmbm_support = hisi_dvmbm_supported();
-#ifdef CONFIG_ARM64_HISI_IPIV
-	kvm_ipiv_support = hisi_ipiv_supported();
-#endif
 	kvm_info("KVM ncsnp %s\n", kvm_ncsnp_support ? "enabled" : "disabled");
 	kvm_info("KVM dvmbm %s\n", kvm_dvmbm_support ? "enabled" : "disabled");
-#ifdef CONFIG_ARM64_HISI_IPIV
-	kvm_info("KVM ipiv %s\n", kvm_ipiv_support ? "enabled" : "disabled");
-#endif
 
 	if (kvm_dvmbm_support)
 		kvm_get_pg_cfg();
 
-#ifdef CONFIG_ARM64_HISI_IPIV
-	if (kvm_ipiv_support)
-		ipiv_gicd_init();
-#endif
-
 	in_hyp_mode = is_kernel_in_hyp_mode();
 
 	if (!in_hyp_mode && kvm_arch_requires_vhe()) {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 7075875f48bf..6bc397550e8d 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -12,6 +12,10 @@
 #include
 #include "vgic.h"
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+#include "hisilicon/hisi_virt.h"
+#endif
+
 /*
  * Initialization rules: there are multiple stages to the vgic
  * initialization, both for the distributor and the CPU interfaces.  The basic
@@ -584,6 +588,12 @@ int kvm_vgic_hyp_init(void)
 	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
 
 #ifdef CONFIG_ARM64_HISI_IPIV
+	if (hisi_ipiv_supported()) {
+		ipiv_gicd_init();
+		kvm_info("KVM ipiv enabled\n");
+	} else {
+		kvm_info("KVM ipiv disabled\n");
+	}
 	if (static_branch_unlikely(&ipiv_enable)) {
 		ipiv_irq = acpi_register_gsi(NULL, 18, ACPI_EDGE_SENSITIVE,
 					     ACPI_ACTIVE_HIGH);
-- 
Gitee

From 63aa8a898a7a4f09e9b6e0cf6fc33abde46deddb Mon Sep 17 00:00:00 2001
From: Jinqian Yang
Date: Fri, 23 May 2025 20:45:34 +0800
Subject: [PATCH 30/31] kvm: hisi: print error for IPIV

Display detailed IPIV error causes based on hardware information:
1. Guest sends SGI with IRM=1.
2. Guest sends multicast.
3. The index of VM table exceeds the supported range.

Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 drivers/irqchip/irq-gic-v3-its.c   |  8 ++++++++
 include/linux/irqchip/arm-gic-v3.h |  6 ++++++
 virt/kvm/arm/vgic/vgic-init.c      | 23 +++++++++++++++++++++--
 3 files changed, 35 insertions(+), 2 deletions(-)

diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 46b6214556fa..d5b13c737d7f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -428,6 +428,14 @@ static int alloc_devid_from_rsv_pools(struct rsv_devid_pool **devid_pool,
 #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()	(gic_data_rdist_rd_base() + SZ_128K)
 
+#ifdef CONFIG_ARM64_HISI_IPIV
+void __iomem *gic_data_rdist_get_vlpi_base(void)
+{
+	return gic_data_rdist_vlpi_base();
+}
+EXPORT_SYMBOL(gic_data_rdist_get_vlpi_base);
+#endif
+
 #ifdef CONFIG_VIRT_PLAT_DEV
 /*
  * Currently we only build *one* devid pool.
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 6c0380953fbe..f820dfff04fa 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -391,6 +391,12 @@
 #define GICR_IPIV_ST			0x14c
 #define GICR_IPIV_ST_IPIV_BUSY_SHIFT	0
 #define GICR_IPIV_ST_IPIV_BUSY		(1 << GICR_IPIV_ST_IPIV_BUSY_SHIFT)
+#define GICR_IPIV_ST_IRM_ERR_ST_SHIFT	1
+#define GICR_IPIV_ST_IRM_ERR		(1 << GICR_IPIV_ST_IRM_ERR_ST_SHIFT)
+#define GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT	2
+#define GICR_IPIV_ST_BRPBRD_ERR		(1 << GICR_IPIV_ST_BRPBRD_ERR_ST_SHIFT)
+#define GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT	3
+#define GICR_IPIV_ST_VCPUIDX_ERR	(1 << GICR_IPIV_ST_VCPUIDX_ERR_ST_SHIFT)
 #endif /* CONFIG_ARM64_HISI_IPIV */
 
 /*
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 6bc397550e8d..98372e9c75bc 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -13,6 +13,7 @@
 #include "vgic.h"
 
 #ifdef CONFIG_ARM64_HISI_IPIV
+#include
 #include "hisilicon/hisi_virt.h"
 #endif
 
@@ -236,7 +237,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 			/* PPIs */
 			irq->config = VGIC_CONFIG_LEVEL;
 		}
-
+
 #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS
 		/* Needed? */
 		irq->vtimer_info = NULL;
@@ -492,9 +493,27 @@ static int vgic_init_cpu_dying(unsigned int cpu)
 }
 
 #ifdef CONFIG_ARM64_HISI_IPIV
+extern void __iomem *gic_data_rdist_get_vlpi_base(void);
 static irqreturn_t vgic_ipiv_irq_handler(int irq, void *data)
 {
-	kvm_info("IPIV irq handler!\n");
+	void __iomem *vlpi_base = gic_data_rdist_get_vlpi_base();
+	u32 gicr_ipiv_st;
+	bool broadcast_err, grpbrd_err, vcpuidx_err;
+
+	gicr_ipiv_st = readl_relaxed(vlpi_base + GICR_IPIV_ST);
+
+	broadcast_err = !!(gicr_ipiv_st & GICR_IPIV_ST_IRM_ERR);
+	if (broadcast_err)
+		kvm_err("IPIV error: IRM=1 Guest broadcast error\n");
+
+	grpbrd_err = !!(gicr_ipiv_st & GICR_IPIV_ST_BRPBRD_ERR);
+	if (grpbrd_err)
+		kvm_err("IPIV error: Guest group broadcast error\n");
+
+	vcpuidx_err = !!(gicr_ipiv_st & GICR_IPIV_ST_VCPUIDX_ERR);
+	if (vcpuidx_err)
+		kvm_err("IPIV error: The VCPU index is out of range\n");
+
 	return IRQ_HANDLED;
 }
 #endif
-- 
Gitee

From c9662ce0c8873212471e1ed3e6d67256009d80d7 Mon Sep 17 00:00:00 2001
From: Jinqian Yang
Date: Tue, 17 Jun 2025 19:31:02 +0800
Subject: [PATCH 31/31] KVM: arm64: check if IPIV is enabled in BIOS

GICD_MISC_CTRL bit19 (cfg_ipiv_en) is read-only at EL2; writing
bit19=1 is done by the BIOS. Therefore, the OS needs to check whether
the BIOS has enabled ipiv.

Fixes: 55a83889a45e ("kvm: hisi_virt: Probe and configure IPIV capacity on HIP12")
Signed-off-by: Jinqian Yang
Signed-off-by: Xie Xiaodong <624338359@qq.com>
---
 arch/arm64/kvm/hisilicon/hisi_virt.c |  5 +++++
 arch/arm64/kvm/hisilicon/hisi_virt.h |  1 +
 drivers/irqchip/irq-gic-v3.c         | 16 ++++++++++++----
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.c b/arch/arm64/kvm/hisilicon/hisi_virt.c
index 765bc6b52e00..4dc80f4e773c 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.c
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.c
@@ -178,6 +178,11 @@ bool hisi_ipiv_supported(void)
 		return false;
 	}
 
+	if (!gic_get_ipiv_status()) {
+		kvm_info("Hisi ipiv is disabled by BIOS\n");
+		return false;
+	}
+
 	/* User provided kernel command-line parameter */
 	if (!ipiv_enabled || !is_kernel_in_hyp_mode())
 		return false;
diff --git a/arch/arm64/kvm/hisilicon/hisi_virt.h b/arch/arm64/kvm/hisilicon/hisi_virt.h
index e8780e48123b..fcf9b763940c 100644
--- a/arch/arm64/kvm/hisilicon/hisi_virt.h
+++ b/arch/arm64/kvm/hisilicon/hisi_virt.h
@@ -136,6 +136,7 @@ static inline void kvm_hisi_reload_lsudvmbm(struct kvm *kvm) {}
 
 #ifdef CONFIG_ARM64_HISI_IPIV
 extern bool gic_dist_enable_ipiv(void);
+extern bool gic_get_ipiv_status(void);
 #endif /* CONFIG_ARM64_HISI_IPIV */
 
 #endif /* __HISI_VIRT_H__ */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fb9c96ad03e1..185bef735825 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1248,11 +1248,7 @@ void gic_dist_enable_ipiv(void)
 {
 	u32 val;
 
-	val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL);
-	val |= GICD_MISC_CTRL_CFG_IPIV_EN;
-	writel_relaxed(val, gic_data.dist_base + GICD_MISC_CTRL);
 	static_branch_enable(&ipiv_enable);
-
 	val = (0 << GICD_IPIV_CTRL_AFF_DIRECT_VPEID_SHIFT) |
 	      (4 << GICD_IPIV_CTRL_AFF1_LEFT_SHIFT_SHIFT) |
 	      (12 << GICD_IPIV_CTRL_AFF2_LEFT_SHIFT_SHIFT) |
@@ -1264,6 +1260,18 @@ void gic_dist_enable_ipiv(void)
 	writel_relaxed(0x4880, gic_data.dist_base + GICD_IPIV_ITS_TA_BASE);
 }
 EXPORT_SYMBOL(gic_dist_enable_ipiv);
+
+bool gic_get_ipiv_status(void)
+{
+	u32 val;
+
+	val = readl_relaxed(gic_data.dist_base + GICD_MISC_CTRL);
+	if (val & GICD_MISC_CTRL_CFG_IPIV_EN)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(gic_get_ipiv_status);
 #endif /* CONFIG_ARM64_HISI_IPIV */
 
 static void gic_cpu_init(void)
-- 
Gitee
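
For reference, a condensed sketch of the resulting guest-side handshake
(the same SMCCC calls used by hisi_pv_sgi_init() in this series;
simplified and for illustration only, not a patch hunk):

  static bool guest_probe_pv_sgi(void)
  {
          struct arm_smccc_res res;

          /* Does the host KVM implement the vendor PV SGI service? */
          arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_FEATURES, &res);
          if (res.a0 != SMCCC_RET_SUCCESS)
                  return false;

          /* Ask the host to enable IPIV for this VM */
          arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_PV_SGI_ENABLE, &res);
          return res.a0 == SMCCC_RET_SUCCESS;
  }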