diff --git a/Documentation/arch/arm64/booting.rst b/Documentation/arch/arm64/booting.rst
index 408d2e27b64182a8c84b8b3ec3629857b2b011f0..b540e0933ddebc90d6f7d82e3fbe90ba05c3d4d6 100644
--- a/Documentation/arch/arm64/booting.rst
+++ b/Documentation/arch/arm64/booting.rst
@@ -438,6 +438,18 @@ Before jumping into the kernel, the following conditions must be met:
 
     - HCRX_EL2.TALLINT must be initialised to 0b0.
 
+  For CPUs with support for 64-byte loads and stores without status (FEAT_LS64):
+
+  - If the kernel is entered at EL1 and EL2 is present:
+
+    - HCRX_EL2.EnALS (bit 1) must be initialised to 0b1.
+
+  For CPUs with support for 64-byte loads and stores with status (FEAT_LS64_V):
+
+  - If the kernel is entered at EL1 and EL2 is present:
+
+    - HCRX_EL2.EnASR (bit 2) must be initialised to 0b1.
+
 The requirements described above for CPU mode, caches, MMUs, architected
 timers, coherency and system registers apply to all CPUs.  All CPUs must
 enter the kernel in the same exception level.  Where the values documented
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index f88a24d621dd437dbfdfbe0d4f5af673f0d5fd53..c6e988a07c54f9022851e15ddfb8c95d6182e33d 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -16,9 +16,9 @@ architected discovery mechanism available to userspace code at EL0. The
 kernel exposes the presence of these features to userspace through a set
 of flags called hwcaps, exposed in the auxiliary vector.
 
-Userspace software can test for features by acquiring the AT_HWCAP or
-AT_HWCAP2 entry of the auxiliary vector, and testing whether the relevant
-flags are set, e.g.::
+Userspace software can test for features by acquiring the AT_HWCAP,
+AT_HWCAP2 or AT_HWCAP3 entry of the auxiliary vector, and testing
+whether the relevant flags are set, e.g.::
 
 	bool floating_point_is_present(void)
 	{
@@ -320,6 +320,12 @@ HWCAP2_MOPS
 HWCAP2_HBC
     Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.
 
+HWCAP3_LS64
+    Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0001.
+
+HWCAP3_LS64_V
+    Functionality implied by ID_AA64ISAR1_EL1.LS64 == 0b0010.
+
 4. Unused AT_HWCAP bits
 -----------------------
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1eb959318ecf14e1aca67e886e19ab1149f0a6fd..dabbbb0e012caf233ce68738aef5a765468eb979 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -122,6 +122,7 @@ config ARM64
 	select ARM_GIC_V3
 	select ARM_GIC_V3_ITS if PCI
 	select ARM_PSCI_FW
+	select ARM64_LS64 if !FUNCTION_ALIGNMENT_64B
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -2328,6 +2329,9 @@ config ARM64_TWED
 	help
 	  Delayed Trapping of WFE (part of the ARMv8.6 Extensions)
 
+config ARM64_LS64
+	bool
+
 endmenu
 
 menu "ARMv8.7 architectural features"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index a58c8b332b21d0d45611dfdce018a2a44ae325e3..dd758b04fd54dcdc99a95bca75c95cde2fc03160 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -12,7 +12,7 @@
 #include <asm/hwcap.h>
 #include <asm/sysreg.h>
 
-#define MAX_CPU_FEATURES	128
+#define MAX_CPU_FEATURES	192
 #define cpu_feature(x)		KERNEL_HWCAP_ ## x
 
 #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR	0
@@ -434,6 +434,7 @@ void cpu_set_feature(unsigned int num);
 bool cpu_have_feature(unsigned int num);
 unsigned long cpu_get_elf_hwcap(void);
 unsigned long cpu_get_elf_hwcap2(void);
+unsigned long cpu_get_elf_hwcap3(void);
 
 #define cpu_set_named_feature(name) cpu_set_feature(cpu_feature(name))
 #define cpu_have_named_feature(name) cpu_have_feature(cpu_feature(name))
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index e7cae1452098c165418995524963999181a7dcce..6f65322e2fd07a95828ddd4236d2b8551d806f6b 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -27,6 +27,19 @@
 	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
 	cbz	x0, .Lskip_hcrx_\@
 	mov_q	x0, HCRX_HOST_FLAGS
+
+#ifdef CONFIG_ARM64_LS64
+	/* Enable LS64, LS64_V if supported */
+	mrs_s	x1, SYS_ID_AA64ISAR1_EL1
+	ubfx	x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4
+	cbz	x1, .Lset_hcrx_\@
+	orr	x0, x0, #HCRX_EL2_EnALS
+	cmp	x1, #ID_AA64ISAR1_EL1_LS64_LS64_V
+	b.lt	.Lset_hcrx_\@
+	orr	x0, x0, #HCRX_EL2_EnASR
+#endif
+
+.Lset_hcrx_\@:
 	msr_s	SYS_HCRX_EL2, x0
 .Lskip_hcrx_\@:
 .endm
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 2e914609db9360b62bcbbdf6e39fed4d21b30ee0..b890e5edaca4460075e0d72fb141629fca4f7abc 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -140,15 +140,20 @@
 #define KERNEL_HWCAP_MOPS	__khwcap2_feature(MOPS)
 #define KERNEL_HWCAP_HBC	__khwcap2_feature(HBC)
 
+#define __khwcap3_feature(x)	(const_ilog2(HWCAP3_ ## x) + 128)
+#define KERNEL_HWCAP_LS64	__khwcap3_feature(LS64)
+#define KERNEL_HWCAP_LS64_V	__khwcap3_feature(LS64_V)
+
 /*
  * This yields a mask that user programs can use to figure out what
  * instruction set this cpu supports.
 */
 #define ELF_HWCAP	cpu_get_elf_hwcap()
 #define ELF_HWCAP2	cpu_get_elf_hwcap2()
+#define ELF_HWCAP3	cpu_get_elf_hwcap3()
 
 #ifdef CONFIG_AARCH32_EL0
-extern unsigned int a32_elf_hwcap, a32_elf_hwcap2;
+extern unsigned int a32_elf_hwcap, a32_elf_hwcap2, a32_elf_hwcap3;
 #endif
 
 enum {
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 53026f45a5092c4a7c0204cd852f128e8ca037f7..4cabccfd7d842963f9a68f21e3e5050caea63e24 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -105,4 +105,10 @@
 #define HWCAP2_MOPS		(1UL << 43)
 #define HWCAP2_HBC		(1UL << 44)
 
+/*
+ * HWCAP3 flags - for AT_HWCAP3
+ */
+#define HWCAP3_LS64		(1UL << 0)
+#define HWCAP3_LS64_V		(1UL << 1)
+
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 38eaa358c76b725b5e7aeb8a2d90b2f35e97c760..5c3d206785bdf89e2ab92e0a445fdbe5eada668b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -105,6 +105,7 @@ static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;
 				 COMPAT_HWCAP_LPAE)
 unsigned int a32_elf_hwcap __read_mostly = AARCH32_EL0_ELF_HWCAP_DEFAULT;
 unsigned int a32_elf_hwcap2 __read_mostly;
+unsigned int a32_elf_hwcap3 __read_mostly;
 #endif
 
 DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
@@ -200,6 +201,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LS64_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
@@ -2133,6 +2135,40 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
 static bool enable_pseudo_nmi;
 #endif
 
+#ifdef CONFIG_ARM64_LS64
+static bool has_ls64(const struct arm64_cpu_capabilities *entry, int __unused)
+{
+	u64 ls64;
+
+	ls64 = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+					   entry->field_pos, entry->sign);
+
+	if (ls64 == ID_AA64ISAR1_EL1_LS64_NI ||
+	    ls64 > ID_AA64ISAR1_EL1_LS64_LS64_ACCDATA)
+		return false;
+
+	if (entry->capability == ARM64_HAS_LS64 &&
+	    ls64 >= ID_AA64ISAR1_EL1_LS64_LS64)
+		return true;
+
+	if (entry->capability == ARM64_HAS_LS64_V &&
+	    ls64 >= ID_AA64ISAR1_EL1_LS64_LS64_V)
+		return true;
+
+	return false;
+}
+
+static void cpu_enable_ls64(struct arm64_cpu_capabilities const *cap)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnALS, SCTLR_EL1_EnALS);
+}
+
+static void cpu_enable_ls64_v(struct arm64_cpu_capabilities const *cap)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnASR, SCTLR_EL1_EnASR);
+}
+#endif
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static int __init early_enable_pseudo_nmi(char *p)
 {
@@ -2977,6 +3013,24 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = has_xint_support,
 	},
+#endif
+#ifdef CONFIG_ARM64_LS64
+	{
+		.desc = "LS64",
+		.capability = ARM64_HAS_LS64,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_ls64,
+		.cpu_enable = cpu_enable_ls64,
+		ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64)
+	},
+	{
+		.desc = "LS64_V",
+		.capability = ARM64_HAS_LS64_V,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_ls64,
+		.cpu_enable = cpu_enable_ls64_v,
+		ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
+	},
 #endif
 	{},
 };
@@ -3086,6 +3140,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(ID_AA64ISAR1_EL1, BF16, EBF16, CAP_HWCAP, KERNEL_HWCAP_EBF16),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, DGH, IMP, CAP_HWCAP, KERNEL_HWCAP_DGH),
 	HWCAP_CAP(ID_AA64ISAR1_EL1, I8MM, IMP, CAP_HWCAP, KERNEL_HWCAP_I8MM),
+	HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64, CAP_HWCAP, KERNEL_HWCAP_LS64),
+	HWCAP_CAP(ID_AA64ISAR1_EL1, LS64, LS64_V, CAP_HWCAP, KERNEL_HWCAP_LS64_V),
 	HWCAP_CAP(ID_AA64MMFR2_EL1, AT, IMP, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(ID_AA64PFR0_EL1, SVE, IMP, CAP_HWCAP, KERNEL_HWCAP_SVE),
@@ -3601,6 +3657,11 @@ unsigned long cpu_get_elf_hwcap2(void)
 	return elf_hwcap[1];
 }
 
+unsigned long cpu_get_elf_hwcap3(void)
+{
+	return elf_hwcap[2];
+}
+
 static void __init setup_system_capabilities(void)
 {
 	/*
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 7466b6066d8728247e7b1f87583f28e95d317fd8..dade6604747857104e6126720c3b41716a03c245 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -82,6 +82,8 @@ static const char *const hwcap_str[] = {
 	[KERNEL_HWCAP_SB]		= "sb",
 	[KERNEL_HWCAP_PACA]		= "paca",
 	[KERNEL_HWCAP_PACG]		= "pacg",
+	[KERNEL_HWCAP_LS64]		= "ls64",
+	[KERNEL_HWCAP_LS64_V]		= "ls64_v",
 	[KERNEL_HWCAP_DCPODP]		= "dcpodp",
 	[KERNEL_HWCAP_SVE2]		= "sve2",
 	[KERNEL_HWCAP_SVEAES]		= "sveaes",
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index e16fa1211e10bba2f0cb1e55e2b94ec020cc6149..56776b55b01ab666dacc0a3632f5f2e59de85658 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -242,6 +242,12 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 			hcrx &= ~clr;
 		}
 
+		if (cpus_have_final_cap(ARM64_HAS_LS64))
+			hcrx |= HCRX_EL2_EnALS;
+
+		if (cpus_have_final_cap(ARM64_HAS_LS64_V))
+			hcrx |= HCRX_EL2_EnASR;
+
 		write_sysreg_s(hcrx, SYS_HCRX_EL2);
 	}
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index f2ddced689b5f37f4c8c7d86a12e14fa76f32261..27d93050e5dad571a31e501a07bc67fadaae121a 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -110,8 +110,8 @@ WORKAROUND_HISI_HIP08_RU_PREFETCH
 WORKAROUND_HISILICON_1980005
 HAS_XCALL
 HAS_XINT
-KABI_RESERVE_3
-KABI_RESERVE_4
+HAS_LS64
+HAS_LS64_V
 KABI_RESERVE_5
 KABI_RESERVE_6
 KABI_RESERVE_7
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index fb2c8d14327ae160d0ac97b49010a7e889671363..74d90a711647a274b8770815b4bd9e45c7b49b59 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -276,6 +276,12 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
 	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
 #ifdef ELF_HWCAP2
 	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
+#endif
+#ifdef ELF_HWCAP3
+	NEW_AUX_ENT(AT_HWCAP3, ELF_HWCAP3);
+#endif
+#ifdef ELF_HWCAP4
+	NEW_AUX_ENT(AT_HWCAP4, ELF_HWCAP4);
 #endif
 	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
 	if (k_platform) {
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 96a8b13b57d969d3f54fc9b5a03d45385561b930..7419c3a41db97f57de5fedd6671afbbb7dc209ac 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -642,6 +642,12 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
 	NEW_AUX_ENT(AT_HWCAP,	ELF_HWCAP);
 #ifdef ELF_HWCAP2
 	NEW_AUX_ENT(AT_HWCAP2,	ELF_HWCAP2);
+#endif
+#ifdef ELF_HWCAP3
+	NEW_AUX_ENT(AT_HWCAP3,	ELF_HWCAP3);
+#endif
+#ifdef ELF_HWCAP4
+	NEW_AUX_ENT(AT_HWCAP4,	ELF_HWCAP4);
 #endif
 	NEW_AUX_ENT(AT_PAGESZ,	PAGE_SIZE);
 	NEW_AUX_ENT(AT_CLKTCK,	CLOCKS_PER_SEC);
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 8f0af4f626316ed2e92204ff9bf381cd14103ae9..d5ef5469e4e620f6ee97f40ce9cbbfa48e37e33c 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -80,6 +80,16 @@
 #define	ELF_HWCAP2		COMPAT_ELF_HWCAP2
 #endif
 
+#ifdef	COMPAT_ELF_HWCAP3
+#undef	ELF_HWCAP3
+#define	ELF_HWCAP3		COMPAT_ELF_HWCAP3
+#endif
+
+#ifdef	COMPAT_ELF_HWCAP4
+#undef	ELF_HWCAP4
+#define	ELF_HWCAP4		COMPAT_ELF_HWCAP4
+#endif
+
 #ifdef	COMPAT_ARCH_DLINFO
 #undef	ARCH_DLINFO
 #define	ARCH_DLINFO		COMPAT_ARCH_DLINFO
diff --git a/include/uapi/linux/auxvec.h b/include/uapi/linux/auxvec.h
index 6991c4b8ab189d25f2cda92661f7da4a8a558a94..cc61cb9b3e9af86a41b50d5221a1b94f4d6591bb 100644
--- a/include/uapi/linux/auxvec.h
+++ b/include/uapi/linux/auxvec.h
@@ -32,6 +32,8 @@
 #define AT_HWCAP2 26	/* extension of AT_HWCAP */
 #define AT_RSEQ_FEATURE_SIZE	27	/* rseq supported feature size */
 #define AT_RSEQ_ALIGN		28	/* rseq allocation alignment */
+#define AT_HWCAP3 29	/* extension of AT_HWCAP */
+#define AT_HWCAP4 30	/* extension of AT_HWCAP */
 
 #define AT_EXECFN  31	/* filename of program */
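
For reference, userspace would consume the new auxiliary-vector word in the same way the
elf_hwcaps.rst snippet consumes AT_HWCAP. Below is a minimal sketch (not part of the patch),
assuming getauxval() from <sys/auxv.h> and headers new enough to carry AT_HWCAP3 and
HWCAP3_LS64; the fallback defines simply mirror the values introduced above. getauxval()
returns 0 for auxv types the running kernel does not emit, so the probe degrades safely on
kernels without AT_HWCAP3.

	#include <stdbool.h>
	#include <sys/auxv.h>

	#ifndef AT_HWCAP3
	#define AT_HWCAP3	29		/* value added by this patch */
	#endif
	#ifndef HWCAP3_LS64
	#define HWCAP3_LS64	(1UL << 0)	/* value added by this patch */
	#endif

	/* Probe FEAT_LS64 support as reported by the kernel via AT_HWCAP3. */
	static bool ls64_is_present(void)
	{
		unsigned long hwcaps = getauxval(AT_HWCAP3);

		return hwcaps & HWCAP3_LS64;
	}

Checking HWCAP3_LS64_V in the same way covers the FEAT_LS64_V variant exposed by this patch.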