diff --git a/arch/Kconfig b/arch/Kconfig
index a4ed5d338dadf8ad635a51f5d2e2881c4307935f..93d8221cc834760d9a146fd2c09ca9979b87b50f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1533,69 +1533,4 @@ config FUNCTION_ALIGNMENT
         default 4 if FUNCTION_ALIGNMENT_4B
         default 0
 
-config ARCH_SUPPORTS_FAST_SYSCALL
-        bool
-
-config FAST_SYSCALL
-        bool "Fast Syscall support"
-        depends on ARCH_SUPPORTS_FAST_SYSCALL
-        default n
-        help
-          This enable support Fast syscall feature.
-          The svc exception handling process, which includes auxiliary
-          functions for debug/trace and core functions like
-          KPTI, has been identified as overly "lengthy".
-          This inefficiency is particularly noticeable in short syscalls
-          such as lseek() and getpid(), where the syscall function itself
-          comprises a small percentage of the total instructions executed.
-          To address this, we introduce the concept of fast syscall, a fast svc
-          exception handling path that only considers necessary features
-          such as security, context saving, and recovery.
-
-config ARCH_SUPPORTS_FAST_IRQ
-        bool
-
-config FAST_IRQ
-        bool "Fast irq support"
-        depends on ARCH_SUPPORTS_FAST_IRQ
-        default n
-        help
-          The irq handling process, which includes auxiliary
-          functions for debug/trace and core functions like
-          KPTI, interrupt time record, interrupt processing as
-          a random number source, interrupt affinity
-          modification and interrupt processing race, as well as
-          spurious and unhandled interrupt debugging, has been
-          identified as overly "lengthy".
-          To address this, we introduce the concept of fast irq,
-          a fast interrupt handling path that only considers
-          necessary features such as security, context saving
-          and recovery, which adds an lightweight interrupt processing
-          framework for latency-sensitive interrupts.
-
-config DEBUG_FEATURE_BYPASS
-        bool "Bypass debug feature in fast syscall"
-        depends on FAST_SYSCALL || FAST_IRQ
-        depends on !LOCKDEP
-        default y
-        help
-          This to bypass debug feature in fast syscall.
-          The svc exception handling process, which includes auxiliary
-          functions for debug/trace and core functions like
-          KPTI, has been identified as overly "lengthy".
-          In fast syscall we only considers necessary features.
-          Disable this config to keep debug feature in fast syscall.
-
-config SECURITY_FEATURE_BYPASS
-        bool "Bypass security feature in fast syscall"
-        depends on FAST_SYSCALL || FAST_IRQ
-        default y
-        help
-          This to bypass security feature in fast syscall.
-          The svc exception handling process, which includes auxiliary
-          functions for debug/trace and core functions like
-          KPTI, has been identified as overly "lengthy".
-          In fast syscall we only considers necessary features.
-          Disable this config to keep security feature in fast syscall.
-
 endmenu
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dabbbb0e012caf233ce68738aef5a765468eb979..29554be608e125e8b817d57c3e4affde1a45ee44 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -267,8 +267,6 @@ config ARM64
         select TRACE_IRQFLAGS_SUPPORT
         select TRACE_IRQFLAGS_NMI_SUPPORT
         select HAVE_SOFTIRQ_ON_OWN_STACK
-        select ARCH_SUPPORTS_FAST_SYSCALL if !ARM64_MTE && !KASAN_HW_TAGS
-        select ARCH_SUPPORTS_FAST_IRQ if ARM_GIC_V3 && !ARM64_MTE && !KASAN_HW_TAGS
         help
           ARM 64-bit (AArch64) Linux support.
 
@@ -427,6 +425,8 @@ source "kernel/livepatch/Kconfig"
 
 menu "Kernel Features"
 
+source "arch/arm64/Kconfig.turbo"
+
 menu "ARM errata workarounds via the alternatives framework"
 
 config AMPERE_ERRATUM_AC03_CPU_38
diff --git a/arch/arm64/Kconfig.turbo b/arch/arm64/Kconfig.turbo
new file mode 100644
index 0000000000000000000000000000000000000000..586edb89609c3bd5040a1bdddca8367d8dd8a3b9
--- /dev/null
+++ b/arch/arm64/Kconfig.turbo
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Turbo features selection"
+
+config FAST_SYSCALL
+        bool "Fast syscall handler support"
+        default n
+        select DEBUG_FEATURE_BYPASS
+        select SECURITY_FEATURE_BYPASS
+        help
+          This enables support for the fast syscall feature.
+          The svc exception handling path, which includes auxiliary
+          functions for debug/trace and core functions such as
+          KPTI, has been identified as overly lengthy.
+          The overhead is particularly noticeable in short syscalls
+          such as lseek() and getpid(), where the syscall body itself
+          accounts for a small percentage of the instructions executed.
+          To address this, the fast syscall is a trimmed svc exception
+          handling path that keeps only the necessary steps, such as
+          security checks, context saving, and context restore.
+
+config FAST_IRQ
+        bool "Fast irq handler support"
+        depends on ARM_GIC_V3
+        default n
+        select DEBUG_FEATURE_BYPASS
+        select SECURITY_FEATURE_BYPASS
+        help
+          The irq handling path, which includes auxiliary
+          functions for debug/trace and core functions such as
+          KPTI, interrupt time accounting, use of interrupts as
+          a random number source, interrupt affinity
+          updates and interrupt handling races, as well as
+          spurious and unhandled interrupt debugging, has been
+          identified as overly lengthy.
+          To address this, the fast irq is a trimmed interrupt
+          handling path that keeps only the necessary steps,
+          such as security checks, context saving and restore,
+          providing a lightweight interrupt handling framework
+          for latency-sensitive interrupts.
+
+config DEBUG_FEATURE_BYPASS
+        bool "Bypass debug features in the fast syscall/irq path"
+        depends on FAST_SYSCALL || FAST_IRQ
+        depends on !LOCKDEP
+        default n
+        help
+          This bypasses the debug features in the fast syscall path.
+          The svc exception handling path, which includes auxiliary
+          functions for debug/trace and core functions such as
+          KPTI, has been identified as overly lengthy.
+          The fast syscall path keeps only the necessary features.
+          Disable this option to keep the debug features in the fast path.
+
+config SECURITY_FEATURE_BYPASS
+        bool "Bypass security features in the fast syscall/irq path"
+        depends on !ARM64_MTE
+        depends on !KASAN_HW_TAGS
+        depends on FAST_SYSCALL || FAST_IRQ
+        default n
+        help
+          This bypasses the security features in the fast syscall path.
+          The svc exception handling path, which includes auxiliary
+          functions for debug/trace and core functions such as
+          KPTI, has been identified as overly lengthy.
+          The fast syscall path keeps only the necessary features.
+          Disable this option to keep the security features in the fast path.
+
+config ACTLR_XCALL_XINT
+        bool "Hardware XCALL and Xint support"
+        default n
+        help
+          Use offset 0x600 from the exception vector base address for
+          'svc ffff' (the hardware xcall path).
+          Use offset 0x680 from the exception vector base address for
+          the Armv8.8 NMI taken from EL0 (the hardware xint path).
+
+endmenu # "Turbo features selection"
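The 0x600 and 0x680 offsets quoted in the ACTLR_XCALL_XINT help text follow from the layout of the vectors_xcall_xint table added by the entry.S hunk later in this patch: sixteen slots of 0x80 bytes each, with xcall and xint occupying the two slots normally used for 32-bit EL0 sync/irq. A minimal compile-time sketch of that arithmetic (illustrative only, not part of the patch; the macro names below are invented):

#include <assert.h>

#define VECTOR_SLOT_SIZE        0x80    /* bytes per kernel_ventry slot */
#define XCALL_SLOT_INDEX        12      /* 13th entry of the 16-entry table */
#define XINT_SLOT_INDEX         13

static_assert(XCALL_SLOT_INDEX * VECTOR_SLOT_SIZE == 0x600,
              "xcall vector offset matches the Kconfig help text");
static_assert(XINT_SLOT_INDEX * VECTOR_SLOT_SIZE == 0x680,
              "xint vector offset matches the Kconfig help text");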
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 11500a2d3ba1be54accc50f854a451f0ee0f86cd..e800680c4530d5ec0d7aac86e33c5955d4ec1c84 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -378,6 +378,16 @@ CONFIG_LIVEPATCH_ISOLATE_KPROBE=y
 # Kernel Features
 #
 
+#
+# Turbo features selection
+#
+CONFIG_FAST_SYSCALL=y
+CONFIG_FAST_IRQ=y
+CONFIG_DEBUG_FEATURE_BYPASS=y
+CONFIG_SECURITY_FEATURE_BYPASS=y
+CONFIG_ACTLR_XCALL_XINT=y
+# end of Turbo features selection
+
 #
 # ARM errata workarounds via the alternatives framework
 #
@@ -902,10 +912,6 @@ CONFIG_HAVE_GCC_PLUGINS=y
 CONFIG_FUNCTION_ALIGNMENT_4B=y
 CONFIG_FUNCTION_ALIGNMENT_8B=y
 CONFIG_FUNCTION_ALIGNMENT=8
-CONFIG_ARCH_SUPPORTS_FAST_SYSCALL=y
-# CONFIG_FAST_SYSCALL is not set
-CONFIG_ARCH_SUPPORTS_FAST_IRQ=y
-# CONFIG_FAST_IRQ is not set
 # end of General architecture-dependent options
 
 CONFIG_RT_MUTEXES=y
diff --git a/arch/arm64/include/asm/actlr.h b/arch/arm64/include/asm/actlr.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a44b6876e81df7c69a7806ff15bca3f8ec661c2
--- /dev/null
+++ b/arch/arm64/include/asm/actlr.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 - Huawei Ltd.
+ */
+
+#ifndef __ASM_ACTLR_H
+#define __ASM_ACTLR_H
+
+#define ACTLR_ELx_XCALL_SHIFT   20
+#define ACTLR_ELx_XCALL         (UL(1) << ACTLR_ELx_XCALL_SHIFT)
+
+#define ACTLR_ELx_XINT_SHIFT    21
+#define ACTLR_ELx_XINT          (UL(1) << ACTLR_ELx_XINT_SHIFT)
+
+#endif /* __ASM_ACTLR_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index dd758b04fd54dcdc99a95bca75c95cde2fc03160..88cdf27fe7012e16de2b5e936c044ffd175dc544 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -831,6 +831,14 @@ static inline bool system_has_full_ptr_auth(void)
         return system_supports_address_auth() && system_supports_generic_auth();
 }
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+static __always_inline bool system_uses_xcall_xint(void)
+{
+        return IS_ENABLED(CONFIG_ACTLR_XCALL_XINT) &&
+                cpus_have_const_cap(ARM64_HAS_HW_XCALL_XINT);
+}
+#endif
+
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
         return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 2047713e097d34f671565d90a5352f06c6bd1197..ae2c108880dd9777343ba1055408f56e950e9d93 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -38,4 +38,36 @@ struct arm_cpuidle_irq_context { };
 #define arm_cpuidle_save_irq_context(c)         (void)c
 #define arm_cpuidle_restore_irq_context(c)      (void)c
 #endif
+
+#ifdef CONFIG_ACTLR_XCALL_XINT
+struct arm_cpuidle_xcall_xint_context {
+        unsigned long actlr_el1;
+        unsigned long actlr_el2;
+};
+
+#define arm_cpuidle_save_xcall_xint_context(__c)                        \
+        do {                                                            \
+                struct arm_cpuidle_xcall_xint_context *c = __c;         \
+                if (system_uses_xcall_xint()) {                         \
+                        c->actlr_el1 = read_sysreg(actlr_el1);          \
+                        if (read_sysreg(CurrentEL) == CurrentEL_EL2)    \
+                                c->actlr_el2 = read_sysreg(actlr_el2);  \
+                }                                                       \
+        } while (0)
+
+#define arm_cpuidle_restore_xcall_xint_context(__c)                     \
+        do {                                                            \
+                struct arm_cpuidle_xcall_xint_context *c = __c;         \
+                if (system_uses_xcall_xint()) {                         \
+                        write_sysreg(c->actlr_el1, actlr_el1);          \
+                        if (read_sysreg(CurrentEL) == CurrentEL_EL2)    \
+                                write_sysreg(c->actlr_el2, actlr_el2);  \
+                }                                                       \
+        } while (0)
+#else
+struct arm_cpuidle_xcall_xint_context { };
+
+#define arm_cpuidle_save_xcall_xint_context(c) (void)c
+#define arm_cpuidle_restore_xcall_xint_context(c) (void)c
+#endif
 #endif
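The new cpuidle macros above save and restore ACTLR_EL1 (and ACTLR_EL2 when running at EL2) so the xcall/xint enable bits survive idle states in which the core may lose that register state. A minimal sketch of the intended call pattern, assuming CONFIG_ACTLR_XCALL_XINT=y; demo_ctx and demo_idle_path are hypothetical names, and the real callers are arch_cpu_idle_enter()/arch_cpu_idle_exit() in the idle.c hunk near the end of this patch:

#include <linux/percpu.h>
#include <asm/cpuidle.h>

static DEFINE_PER_CPU(struct arm_cpuidle_xcall_xint_context, demo_ctx);

static void demo_idle_path(void)
{
        struct arm_cpuidle_xcall_xint_context *ctx;

        ctx = &get_cpu_var(demo_ctx);           /* disables preemption */
        arm_cpuidle_save_xcall_xint_context(ctx);

        /* ... enter the low-power state here (e.g. cpu_do_idle()) ... */

        arm_cpuidle_restore_xcall_xint_context(ctx);
        put_cpu_var(demo_ctx);
}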
diff --git a/arch/arm64/include/asm/vectors.h b/arch/arm64/include/asm/vectors.h
index bc9a2145f4194e5d8bff099f36a9c1e9a568df91..0addb4bd2b3d6bf07ab1a33e8db529aa3e52898e 100644
--- a/arch/arm64/include/asm/vectors.h
+++ b/arch/arm64/include/asm/vectors.h
@@ -11,6 +11,7 @@
 #include
 
 extern char vectors[];
+extern char vectors_xcall_xint[];
 extern char tramp_vectors[];
 extern char __bp_harden_el1_vectors[];
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5c3d206785bdf89e2ab92e0a445fdbe5eada668b..b138586688772e1e2b22ef02ae0cf90ebeeff1ba 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
 #include
 #include
+#include <asm/actlr.h>
 #include
 #include
 #include
@@ -2466,6 +2467,77 @@ static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __u
 }
 #endif
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+static bool has_arch_xcall_xint_support(const struct arm64_cpu_capabilities *entry, int scope)
+{
+        /* List of CPUs that support Xcall/Xint */
+        static const struct midr_range xcall_xint_cpus[] = {
+                MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
+                { /* sentinel */ }
+        };
+
+        return is_midr_in_range_list(read_cpuid_id(), xcall_xint_cpus);
+}
+
+static void enable_xcall_xint_vectors(void)
+{
+        /*
+         * When CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is enabled,
+         * vbar_el1 points at the vectors starting at __bp_harden_el1_vectors.
+         * The kernel reaches the xcall/xint vectors through the trampoline
+         * vector defined in the tramp_ventry macro.
+         */
+        if (__this_cpu_read(this_cpu_vector) != vectors)
+                return;
+
+        /*
+         * When KAISER is enabled, vbar_el1 points at the vectors starting
+         * at tramp_vectors. The kernel reaches vectors_xcall_xint through
+         * the trampoline vector defined in the tramp_ventry macro.
+         */
+        if (arm64_kernel_unmapped_at_el0())
+                return;
+
+        /*
+         * If neither KAISER nor the BHB mitigation is enabled, switch
+         * vbar_el1 from the default vectors to the xcall/xint vectors
+         * directly.
+         */
+        write_sysreg(vectors_xcall_xint, vbar_el1);
+        isb();
+}
+
+static void cpu_enable_arch_xcall_xint(const struct arm64_cpu_capabilities *__unused)
+{
+        int cpu = smp_processor_id();
+        u64 actlr_el1, actlr_el2;
+        u64 el;
+
+        el = read_sysreg(CurrentEL);
+        if (el == CurrentEL_EL2) {
+                /*
+                 * Enable the EL2 trap for ACTLR_EL1 accesses from a guest kernel.
+                 */
+                write_sysreg_s(read_sysreg_s(SYS_HCR_EL2) | HCR_TACR, SYS_HCR_EL2);
+                actlr_el2 = read_sysreg(actlr_el2);
+                actlr_el2 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL);
+                write_sysreg(actlr_el2, actlr_el2);
+                isb();
+                actlr_el2 = read_sysreg(actlr_el2);
+                pr_info("actlr_el2: %llx, cpu:%d\n", actlr_el2, cpu);
+        } else {
+                actlr_el1 = read_sysreg(actlr_el1);
+                actlr_el1 |= (ACTLR_ELx_XINT | ACTLR_ELx_XCALL);
+                write_sysreg(actlr_el1, actlr_el1);
+                isb();
+                actlr_el1 = read_sysreg(actlr_el1);
+                pr_info("actlr_el1: %llx, cpu:%d\n", actlr_el1, cpu);
+        }
+
+        enable_xcall_xint_vectors();
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
         {
                 .capability = ARM64_ALWAYS_BOOT,
@@ -3031,6 +3103,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .cpu_enable = cpu_enable_ls64_v,
                 ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V)
         },
+#endif
+#ifdef CONFIG_ACTLR_XCALL_XINT
+        {
+                .desc = "Hardware Xcall and Xint Support",
+                .capability = ARM64_HAS_HW_XCALL_XINT,
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .matches = has_arch_xcall_xint_support,
+                .cpu_enable = cpu_enable_arch_xcall_xint,
+        },
 #endif
         {},
 };
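cpu_enable_arch_xcall_xint() above is a read-modify-write of ACTLR_EL1 (or ACTLR_EL2 when the kernel boots at EL2, where HCR_EL2.TACR additionally makes a guest's ACTLR_EL1 accesses trap). A stand-alone model of the same bit manipulation, using the values from the new asm/actlr.h; actlr_has_xcall_xint() and actlr_enable_xcall_xint() are illustrative helpers, not part of the patch:

#include <stdbool.h>
#include <stdint.h>

#define ACTLR_ELx_XCALL_SHIFT   20
#define ACTLR_ELx_XCALL         (1ULL << ACTLR_ELx_XCALL_SHIFT)
#define ACTLR_ELx_XINT_SHIFT    21
#define ACTLR_ELx_XINT          (1ULL << ACTLR_ELx_XINT_SHIFT)

/* Would both the xcall and xint enable bits be set in this ACTLR value? */
static bool actlr_has_xcall_xint(uint64_t actlr)
{
        uint64_t mask = ACTLR_ELx_XCALL | ACTLR_ELx_XINT;

        return (actlr & mask) == mask;
}

/* The same read-modify-write the cpu_enable hook performs on the live register. */
static uint64_t actlr_enable_xcall_xint(uint64_t actlr)
{
        return actlr | ACTLR_ELx_XCALL | ACTLR_ELx_XINT;
}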
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 4602c107c40a4cd651d50b9952b9c2bb8c43aa28..a9023134675169b0570b06d9b808a4b4443ce72f 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -186,7 +186,7 @@ static __always_inline void __fast_exit_to_user_mode(void)
 static __always_inline void fast_exit_to_user_mode(struct pt_regs *regs)
 {
         fast_exit_to_user_mode_prepare(regs);
-#ifndef CONFIG_DEBUG_FEATURE_BYPASS
+#ifndef CONFIG_SECURITY_FEATURE_BYPASS
         mte_check_tfsr_exit();
 #endif
         __fast_exit_to_user_mode();
@@ -202,6 +202,8 @@ static __always_inline void fast_enter_from_user_mode(struct pt_regs *regs)
         user_exit_irqoff();
 #ifndef CONFIG_DEBUG_FEATURE_BYPASS
         trace_hardirqs_off_finish();
+#endif
+#ifndef CONFIG_SECURITY_FEATURE_BYPASS
         mte_disable_tco_entry(current);
 #endif
 }
@@ -607,7 +609,7 @@ static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
 
 }
 
-asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_fast_irq_handler(struct pt_regs *regs)
 {
         el0_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
 }
@@ -829,7 +831,7 @@ static void noinstr el0_xcall(struct pt_regs *regs)
         fast_exit_to_user_mode(regs);
 }
 
-asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_fast_syscall_handler(struct pt_regs *regs)
 {
         el0_xcall(regs);
 }
@@ -1049,6 +1051,17 @@ UNHANDLED(el0t, 32, fiq)
 UNHANDLED(el0t, 32, error)
 #endif /* CONFIG_AARCH32_EL0 */
 
+#ifdef CONFIG_ACTLR_XCALL_XINT
+asmlinkage void noinstr el0t_64_xcall_handler(struct pt_regs *regs)
+{
+        el0_svc(regs);
+}
+asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
+{
+        el0_interrupt(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
+}
+#endif
+
 #ifdef CONFIG_VMAP_STACK
 asmlinkage void noinstr __noreturn handle_bad_stack(struct pt_regs *regs)
 {
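The FAST_SYSCALL help text motivates the trimmed entry path with short syscalls such as getpid() and lseek(), where fixed entry/exit work dominates. A user-space microbenchmark in that spirit (illustrative only and not part of the patch; the file name, iteration count, and use of syscall(SYS_getpid) to bypass glibc's cached getpid() are assumptions) gives a rough per-syscall latency to compare with and without the fast path:

/* cc -O2 -o getpid_bench getpid_bench.c */
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        enum { ITERS = 10 * 1000 * 1000 };
        struct timespec start, end;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (int i = 0; i < ITERS; i++)
                (void)syscall(SYS_getpid);      /* real syscall every iteration */
        clock_gettime(CLOCK_MONOTONIC, &end);

        double ns = (end.tv_sec - start.tv_sec) * 1e9 +
                    (end.tv_nsec - start.tv_nsec);
        printf("%.1f ns per getpid() syscall\n", ns / ITERS);
        return 0;
}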
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index da3809632f0f43d7b1a006133ff6de2a585f4961..801a521613ad0e7959e30e5bb6993b60b0c7e9bf 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -48,7 +48,13 @@
         mrs     x30, tpidrro_el0
         msr     tpidrro_el0, xzr
         .else
+alternative_if ARM64_HAS_HW_XCALL_XINT
+        mrs     x30, tpidrro_el0
+        msr     tpidrro_el0, xzr
+alternative_else
         mov     x30, xzr
+        nop
+alternative_endif
         .endif
 .Lskip_tramp_vectors_cleanup\@:
         .endif
@@ -570,6 +576,29 @@ SYM_CODE_START(vectors)
         kernel_ventry   0, t, 32, error         // Error 32-bit EL0
 SYM_CODE_END(vectors)
 
+        .align  11
+SYM_CODE_START(vectors_xcall_xint)
+        kernel_ventry   1, t, 64, sync          // Synchronous EL1t
+        kernel_ventry   1, t, 64, irq           // IRQ EL1t
+        kernel_ventry   1, t, 64, fiq           // FIQ EL1t
+        kernel_ventry   1, t, 64, error         // Error EL1t
+
+        kernel_ventry   1, h, 64, sync          // Synchronous EL1h
+        kernel_ventry   1, h, 64, irq           // IRQ EL1h
+        kernel_ventry   1, h, 64, fiq           // FIQ EL1h
+        kernel_ventry   1, h, 64, error         // Error EL1h
+
+        kernel_ventry   0, t, 64, sync          // Synchronous 64-bit EL0
+        kernel_ventry   0, t, 64, irq           // IRQ 64-bit EL0
+        kernel_ventry   0, t, 64, fiq           // FIQ 64-bit EL0
+        kernel_ventry   0, t, 64, error         // Error 64-bit EL0
+
+        kernel_ventry   0, t, 64, xcall         // XCALL synchronous 64-bit EL0
+        kernel_ventry   0, t, 64, xint          // XINT 64-bit EL0
+        kernel_ventry   0, t, 32, fiq           // FIQ 32-bit EL0
+        kernel_ventry   0, t, 32, error         // Error 32-bit EL0
+SYM_CODE_END(vectors_xcall_xint)
+
 #ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(__bad_stack)
         /*
@@ -648,7 +677,7 @@ SYM_CODE_END(__bad_stack)
         kernel_entry 0, 64
 #endif
         mov     x0, sp
-        bl      el0t_64_xcall_handler
+        bl      el0t_64_fast_syscall_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
         kernel_exit 0, xcall
 #else
@@ -696,7 +725,7 @@ SYM_CODE_END(__bad_stack)
         kernel_entry 0, 64
 #endif
         mov     x0, sp
-        bl      el0t_64_xint_handler
+        bl      el0t_64_fast_irq_handler
 #ifdef CONFIG_SECURITY_FEATURE_BYPASS
         kernel_exit 0, xint
 #else
@@ -759,6 +788,10 @@ SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
         entry_handler   0, t, 64, fiq
         entry_handler   0, t, 64, error
+#ifdef CONFIG_ACTLR_XCALL_XINT
+        entry_handler   0, t, 64, xcall
+        entry_handler   0, t, 64, xint
+#endif
 
         entry_handler   0, t, 32, sync
         entry_handler   0, t, 32, irq
         entry_handler   0, t, 32, fiq
@@ -849,6 +882,12 @@ alternative_else_nop_endif
         msr     tpidrro_el0, x30        // Restored in kernel_ventry
         .endif
 
+        .if     \regsize == 32
+alternative_if ARM64_HAS_HW_XCALL_XINT
+        msr     tpidrro_el0, x30        // Restored in kernel_ventry
+alternative_else_nop_endif
+        .endif
+
         .if     \bhb == BHB_MITIGATION_LOOP
         /*
          * This sequence must appear before the first indirect branch. i.e. the
@@ -873,7 +912,12 @@ alternative_else_nop_endif
 2:
         tramp_map_kernel        x30
 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
+alternative_if_not ARM64_HAS_HW_XCALL_XINT
         tramp_data_read_var     x30, vectors
+        b       3f
+alternative_else_nop_endif
+        tramp_data_read_var     x30, vectors_xcall_xint
+3:
 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
         prfm    plil1strm, [x30, #(1b - \vector_start)]
 alternative_else_nop_endif
@@ -881,7 +925,11 @@
         msr     vbar_el1, x30
         isb
         .else
+alternative_if_not ARM64_HAS_HW_XCALL_XINT
         adr_l   x30, vectors
+alternative_else
+        adr_l   x30, vectors_xcall_xint
+alternative_endif
         .endif // \kpti == 1
 
         .if     \bhb == BHB_MITIGATION_FW
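For reference when reading the vectors_xcall_xint table above: the .align 11 directive places it on a 2 KB boundary as VBAR_EL1 requires, and only the two slots at 0x600/0x680 differ from the regular vectors table, replacing the 32-bit EL0 sync/irq entries. A compact, illustrative rendering of the slot layout (not part of the patch):

/* Offsets are slot_index * 0x80; slots 12 and 13 are the hardware
 * xcall/xint entries, everything else matches the regular table. */
const char *const vectors_xcall_xint_layout[16] = {
        [0]  = "0x000 EL1t sync",        [1]  = "0x080 EL1t irq",
        [2]  = "0x100 EL1t fiq",         [3]  = "0x180 EL1t error",
        [4]  = "0x200 EL1h sync",        [5]  = "0x280 EL1h irq",
        [6]  = "0x300 EL1h fiq",         [7]  = "0x380 EL1h error",
        [8]  = "0x400 64-bit EL0 sync",  [9]  = "0x480 64-bit EL0 irq",
        [10] = "0x500 64-bit EL0 fiq",   [11] = "0x580 64-bit EL0 error",
        [12] = "0x600 64-bit EL0 xcall", [13] = "0x680 64-bit EL0 xint",
        [14] = "0x700 32-bit EL0 fiq",   [15] = "0x780 32-bit EL0 error",
};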
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
index 3a0b59aa12e27153d0150699024fc068d769da8a..6876392948b372f750c92f83c3e3f0ec951b711b 100644
--- a/arch/arm64/kernel/idle.c
+++ b/arch/arm64/kernel/idle.c
@@ -44,3 +44,34 @@ void noinstr arch_cpu_idle(void)
         cpu_do_idle();
 }
 EXPORT_SYMBOL_GPL(arch_cpu_idle);
+
+#ifdef CONFIG_ACTLR_XCALL_XINT
+DEFINE_PER_CPU_ALIGNED(struct arm_cpuidle_xcall_xint_context, contexts);
+
+void arch_cpu_idle_enter(void)
+{
+        struct arm_cpuidle_xcall_xint_context *context;
+
+        if (!system_uses_xcall_xint())
+                return;
+
+        context = &get_cpu_var(contexts);
+        arm_cpuidle_save_xcall_xint_context(context);
+        put_cpu_var(contexts);
+}
+
+void arch_cpu_idle_exit(void)
+{
+        struct arm_cpuidle_xcall_xint_context *context;
+
+        if (!system_uses_xcall_xint())
+                return;
+
+        context = &get_cpu_var(contexts);
+        arm_cpuidle_restore_xcall_xint_context(context);
+        put_cpu_var(contexts);
+}
+#else
+void arch_cpu_idle_enter(void) {}
+void arch_cpu_idle_exit(void) {}
+#endif
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 27d93050e5dad571a31e501a07bc67fadaae121a..ae0268822d61c37390296ced9a43ec85cb6c9053 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -112,7 +112,7 @@ HAS_XCALL
 HAS_XINT
 HAS_LS64
 HAS_LS64_V
-KABI_RESERVE_5
+HAS_HW_XCALL_XINT
 KABI_RESERVE_6
 KABI_RESERVE_7
 KABI_RESERVE_8
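With the ARM64_HAS_HW_XCALL_XINT capability wired up, other code can gate on it through the helper added to asm/cpufeature.h earlier in this patch. A hypothetical consumer, assuming CONFIG_ACTLR_XCALL_XINT=y (the helper is only defined in that configuration); xcall_xint_report() is an invented name, not part of the patch:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/cpufeature.h>

static int __init xcall_xint_report(void)
{
        if (system_uses_xcall_xint())
                pr_info("xcall/xint: hardware exception vectors in use\n");
        else
                pr_info("xcall/xint: CPU does not advertise HAS_HW_XCALL_XINT\n");

        return 0;
}
late_initcall(xcall_xint_report);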