diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a9ee0d9dc740a0582028a447d969d9ff0e39afbb..20b0e146de98bf8b1f933c2dbc0c9fc725d81a23 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
 
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	if (user_mode(regs))
 		goto bad_area;
 
@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 58c53bc969289ac9dc85951286986db4d58af38b..71252cd8b594d5d1f3a4cd6c8f70d766034be96b 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -159,6 +159,16 @@ static void sve_free(struct task_struct *task)
 	__sve_free(task);
 }
 
+static void *sve_free_atomic(struct task_struct *task)
+{
+	void *sve_state = task->thread.sve_state;
+
+	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+
+	task->thread.sve_state = NULL;
+	return sve_state;
+}
+
 /*
  * TIF_SVE controls whether a task can use SVE without trapping while
  * in userspace, and also the way a task's FPSIMD/SVE state is stored
@@ -547,6 +557,7 @@ int sve_set_vector_length(struct task_struct *task,
 	 * non-SVE thread.
 	 */
 	if (task == current) {
+		preempt_disable();
 		local_bh_disable();
 
 		fpsimd_save();
@@ -557,8 +568,10 @@ int sve_set_vector_length(struct task_struct *task,
 	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
 		sve_to_fpsimd(task);
 
-	if (task == current)
+	if (task == current) {
 		local_bh_enable();
+		preempt_enable();
+	}
 
 	/*
 	 * Force reallocation of task SVE state to the correct size
@@ -813,6 +826,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 
 	sve_alloc(current);
 
+	preempt_disable();
 	local_bh_disable();
 
 	fpsimd_save();
@@ -826,6 +840,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 		WARN_ON(1); /* SVE access shouldn't have trapped */
 
 	local_bh_enable();
+	preempt_enable();
 }
 
 /*
@@ -892,10 +907,12 @@ void fpsimd_thread_switch(struct task_struct *next)
 void fpsimd_flush_thread(void)
 {
 	int vl, supported_vl;
+	void *mem = NULL;
 
 	if (!system_supports_fpsimd())
 		return;
 
+	preempt_disable();
 	local_bh_disable();
 
 	memset(&current->thread.uw.fpsimd_state, 0,
@@ -904,7 +921,7 @@ void fpsimd_flush_thread(void)
 
 	if (system_supports_sve()) {
 		clear_thread_flag(TIF_SVE);
-		sve_free(current);
+		mem = sve_free_atomic(current);
 
 		/*
 		 * Reset the task vector length as required.
@@ -940,6 +957,8 @@ void fpsimd_flush_thread(void)
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 
 	local_bh_enable();
+	preempt_enable();
+	kfree(mem);
 }
 
 /*
@@ -951,9 +970,11 @@ void fpsimd_preserve_current_state(void)
 	if (!system_supports_fpsimd())
 		return;
 
+	preempt_disable();
 	local_bh_disable();
 	fpsimd_save();
 	local_bh_enable();
+	preempt_enable();
 }
 
 /*
@@ -1011,6 +1032,7 @@ void fpsimd_restore_current_state(void)
 	if (!system_supports_fpsimd())
 		return;
 
+	preempt_disable();
 	local_bh_disable();
 
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
@@ -1019,6 +1041,7 @@ void fpsimd_restore_current_state(void)
 	}
 
 	local_bh_enable();
+	preempt_enable();
 }
 
 /*
@@ -1031,6 +1054,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 	if (!system_supports_fpsimd())
 		return;
 
+	preempt_disable();
 	local_bh_disable();
 
 	current->thread.uw.fpsimd_state = *state;
@@ -1043,6 +1067,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 		clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
 	local_bh_enable();
+	preempt_enable();
 }
 
 /*
@@ -1088,6 +1113,7 @@ void kernel_neon_begin(void)
 
 	BUG_ON(!may_use_simd());
 
+	preempt_disable();
 	local_bh_disable();
 
 	__this_cpu_write(kernel_neon_busy, true);
@@ -1101,6 +1127,7 @@ void kernel_neon_begin(void)
 	preempt_disable();
 
 	local_bh_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9acc2dd43bb4ea4bd8827dc4bc5b21bae860c7d1..f2d1dac5e581b88c647a0af22f721ba186a0012f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2770,7 +2770,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
  *	This call sets the internal irqchip state of an interrupt,
  *	depending on the value of @which.
  *
- *	This function should be called with preemption disabled if the
+ *	This function should be called with migration disabled if the
  *	interrupt controller has per-cpu registers.
  */
 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 46ba853656f63b7ba1a1e11171b8cdc935666c8d..9a23632b6294fb35700bca09bd0cf981c5f4c4d3 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -140,6 +140,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
 
 #endif /* CONFIG_CRASH_CORE */
 
+#if defined(CONFIG_PREEMPT_RT_FULL)
+static ssize_t realtime_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", 1);
+}
+KERNEL_ATTR_RO(realtime);
+#endif
+
 /* whether file capabilities are enabled */
 static ssize_t fscaps_show(struct kobject *kobj,
 			   struct kobj_attribute *attr, char *buf)
@@ -230,6 +239,9 @@ static struct attribute * kernel_attrs[] = {
 #ifndef CONFIG_TINY_RCU
 	&rcu_expedited_attr.attr,
 	&rcu_normal_attr.attr,
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	&realtime_attr.attr,
 #endif
 	NULL
 };
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index a118151c986c4c664d4081589a938d868fcbb361..8b8947c42b10bd4cfcdffc0b2ca9a54016aa38bb 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -723,7 +723,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
-		preempt_disable();
+		migrate_disable();
 
 		kvm_pmu_flush_hwstate(vcpu);
 
@@ -772,7 +772,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			local_irq_enable();
-			preempt_enable();
+			migrate_enable();
 			continue;
 		}
 
@@ -850,7 +850,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		/* Exit types that need handling before we can be preempted */
 		handle_exit_early(vcpu, run, ret);
 
-		preempt_enable();
+		migrate_enable();
 
 		ret = handle_exit(vcpu, run, ret);
 	}
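
Note on the pattern above: on PREEMPT_RT, local_bh_disable() does not imply
preempt_disable(), so the fpsimd.c paths that used to rely on softirq-off
meaning preempt-off now disable preemption explicitly. That is also why
fpsimd_flush_thread() switches from sve_free() to sve_free_atomic(): kfree()
may sleep on RT, so the SVE state buffer is only detached from the task inside
the atomic section and freed after preempt_enable(). The sketch below is a
minimal user-space illustration of that deferred-free pattern; the
preempt_disable()/preempt_enable() stubs and the thread_state type are
stand-ins for the kernel APIs, not kernel code.

/*
 * Illustrative sketch of the deferred-free pattern used in
 * fpsimd_flush_thread() above.  Everything here is a stand-in:
 * the stubs model the kernel primitives, kfree is mapped to free.
 */
#include <stdio.h>
#include <stdlib.h>

static void preempt_disable(void) { /* enter atomic section */ }
static void preempt_enable(void)  { /* leave atomic section */ }
#define kfree free

struct thread_state {
	void *sve_state;	/* buffer that must not be freed while atomic */
};

/* Analogue of sve_free_atomic(): detach the buffer, free it later. */
static void *detach_state(struct thread_state *t)
{
	void *mem = t->sve_state;

	t->sve_state = NULL;	/* the task no longer owns the buffer */
	return mem;
}

int main(void)
{
	struct thread_state t = { .sve_state = malloc(64) };
	void *mem;

	preempt_disable();
	mem = detach_state(&t);	/* only pointer surgery while atomic */
	preempt_enable();

	kfree(mem);		/* potentially sleeping call, now safe */
	printf("freed deferred buffer\n");
	return 0;
}

The same reasoning drives the virt/kvm/arm/arm.c and kernel/irq/manage.c
hunks: migrate_disable() keeps the vCPU thread on its CPU for the per-CPU
GIC accesses while still allowing an RT kernel to preempt it.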