From e4058c9ad3e3b51a136ac4f5705a286799d22d77 Mon Sep 17 00:00:00 2001
From: meganz009
Date: Mon, 12 Jun 2023 19:01:10 +0800
Subject: [PATCH 1/3] powerpc/pseries/iommu: Use a locallock instead of local_irq_save()

commit 225db3efbd37d13d5f2f72e2371da7d6bd9a2405 upstream.

The locallock protects the per-CPU variable tce_page. The function
attempts to allocate memory while tce_page is protected (by disabling
interrupts). Use the locallock instead of local_irq_save() and
local_irq_disable().

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Steven Rostedt (VMware)
---
 arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 06f02960b439..d80d919c78d3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -38,6 +38,7 @@
 #include <linux/of.h>
 #include <linux/iommu.h>
 #include <linux/rculist.h>
+#include <linux/locallock.h>
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -212,6 +213,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 }
 
 static DEFINE_PER_CPU(__be64 *, tce_page);
+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				    long npages, unsigned long uaddr,
@@ -232,7 +234,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 					   direction, attrs);
 	}
 
-	local_irq_save(flags);	/* to protect tcep and the page behind it */
+	/* to protect tcep and the page behind it */
+	local_lock_irqsave(tcp_page_lock, flags);
 
 	tcep = __this_cpu_read(tce_page);
 
@@ -243,7 +246,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		/* If allocation fails, fall back to the loop implementation */
 		if (!tcep) {
-			local_irq_restore(flags);
+			local_unlock_irqrestore(tcp_page_lock, flags);
 			return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					    direction, attrs);
 		}
@@ -277,7 +280,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		tcenum += limit;
 	} while (npages > 0 && !rc);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(tcp_page_lock, flags);
 
 	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
 		ret = (int)rc;
@@ -435,13 +438,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 	u64 rc = 0;
 	long l, limit;
 
-	local_irq_disable();	/* to protect tcep and the page behind it */
+	/* to protect tcep and the page behind it */
+	local_lock_irq(tcp_page_lock);
 	tcep = __this_cpu_read(tce_page);
 
 	if (!tcep) {
 		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
 		if (!tcep) {
-			local_irq_enable();
+			local_unlock_irq(tcp_page_lock);
 			return -ENOMEM;
 		}
 		__this_cpu_write(tce_page, tcep);
@@ -487,7 +491,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
 
 	/* error cleanup: caller will clear whole range */
 
-	local_irq_enable();
+	local_unlock_irq(tcp_page_lock);
 
 	return rc;
 }
--
Gitee
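
For reference, here is a minimal sketch of the locallock pattern the patch
above adopts. It assumes an RT-patched tree where <linux/locallock.h>
provides DEFINE_LOCAL_IRQ_LOCK(); the names tce_scratch, tce_scratch_lock
and use_scratch_page() are hypothetical stand-ins, not kernel code. On a
non-RT configuration local_lock_irqsave() falls back to local_irq_save();
on PREEMPT_RT_FULL it takes a per-CPU lock instead, so the section stays
preemptible while the per-CPU pointer remains protected.

#include <linux/locallock.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

/* hypothetical per-CPU scratch page, mirroring tce_page in the patch */
static DEFINE_PER_CPU(u64 *, tce_scratch);
static DEFINE_LOCAL_IRQ_LOCK(tce_scratch_lock);

static int use_scratch_page(void)
{
	unsigned long flags;
	u64 *p;

	/* protects the per-CPU pointer and the page behind it */
	local_lock_irqsave(tce_scratch_lock, flags);

	p = __this_cpu_read(tce_scratch);
	if (!p) {
		p = (u64 *)__get_free_page(GFP_ATOMIC);
		if (!p) {
			local_unlock_irqrestore(tce_scratch_lock, flags);
			return -ENOMEM;
		}
		__this_cpu_write(tce_scratch, p);
	}

	/* ... fill the scratch page and hand it to firmware ... */

	local_unlock_irqrestore(tce_scratch_lock, flags);
	return 0;
}

The allocation inside the protected section is exactly the commit
message's concern: it is only acceptable on -RT because the locallock
keeps the section preemptible instead of running with interrupts off.
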
From 8de0878c5acc16ac3a660fc275e7e4e024e5b773 Mon Sep 17 00:00:00 2001
From: meganz009
Date: Mon, 12 Jun 2023 19:01:21 +0800
Subject: [PATCH 2/3] powerpc: reshuffle TIF bits

commit aef8aedf3ea12fb22a6f2b340a3f6bd4aacac4e0 upstream.

Powerpc32/64 does not compile because TIF_SYSCALL_TRACE's bit is higher
than 15 and the assembly instructions don't expect that. Move
TIF_RESTOREALL and TIF_NOERROR to the higher bits and keep
TIF_NEED_RESCHED_LAZY in the lower range. As a result only one split
load is needed; everywhere else we can use immediates.

Signed-off-by: Sebastian Andrzej Siewior
Signed-off-by: Steven Rostedt (VMware)
---
 arch/powerpc/include/asm/thread_info.h | 11 +++++++----
 arch/powerpc/kernel/entry_32.S         | 12 +++++++-----
 arch/powerpc/kernel/entry_64.S         | 12 +++++++-----
 3 files changed, 21 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ce316076bc52..64c3d1a720e2 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -83,18 +83,18 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_FSCHECK		3	/* Check FS is USER_DS on return */
-#define TIF_NEED_RESCHED_LAZY	4	/* lazy rescheduling necessary */
 #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
 #define TIF_PATCH_PENDING	6	/* pending live patching update */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SINGLESTEP		8	/* singlestepping active */
 #define TIF_NOHZ		9	/* in adaptive nohz mode */
 #define TIF_SECCOMP		10	/* secure computing */
-#define TIF_RESTOREALL		11	/* Restore all regs (implies NOERROR) */
-#define TIF_NOERROR		12	/* Force successful syscall return */
+
+#define TIF_NEED_RESCHED_LAZY	11	/* lazy rescheduling necessary */
+#define TIF_SYSCALL_TRACEPOINT	12	/* syscall tracepoint instrumentation */
+
 #define TIF_NOTIFY_RESUME	13	/* callback before returning to user */
 #define TIF_UPROBE		14	/* breakpointed or single-stepping */
-#define TIF_SYSCALL_TRACEPOINT	15	/* syscall tracepoint instrumentation */
 #define TIF_EMULATE_STACK_STORE	16	/* Is an instruction emulation
 						for stack store? */
 #define TIF_MEMDIE		17	/* is terminating due to OOM killer */
@@ -103,6 +103,9 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
 #endif
 #define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT		20	/* 32 bit binary */
+#define TIF_RESTOREALL		21	/* Restore all regs (implies NOERROR) */
+#define TIF_NOERROR		22	/* Force successful syscall return */
+
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)

[The remainder of this patch (the rest of the _TIF_* bit-value
definitions and the entry_32.S and entry_64.S hunks) is truncated in the
source.]
--
Gitee
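
As background for the reshuffling above, the following sketch shows why a
flag's bit position matters on powerpc. It is illustrative only and uses
hypothetical flag names, not the kernel's TIF layout: andi. and andis.
each accept only a 16-bit immediate (andis. tests the upper half-word),
so a mask confined to one half-word is a single-instruction test, while a
mask straddling bit 15 must first be built with a two-instruction split
load (lis plus ori).

/* Hypothetical flag values; not the kernel's actual TIF layout. */
#define _TIF_LOW_FLAG	(1UL << 2)	/* bits 0-15: andi. immediate */
#define _TIF_HIGH_FLAG	(1UL << 22)	/* bits 16-31: andis. immediate */

/* One "andi. rX,rY,0x0004"-style instruction suffices. */
static inline int low_flag_set(unsigned long ti_flags)
{
	return (ti_flags & _TIF_LOW_FLAG) != 0;
}

/* Still one instruction, using the shifted form: andis. rX,rY,0x0040. */
static inline int high_flag_set(unsigned long ti_flags)
{
	return (ti_flags & _TIF_HIGH_FLAG) != 0;
}

/*
 * A mask spanning both half-words cannot be an immediate; the compiler
 * (or hand-written entry code) must build it first, e.g.
 *	lis	r0,(MASK >> 16)
 *	ori	r0,r0,(MASK & 0xffff)
 *	and.	r0,r9,r0
 * This is the "split load" the commit message wants to keep to one place.
 */
static inline int any_flag_set(unsigned long ti_flags)
{
	return (ti_flags & (_TIF_LOW_FLAG | _TIF_HIGH_FLAG)) != 0;
}
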
From: meganz009
Date: Mon, 12 Jun 2023 19:01:33 +0800
Subject: [PATCH 3/3] tty/sysrq: Convert show_lock to raw_spinlock_t

commit 80e49fd34b0d5b3dd5f122c074ce833b9d727ae8 upstream.

Systems which don't provide arch_trigger_cpumask_backtrace() will invoke
showacpu() from an smp_call_function() callback, which runs with
interrupts disabled even on -RT systems.

The function acquires the show_lock lock, whose only purpose is to ensure
that the CPUs don't print simultaneously. Otherwise the output would
clash and it would be hard to tell the output of CPUx apart from that of
CPUy.

On -RT the spin_lock() cannot be acquired from this context. A
raw_spin_lock() is required. It will increase the system's latency while
the sysrq request is performed, and other CPUs will block on the lock
until the request is done. This is okay because the user asked for a
backtrace of all active CPUs and under "normal circumstances in
production" this path should not be triggered.

Signed-off-by: Julien Grall
Signed-off-by: Steven Rostedt (VMware)
[bigeasy@linutronix.de: commit description]
Signed-off-by: Sebastian Andrzej Siewior
Acked-by: Sebastian Andrzej Siewior
Signed-off-by: Greg Kroah-Hartman
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior
---
 drivers/tty/sysrq.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 06ed20dd01ba..627517ad55bf 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -215,7 +215,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
 #endif
 
 #ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(show_lock);
+static DEFINE_RAW_SPINLOCK(show_lock);
 
 static void showacpu(void *dummy)
 {
@@ -225,10 +225,10 @@ static void showacpu(void *dummy)
 	if (idle_cpu(smp_processor_id()))
 		return;
 
-	spin_lock_irqsave(&show_lock, flags);
+	raw_spin_lock_irqsave(&show_lock, flags);
 	pr_info("CPU%d:\n", smp_processor_id());
 	show_stack(NULL, NULL);
-	spin_unlock_irqrestore(&show_lock, flags);
+	raw_spin_unlock_irqrestore(&show_lock, flags);
 }
 
 static void sysrq_showregs_othercpus(struct work_struct *dummy)
--
Gitee
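
To make the lock-type distinction concrete, here is a minimal sketch of
the constraint the commit message describes; the demo_* names are
hypothetical and this is not the patch's code. On PREEMPT_RT a spinlock_t
becomes a sleeping lock, so it must not be taken in a context that runs
with interrupts hard-disabled, such as an smp_call_function() callback; a
raw_spinlock_t keeps genuine spin-with-interrupts-off semantics in every
configuration.

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/* serializes output from the CPUs; must be raw so -RT can take it here */
static DEFINE_RAW_SPINLOCK(demo_show_lock);

/* runs on every CPU via smp_call_function(), with interrupts disabled */
static void demo_show_cpu(void *unused)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&demo_show_lock, flags);
	pr_info("CPU%d reporting\n", smp_processor_id());
	raw_spin_unlock_irqrestore(&demo_show_lock, flags);
}

static void demo_show_all_cpus(void)
{
	/* wait=1: block until every CPU has printed, the latency
	 * trade-off the commit message accepts for sysrq */
	smp_call_function(demo_show_cpu, NULL, 1);
}
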