From bfc962ff89f35db0f2959e0959ef56ecd83998c2 Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:21:42 +0800 Subject: [PATCH 1/7] rcu: Frob softirq test commit 9baac22c59df91638ab1dacfebd3290c5bfd73ca upstream. With RT_FULL we get the below wreckage: [ 126.060484] ======================================================= [ 126.060486] [ INFO: possible circular locking dependency detected ] [ 126.060489] 3.0.1-rt10+ #30 [ 126.060490] ------------------------------------------------------- [ 126.060492] irq/24-eth0/1235 is trying to acquire lock: [ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [] rt_mutex_slowunlock+0x16/0x55 [ 126.060503] [ 126.060504] but task is already holding lock: [ 126.060506] (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 [ 126.060511] [ 126.060511] which lock already depends on the new lock. [ 126.060513] [ 126.060514] [ 126.060514] the existing dependency chain (in reverse order) is: [ 126.060516] [ 126.060516] -> #1 (&p->pi_lock){-...-.}: [ 126.060519] [] lock_acquire+0x145/0x18a [ 126.060524] [] _raw_spin_lock_irqsave+0x4b/0x85 [ 126.060527] [] task_blocks_on_rt_mutex+0x36/0x20f [ 126.060531] [] rt_mutex_slowlock+0xd1/0x15a [ 126.060534] [] rt_mutex_lock+0x2d/0x2f [ 126.060537] [] rcu_boost+0xad/0xde [ 126.060541] [] rcu_boost_kthread+0x7d/0x9b [ 126.060544] [] kthread+0x99/0xa1 [ 126.060547] [] kernel_thread_helper+0x4/0x10 [ 126.060551] [ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}: [ 126.060555] [] __lock_acquire+0x1157/0x1816 [ 126.060558] [] lock_acquire+0x145/0x18a [ 126.060561] [] _raw_spin_lock+0x40/0x73 [ 126.060564] [] rt_mutex_slowunlock+0x16/0x55 [ 126.060566] [] rt_mutex_unlock+0x27/0x29 [ 126.060569] [] rcu_read_unlock_special+0x17e/0x1c4 [ 126.060573] [] __rcu_read_unlock+0x48/0x89 [ 126.060576] [] select_task_rq_rt+0xc7/0xd5 [ 126.060580] [] try_to_wake_up+0x175/0x429 [ 126.060583] [] wake_up_process+0x15/0x17 [ 126.060585] [] wakeup_softirqd+0x24/0x26 [ 126.060590] [] irq_exit+0x49/0x55 [ 126.060593] [] smp_apic_timer_interrupt+0x8a/0x98 [ 126.060597] [] apic_timer_interrupt+0x13/0x20 [ 126.060600] [] irq_forced_thread_fn+0x1b/0x44 [ 126.060603] [] irq_thread+0xde/0x1af [ 126.060606] [] kthread+0x99/0xa1 [ 126.060608] [] kernel_thread_helper+0x4/0x10 [ 126.060611] [ 126.060612] other info that might help us debug this: [ 126.060614] [ 126.060615] Possible unsafe locking scenario: [ 126.060616] [ 126.060617] CPU0 CPU1 [ 126.060619] ---- ---- [ 126.060620] lock(&p->pi_lock); [ 126.060623] lock(&(lock)->wait_lock); [ 126.060625] lock(&p->pi_lock); [ 126.060627] lock(&(lock)->wait_lock); [ 126.060629] [ 126.060629] *** DEADLOCK *** [ 126.060630] [ 126.060632] 1 lock held by irq/24-eth0/1235: [ 126.060633] #0: (&p->pi_lock){-...-.}, at: [] try_to_wake_up+0x35/0x429 [ 126.060638] [ 126.060638] stack backtrace: [ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30 [ 126.060643] Call Trace: [ 126.060644] [] print_circular_bug+0x289/0x29a [ 126.060651] [] __lock_acquire+0x1157/0x1816 [ 126.060655] [] ? trace_hardirqs_off_caller+0x1f/0x99 [ 126.060658] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060661] [] lock_acquire+0x145/0x18a [ 126.060664] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060668] [] _raw_spin_lock+0x40/0x73 [ 126.060671] [] ? rt_mutex_slowunlock+0x16/0x55 [ 126.060674] [] ? rcu_report_qs_rsp+0x87/0x8c [ 126.060677] [] rt_mutex_slowunlock+0x16/0x55 [ 126.060680] [] ? 
rcu_read_unlock_special+0x9b/0x1c4 [ 126.060683] [] rt_mutex_unlock+0x27/0x29 [ 126.060687] [] rcu_read_unlock_special+0x17e/0x1c4 [ 126.060690] [] __rcu_read_unlock+0x48/0x89 [ 126.060693] [] select_task_rq_rt+0xc7/0xd5 [ 126.060696] [] ? select_task_rq_rt+0x27/0xd5 [ 126.060701] [] ? clockevents_program_event+0x8e/0x90 [ 126.060704] [] try_to_wake_up+0x175/0x429 [ 126.060708] [] ? tick_program_event+0x1f/0x21 [ 126.060711] [] wake_up_process+0x15/0x17 [ 126.060715] [] wakeup_softirqd+0x24/0x26 [ 126.060718] [] irq_exit+0x49/0x55 [ 126.060721] [] smp_apic_timer_interrupt+0x8a/0x98 [ 126.060724] [] apic_timer_interrupt+0x13/0x20 [ 126.060726] [] ? migrate_disable+0x75/0x12d [ 126.060733] [] ? local_bh_disable+0xe/0x1f [ 126.060736] [] ? local_bh_disable+0x1d/0x1f [ 126.060739] [] irq_forced_thread_fn+0x1b/0x44 [ 126.060742] [] ? _raw_spin_unlock_irq+0x3b/0x59 [ 126.060745] [] irq_thread+0xde/0x1af [ 126.060748] [] ? irq_thread_fn+0x3a/0x3a [ 126.060751] [] ? irq_finalize_oneshot+0xd1/0xd1 [ 126.060754] [] ? irq_finalize_oneshot+0xd1/0xd1 [ 126.060757] [] kthread+0x99/0xa1 [ 126.060761] [] kernel_thread_helper+0x4/0x10 [ 126.060764] [] ? finish_task_switch+0x87/0x10a [ 126.060768] [] ? retint_restore_args+0xe/0xe [ 126.060771] [] ? __init_kthread_worker+0x8c/0x8c [ 126.060774] [] ? gs_change+0xb/0xb Because irq_exit() does: void irq_exit(void) { account_system_vtime(current); trace_hardirq_exit(); sub_preempt_count(IRQ_EXIT_OFFSET); if (!in_interrupt() && local_softirq_pending()) invoke_softirq(); ... } Which triggers a wakeup, which uses RCU, now if the interrupted task has t->rcu_read_unlock_special set, the rcu usage from the wakeup will end up in rcu_read_unlock_special(). rcu_read_unlock_special() will test for in_irq(), which will fail as we just decremented preempt_count with IRQ_EXIT_OFFSET, and in_sering_softirq(), which for PREEMPT_RT_FULL reads: int in_serving_softirq(void) { int res; preempt_disable(); res = __get_cpu_var(local_softirq_runner) == current; preempt_enable(); return res; } Which will thus also fail, resulting in the above wreckage. The 'somewhat' ugly solution is to open-code the preempt_count() test in rcu_read_unlock_special(). Also, we're not at all sure how ->rcu_read_unlock_special gets set here... so this is very likely a bandaid and more thought is required. Cc: Paul E. McKenney Signed-off-by: Peter Zijlstra --- kernel/rcu/tree_plugin.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 564e3927e7b0..429a2f144e19 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -524,7 +524,7 @@ static void rcu_read_unlock_special(struct task_struct *t) } /* Hardware IRQ handlers cannot block, complain if they get here. */ - if (in_irq() || in_serving_softirq()) { + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { lockdep_rcu_suspicious(__FILE__, __LINE__, "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", -- Gitee From a26240b8cb3dd75fb25b8b3001bd8b8e8fc3ce1a Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:21:54 +0800 Subject: [PATCH 2/7] rcu: Merge RCU-bh into RCU-preempt commit 901bc5821a73d1fc6bbfe65cf07194fd04845366 upstream. The Linux kernel has long RCU-bh read-side critical sections that intolerably increase scheduling latency under mainline's RCU-bh rules, which include RCU-bh read-side critical sections being non-preemptible. 
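For illustration only (this snippet is not part of the patch, and the element type and per-entry helper are invented): a long RCU-bh reader of the shape below runs with bottom halves disabled for its whole duration under mainline rules, so nothing on that CPU, not even an RT task, can preempt it before rcu_read_unlock_bh().

/* Sketch of a long mainline RCU-bh reader; "struct entry" and
 * process_entry() are placeholders, not real kernel symbols. */
struct entry {
        struct hlist_node node;
        /* ... payload ... */
};

static void walk_table_bh(struct hlist_head *head)
{
        struct entry *e;

        rcu_read_lock_bh();             /* mainline: local_bh_disable() */
        hlist_for_each_entry_rcu(e, head, node)
                process_entry(e);       /* possibly thousands of entries */
        rcu_read_unlock_bh();           /* preemption possible again only here */
}

Every entry processed in such a loop adds to the non-preemptible window, which is what makes the latency intolerable for RT.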
This patch therefore arranges for RCU-bh to be implemented in terms of RCU-preempt for CONFIG_PREEMPT_RT_FULL=y. This has the downside of defeating the purpose of RCU-bh, namely, handling the case where the system is subjected to a network-based denial-of-service attack that keeps at least one CPU doing full-time softirq processing. This issue will be fixed by a later commit. The current commit will need some work to make it appropriate for mainline use, for example, it needs to be extended to cover Tiny RCU. [ paulmck: Added a useful changelog ] Signed-off-by: Thomas Gleixner Signed-off-by: Paul E. McKenney Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com Signed-off-by: Thomas Gleixner --- include/linux/rcupdate.h | 19 +++++++++++++++++++ include/linux/rcutree.h | 8 ++++++++ kernel/rcu/rcu.h | 11 +++++++++-- kernel/rcu/rcutorture.c | 7 +++++++ kernel/rcu/tree.c | 26 ++++++++++++++++++++++++++ kernel/rcu/tree.h | 2 ++ kernel/rcu/update.c | 2 ++ 7 files changed, 73 insertions(+), 2 deletions(-) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 1d7479913dae..549d72cf770b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -56,7 +56,11 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); #define call_rcu call_rcu_sched #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define call_rcu_bh call_rcu +#else void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); +#endif void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); @@ -261,7 +265,14 @@ extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int rcu_read_lock_bh_held(void) +{ + return rcu_read_lock_held(); +} +#else int rcu_read_lock_bh_held(void); +#endif int rcu_read_lock_sched_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -679,10 +690,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_lock(); +#else __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); +#endif } /* @@ -692,10 +707,14 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_unlock(); +#else RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); +#endif local_bh_enable(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 914655848ef6..462ce061bac7 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(false); } +#ifdef CONFIG_PREEMPT_RT_FULL +# define synchronize_rcu_bh synchronize_rcu +#else void synchronize_rcu_bh(void); +#endif void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) } void rcu_barrier(void); +#ifdef CONFIG_PREEMPT_RT_FULL +# define rcu_barrier_bh rcu_barrier +#else void rcu_barrier_bh(void); +#endif void rcu_barrier_sched(void); bool rcu_eqs_special_set(int cpu); unsigned long get_state_synchronize_rcu(void); diff --git a/kernel/rcu/rcu.h 
b/kernel/rcu/rcu.h index 4d04683c31b2..808cce9a5d43 100644 --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h @@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads(void) { } static inline int rcu_get_gp_kthreads_prio(void) { return 0; } #else /* #ifdef CONFIG_TINY_RCU */ unsigned long rcu_get_gp_seq(void); -unsigned long rcu_bh_get_gp_seq(void); unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); @@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(struct srcu_struct *sp); void show_rcu_gp_kthreads(void); int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq; + +#ifdef CONFIG_PREEMPT_RT_FULL +#define rcu_bh_get_gp_seq rcu_get_gp_seq +#define rcu_bh_force_quiescent_state rcu_force_quiescent_state +#else +unsigned long rcu_bh_get_gp_seq(void); +void rcu_bh_force_quiescent_state(void); +#endif + #endif /* #else #ifdef CONFIG_TINY_RCU */ #ifdef CONFIG_RCU_NOCB_CPU diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index 0b7af7e2bcbb..e95d121efc80 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = { .name = "rcu" }; +#ifndef CONFIG_PREEMPT_RT_FULL /* * Definitions for rcu_bh torture testing. */ @@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops = { .name = "rcu_bh" }; +#else +static struct rcu_torture_ops rcu_bh_ops = { + .ttype = INVALID_RCU_FLAVOR, +}; +#endif + /* * Don't even think about trying any of these in real life!!! * The names includes "busted", and they really means it! diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 02d97774dc00..b593c74d20c2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -245,6 +245,7 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } +#ifndef CONFIG_PREEMPT_RT_FULL void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); @@ -255,6 +256,11 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } +#else +void rcu_bh_qs(void) +{ +} +#endif /* * Steal a bit from the bottom of ->dynticks for idle entry/exit @@ -570,6 +576,7 @@ unsigned long rcu_sched_get_gp_seq(void) } EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Return the number of RCU-bh GPs completed thus far for debug & stats. */ @@ -578,6 +585,7 @@ unsigned long rcu_bh_get_gp_seq(void) return READ_ONCE(rcu_bh_state.gp_seq); } EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); +#endif /* * Return the number of RCU expedited batches completed thus far for @@ -601,6 +609,7 @@ unsigned long rcu_exp_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Force a quiescent state. */ @@ -619,6 +628,13 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +#else +void rcu_force_quiescent_state(void) +{ +} +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); +#endif + /* * Force a quiescent state for RCU-sched. 
*/ @@ -676,9 +692,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, case RCU_FLAVOR: rsp = rcu_state_p; break; +#ifndef CONFIG_PREEMPT_RT_FULL case RCU_BH_FLAVOR: rsp = &rcu_bh_state; break; +#endif case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; @@ -3073,6 +3091,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. @@ -3100,6 +3119,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif /* * Queue an RCU callback for lazy invocation after a grace period. @@ -3185,6 +3205,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * @@ -3211,6 +3232,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); +#endif /** * get_state_synchronize_rcu - Snapshot current RCU state @@ -3518,6 +3540,7 @@ static void _rcu_barrier(struct rcu_state *rsp) mutex_unlock(&rsp->barrier_mutex); } +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ @@ -3526,6 +3549,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); +#endif /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. @@ -4173,7 +4197,9 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); +#ifndef CONFIG_PREEMPT_RT_FULL rcu_init_one(&rcu_bh_state); +#endif rcu_init_one(&rcu_sched_state); if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4e74df768c57..fbbff7c21148 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavors; */ extern struct rcu_state rcu_sched_state; +#ifndef CONFIG_PREEMPT_RT_FULL extern struct rcu_state rcu_bh_state; +#endif #ifdef CONFIG_PREEMPT_RCU extern struct rcu_state rcu_preempt_state; diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 81688a133552..6ffafb1b1584 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -288,6 +288,7 @@ int rcu_read_lock_held(void) } EXPORT_SYMBOL_GPL(rcu_read_lock_held); +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? * @@ -314,6 +315,7 @@ int rcu_read_lock_bh_held(void) return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); +#endif #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -- Gitee From d4195ee66d696ac446efa57d6e3b803de919bfca Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:22:15 +0800 Subject: [PATCH 3/7] rcu: Make ksoftirqd do RCU quiescent states commit 8a484b91ae77a0557e92b9e07f3fe82b7ff735cb upstream. Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable to network-based denial-of-service attacks. This patch therefore makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq() is running in ksoftirqd context. A wrapper layer is interposed so that other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying function __do_softirq_common() does the actual work.
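In rough pseudocode the split looks like the following. This is a sketch of the intent only, not the literal patch: the ksoftirqd-side entry point is given a made-up name here and signatures are simplified.

/* Sketch: only the ksoftirqd path reports the quiescent state; every
 * other caller goes straight to the common worker. */
static void __do_softirq_common(void)
{
        /* ... process pending softirqs ... */
}

void __do_softirq(void)                 /* e.g. reached via local_bh_enable() */
{
        __do_softirq_common();          /* no rcu_bh_qs() here */
}

static void ksoftirqd_do_softirq(void)  /* illustrative name for the ksoftirqd path */
{
        __do_softirq_common();
        rcu_bh_qs();                    /* safe: ksoftirqd is not inside a reader */
}
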
The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is that there might be a local_bh_enable() inside an RCU-preempt read-side critical section. This local_bh_enable() can invoke __do_softirq() directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be an illegal RCU-preempt quiescent state in the middle of an RCU-preempt read-side critical section. Therefore, quiescent states can only happen in cases where __do_softirq() is invoked directly from ksoftirqd. Signed-off-by: Paul E. McKenney Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com Signed-off-by: Thomas Gleixner --- kernel/rcu/tree.c | 18 +++++++++++++----- kernel/rcu/tree_plugin.h | 8 +++++++- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b593c74d20c2..8c89f3bf85ba 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -245,7 +245,19 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } -#ifndef CONFIG_PREEMPT_RT_FULL +#ifdef CONFIG_PREEMPT_RT_FULL +static void rcu_preempt_qs(void); + +void rcu_bh_qs(void) +{ + unsigned long flags; + + /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ + local_irq_save(flags); + rcu_preempt_qs(); + local_irq_restore(flags); +} +#else void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); @@ -256,10 +268,6 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } -#else -void rcu_bh_qs(void) -{ -} #endif /* diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 429a2f144e19..bee9bffeb0ce 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "../time/tick-internal.h" @@ -1407,7 +1408,7 @@ static void rcu_prepare_kthreads(int cpu) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -#if !defined(CONFIG_RCU_FAST_NO_HZ) +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) /* * Check to see if any future RCU-related work will need to be done @@ -1423,7 +1424,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = KTIME_MAX; return rcu_cpu_has_callbacks(NULL); } +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ +#if !defined(CONFIG_RCU_FAST_NO_HZ) /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. @@ -1520,6 +1523,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) return cbs_ready; } +#ifndef CONFIG_PREEMPT_RT_FULL + /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the @@ -1562,6 +1567,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = basemono + dj * TICK_NSEC; return 0; } +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ /* * Prepare a CPU for idle from an RCU perspective. The first major task -- Gitee From 94b13ae37df7dbb6f0964aedc4a963ed202df9a6 Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:25:49 +0800 Subject: [PATCH 4/7] rcu: Eliminate softirq processing from rcutree commit 5c4e32e88d430654b1f529185331235d5b745a0d upstream. Running RCU out of softirq is a problem for some workloads that would like to manage RCU core processing independently of other softirq work, for example, setting kthread priority. 
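To illustrate that last point (a sketch, not code from this patch): work running in a kthread has a task_struct and can be re-prioritized like any other task, which softirq context cannot. Here "t" stands for the per-CPU kthread created below and "prio" for an administrator-chosen RT priority; kthread_prio is the existing rcutree.kthread_prio parameter this would typically come from.

/* Sketch: tuning the priority of a dedicated RCU-core kthread. */
static void set_rcu_core_kthread_prio(struct task_struct *t, int prio)
{
        struct sched_param sp = { .sched_priority = prio };

        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
}

Once the rcuc/%u threads introduced below exist, the same knob is also reachable from user space (for example via chrt on the rcuc threads), without affecting any other softirq consumer.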
This commit therefore moves the RCU core work from softirq to a per-CPU/per-flavor SCHED_OTHER kthread named rcuc. The SCHED_OTHER approach avoids the scalability problems that appeared with the earlier attempt to move RCU core processing to from softirq to kthreads. That said, kernels built with RCU_BOOST=y will run the rcuc kthreads at the RCU-boosting priority. Reported-by: Thomas Gleixner Tested-by: Mike Galbraith Signed-off-by: Paul E. McKenney Signed-off-by: Sebastian Andrzej Siewior --- kernel/rcu/tree.c | 114 ++++++++++++++++++++++++++++--- kernel/rcu/tree.h | 4 +- kernel/rcu/tree_plugin.h | 142 +++------------------------------------ 3 files changed, 115 insertions(+), 145 deletions(-) diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8c89f3bf85ba..8153a48908d7 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -62,6 +62,13 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include "../time/tick-internal.h" #include "tree.h" #include "rcu.h" @@ -2912,18 +2919,17 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* * Do RCU core processing for the current CPU. */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(void) { struct rcu_state *rsp; if (cpu_is_offline(smp_processor_id())) return; - trace_rcu_utilization(TPS("Start RCU core")); for_each_rcu_flavor(rsp) __rcu_process_callbacks(rsp); - trace_rcu_utilization(TPS("End RCU core")); } +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); /* * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, @@ -2935,18 +2941,105 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); - return; - } - invoke_rcu_callbacks_kthread(); + rcu_do_batch(rsp, rdp); +} + +static void rcu_wake_cond(struct task_struct *t, int status) +{ + /* + * If the thread is yielding, only wake it when this + * is invoked from idle + */ + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) + wake_up_process(t); } +/* + * Wake up this CPU's rcuc kthread to do RCU core processing. + */ static void invoke_rcu_core(void) { - if (cpu_online(smp_processor_id())) - raise_softirq(RCU_SOFTIRQ); + unsigned long flags; + struct task_struct *t; + + if (!cpu_online(smp_processor_id())) + return; + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + t = __this_cpu_read(rcu_cpu_kthread_task); + if (t != NULL && current != t) + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); + local_irq_restore(flags); +} + +static void rcu_cpu_kthread_park(unsigned int cpu) +{ + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +} + +static int rcu_cpu_kthread_should_run(unsigned int cpu) +{ + return __this_cpu_read(rcu_cpu_has_work); +} + +/* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * RCU softirq used in flavors and configurations of RCU that do not + * support RCU priority boosting. 
+ */ +static void rcu_cpu_kthread(unsigned int cpu) +{ + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); + int spincnt; + + for (spincnt = 0; spincnt < 10; spincnt++) { + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); + local_bh_disable(); + *statusp = RCU_KTHREAD_RUNNING; + this_cpu_inc(rcu_cpu_kthread_loops); + local_irq_disable(); + work = *workp; + *workp = 0; + local_irq_enable(); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp == 0) { + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); + *statusp = RCU_KTHREAD_WAITING; + return; + } + } + *statusp = RCU_KTHREAD_YIELDING; + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); + schedule_timeout_interruptible(2); + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); + *statusp = RCU_KTHREAD_WAITING; +} + +static struct smp_hotplug_thread rcu_cpu_thread_spec = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, + .thread_comm = "rcuc/%u", + .setup = rcu_cpu_kthread_setup, + .park = rcu_cpu_kthread_park, +}; + +/* + * Spawn per-CPU RCU core processing kthreads. + */ +static int __init rcu_spawn_core_kthreads(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(rcu_cpu_has_work, cpu) = 0; + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + return 0; } +early_initcall(rcu_spawn_core_kthreads); /* * Handle any core-RCU processing required by a call_rcu() invocation. @@ -4212,7 +4305,6 @@ void __init rcu_init(void) if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index fbbff7c21148..98257d20feb2 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -423,12 +423,10 @@ extern struct rcu_state rcu_preempt_state; int rcu_dynticks_snap(struct rcu_dynticks *rdtp); -#ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); -#endif /* #ifdef CONFIG_RCU_BOOST */ #ifndef RCU_TREE_NONCORE @@ -451,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); +static void rcu_cpu_kthread_setup(unsigned int cpu); #ifdef CONFIG_RCU_BOOST static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index bee9bffeb0ce..2e8737f1010f 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -24,42 +24,16 @@ * Paul E. McKenney */ -#include -#include -#include -#include -#include -#include -#include -#include -#include "../time/tick-internal.h" - -#ifdef CONFIG_RCU_BOOST - #include "../locking/rtmutex_common.h" /* * Control variables for per-CPU and per-rcu_node kthreads. These * handle all flavors of RCU. 
*/ -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); -#else /* #ifdef CONFIG_RCU_BOOST */ - -/* - * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST, - * all uses are in dead code. Provide a definition to keep the compiler - * happy, but add WARN_ON_ONCE() to complain if used in the wrong place. - * This probably needs to be excluded from -rt builds. - */ -#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; }) -#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1) - -#endif /* #else #ifdef CONFIG_RCU_BOOST */ - #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */ @@ -1027,18 +1001,21 @@ dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +/* + * If boosting, set rcuc kthreads to realtime priority. + */ +static void rcu_cpu_kthread_setup(unsigned int cpu) +{ #ifdef CONFIG_RCU_BOOST + struct sched_param sp; -static void rcu_wake_cond(struct task_struct *t, int status) -{ - /* - * If the thread is yielding, only wake it when this - * is invoked from idle - */ - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) - wake_up_process(t); + sp.sched_priority = kthread_prio; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +#endif /* #ifdef CONFIG_RCU_BOOST */ } +#ifdef CONFIG_RCU_BOOST + /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1176,23 +1153,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } -/* - * Wake up the per-CPU kthread to invoke RCU callbacks. - */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) { - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), - __this_cpu_read(rcu_cpu_kthread_status)); - } - local_irq_restore(flags); -} - /* * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. @@ -1247,67 +1207,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); - rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data)); -} - -static void rcu_cpu_kthread_setup(unsigned int cpu) -{ - struct sched_param sp; - - sp.sched_priority = kthread_prio; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -} - -static void rcu_cpu_kthread_park(unsigned int cpu) -{ - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -} - -static int rcu_cpu_kthread_should_run(unsigned int cpu) -{ - return __this_cpu_read(rcu_cpu_has_work); -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * RCU softirq used in flavors and configurations of RCU that do not - * support RCU priority boosting. 
- */ -static void rcu_cpu_kthread(unsigned int cpu) -{ - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); - int spincnt; - - for (spincnt = 0; spincnt < 10; spincnt++) { - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); - local_bh_disable(); - *statusp = RCU_KTHREAD_RUNNING; - this_cpu_inc(rcu_cpu_kthread_loops); - local_irq_disable(); - work = *workp; - *workp = 0; - local_irq_enable(); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp == 0) { - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); - *statusp = RCU_KTHREAD_WAITING; - return; - } - } - *statusp = RCU_KTHREAD_YIELDING; - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); - schedule_timeout_interruptible(2); - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); - *statusp = RCU_KTHREAD_WAITING; -} - /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still @@ -1338,26 +1237,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) free_cpumask_var(cm); } -static struct smp_hotplug_thread rcu_cpu_thread_spec = { - .store = &rcu_cpu_kthread_task, - .thread_should_run = rcu_cpu_kthread_should_run, - .thread_fn = rcu_cpu_kthread, - .thread_comm = "rcuc/%u", - .setup = rcu_cpu_kthread_setup, - .park = rcu_cpu_kthread_park, -}; - /* * Spawn boost kthreads -- called as soon as the scheduler is running. */ static void __init rcu_spawn_boost_kthreads(void) { struct rcu_node *rnp; - int cpu; - - for_each_possible_cpu(cpu) - per_cpu(rcu_cpu_has_work, cpu) = 0; - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } @@ -1380,11 +1265,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -static void invoke_rcu_callbacks_kthread(void) -{ - WARN_ON_ONCE(1); -} - static bool rcu_is_callbacks_kthread(void) { return false; -- Gitee From 84437dc9a7393ff371be17366676ebfbb2643901 Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:26:29 +0800 Subject: [PATCH 5/7] srcu: use cpu_online() instead custom check commit 68c6703bb92019392658036dcf0a667b1388a3b0 upstream. The current check via srcu_online is slightly racy because after looking at srcu_online there could be an interrupt that interrupted us long enough until the CPU we checked against went offline. An alternative would be to hold the hotplug rwsem (so the CPUs don't change their state) and then check based on cpu_online() if we queue it on a specific CPU or not. queue_work_on() itself can handle if something is enqueued on an offline CPU but a timer which is enqueued on an offline CPU won't fire until the CPU is back online. Signed-off-by: Sebastian Andrzej Siewior --- kernel/rcu/srcutree.c | 22 ++++------------------ kernel/rcu/tree.c | 4 ---- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index cdbcb2e66bff..2eb1dec94760 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "rcu.h" #include "rcu_segcblist.h" @@ -462,21 +463,6 @@ static void srcu_gp_start(struct srcu_struct *sp) WARN_ON_ONCE(state != SRCU_STATE_SCAN1); } -/* - * Track online CPUs to guide callback workqueue placement. 
- */ -DEFINE_PER_CPU(bool, srcu_online); - -void srcu_online_cpu(unsigned int cpu) -{ - WRITE_ONCE(per_cpu(srcu_online, cpu), true); -} - -void srcu_offline_cpu(unsigned int cpu) -{ - WRITE_ONCE(per_cpu(srcu_online, cpu), false); -} - /* * Place the workqueue handler on the specified CPU if online, otherwise * just run it whereever. This is useful for placing workqueue handlers @@ -488,12 +474,12 @@ static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, { bool ret; - preempt_disable(); - if (READ_ONCE(per_cpu(srcu_online, cpu))) + cpus_read_lock(); + if (cpu_online(cpu)) ret = queue_delayed_work_on(cpu, wq, dwork, delay); else ret = queue_delayed_work(wq, dwork, delay); - preempt_enable(); + cpus_read_unlock(); return ret; } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8153a48908d7..e809b4730e6f 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3800,8 +3800,6 @@ int rcutree_online_cpu(unsigned int cpu) rnp->ffmask |= rdp->grpmask; raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_online_cpu(cpu); if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) return 0; /* Too early in boot for scheduler work. */ sync_sched_exp_online_cleanup(cpu); @@ -3829,8 +3827,6 @@ int rcutree_offline_cpu(unsigned int cpu) } rcutree_affinity_setting(cpu, cpu); - if (IS_ENABLED(CONFIG_TREE_SRCU)) - srcu_offline_cpu(cpu); return 0; } -- Gitee From 9b14d4562691d1c5ceb559d4f4e6261a2710944a Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:26:40 +0800 Subject: [PATCH 6/7] srcu: replace local_irqsave() with a locallock commit 5bc56052951df498807fec250c48debf7cc3a55b upstream. There are two instances which disable interrupts in order to become a stable this_cpu_ptr() pointer. The restore part is coupled with spin_unlock_irqrestore() which does not work on RT. Replace the local_irq_save() call with the appropriate local_lock() version of it. Signed-off-by: Sebastian Andrzej Siewior --- kernel/rcu/srcutree.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 2eb1dec94760..216497531be9 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "rcu.h" #include "rcu_segcblist.h" @@ -762,6 +763,8 @@ static void srcu_flip(struct srcu_struct *sp) * negligible when amoritized over that time period, and the extra latency * of a needlessly non-expedited grace period is similarly negligible. */ +static DEFINE_LOCAL_IRQ_LOCK(sp_llock); + static bool srcu_might_be_idle(struct srcu_struct *sp) { unsigned long curseq; @@ -770,13 +773,13 @@ static bool srcu_might_be_idle(struct srcu_struct *sp) unsigned long t; /* If the local srcu_data structure has callbacks, not idle. */ - local_irq_save(flags); + local_lock_irqsave(sp_llock, flags); sdp = this_cpu_ptr(sp->sda); if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { - local_irq_restore(flags); + local_unlock_irqrestore(sp_llock, flags); return false; /* Callbacks already present, so not idle. */ } - local_irq_restore(flags); + local_unlock_irqrestore(sp_llock, flags); /* * No local callbacks, so probabalistically probe global state. 
@@ -854,7 +857,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, return; } rhp->func = func; - local_irq_save(flags); + local_lock_irqsave(sp_llock, flags); sdp = this_cpu_ptr(sp->sda); spin_lock_rcu_node(sdp); rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); @@ -870,7 +873,8 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, sdp->srcu_gp_seq_needed_exp = s; needexp = true; } - spin_unlock_irqrestore_rcu_node(sdp, flags); + spin_unlock_rcu_node(sdp); + local_unlock_irqrestore(sp_llock, flags); if (needgp) srcu_funnel_gp_start(sp, sdp, s, do_norm); else if (needexp) -- Gitee From 69f0341d50bde5d7710eb5984cd0fb5e3ad54201 Mon Sep 17 00:00:00 2001 From: meganz009 Date: Wed, 7 Jun 2023 21:26:52 +0800 Subject: [PATCH 7/7] rcu: enable rcu_normal_after_boot by default for RT commit 72d24a41a24c422c75947391b6fb8419bce49da3 upstream. The forcing of an expedited grace period is an expensive and very RT-application unfriendly operation, as it forcibly preempts all running tasks on CPUs which are preventing the gp from expiring. By default, as a policy decision, disable the expediting of grace periods (after boot) on configurations which enable PREEMPT_RT_FULL. Suggested-by: Luiz Capitulino Acked-by: Paul E. McKenney Signed-off-by: Julia Cartwright Signed-off-by: Sebastian Andrzej Siewior --- kernel/rcu/update.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 6ffafb1b1584..16d8dba23329 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -68,7 +68,7 @@ extern int rcu_expedited; /* from sysctl */ module_param(rcu_expedited, int, 0); extern int rcu_normal; /* from sysctl */ module_param(rcu_normal, int, 0); -static int rcu_normal_after_boot; +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); module_param(rcu_normal_after_boot, int, 0); #endif /* #ifndef CONFIG_TINY_RCU */ -- Gitee
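For context on what the changed default does downstream (approximate shape recalled from kernel/rcu/update.c of this era, shown for illustration only): once booting completes, rcu_normal_after_boot is folded into rcu_normal, and a set rcu_normal makes expedited grace-period requests such as synchronize_rcu_expedited() fall back to their normal, non-preempting variants.

/* Approximate shape of the consumer of rcu_normal_after_boot; see
 * kernel/rcu/update.c for the authoritative version. */
void rcu_end_inkernel_boot(void)
{
        rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
}

Systems that still want expedited grace periods after boot on PREEMPT_RT_FULL can override the new default with rcupdate.rcu_normal_after_boot=0 on the kernel command line.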