diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 93da963c2775a049c28a18ca99e206272d16b1d5..d5b9e1f349cfd56d0e5f5afb73f2b799f55b62d0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -118,7 +118,11 @@
 /*
  * The preempt_count offset after spin_lock()
  */
+#if !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET	0
+#endif
 
 /*
  * The preempt_count offset needed for things like:
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cf139d6e5c1d35db8801704a99b06609d7ee4d04..18262dd58476ce00caa43aa53499e34e50ed906c 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -73,6 +73,11 @@ void synchronize_rcu(void);
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@@ -96,6 +101,8 @@ static inline int rcu_preempt_depth(void)
 	return 0;
 }
 
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f8fa240ef55fd050fa49e02c896bafa7cff81a65..e1d2200a9dbba561cb5e809c7813103084eb4274 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6215,7 +6215,7 @@ void __init sched_init(void)
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
-	int nested = preempt_count() + rcu_preempt_depth();
+	int nested = preempt_count() + sched_rcu_preempt_depth();
 
 	return (nested == preempt_offset);
 }
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 90df0efd4ee0b2d78413117ab4d31689b9a1d168..f5dc2fb906925d051441ec8cc45c0b05eea4046b 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -45,11 +45,16 @@ SCHED_FEAT(DOUBLE_TICK, false)
  */
 SCHED_FEAT(NONTASK_CAPACITY, true)
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+SCHED_FEAT(TTWU_QUEUE, false)
+#else
+
 /*
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
 SCHED_FEAT(TTWU_QUEUE, true)
+#endif
 
 /*
  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
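
Illustrative note (not part of the patch): on PREEMPT_RT_FULL, spinlocks become sleeping rtmutexes, so spin_lock() no longer disables preemption (hence PREEMPT_LOCK_OFFSET goes to 0) and a preemptible rcu_read_lock() section is not atomic context, so the might_sleep() debug check in preempt_count_equals() should not count rcu_preempt_depth(). The standalone userspace sketch below simulates how the new sched_rcu_preempt_depth() selection changes that check under both configurations; fake_rcu_read_lock_nesting and main() are stand-ins invented for this sketch, not kernel code.

/* Build as plain C; define CONFIG_PREEMPT_RT_FULL to simulate an RT build. */
#include <stdio.h>

/* #define CONFIG_PREEMPT_RT_FULL 1 */

static int fake_rcu_read_lock_nesting = 2;	/* pretend rcu_read_lock() depth */

#define rcu_preempt_depth() (fake_rcu_read_lock_nesting)

/* Mirrors the #ifndef CONFIG_PREEMPT_RT_FULL split added in rcupdate.h: */
#ifndef CONFIG_PREEMPT_RT_FULL
#define sched_rcu_preempt_depth()	rcu_preempt_depth()
#else
/* RT: RCU readers are preemptible, so the scheduler ignores their depth. */
static inline int sched_rcu_preempt_depth(void) { return 0; }
#endif

int main(void)
{
	int preempt_count = 0;	/* pretend no spinlocks are held */

	/* Mirrors preempt_count_equals() in kernel/sched/core.c */
	int nested = preempt_count + sched_rcu_preempt_depth();

	printf("nested = %d -> might_sleep() would %s\n",
	       nested, nested == 0 ? "pass" : "complain");
	return 0;
}

With CONFIG_PREEMPT_RT_FULL undefined this prints nested = 2 (a sleep inside the RCU read section would splat); with it defined, nested = 0 and the check passes, which matches the core.c hunk switching from rcu_preempt_depth() to sched_rcu_preempt_depth().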