diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index af39859f02ee15c78bbb3b1706359aa59871a153..7b5b2ed55531ad51d4abf2efd46577ffcf88e44d 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -208,12 +208,12 @@ do { \
 	preempt_count_dec(); \
 } while (0)
 
-#ifdef CONFIG_PREEMPT_RT
+#ifndef CONFIG_PREEMPT_RT
 # define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt()	preempt_check_resched()
+# define preempt_check_resched_rt()	barrier();
 #else
 # define preempt_enable_no_resched()	preempt_enable()
-# define preempt_check_resched_rt()	barrier();
+# define preempt_check_resched_rt()	preempt_check_resched()
 #endif
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
diff --git a/init/Kconfig b/init/Kconfig
index 4546cdf5f4c0b4261376f9e3ef55526d7c5eacf8..a78289eebba24022287c55907fe60c2373c4eb6e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -866,7 +866,7 @@ config NUMA_BALANCING
 	bool "Memory placement aware NUMA scheduler"
 	depends on ARCH_SUPPORTS_NUMA_BALANCING
 	depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
-	depends on SMP && NUMA && MIGRATION
+	depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
 	help
 	  This option adds support for automatic NUMA aware memory/task placement.
 	  The mechanism is quite primitive and is based on migrating memory when
diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
index 274172d5bb3a573eedaf356f7ec360121213b645..b61edc4dcb7391e50f2ebd6e6ad853b85814f0a7 100644
--- a/kernel/locking/rwsem-rt.c
+++ b/kernel/locking/rwsem-rt.c
@@ -198,7 +198,6 @@ void __up_read(struct rw_semaphore *sem)
 	if (!atomic_dec_and_test(&sem->readers))
 		return;
 
-	might_sleep();
 	raw_spin_lock_irq(&m->wait_lock);
 	/*
 	 * Wake the writer, i.e. the rtmutex owner. It might release the
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 43595c1a9f9c423bb71eaec6c4d81939ddc0b421..a4ea0a0bf2f20ed1af5cb94ba3e4ced0a17ec641 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3746,7 +3746,9 @@ bool pr_flush(int timeout_ms, bool reset_on_progress)
 	u64 diff;
 	u64 seq;
 
-	may_sleep = (preemptible() && !in_softirq());
+	may_sleep = (preemptible() &&
+		     !in_softirq() &&
+		     system_state >= SYSTEM_RUNNING);
 
 	seq = prb_next_seq(prb);
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7dad2ff3e7785462a0b0b44c5b62c1119753a526..16ce2b05df90440b779a6864e1e662588167881a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -82,7 +82,7 @@
 
 struct zsmalloc_handle {
 	unsigned long addr;
-	struct mutex lock;
+	spinlock_t lock;
 };
 
 #define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
@@ -370,7 +370,7 @@ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 	if (p) {
 		struct zsmalloc_handle *zh = p;
 
-		mutex_init(&zh->lock);
+		spin_lock_init(&zh->lock);
 	}
 #endif
 	return (unsigned long)p;
@@ -930,7 +930,7 @@ static inline int testpin_tag(unsigned long handle)
 #ifdef CONFIG_PREEMPT_RT
 	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
 
-	return mutex_is_locked(&zh->lock);
+	return spin_is_locked(&zh->lock);
 #else
 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
 #endif
@@ -941,7 +941,7 @@ static inline int trypin_tag(unsigned long handle)
 #ifdef CONFIG_PREEMPT_RT
 	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
 
-	return mutex_trylock(&zh->lock);
+	return spin_trylock(&zh->lock);
 #else
 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
 #endif
@@ -952,7 +952,7 @@ static void pin_tag(unsigned long handle) __acquires(bitlock)
 #ifdef CONFIG_PREEMPT_RT
 	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
 
-	return mutex_lock(&zh->lock);
+	return spin_lock(&zh->lock);
 #else
 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
 #endif
@@ -963,7 +963,7 @@ static void unpin_tag(unsigned long handle) __releases(bitlock)
 #ifdef CONFIG_PREEMPT_RT
 	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
 
-	return mutex_unlock(&zh->lock);
+	return spin_unlock(&zh->lock);
 #else
 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
 #endif
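
A minimal sketch of how the converted per-handle lock is used once it is a spinlock_t: struct zsmalloc_handle and zs_get_pure_handle() are the names from the patched mm/zsmalloc.c, while the caller below is hypothetical and not part of the patch. On PREEMPT_RT, spinlock_t is the sleeping rt-lock, so the pin/unpin path keeps the same shape as the non-RT bit-spinlock variant.

	/*
	 * Hypothetical caller, for illustration only: pin a zsmalloc handle
	 * around a short critical section using the spinlock_t handle lock.
	 */
	#ifdef CONFIG_PREEMPT_RT
	static void example_use_handle(unsigned long handle)
	{
		struct zsmalloc_handle *zh = zs_get_pure_handle(handle);

		spin_lock(&zh->lock);	/* rt-mutex based "spinlock" on RT, plain spinlock otherwise */
		/* ... short critical section touching the pinned object ... */
		spin_unlock(&zh->lock);
	}
	#endif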