From 84e090fd5ca9211567b9498ac8af0ed912a38d3c Mon Sep 17 00:00:00 2001
From: meganz009
Date: Sat, 3 Jun 2023 15:45:22 +0800
Subject: [PATCH 1/2] list_bl: Make list head locking RT safe

commit 3023eb220840f37bbc9d31e9959de567cb1aaaae upstream.

As per the changes in include/linux/jbd_common.h for avoiding the
bit_spin_locks on RT ("fs: jbd/jbd2: Make state lock and journal head
lock rt safe"), we do the same thing here.

We use the non-atomic __set_bit and __clear_bit inside the scope of the
lock to preserve the ability of the existing LIST_DEBUG code to use the
zeroth bit in its sanity checks.

As a bit spinlock, we had no lockdep visibility into the usage of the
list head locking.  Now, if we were to implement it as a standard
non-raw spinlock, we would see:

BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
in_atomic(): 1, irqs_disabled(): 0, pid: 122, name: udevd
5 locks held by udevd/122:
 #0:  (&sb->s_type->i_mutex_key#7/1){+.+.+.}, at: [] lock_rename+0xe8/0xf0
 #1:  (rename_lock){+.+...}, at: [] d_move+0x2c/0x60
 #2:  (&dentry->d_lock){+.+...}, at: [] dentry_lock_for_move+0xf3/0x130
 #3:  (&dentry->d_lock/2){+.+...}, at: [] dentry_lock_for_move+0xc4/0x130
 #4:  (&dentry->d_lock/3){+.+...}, at: [] dentry_lock_for_move+0xd7/0x130

Pid: 122, comm: udevd Not tainted 3.4.47-rt62 #7
Call Trace:
 [] __might_sleep+0x134/0x1f0
 [] rt_spin_lock+0x24/0x60
 [] __d_shrink+0x5c/0xa0
 [] __d_drop+0x1d/0x40
 [] __d_move+0x8e/0x320
 [] d_move+0x3e/0x60
 [] vfs_rename+0x198/0x4c0
 [] sys_renameat+0x213/0x240
 [] ? _raw_spin_unlock+0x35/0x60
 [] ? do_page_fault+0x1ec/0x4b0
 [] ? retint_swapgs+0xe/0x13
 [] ? trace_hardirqs_on_thunk+0x3a/0x3f
 [] sys_rename+0x1b/0x20
 [] system_call_fastpath+0x1a/0x1f

Since we are only taking the lock during short-lived list operations,
let's assume for now that its being raw won't be a significant latency
concern.
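For context, a minimal sketch of the kind of short-lived critical
section this lock protects (the bucket/entry names below are
hypothetical; hlist_bl_lock(), hlist_bl_unlock() and hlist_bl_add_head()
are the existing list_bl primitives):

#include <linux/list_bl.h>

/* Hypothetical entry type hanging off a bit-locked hash bucket. */
struct demo_entry {
	struct hlist_bl_node node;
	unsigned long key;
};

/*
 * The critical section is a couple of pointer updates, so the hold
 * time stays short and bounded even when, on RT, the bit spinlock is
 * replaced by a raw spinlock embedded in the head.
 */
static void demo_insert(struct hlist_bl_head *bucket, struct demo_entry *e)
{
	hlist_bl_lock(bucket);
	hlist_bl_add_head(&e->node, bucket);
	hlist_bl_unlock(bucket);
}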
Signed-off-by: Paul Gortmaker
Signed-off-by: Sebastian Andrzej Siewior
---
 include/linux/list_bl.h | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 3fc2cc57ba1b..69b659259bac 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -3,6 +3,7 @@
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>
 
 /*
@@ -33,13 +34,22 @@
 
 struct hlist_bl_head {
 	struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spinlock_t lock;
+#endif
 };
 
 struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
-	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+	h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spin_lock_init(&h->lock);
+#endif
+}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n)
 
 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(0, (unsigned long *)b);
+#else
+	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit(0, (unsigned long *)b);
+#endif
+	raw_spin_unlock(&b->lock);
+#endif
 }
 
 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
--
Gitee

From 3b04ff60f83e2e5928e55e050672b534cb8aaf2f Mon Sep 17 00:00:00 2001
From: meganz009
Date: Sat, 3 Jun 2023 15:45:58 +0800
Subject: [PATCH 2/2] list_bl: fixup bogus lockdep warning

commit 819674251514107628d4f10edb7514ab08bb57a9 upstream.

At first glance, the use of 'static inline' seems appropriate for
INIT_HLIST_BL_HEAD().  However, when a 'static inline' function
invocation is inlined by gcc, all callers share any static local data
declared within that inline function.

This presents a problem for how lockdep classes are set up.
raw_spinlocks, for example, when CONFIG_DEBUG_SPINLOCK is enabled:

# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

When this macro is expanded into a 'static inline' caller, like
INIT_HLIST_BL_HEAD():

static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
{
	h->first = NULL;
	raw_spin_lock_init(&h->lock);
}

...the static local lock_class_key object is made a function static.

For compilation units which invoke INIT_HLIST_BL_HEAD() more than once,
all of the invocations then share this same static local object.

This can lead to some very confusing lockdep splats (example below).
Solve this problem by forcing INIT_HLIST_BL_HEAD() to be a macro, which
prevents the lockdep class object sharing.
=============================================
[ INFO: possible recursive locking detected ]
4.4.4-rt11 #4 Not tainted

---
 include/linux/list_bl.h | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h
index 69b659259bac..0b5de7d9ffcf 100644
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
@@ -43,13 +43,15 @@ struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
 
-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
-{
-	h->first = NULL;
 #ifdef CONFIG_PREEMPT_RT_BASE
-	raw_spin_lock_init(&h->lock);
+#define INIT_HLIST_BL_HEAD(h) \
+do { \
+	(h)->first = NULL; \
+	raw_spin_lock_init(&(h)->lock); \
+} while (0)
+#else
+#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
 #endif
-}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
--
Gitee
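To illustrate why the macro form matters for lockdep, here is a
hypothetical compilation unit (assuming the RT configuration where the
head embeds a raw_spinlock_t and CONFIG_DEBUG_SPINLOCK is enabled).
With the 'static inline' form of INIT_HLIST_BL_HEAD(), both
initializations below would share the single static lock_class_key
inside raw_spin_lock_init(), so lockdep would put both heads in one
class and flag the nesting as possible recursive locking; with the
macro form, raw_spin_lock_init() is expanded at each call site and each
head gets its own class.  All names here are made up for illustration:

#include <linux/init.h>
#include <linux/list_bl.h>

/* Two unrelated bit-locked list heads (hypothetical users). */
static struct hlist_bl_head demo_cache_bucket;
static struct hlist_bl_head demo_frag_list;

static int __init demo_init(void)
{
	/* Two expansion sites => two lock_class_keys with the macro form. */
	INIT_HLIST_BL_HEAD(&demo_cache_bucket);
	INIT_HLIST_BL_HEAD(&demo_frag_list);
	return 0;
}

static void demo_nested(void)
{
	hlist_bl_lock(&demo_cache_bucket);
	/*
	 * Nesting an unrelated head inside the first: fine when each head
	 * has its own lockdep class, but reported as recursive locking if
	 * both heads ended up sharing one class key.
	 */
	hlist_bl_lock(&demo_frag_list);
	hlist_bl_unlock(&demo_frag_list);
	hlist_bl_unlock(&demo_cache_bucket);
}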