diff --git a/drivers/base/core.c b/drivers/base/core.c index 389d13616d1df5371576399be1e276efcc92dda0..22f3dc118f6752279c63fef95a6369faae34ddae 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2446,6 +2446,7 @@ void device_initialize(struct device *dev) mutex_init(&dev->lockdep_mutex); #endif lockdep_set_novalidate_class(&dev->mutex); + lite_lockdep_set_novalidate_class(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); device_pm_init(dev); diff --git a/fs/inode.c b/fs/inode.c index 82090bfadb0742eb6aa15fb420490fd11f8ef71b..1244e829a38fa96f52a143aa195bd15db7b06224 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -172,9 +172,11 @@ int inode_init_always(struct super_block *sb, struct inode *inode) goto out; spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); + lite_lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); init_rwsem(&inode->i_rwsem); lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); + lite_lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); atomic_set(&inode->i_dio_count, 0); @@ -991,6 +993,27 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); #endif +#ifdef CONFIG_LITE_LOCKDEP +void lockdep_annotate_inode_mutex_key(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode)) { + struct file_system_type *type = inode->i_sb->s_type; + + /* Set new key only if filesystem hasn't already changed it */ + if (lite_lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { + /* + * ensure nobody is actually holding i_mutex + */ + // mutex_destroy(&inode->i_mutex); + init_rwsem(&inode->i_rwsem); + lite_lockdep_set_class(&inode->i_rwsem, + &type->i_mutex_dir_key); + } + } +} +EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key); +#endif + /** * unlock_new_inode - clear the I_NEW state and wake up any waiters * @inode: new inode to unlock diff --git a/fs/super.c b/fs/super.c index 494bfdc6f778b3f5803adb2fc05d0c0b62295a7c..25439d41fc6cebd7b2bfdd2e26cb1a86b6f0afe1 100644 --- a/fs/super.c +++ b/fs/super.c @@ -211,6 +211,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, s->s_user_ns = get_user_ns(user_ns); init_rwsem(&s->s_umount); lockdep_set_class(&s->s_umount, &type->s_umount_key); + lite_lockdep_set_class(&s->s_umount, &type->s_umount_key); /* * sget() can have s_umount recursion. * @@ -254,6 +255,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, atomic_set(&s->s_active, 1); mutex_init(&s->s_vfs_rename_mutex); lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); + lite_lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); init_rwsem(&s->s_dquot.dqio_sem); s->s_maxbytes = MAX_NON_LFS; s->s_op = &default_op; diff --git a/include/linux/completion.h b/include/linux/completion.h index bf8e77001f18f162b65ab5eb2bdcd014f37a87aa..e7af59ee694178c3f6cba0c3545d4f9b27e84fe4 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -65,7 +65,7 @@ static inline void complete_release(struct completion *x) {} * This macro declares and initializes a completion structure on the kernel * stack. 
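
The hunks above (drivers/base/core.c, fs/inode.c, fs/super.c) simply mirror every existing lockdep_set_class()/lockdep_set_novalidate_class() call with its lite_lockdep_* counterpart, so the lite map ends up keyed the same way as the lockdep map. A minimal sketch of the same pattern for a hypothetical out-of-tree lock (all "foo" names are made up and not part of this patch):

struct foo_dev {
        spinlock_t queue_lock;
};

/* under CONFIG_LITE_LOCKDEP, lock_class_key is aliased to lite_lock_class_key */
static struct lock_class_key foo_queue_key;

static void foo_dev_init(struct foo_dev *fd)
{
        spin_lock_init(&fd->queue_lock);
        lockdep_set_class(&fd->queue_lock, &foo_queue_key);      /* no-op without lockdep proper */
        lite_lockdep_set_class(&fd->queue_lock, &foo_queue_key); /* no-op without CONFIG_LITE_LOCKDEP */
}
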
*/ -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP) # define DECLARE_COMPLETION_ONSTACK(work) \ struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ diff --git a/include/linux/fs.h b/include/linux/fs.h index db632747781a73a9f280296bb6837778bac7266f..d60228250622124df91f5318199bc3c5741bac05 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2980,7 +2980,7 @@ extern struct inode *find_inode_rcu(struct super_block *, unsigned long, extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) extern void lockdep_annotate_inode_mutex_key(struct inode *inode); #else static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; diff --git a/include/linux/lite_lockdep.h b/include/linux/lite_lockdep.h new file mode 100644 index 0000000000000000000000000000000000000000..da78d5cd5de43040ac861f3c6b154fa46a0164c0 --- /dev/null +++ b/include/linux/lite_lockdep.h @@ -0,0 +1,132 @@ +#ifndef __LINUX_LITE_LOCKDEP_H +#define __LINUX_LITE_LOCKDEP_H + +#include + +struct task_struct; + +/* sysctl */ +extern int lite_lockdep; +extern int check_reachability; +extern int detect_deadlocks; + +#ifdef CONFIG_LITE_LOCKDEP + +#include + +extern void lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lite_lockdep_map *nest_lock, unsigned long ip); + +extern void lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip); + +#define lite_lock_acquire_exclusive(l, s, t, n, i) lite_lock_acquire(l, s, t, 0, 1, n, i) +#define lite_lock_acquire_shared(l, s, t, n, i) lite_lock_acquire(l, s, t, 1, 1, n, i) +#define lite_lock_acquire_shared_recursive(l, s, t, n, i) lite_lock_acquire(l, s, t, 2, 1, n, i) + +#define lite_spin_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i) +#define lite_spin_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i) +#define lite_spin_release(l, i) lite_lock_release(l, i) + +#define lite_mutex_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i) +#define lite_mutex_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i) +#define lite_mutex_release(l, i) lite_lock_release(l, i) + +#define lite_rwsem_acquire(l, s, t, i) lite_lock_acquire_exclusive(l, s, t, NULL, i) +#define lite_rwsem_acquire_nest(l, s, t, n, i) lite_lock_acquire_exclusive(l, s, t, n, i) +#define lite_rwsem_acquire_read(l, s, t, i) lite_lock_acquire_shared(l, s, t, NULL, i) +#define lite_rwsem_release(l, i) lite_lock_release(l, i) + +struct lite_held_lock { + unsigned long acquire_ip; + struct lite_lockdep_map *instance; + struct lite_lockdep_map *nest_lock; + unsigned int subclass; + pid_t pid; + char comm[TASK_COMM_LEN]; + unsigned int class_idx:MAX_LITE_LOCKDEP_KEYS_BITS; + unsigned int trylock:1; + unsigned int read:2; + unsigned int check:1; +}; + + +struct lite_lock_list { + struct hlist_node hash_entry; + struct lite_lock_class *class; + unsigned long acquire_ip; + pid_t pid; + char comm[TASK_COMM_LEN]; + unsigned int read:2; +}; + +struct ind_cycle_list { + struct list_head cycle_entry; + struct lite_lock_class *class; +}; + +struct stack_list { + struct list_head stack_entry; + struct lite_lock_list 
*lock_entry; +}; + +struct visit_hlist { + struct hlist_node vis_entry; + struct lite_lock_class *class; +}; + +struct deadlock_entry { + unsigned long chain_head; + unsigned long chain_tail; +}; + +struct ind_cycle_entry { + const struct lite_lock_class_sub_key *head; + const struct lite_lock_class_sub_key *dep; +}; + +extern int detect_cycles_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); + +extern void lite_lockdep_print_held_locks(struct task_struct *p); + +extern void lite_debug_show_all_locks(void); + +extern void lite_lockdep_init_map_type(struct lite_lockdep_map *lock, const char *name, + struct lite_lock_class_key *key, int subclass); + +static inline void +lite_lockdep_init_map(struct lite_lockdep_map *lock, const char *name, + struct lite_lock_class_key *key, int subclass) +{ + lite_lockdep_init_map_type(lock, name, key, subclass); +} + +#define lite_lockdep_set_class(lock, key) \ + lite_lockdep_init_map(&(lock)->lite_dep_map, #key, key, 0) + +#define lite_lockdep_set_class_and_name(lock, key, name) \ + lite_lockdep_init_map(&(lock)->lite_dep_map, name, key, 0) + +#define lite_lockdep_set_novalidate_class(lock) \ + lite_lockdep_set_class_and_name(lock, &__lite_lockdep_no_validate__, #lock) + +#define lite_lockdep_match_class(lock, key) \ + lite_lockdep_match_key(&(lock)->lite_dep_map, key) + +static inline int lite_lockdep_match_key(struct lite_lockdep_map *lock, + struct lite_lock_class_key *key) +{ + return lock->key == key; +} + +#else /* !CONFIG_LITE_LOCKDEP */ + +# define lite_lock_acquire(l, s, t, r, c, n, i) do { } while (0) +# define lite_lock_release(l, i) do { } while (0) +# define lite_lockdep_set_novalidate_class(l) do { } while (0) +# define lite_lockdep_set_class(l, m) do { } while (0) + +#endif /* CONFIG_LITE_LOCKDEP */ + +#endif /* __LINUX_LITE_LOCKDEP_H */ \ No newline at end of file diff --git a/include/linux/lite_lockdep_types.h b/include/linux/lite_lockdep_types.h new file mode 100644 index 0000000000000000000000000000000000000000..8537d20299a09aa88718977ddb5391225e1fe6ad --- /dev/null +++ b/include/linux/lite_lockdep_types.h @@ -0,0 +1,70 @@ +#ifndef __LINUX_LITE_LOCKDEP_TYPES_H +#define __LINUX_LITE_LOCKDEP_TYPES_H + +#include + +#ifdef CONFIG_LITE_LOCKDEP + +#define MAX_LITE_LOCKDEP_KEYS_BITS 13 +#define MAX_LITE_LOCKDEP_KEYS (1UL << MAX_LITE_LOCKDEP_KEYS_BITS) +#define MAX_LITE_LOCKDEP_CHAINS_BITS 16 +#define INITIAL_LITE_CHAIN_KEY -1 +#define LITE_CLASSDEP_HASH_BITS 4 +#define LITE_CLASSDEP_HASH_SIZE (1UL << LITE_CLASSDEP_HASH_BITS) +#define MAX_LITE_LOCKDEP_ENTRIES 32768UL +#define TASK_COMM_LEN 16 + +struct lite_lock_class_sub_key { + char __one_byte; +} __attribute__ ((__packed__)); + +/* hash_entry is used to keep track of dynamically allocated keys. */ +struct lite_lock_class_key { + union { + struct hlist_node hash_entry; + struct lite_lock_class_sub_key sub_key[1]; + }; +}; + +struct lite_lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + struct list_head lock_entry; + + struct hlist_head dir_from[LITE_CLASSDEP_HASH_SIZE]; + + struct hlist_head dir_to[LITE_CLASSDEP_HASH_SIZE]; + + struct hlist_head ind_from[LITE_CLASSDEP_HASH_SIZE]; + + struct hlist_head ind_to[LITE_CLASSDEP_HASH_SIZE]; + + struct hlist_head ind_cycle_dir_from[LITE_CLASSDEP_HASH_SIZE]; + + const struct lite_lock_class_sub_key *key; + + const char *name; +} __no_randomize_layout; + +/* + * Map the lock object (the lock instance) to the lock-class object. 
+ * This is embedded into specific lock instances: + */ +struct lite_lockdep_map { + struct lite_lock_class_key *key; + struct lite_lock_class *class; + const char *name; +}; + +extern struct lite_lock_class_key __lite_lockdep_no_validate__; + +#else /* !CONFIG_LITE_LOCKDEP */ +struct lite_lock_class_key { }; +struct lite_lockdep_map { }; + +#endif /* CONFIG_LITE_LOCKDEP */ + +#endif /* __LINUX_LITE_LOCKDEP_TYPES_H */ diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h index 2ec9ff5a7fff070b1d48aa1d403b999e4c87bd8d..3fc6e451a2b07c8c21c4477d6b6baa2db1029a54 100644 --- a/include/linux/lockdep_types.h +++ b/include/linux/lockdep_types.h @@ -189,6 +189,15 @@ struct lockdep_map { struct pin_cookie { unsigned int val; }; +#elif defined CONFIG_LITE_LOCKDEP +#include + +#define lock_class_key lite_lock_class_key + +#define lockdep_map lite_lockdep_map + +struct pin_cookie { }; + #else /* !CONFIG_LOCKDEP */ /* diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 4d671fba3cab4c585bc9f38d3bf79edddd31d638..1a07867545f376d17c4c2fc4fc487e14499c69e0 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -63,6 +64,9 @@ struct mutex { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif +#ifdef CONFIG_LITE_LOCKDEP + struct lite_lockdep_map lite_dep_map; +#endif }; struct ww_class; @@ -125,6 +129,11 @@ do { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_SLEEP, \ } +#elif defined(CONFIG_LITE_LOCKDEP) +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .lite_dep_map = { \ + .name = #lockname, \ + } #else # define __DEP_MAP_MUTEX_INITIALIZER(lockname) #endif @@ -154,7 +163,7 @@ extern bool mutex_is_locked(struct mutex *lock); * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. 
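
For statically initialized locks, the lite __DEP_MAP_MUTEX_INITIALIZER() above (and the analogous rwsem/spinlock initializers later in the patch) only fills in .name; the class key is assigned lazily. A sketch of what that means for a plain DEFINE_MUTEX (my reading of this patch, expansion shown approximately):

static DEFINE_MUTEX(foo_mutex);
/*
 * Under CONFIG_LITE_LOCKDEP this reduces to roughly
 *      struct mutex foo_mutex = { ..., .lite_dep_map = { .name = "foo_mutex" } };
 * .lite_dep_map.key stays NULL until the first acquisition, when
 * register_lite_lock_class() -> assign_lite_lock_key() in
 * kernel/locking/lite_lockdep.c uses the static object's own address as the
 * class key.
 */
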
*/ -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); @@ -169,12 +178,19 @@ extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass); #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0) #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0) +#ifdef CONFIG_LITE_LOCKDEP +#define mutex_lock_nest_lock(lock, nest_lock) \ + _mutex_lock_nest_lock(lock, &(nest_lock)->lite_dep_map); + +#else #define mutex_lock_nest_lock(lock, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ } while (0) +#endif + #else extern void mutex_lock(struct mutex *lock); extern int __must_check mutex_lock_interruptible(struct mutex *lock); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 4c715be4871719dcaa252c9e8e5ab1445703e391..0a053bcc46dfa41012844e8e181dba9a9cfb7d52 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -51,6 +51,9 @@ struct rw_semaphore { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif +#ifdef CONFIG_LITE_LOCKDEP + struct lite_lockdep_map lite_dep_map; +#endif }; /* In all implementations count != 0 means locked */ @@ -70,6 +73,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) .name = #lockname, \ .wait_type_inner = LD_WAIT_SLEEP, \ }, +#elif defined(CONFIG_LITE_LOCKDEP) +# define __RWSEM_DEP_MAP_INIT(lockname) \ + .lite_dep_map = { \ + .name = #lockname, \ + }, #else # define __RWSEM_DEP_MAP_INIT(lockname) #endif @@ -157,7 +165,7 @@ extern void up_write(struct rw_semaphore *sem); */ extern void downgrade_write(struct rw_semaphore *sem); -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) /* * nested locking. NOTE: rwsems are not allowed to recurse * (which occurs if the same task tries to acquire the same @@ -177,12 +185,22 @@ extern void down_write_nested(struct rw_semaphore *sem, int subclass); extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass); extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); +#ifdef CONFIG_LITE_LOCKDEP +# define down_write_nest_lock(sem, nest_lock) \ +do { \ + typecheck(struct lite_lockdep_map *, &(nest_lock)->lite_dep_map); \ + _down_write_nest_lock(sem, &(nest_lock)->lite_dep_map); \ +} while (0); + +#else # define down_write_nest_lock(sem, nest_lock) \ do { \ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ } while (0); +#endif + /* * Take/release a lock when not the owner will release it. 
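
The lite variants of mutex_lock_nest_lock() above and down_write_nest_lock() here pass &(nest_lock)->lite_dep_map instead of the lockdep map (note that the lite mutex_lock_nest_lock() also drops the typecheck() and carries a stray trailing semicolon). A usage sketch with made-up structures, showing that existing nest_lock annotations keep compiling; __lite_lock_acquire() later skips nest_lock acquisitions when it builds the reachability graph:

struct foo_parent {
        struct mutex lock;
        struct list_head children;
};

struct foo_child {
        struct mutex lock;
        struct list_head node;
};

static void foo_visit_children(struct foo_parent *p)
{
        struct foo_child *c;

        mutex_lock(&p->lock);
        list_for_each_entry(c, &p->children, node) {
                /* expands to _mutex_lock_nest_lock(&c->lock, &p->lock.lite_dep_map) */
                mutex_lock_nest_lock(&c->lock, &p->lock);
                mutex_unlock(&c->lock);
        }
        mutex_unlock(&p->lock);
}
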
* diff --git a/include/linux/sched.h b/include/linux/sched.h index edd236f98f0c7b1a3f778ae4b8a48fe3d30aa133..8a9dce122f06cb38cc627e76b6edc212f439c50c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1068,6 +1068,12 @@ struct task_struct { struct held_lock held_locks[MAX_LOCK_DEPTH]; #endif +#ifdef CONFIG_LITE_LOCKDEP +# define MAX_LITE_LOCK_DEPTH 48UL + int lite_lockdep_depth; + struct lite_held_lock held_locks[MAX_LITE_LOCK_DEPTH]; +#endif + #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP) unsigned int in_ubsan; #endif diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 6694d0019a682dfdf8cc62257b3597958dd49d26..abd76877b76aa43488dd9ef48e936182f3256e5e 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h @@ -32,7 +32,11 @@ static inline void sema_init(struct semaphore *sem, int val) { static struct lock_class_key __key; *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); +#ifdef CONFIG_LITE_LOCKDEP + lite_lockdep_init_map(&sem->lock.lite_dep_map, "semaphore->lock", &__key, 0); +#else lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); +#endif } extern void down(struct semaphore *sem); diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 79897841a2cc8ad329e5237fa5b9f210cfcab5b7..c72c62d89c4194856fcd58e1922d7ce1e5dc3261 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -224,7 +225,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #define raw_spin_lock(lock) _raw_spin_lock(lock) -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) # define raw_spin_lock_nested(lock, subclass) \ _raw_spin_lock_nested(lock, subclass) @@ -252,7 +253,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) flags = _raw_spin_lock_irqsave(lock); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ do { \ typecheck(unsigned long, flags); \ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 19a9be9d97ee531f236be5f1953ac80ce729238b..22655da76de2e9801fc2fb86545580cca232fa57 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -87,7 +87,11 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) { preempt_disable(); if (do_raw_spin_trylock(lock)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_); +#else spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); +#endif return 1; } preempt_enable(); @@ -99,7 +103,8 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are * not re-enabled during lock-acquire (which the preempt-spin-ops do): */ -#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) +#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) || \ + defined(CONFIG_LITE_LOCKDEP) static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) { @@ -107,7 +112,11 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) local_irq_save(flags); preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_); +#else spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +#endif /* * On lockdep we dont want 
the hand-coded irq-enable of * do_raw_spin_lock_flags() code, because lockdep assumes @@ -125,29 +134,48 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) { local_irq_disable(); preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#endif } static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) { __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#endif } static inline void __raw_spin_lock(raw_spinlock_t *lock) { preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#endif } -#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC || CONFIG_LITE_LOCKDEP */ static inline void __raw_spin_unlock(raw_spinlock_t *lock) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_release(&lock->lite_dep_map, _RET_IP_); +#else spin_release(&lock->dep_map, _RET_IP_); +#endif do_raw_spin_unlock(lock); preempt_enable(); } @@ -155,7 +183,11 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_release(&lock->lite_dep_map, _RET_IP_); +#else spin_release(&lock->dep_map, _RET_IP_); +#endif do_raw_spin_unlock(lock); local_irq_restore(flags); preempt_enable(); @@ -163,7 +195,11 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_release(&lock->lite_dep_map, _RET_IP_); +#else spin_release(&lock->dep_map, _RET_IP_); +#endif do_raw_spin_unlock(lock); local_irq_enable(); preempt_enable(); @@ -171,7 +207,11 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_release(&lock->lite_dep_map, _RET_IP_); +#else spin_release(&lock->dep_map, _RET_IP_); +#endif do_raw_spin_unlock(lock); __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); } @@ -180,7 +220,11 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) { __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); if (do_raw_spin_trylock(lock)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_); +#else spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); +#endif return 1; } __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index b981caafe8bf1bb31ebcfab56a57d9f2238cdc1f..031740a034c6a938200cdcc20ae35a887b6f6240 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -16,6 +16,7 @@ #endif #include +#include typedef struct raw_spinlock { arch_spinlock_t raw_lock; @@ -26,6 +27,9 @@ typedef struct raw_spinlock { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif +#ifdef CONFIG_LITE_LOCKDEP + struct lite_lockdep_map lite_dep_map; +#endif } raw_spinlock_t; 
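
Taken together, the fast paths above mean a plain spin_lock()/spin_unlock() pair now funnels into the lite hooks as follows (call chain reconstructed from the code above; "my_lock" is a made-up name). Note that LOCK_CONTENDED() is bypassed on the lite path, so CONFIG_LOCK_STAT-style contention bookkeeping does not happen here:

static DEFINE_SPINLOCK(my_lock);

static void foo_example(void)
{
        spin_lock(&my_lock);
        /*
         * -> __raw_spin_lock()
         * -> lite_spin_acquire(&lock->lite_dep_map, 0, 0, _RET_IP_)
         * -> lite_lock_acquire(map, subclass=0, trylock=0, read=0, check=1,
         *                      nest_lock=NULL, ip)
         * -> do_raw_spin_lock(lock)
         */
        spin_unlock(&my_lock);
        /* -> __raw_spin_unlock() -> lite_spin_release(&lock->lite_dep_map, _RET_IP_) */
}
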
#define SPINLOCK_MAGIC 0xdead4ead @@ -43,6 +47,15 @@ typedef struct raw_spinlock { .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ } +#elif defined(CONFIG_LITE_LOCKDEP) +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .lite_dep_map = { \ + .name = #lockname, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .lite_dep_map = { \ + .name = #lockname, \ + } #else # define RAW_SPIN_DEP_MAP_INIT(lockname) # define SPIN_DEP_MAP_INIT(lockname) @@ -79,6 +92,14 @@ typedef struct spinlock { struct lockdep_map dep_map; }; #endif + +#ifdef CONFIG_LITE_LOCKDEP +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, lite_dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lite_lockdep_map lite_dep_map; + }; +#endif }; } spinlock_t; diff --git a/include/linux/swait.h b/include/linux/swait.h index 6a8c22b8c2a5fdeb3a6e9f7b3f9108920167e49c..bbcbe593e704b4f4455dc9edf869115c0c37bb30 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -75,7 +75,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name __init_swait_queue_head((q), #q, &__key); \ } while (0) -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP) # define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_swait_queue_head(&name); name; }) # define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ diff --git a/include/linux/wait.h b/include/linux/wait.h index 9b8b0833100a0b736699f429ddb9f568f5d3f0fd..ef26a852576f647f5bffae2d57919a0798e9def6 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -69,7 +69,7 @@ extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *n __init_waitqueue_head((wq_head), #wq_head, &__key); \ } while (0) -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) || defined(CONFIG_LITE_LOCKDEP) # define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ({ init_waitqueue_head(&name); name; }) # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \ diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index 6ecf2a0220dbe6c21ec8a3176c39b48823b0aaa7..08c14d2b101bba474747cd82955f7ccf915d62b2 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -39,7 +39,7 @@ struct ww_acquire_ctx { struct ww_class *ww_class; struct ww_mutex *contending_lock; #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) struct lockdep_map dep_map; #endif #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH diff --git a/include/trace/events/lite_lock.h b/include/trace/events/lite_lock.h new file mode 100644 index 0000000000000000000000000000000000000000..e0c08a34baa0c869447c019ef8add7dc49baf4ee --- /dev/null +++ b/include/trace/events/lite_lock.h @@ -0,0 +1,68 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM lite_lock + +#if !defined(_TRACE_LITE_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_LITE_LOCK_H + +#include +#include + +#ifdef CONFIG_LITE_LOCKDEP + +TRACE_EVENT(lock_acquire_lite, + + TP_PROTO(struct lite_lockdep_map *lock, unsigned int subclass, + int trylock, int read, int check, + struct lite_lockdep_map *next_lock, unsigned long ip), + + TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), + + TP_STRUCT__entry( + __field(unsigned int, flags) + __string(name, lock->name) + __field(void *, lockdep_addr) + ), + + TP_fast_assign( + __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("======== %p %s%s%s", __entry->lockdep_addr, + (__entry->flags & 1) ? "try " : "", + (__entry->flags & 2) ? 
"read " : "", + __get_str(name)) +); + +DECLARE_EVENT_CLASS(lock, + + TP_PROTO(struct lite_lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip), + + TP_STRUCT__entry( + __string( name, lock->name ) + __field( void *, lockdep_addr ) + ), + + TP_fast_assign( + __assign_str(name, lock->name); + __entry->lockdep_addr = lock; + ), + + TP_printk("======== %p %s", __entry->lockdep_addr, __get_str(name)) +); + +DEFINE_EVENT(lock, lock_release_lite, + + TP_PROTO(struct lite_lockdep_map *lock, unsigned long ip), + + TP_ARGS(lock, ip) +); + +#endif /* CONFIG_LITE_LOCKDEP */ + +#endif /* _TRACE_LITE_LOCK_H */ + +#include \ No newline at end of file diff --git a/kernel/hung_task.c b/kernel/hung_task.c index 396ebaebea3fea3578fe295c73cf041530a91d93..b58d1b427db9525b3c3a30f3477da953266e9131 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -139,6 +139,10 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) if (sysctl_hung_task_all_cpu_backtrace) hung_task_show_all_bt = true; + +#ifdef CONFIG_LITE_LOCKDEP + lite_debug_show_all_locks(); +#endif } touch_nmi_watchdog(); diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 6d11cfb9b41f28ebab4cc0b980a7a42b7c3b358c..4bf1fa581b71b08ffc30faad4587e1f5b519fe0b 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -17,6 +17,7 @@ endif obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o obj-$(CONFIG_LOCKDEP) += lockdep.o +obj-$(CONFIG_LITE_LOCKDEP) += lite_lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o endif diff --git a/kernel/locking/lite_lockdep.c b/kernel/locking/lite_lockdep.c new file mode 100644 index 0000000000000000000000000000000000000000..aa2cce507589121a7c7134ccfdbdc1d13c7d2786 --- /dev/null +++ b/kernel/locking/lite_lockdep.c @@ -0,0 +1,1637 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include + +int lite_lockdep = CONFIG_LITE_LOCKDEP; +module_param(lite_lockdep, int, 0644); + +#ifdef CONFIG_LOCK_REACHABILITY +int check_reachability = 1; +#else +int check_reachability = 0; +#endif +module_param(check_reachability, int, 0644); + +int detect_deadlocks = 0; +module_param(detect_deadlocks, int, 0644); + +/* + * The hash-table for lite-lockdep classes: + */ +#define LITE_CLASSHASH_BITS (MAX_LITE_LOCKDEP_KEYS_BITS - 1) +#define LITE_CLASSHASH_SIZE (1UL << LITE_CLASSHASH_BITS) +#define __liteclasshashfn(key) hash_long((unsigned long)key, LITE_CLASSHASH_BITS) +#define liteclasshashentry(key) (lite_classhash_table + __liteclasshashfn((key))) + +static struct hlist_head lite_classhash_table[LITE_CLASSHASH_SIZE]; + +#define LITE_KEYHASH_BITS (MAX_LITE_LOCKDEP_KEYS_BITS - 1) +#define LITE_KEYHASH_SIZE (1UL << LITE_KEYHASH_BITS) +static struct hlist_head lite_lock_keys_hash[LITE_KEYHASH_SIZE]; + +unsigned long nr_lite_lock_classes; +struct lite_lock_class lite_lock_classes[MAX_LITE_LOCKDEP_KEYS]; +static DECLARE_BITMAP(lite_lock_classes_in_use, MAX_LITE_LOCKDEP_KEYS); + +unsigned long nr_lite_list_entries; +static struct lite_lock_list lite_list_entries[MAX_LITE_LOCKDEP_ENTRIES]; +static DECLARE_BITMAP(lite_list_entries_in_use, MAX_LITE_LOCKDEP_ENTRIES); + +/* Temporarily saves the cycles to be printed. 
*/ +unsigned long nr_ind_cycle_entries; +static struct ind_cycle_list ind_cycle_entries[LITE_CLASSHASH_SIZE]; +static DECLARE_BITMAP(ind_cycle_entries_in_use, LITE_CLASSHASH_SIZE); + +/* Records entries of current path in dfs. */ +unsigned long nr_stack_entries; +static struct stack_list stack_entries[LITE_CLASSHASH_SIZE]; +static DECLARE_BITMAP(stack_entries_in_use, LITE_CLASSHASH_SIZE); + +/* Indicate whether an item has been visited in dfs. */ +unsigned long nr_visit_entries; +static struct visit_hlist visit_entries[LITE_CLASSHASH_SIZE]; +static DECLARE_BITMAP(visit_entries_in_use, LITE_CLASSHASH_SIZE); +static DEFINE_HASHTABLE(visited, LITE_CLASSHASH_BITS); + +/* Indicate equivalent deadlocks. */ +unsigned long nr_detected_deadlocks; +static struct deadlock_entry detected_deadlocks[LITE_CLASSHASH_SIZE]; + +/* Indicate detected cycles. */ +unsigned long nr_checked_cycles; +static struct ind_cycle_entry checked_cycles[LITE_CLASSHASH_SIZE]; + +static LIST_HEAD(all_lite_lock_classes); +static LIST_HEAD(free_lite_lock_classes); + +/* + * lite_lockdep_lock: protects the reachability graph, and + * other shared data structures. + */ +static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; +static struct task_struct *__owner; + +struct lite_lock_class_key __lite_lockdep_no_validate__; +EXPORT_SYMBOL_GPL(__lite_lockdep_no_validate__); + +static inline void lite_lockdep_lock(void) +{ + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + + arch_spin_lock(&__lock); + __owner = current; +} + +static inline void lite_lockdep_unlock(void) +{ + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + + if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current)) + return; + + __owner = NULL; + arch_spin_unlock(&__lock); +} + +static int lite_graph_lock(void) +{ + lite_lockdep_lock(); + + if (!debug_locks) { + lite_lockdep_unlock(); + return 0; + } + return 1; +} + +static inline void lite_graph_unlock(void) +{ + lite_lockdep_unlock(); +} + +static inline int lite_debug_locks_off_graph_unlock(void) +{ + int ret = debug_locks_off(); + + lite_lockdep_unlock(); + + return ret; +} + +static inline +struct hlist_head *litekeyhashentry(const struct lock_class_key *key) +{ + unsigned long hash = hash_long((uintptr_t)key, LITE_KEYHASH_BITS); + + return lite_lock_keys_hash + hash; +} + +static inline +struct hlist_head *litedephashentry(struct hlist_head *head, + const struct lite_lock_class_sub_key *key) +{ + unsigned long hash = hash_long((unsigned long)key, LITE_CLASSDEP_HASH_BITS); + + return head + hash; +} + +/** + * Judge if the address of a static object, same + * as the one in lockdep.c. + */ +#ifdef __KERNEL__ +static int static_obj(const void *obj) +{ + unsigned long start = (unsigned long) &_stext, + end = (unsigned long) &_end, + addr = (unsigned long) obj; + + if (arch_is_kernel_initmem_freed(addr)) + return 0; + + if ((addr >= start) && (addr < end)) + return 1; + + if (arch_is_kernel_data(addr)) + return 1; + + if (is_kernel_percpu_address(addr)) + return 1; + + return is_module_address(addr) || is_module_percpu_address(addr); +} +#endif + +/* Check whether a key has been registered as a dynamic key, + * same as the one in lockdep.c. 
+ */ +static bool is_dynamic_key(const struct lite_lock_class_key *key) +{ + struct hlist_head *hash_head; + struct lite_lock_class_key *k; + bool found = false; + + if (WARN_ON_ONCE(static_obj(key))) + return false; + + if (!debug_locks) + return true; + + hash_head = litekeyhashentry(key); + + rcu_read_lock(); + hlist_for_each_entry_rcu(k, hash_head, hash_entry) { + if (k == key) { + found = true; + break; + } + } + rcu_read_unlock(); + + return found; +} + +/** + * Assign lock keys, same as the one in lockdep.c. + */ +static bool assign_lite_lock_key(struct lite_lockdep_map *lock) +{ + unsigned long can_addr, addr = (unsigned long)lock; + + if (__is_kernel_percpu_address(addr, &can_addr)) + lock->key = (void *)can_addr; + else if (__is_module_percpu_address(addr, &can_addr)) + lock->key = (void *)can_addr; + else if (static_obj(lock)) + lock->key = (void *)lock; + else { + debug_locks_off(); + pr_err("INFO: trying to register non-static key.\n"); + pr_err("you didn't initialize this object before use?\n"); + pr_err("turning off the locking correctness validator.\n"); + dump_stack(); + return false; + } + + return true; +} + +static inline struct lite_lock_class *lite_hlock_class(struct lite_held_lock *hlock) +{ + unsigned int class_idx = hlock->class_idx; + + barrier(); + + if (!test_bit(class_idx, lite_lock_classes_in_use)) { + DEBUG_LOCKS_WARN_ON(1); + return NULL; + } + + return lite_lock_classes + class_idx; +} + +const char *__get_key_name(const struct lite_lock_class_sub_key *key, char *str) +{ + return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); +} + +static void lite_print_lock_name(struct lite_lock_class *class) +{ + char str[KSYM_NAME_LEN]; + const char *name; + + name = class->name; + if (!name) { + name = __get_key_name(class->key, str); + printk(KERN_CONT "%s", name); + } else { + printk(KERN_CONT "%s", name); + } +} + +static void lite_print_lock(struct lite_held_lock *hlock) +{ + struct lite_lock_class *lock = lite_hlock_class(hlock); + + if (!lock) { + printk(KERN_CONT "\n"); + return; + } + + printk(KERN_CONT "%px", hlock->instance); + lite_print_lock_name(lock); + printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip); +} + +void lite_lockdep_print_held_locks(struct task_struct *p) +{ + int i, depth = READ_ONCE(p->lite_lockdep_depth); + + if (!depth) + printk("no locks held by %s/%d.\n", p->comm, task_pid_nr(p)); + else + printk("%d lock%s held by %s/%d:\n", depth, + depth > 1 ? "s" : "", p->comm, task_pid_nr(p)); + + if (p->state == TASK_RUNNING && p != current) + return; + for (i = 0; i < depth; i++) { + printk(" #%d: ", i); + lite_print_lock(p->held_locks + i); + } +} + +#ifdef __KERNEL__ +void lite_debug_show_all_locks(void) +{ + struct task_struct *g, *p; + + if (unlikely(!debug_locks)) { + pr_warn("INFO: lite-lockdep is turned off.\n"); + return; + } + pr_warn("\nShowing all locks held in the system:\n"); + + rcu_read_lock(); + for_each_process_thread(g, p) { + if (!p->lite_lockdep_depth) + continue; + lite_lockdep_print_held_locks(p); + touch_nmi_watchdog(); + touch_all_softlockup_watchdogs(); + } + rcu_read_unlock(); + + pr_warn("\n"); + pr_warn("=============================================\n\n"); +} +EXPORT_SYMBOL_GPL(lite_debug_show_all_locks); +#endif + +/* If the heads and the tails of two cycles are the same, + * we consider they are identical deadlocks. 
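
For reference, lite_lockdep_print_held_locks()/lite_print_lock() above emit a dump of this shape (every value below is invented; note that, unlike lockdep's print_lock(), no separator or parentheses are printed between the instance address and the class name):

/*
 * 2 locks held by kworker/1:2/312:
 *  #0: ffff8881234a5678foo_queue_lock, at: foo_work_fn+0x40/0x120
 *  #1: ffff888123466000foo_stats_lock, at: foo_update_stats+0x24/0x80
 */
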
+ */ +static bool deadlock_checked(unsigned long head, unsigned long tail) +{ + struct deadlock_entry *deadlock; + int i; + + for (i = 0; i < nr_detected_deadlocks; i++) { + deadlock = detected_deadlocks + i; + if (deadlock->chain_head == head && + deadlock->chain_tail == tail) + return true; + } + + return false; +} + +static int add_deadlock(unsigned long head, unsigned long tail) +{ + struct deadlock_entry *deadlock; + + if (nr_detected_deadlocks >= LITE_CLASSHASH_SIZE) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: max detected_deadlocks size too small!"); + dump_stack(); + return 0; + } + + deadlock = detected_deadlocks + nr_detected_deadlocks; + deadlock->chain_head = head; + deadlock->chain_tail = tail; + nr_detected_deadlocks++; + + return 1; +} + +/** + * Returns 2 on deadlocks has already been checked. + * Returns 1 on OK. + */ +static int record_dir_deadlocks(struct lite_lock_class *first, + struct lite_lock_class *next) +{ + unsigned long first_key = (unsigned long)first->key; + unsigned long next_key = (unsigned long)next->key; + unsigned long bigger = first_key > next_key ? first_key : next_key; + unsigned long smaller = first_key < next_key ? first_key : next_key; + int ret; + + if (deadlock_checked(bigger, smaller)) + return 2; + + ret = add_deadlock(bigger, smaller); + if (!ret) + return 0; + + return 1; +} + +static int record_ind_deadlocks(struct list_head *stack) +{ + struct stack_list *first, *entry; + struct lite_lock_class *class; + unsigned long bigger, smaller, curr_key; + int ret; + + first = list_first_entry(stack, struct stack_list, stack_entry); + bigger = (unsigned long)first->lock_entry->class->key; + smaller = (unsigned long)first->lock_entry->class->key; + + list_for_each_entry(entry, stack, stack_entry) { + class = entry->lock_entry->class; + curr_key = (unsigned long)class->key; + if (curr_key < smaller) + smaller = curr_key; + if (curr_key > bigger) + bigger = curr_key; + } + + if (deadlock_checked(bigger, smaller)) + return 2; + + ret = add_deadlock(bigger, smaller); + if (!ret) + return 0; + + return 1; +} + +/* If the keys of two pair of locks are the same, + * we consider they are identical cycles. 
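
A concrete reading of the dedup scheme in record_dir_deadlocks()/record_ind_deadlocks() above: for two classes whose keys satisfy K1 < K2, the cycles K1 -> K2 -> K1 and K2 -> K1 -> K2 both record (chain_head = K2, chain_tail = K1), so the same two-lock deadlock is reported only once. For longer cycles, record_ind_deadlocks() takes the max/min key over every class on the DFS stack, which makes the stored pair independent of where the cycle happens to be entered.
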
+ */ +static bool cycle_checked(struct lite_lock_class *lock, + struct lite_lock_class *dep) +{ + struct ind_cycle_entry ind_cycle; + bool found = false; + int i; + + for (i = 0; i < nr_checked_cycles; i++) { + ind_cycle = checked_cycles[i]; + if (ind_cycle.head == lock->key && + ind_cycle.dep == dep->key) + found = true; + } + + return found; +} + +static int add_checked_cycle(struct lite_lock_class *lock, + struct lite_lock_class *dep) +{ + struct ind_cycle_entry *ind_cycle; + + if (nr_checked_cycles >= LITE_CLASSHASH_SIZE) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: max checked_cycles size too small!"); + dump_stack(); + return 0; + } + + ind_cycle = &checked_cycles[nr_checked_cycles]; + ind_cycle->head = lock->key; + ind_cycle->dep = dep->key; + + nr_checked_cycles++; + + return 1; +} + +static void print_lite_kernel_ident(void) +{ + printk("%s %.*s %s\n", init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version, + print_tainted()); +} + +static noinline void print_dir_deadlock_bug(struct lite_lock_class *prev, + struct lite_lock_list *next) +{ + struct task_struct *curr = current; + struct lite_lock_list *entry; + struct hlist_head *head = next->class->dir_from; + const struct lite_lock_class_sub_key *key = prev->key; + unsigned int from_read, to_read; + unsigned long from_ip, to_ip; + pid_t from_pid, to_pid; + char *from_comm, *to_comm; + bool found = false; + + if (record_dir_deadlocks(prev, next->class) == 2) + return; + + if (debug_locks) { + if (!lite_debug_locks_off_graph_unlock()) + return; + } + + hlist_for_each_entry(entry, litedephashentry(head, key), hash_entry) { + if (entry->class->key == key) { + from_read = entry->read; + from_ip = entry->acquire_ip; + from_pid = entry->pid; + from_comm = entry->comm; + found = true; + break; + } + } + + if (!found) + WARN_ON(1); + + /* If the entry of next is in DirectFrom(prev), then the entry of + * prev can be found in DirectTo(class of next). 
+ */ + found = false; + head = next->class->dir_to; + + hlist_for_each_entry(entry, litedephashentry(head, key), hash_entry) { + if (entry->class->key == key) { + to_read = entry->read; + to_ip = entry->acquire_ip; + to_pid = entry->pid; + to_comm = entry->comm; + found = true; + break; + } + } + + if (!found) + WARN_ON(1); + + pr_warn("\n"); + pr_warn("======================================================\n"); + pr_warn("WARNING: possible circular locking dependency detected\n"); + print_lite_kernel_ident(); + pr_warn("------------------------------------------------------\n"); + pr_warn("\nthe existing dependency chain is:\n"); + + lite_print_lock_name(prev); + printk(KERN_CONT ", at: %pS", (void *)from_ip); + printk(KERN_CONT ", %lx", from_ip); + printk(KERN_CONT ", held by %s/%d\n", from_comm, from_pid); + + printk("\n-- depends on -->\n\n"); + + lite_print_lock_name(next->class); + printk(KERN_CONT ", at: %pS", (void *)next->acquire_ip); + printk(KERN_CONT ", %lx", next->acquire_ip); + printk(KERN_CONT ", held by %s/%d\n", next->comm, next->pid); + + printk("\n-- depends on -->\n\n"); + + lite_print_lock_name(prev); + printk(KERN_CONT ", at: %pS", (void *)to_ip); + printk(KERN_CONT ", %lx", to_ip); + printk(KERN_CONT ", held by %s/%d\n", to_comm, to_pid); + printk("\n"); + + lite_lockdep_print_held_locks(curr); +} + +static void print_ind_deadlock_bug(struct list_head *stack) +{ + struct task_struct *curr = current; + struct stack_list *prev, *next, *last, *last_prev; + struct hlist_head *head; + struct lite_lock_list *entry; + struct lite_lock_class_sub_key *key; + bool found = false; + + if (record_ind_deadlocks(stack) == 2) + return; + + if (debug_locks) { + if (!lite_debug_locks_off_graph_unlock()) + return; + } + + /* The last entry is filled in dfs_head. 
*/ + last = list_last_entry(stack, struct stack_list, stack_entry); + last_prev = list_prev_entry(last, stack_entry); + + head = last_prev->lock_entry->class->dir_to; + key = last->lock_entry->class->key; + + hlist_for_each_entry(entry, litedephashentry(head, key), hash_entry) { + if (entry->class->key == key) { + last->lock_entry->read = entry->read; + last->lock_entry->acquire_ip = entry->acquire_ip; + last->lock_entry->pid = entry->pid; + strcpy(last->lock_entry->comm, entry->comm); + found = true; + break; + } + } + + if (!found) + WARN_ON(1); + + pr_warn("\n"); + pr_warn("======================================================\n"); + pr_warn("WARNING: possible circular locking dependency detected\n"); + print_lite_kernel_ident(); + pr_warn("------------------------------------------------------\n"); + pr_warn("\nthe existing dependency chain is:\n"); + + list_for_each_entry(prev, stack, stack_entry) { + next = list_next_entry(prev, stack_entry); + + lite_print_lock_name(prev->lock_entry->class); + printk(KERN_CONT ", at: %pS", (void *)prev->lock_entry->acquire_ip); + printk(KERN_CONT ", %lx", prev->lock_entry->acquire_ip); + printk(KERN_CONT ", held by %s/%d\n", prev->lock_entry->comm, + prev->lock_entry->pid); + + if (!list_entry_is_head(next, stack, stack_entry)) { + printk("\n-- depends on -->\n\n"); + continue; + } + + printk("\n"); + } + + lite_lockdep_print_held_locks(curr); +} + +static void init_data_structures_once(void) +{ + static bool __read_mostly initialized; + int i; + + if (likely(initialized)) + return; + + initialized = true; + + for (i = 0; i < ARRAY_SIZE(lite_lock_classes); i++) { + list_add_tail(&lite_lock_classes[i].lock_entry, &free_lite_lock_classes); + hash_init(lite_lock_classes[i].dir_from); + hash_init(lite_lock_classes[i].dir_to); + hash_init(lite_lock_classes[i].ind_from); + hash_init(lite_lock_classes[i].ind_to); + hash_init(lite_lock_classes[i].ind_cycle_dir_from); + } +} + +static noinstr struct lite_lock_class * +look_up_lite_lock_class(const struct lite_lockdep_map *lock) +{ + struct lite_lock_class_sub_key *key; + struct hlist_head *hash_head; + struct lite_lock_class *class; + + if (unlikely(!lock->key)) + return NULL; + + key = lock->key->sub_key; + + hash_head = liteclasshashentry(key); + + if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) + return NULL; + + hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) { + if (class->key == key) { + WARN_ON_ONCE(class->name != lock->name && + lock->key != &__lite_lockdep_no_validate__); + return class; + } + } + + return NULL; +} + +/* + * Register a lock's class in the hash-table. 
+ */ +static struct lite_lock_class * +register_lite_lock_class(struct lite_lockdep_map *lock) +{ + struct lite_lock_class_sub_key *key; + struct hlist_head *hash_head; + struct lite_lock_class *class; + + DEBUG_LOCKS_WARN_ON(!irqs_disabled()); + + class = look_up_lite_lock_class(lock); + + if (likely(class)) + goto out_set_class; + + if (!lock->key) { + if (!assign_lite_lock_key(lock)) + return NULL; + } else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) { + return NULL; + } + + key = lock->key->sub_key; + hash_head = liteclasshashentry(key); + + if (!lite_graph_lock()) { + return NULL; + } + + hlist_for_each_entry_rcu(class, hash_head, hash_entry) { + if (class->key == key) + goto out_unlock_set; + } + + init_data_structures_once(); + + class = list_first_entry_or_null(&free_lite_lock_classes, typeof(*class), + lock_entry); + + if (!class) { + printk(KERN_DEBUG "BUG: MAX_LOCKDEP_KEYS too low!"); + dump_stack(); + return NULL; + } + + nr_lite_lock_classes++; + __set_bit(class - lite_lock_classes, lite_lock_classes_in_use); + + class->key = key; + class->name = lock->name; + + hlist_add_head_rcu(&class->hash_entry, hash_head); + + list_move_tail(&class->lock_entry, &all_lite_lock_classes); + +out_unlock_set: + lite_graph_unlock(); + +out_set_class: + lock->class = class; + return class; +} + +/** + * Check whether the provided key is in a hash table. + */ +static bool in_lite_hlist_possible(struct hlist_head *head, + const struct lite_lock_class_sub_key *key) +{ + struct lite_lock_list *entry; + + hlist_for_each_entry(entry, litedephashentry(head, key), hash_entry) { + if (entry->class->key == key) + return true; + } + + return false; +} + +/* + * Allocate a dependency entry, assumes the graph_lock held. + */ +static struct lite_lock_list *alloc_lite_list_entry(void) +{ + int idx = find_first_zero_bit(lite_list_entries_in_use, + ARRAY_SIZE(lite_list_entries)); + + if (idx >= ARRAY_SIZE(lite_list_entries)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: MAX_LITE_LOCKDEP_ENTRIES too low!"); + dump_stack(); + return NULL; + } + nr_lite_list_entries++; + __set_bit(idx, lite_list_entries_in_use); + return lite_list_entries + idx; +} + +/** + * Add a new dependency to the head of the hash list. 
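
Each class keeps its edge sets in small per-class hash tables (dir_from[], dir_to[], ind_from[], ind_to[] in lite_lockdep_types.h), bucketed by litedephashentry() on the peer class key. A hypothetical helper (not part of this patch) using the same hash_for_each() idiom as propagate_reachability() below, walking every direct successor recorded for a class:

static void foo_print_direct_successors(struct lite_lock_class *class)
{
        struct lite_lock_list *entry;
        int bkt;

        hash_for_each(class->dir_from, bkt, entry, hash_entry)
                printk(KERN_DEBUG "%s -> %s\n",
                       class->name ?: "<no name>",
                       entry->class->name ?: "<no name>");
}
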
+ */ +static int +add_lite_lock_to_hlist(struct hlist_head *head, struct lite_lock_class *class, + unsigned int read, unsigned long acquire_ip, + pid_t pid, char *comm) +{ + struct lite_lock_list *entry; + const struct lite_lock_class_sub_key *key = class->key; + + if (in_lite_hlist_possible(head, key)) + return 2; + + entry = alloc_lite_list_entry(); + if (!entry) + return 0; + + entry->class = class; + entry->read = read; + entry->acquire_ip = acquire_ip; + entry->pid = pid; + strcpy(entry->comm, comm); + + hlist_add_head_rcu(&entry->hash_entry, litedephashentry(head, key)); + + return 1; +} + +static struct ind_cycle_list *alloc_ind_cycle_entry(void) +{ + int idx = find_first_zero_bit(ind_cycle_entries_in_use, + ARRAY_SIZE(ind_cycle_entries)); + + if (idx >= ARRAY_SIZE(ind_cycle_entries)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: MAX_IND_CYCLE_ENTRIES too low!"); + dump_stack(); + return NULL; + } + nr_ind_cycle_entries++; + __set_bit(idx, ind_cycle_entries_in_use); + return ind_cycle_entries + idx; +} + +static int +add_cycle_to_list(struct list_head *head, struct lite_lock_class *class) +{ + struct ind_cycle_list *entry; + + entry = alloc_ind_cycle_entry(); + if (!entry) + return 0; + + entry->class = class; + + list_add_rcu(&entry->cycle_entry, head); + + return 1; +} + +static void init_cycle_list(void) +{ + unsigned long pos; + for_each_set_bit(pos, ind_cycle_entries_in_use, + ARRAY_SIZE(ind_cycle_entries)) { + __clear_bit(pos, ind_cycle_entries_in_use); + } + nr_ind_cycle_entries = 0; +} + +static struct stack_list *alloc_stack_entry(void) +{ + int idx = find_first_zero_bit(stack_entries_in_use, + ARRAY_SIZE(stack_entries)); + + if (idx >= ARRAY_SIZE(stack_entries)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: MAX_STACK_ENTRIES too low!"); + dump_stack(); + return NULL; + } + nr_stack_entries++; + __set_bit(idx, stack_entries_in_use); + return stack_entries + idx; +} + +static int add_stack_to_list(struct list_head *head, + struct lite_lock_list *lock_entry) +{ + struct stack_list *entry; + + entry = alloc_stack_entry(); + if (!entry) + return 0; + + entry->lock_entry = lock_entry; + + list_add_rcu(&entry->stack_entry, head); + + return 1; +} + +static void del_stack_in_list(struct list_head *node) +{ + struct stack_list *entry = list_entry(node, struct stack_list, stack_entry); + int idx = entry - stack_entries; + + if (!test_bit(idx, stack_entries_in_use)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: unbalanced MAX_LITE_LOCKDEP_STACK_ENTRIES del!"); + dump_stack(); + } + + __clear_bit(idx, stack_entries_in_use); + + list_del_init(&entry->stack_entry); + nr_stack_entries--; +} + +static void init_stack_list(void) +{ + unsigned long pos; + for_each_set_bit(pos, stack_entries_in_use, + ARRAY_SIZE(stack_entries)) { + __clear_bit(pos, stack_entries_in_use); + } + nr_stack_entries = 0; +} + +static struct visit_hlist *alloc_visit_entry(void) +{ + int idx = find_first_zero_bit(visit_entries_in_use, + ARRAY_SIZE(visit_entries)); + + if (idx >= ARRAY_SIZE(visit_entries)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: MAX_LITE_LOCKDEP_VISIT_ENTRIES too low!"); + dump_stack(); + return NULL; + } + nr_visit_entries++; + __set_bit(idx, visit_entries_in_use); + return visit_entries + idx; +} + +static int add_visit_to_hlist(struct hlist_head *head, + struct lite_lock_class *class) +{ + struct visit_hlist *entry; + + entry = alloc_visit_entry(); + if (!entry) + return 0; 
+ + entry->class = class; + + hlist_add_head_rcu(&entry->vis_entry, litedephashentry(head, class->key)); + + return 1; +} + +static void init_visit_hlist(void) +{ + unsigned long pos; + for_each_set_bit(pos, visit_entries_in_use, + ARRAY_SIZE(visit_entries)) { + __clear_bit(pos, visit_entries_in_use); + } + nr_visit_entries = 0; +} + +/* Find hlist_node and delete it */ +static void del_visit_in_hlist(struct hlist_head *head, + const struct lite_lock_class_sub_key *key) +{ + struct visit_hlist *entry; + int idx; + + hlist_for_each_entry(entry, litedephashentry(head, key), vis_entry) { + if (entry->class->key == key) { + idx = entry - visit_entries; + + if (!test_bit(idx, visit_entries_in_use)) { + debug_locks_off(); + lite_lockdep_unlock(); + + printk(KERN_DEBUG "BUG: unbalanced MAX_LITE_LOCKDEP_VISIT_ENTRIES del!"); + dump_stack(); + } + + __clear_bit(idx, visit_entries_in_use); + hash_del(&entry->vis_entry); + nr_visit_entries--; + + break; + } + } +} + +/* + * Update reachability graph dued to the direct edge: prev → next. + * Then all indirect reachabilities are constructed. + */ +static void +propagate_reachability(struct lite_held_lock *p, struct lite_held_lock *n) +{ + struct lite_lock_class *prev = lite_hlock_class(p); + struct lite_lock_class *next = lite_hlock_class(n); + struct lite_lock_class *_prev, *_next; + struct lite_lock_list *p_entry, *n_entry; + int i, j; + unsigned int read; + unsigned long ip; + pid_t pid; + char *comm; + + hash_for_each(prev->dir_to, i, p_entry, hash_entry) { + _prev = p_entry->class; + hash_for_each(next->dir_from, j, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(_prev->ind_from, _next, read, ip, + pid, comm); + } + hash_for_each(next->ind_from, j, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(_prev->ind_from, _next, read, ip, + pid, comm); + } + add_lite_lock_to_hlist(_prev->ind_from, next, n->read, n->acquire_ip, + n->pid, n->comm); + } + hash_for_each(prev->ind_to, i, p_entry, hash_entry) { + _prev = p_entry->class; + hash_for_each(next->dir_from, j, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(_prev->ind_from, _next, read, ip, + pid, comm); + } + hash_for_each(next->ind_from, j, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(_prev->ind_from, _next, read, ip, + pid, comm); + } + add_lite_lock_to_hlist(_prev->ind_from, next, n->read, n->acquire_ip, + n->pid, n->comm); + } + + hash_for_each(next->dir_from, i, n_entry, hash_entry) { + _next = n_entry->class; + hash_for_each(prev->dir_to, j, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(_next->ind_to, _prev, read, ip, + pid, comm); + } + hash_for_each(prev->ind_to, j, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(_next->ind_to, _prev, read, ip, + pid, comm); + } + add_lite_lock_to_hlist(_next->ind_to, prev, p->read, p->acquire_ip, + p->pid, p->comm); + } + 
hash_for_each(next->ind_from, i, n_entry, hash_entry) { + _next = n_entry->class; + hash_for_each(prev->dir_to, j, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(_next->ind_to, _prev, read, ip, + pid, comm); + } + hash_for_each(prev->ind_to, j, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(_next->ind_to, _prev, read, ip, + pid, comm); + } + add_lite_lock_to_hlist(_next->ind_to, prev, p->read, p->acquire_ip, + p->pid, p->comm); + } + + hash_for_each(next->dir_from, i, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(prev->ind_from, _next, read, ip, + pid, comm); + } + hash_for_each(next->ind_from, i, n_entry, hash_entry) { + _next = n_entry->class; + read = n_entry->read; + ip = n_entry->acquire_ip; + pid = n_entry->pid; + comm = n_entry->comm; + add_lite_lock_to_hlist(prev->ind_from, _next, read, ip, + pid, comm); + } + + hash_for_each(prev->dir_to, i, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(next->ind_to, _prev, read, ip, + pid, comm); + } + hash_for_each(prev->ind_to, i, p_entry, hash_entry) { + _prev = p_entry->class; + read = p_entry->read; + ip = p_entry->acquire_ip; + pid = p_entry->pid; + comm = p_entry->comm; + add_lite_lock_to_hlist(next->ind_to, _prev, read, ip, + pid, comm); + } +} + +/** + * Search a complete cycle catched by detect_cycles. + */ +static void dfs(struct lite_lock_list *entry, struct list_head *stack, + struct hlist_head *visited) +{ + struct lite_lock_class *lock = entry->class; + struct lite_lock_class *ind_lock; + struct lite_lock_list *ind_entry; + struct stack_list *st_entry; + int i; + + if (entry->read == 2) + return; + + add_stack_to_list(stack, entry); + + hash_for_each(lock->ind_cycle_dir_from, i, ind_entry, hash_entry) { + ind_lock = ind_entry->class; + st_entry = list_entry(stack->prev, struct stack_list, stack_entry); + if (st_entry->lock_entry->class->key == ind_lock->key) { + add_stack_to_list(stack, ind_entry); + print_ind_deadlock_bug(stack); + del_stack_in_list(stack->next); + del_stack_in_list(stack->next); + return; + } + + if(!in_lite_hlist_possible(visited, ind_lock->key)) { + add_visit_to_hlist(visited, ind_lock); + dfs(ind_entry, stack, visited); + del_visit_in_hlist(visited, ind_lock->key); + } + } + + del_stack_in_list(stack->next); +} + +/** + * Make a dummy entry for the @class and start searching. + */ +static void dfs_head(struct lite_lock_class *class, struct list_head *stack, + struct hlist_head *visited) +{ + struct lite_lock_list lock_entry; + INIT_HLIST_NODE(&lock_entry.hash_entry); + lock_entry.class = class; + lock_entry.read = 0; + lock_entry.acquire_ip = _RET_IP_; + lock_entry.pid = 0; + lock_entry.comm[0] = '\0'; + dfs(&lock_entry, stack, visited); +} + +/** + * First, check on simple cycles. DFS will be + * performed if only simple cycle exists. + * This function is called by detect_cycles_handler. 
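
detect_cycles() below is driven from detect_cycles_handler(), whose registration is not part of this excerpt; presumably it is wired up as a sysctl, roughly as in the sketch below (the table and its registration are assumptions; only the handler and the detect_deadlocks knob come from this patch):

static struct ctl_table lite_lockdep_sysctls[] = {
        {
                .procname       = "detect_deadlocks",
                .data           = &detect_deadlocks,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = detect_cycles_handler,
        },
        { }
};
/*
 * e.g. register_sysctl("kernel", lite_lockdep_sysctls), so that writing the
 * knob under /proc/sys/kernel/ kicks off a scan of the recorded graph.
 */
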
+ */ +static int detect_cycles(void) +{ + LIST_HEAD(ind_cycle_locks); + LIST_HEAD(stack); + struct lite_lock_class *class, *dep; + struct lite_lock_list *entry; + struct ind_cycle_list *ind_list; + struct lite_lock_class_sub_key *key; + int i, j ,ret = 1; + unsigned int read; + unsigned long ip; + pid_t pid; + char *comm; + unsigned long flags; + + raw_local_irq_save(flags); + + if (!lite_graph_lock()) { + return 0; + } + + init_visit_hlist(); + init_cycle_list(); + init_stack_list(); + + for_each_set_bit(i, lite_lock_classes_in_use, ARRAY_SIZE(stack_entries)) { + class = lite_lock_classes + i; + key = class->key; + + hash_for_each(class->dir_from, j, entry, hash_entry) { + dep = entry->class; + read = entry->read; + ip = entry->acquire_ip; + comm = entry->comm; + pid = entry->pid; + + if (in_lite_hlist_possible(dep->dir_from, key)) { + if (cycle_checked(class, dep)) + continue; + + print_dir_deadlock_bug(class, entry); + add_checked_cycle(class, dep); + ret = 0; + } + + if (in_lite_hlist_possible(dep->ind_from, key) && + dep->key != key) { + if (cycle_checked(class, dep)) + continue; + + add_lite_lock_to_hlist(class->ind_cycle_dir_from, + dep, read, ip, pid, comm); + add_cycle_to_list(&ind_cycle_locks, class); + add_checked_cycle(class, dep); + ret = 0; + } + } + } + + list_for_each_entry(ind_list, &ind_cycle_locks, cycle_entry) { + add_visit_to_hlist(visited, ind_list->class); + dfs_head(ind_list->class, &stack, visited); + del_visit_in_hlist(visited, ind_list->class->key); + } + + lite_graph_unlock(); + + raw_local_irq_restore(flags); + + return ret; +} + +/* + * Construct the reachability graph (including direct + * and indirect) due to the @next lock. + */ +static int +check_lock_reachability(struct task_struct *curr, struct lite_held_lock *next, + int end) +{ + struct lite_held_lock *hlock; + struct lite_lock_class *prev_class; + struct lite_lock_class *next_class = lite_hlock_class(next); + int i, ret = 1; + + if (next->read == 2) + return 1; + + for (i = 0; i < end; i++) { + hlock = curr->held_locks + i; + prev_class = lite_hlock_class(hlock); + + // record direct edges + if (in_lite_hlist_possible(prev_class->dir_from, next_class->key)) + continue; + + ret = add_lite_lock_to_hlist(prev_class->dir_from, next_class, + next->read, next->acquire_ip, + next->pid, next->comm); + + if (!ret) + return 0; + + ret = add_lite_lock_to_hlist(next_class->dir_to, prev_class, + hlock->read, hlock->acquire_ip, + next->pid, next->comm); + + if (!ret) + return 0; + + // propagate indirect dependencies + propagate_reachability(hlock, next); + } + + return ret; +} + +/** + * Since trylocks can be held in any order, we don't + * construct their reachabilities until the next non- + * trylock comes. See check_prevs_add in lockdep.c. 
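
Worked example of that deferral (hypothetical locks: mutexes A and C, spinlock B):

/*
 *      mutex_lock(&A);         // blocking acquisition, no earlier locks held
 *      spin_trylock(&B);       // trylock: no edges recorded yet, since a
 *                              // trylock cannot block
 *      mutex_lock(&C);         // blocking: check_prevs_reachability() walks
 *                              // back over the pending trylock and records
 *                              // A -> B, A -> C and B -> C in one go
 */
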
+/**
+ * Since trylocks can be acquired in any order, we don't
+ * record their reachability until the next non-trylock
+ * acquisition comes along.  See check_prevs_add() in lockdep.c.
+ */
+static int
+check_prevs_reachability(struct task_struct *curr, struct lite_held_lock *next)
+{
+        int i, ret = 1;
+        int depth = curr->lite_lockdep_depth;
+        int start = depth;
+        struct lite_held_lock *hlock;
+
+        for (;;) {
+                if (!depth)
+                        break;
+
+                hlock = curr->held_locks + depth - 1;
+                if (!hlock->trylock) {
+                        start = depth;
+                        break;
+                }
+
+                depth--;
+        }
+
+        depth = curr->lite_lockdep_depth;
+
+        for (i = start; i <= depth; i++) {
+                hlock = curr->held_locks + i;
+
+                if (hlock->read != 2 && !hlock->nest_lock && !hlock->subclass &&
+                    hlock->check)
+                        ret &= check_lock_reachability(curr, hlock, i);
+        }
+
+        return ret;
+}
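The backward scan at the top of check_prevs_reachability() only has to find where the trailing run of trylocks begins; everything below that point already had its reachability recorded. A stand-alone sketch of just that scan, using an invented record type for illustration:

    #include <stdio.h>

    struct held { int trylock; const char *name; };

    /*
     * Walk down from the top of the held-lock stack past the trailing
     * trylocks; the returned index is where recording has to resume.
     */
    static int first_unrecorded(const struct held *stack, int depth)
    {
        int start = depth;

        while (depth) {
            if (!stack[depth - 1].trylock) {
                start = depth;
                break;
            }
            depth--;
        }
        return start;
    }

    int main(void)
    {
        const struct held stack[] = {
            { 0, "a" }, { 0, "b" }, { 1, "try1" }, { 1, "try2" },
        };
        int depth = 4;          /* the incoming lock would occupy slot 4 */
        int start = first_unrecorded(stack, depth);

        /* Mirrors "for (i = start; i <= depth; i++)" above. */
        printf("record reachability for slots %d..%d\n", start, depth);
        return 0;
    }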
+static int
+__lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass,
+                    int trylock, int read, int check,
+                    struct lite_lockdep_map *nest_lock, unsigned long ip,
+                    int reacquire)
+{
+        struct task_struct *curr = current;
+        struct lite_lock_class *class = NULL;
+        struct lite_held_lock *hlock;
+        unsigned int depth;
+        int class_idx;
+        int ret = 1;
+
+        if (unlikely(!debug_locks))
+                return 0;
+
+        if (!lite_lockdep)
+                return 0;
+
+        if (lock->key == &__lite_lockdep_no_validate__)
+                check = 0;
+
+        class = lock->class;
+
+        if (unlikely(!class)) {
+                class = register_lite_lock_class(lock);
+                if (!class)
+                        return 0;
+        }
+
+        depth = curr->lite_lockdep_depth;
+
+        if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LITE_LOCK_DEPTH))
+                return 0;
+
+        class_idx = class - lite_lock_classes;
+
+        hlock = curr->held_locks + depth;
+        hlock->class_idx = class_idx;
+        hlock->subclass = subclass;
+        hlock->acquire_ip = ip;
+        hlock->instance = lock;
+        hlock->nest_lock = nest_lock;
+        hlock->trylock = trylock;
+        hlock->read = read;
+        hlock->check = check;
+        hlock->pid = curr->pid;
+        strcpy(hlock->comm, curr->comm);
+
+        if (DEBUG_LOCKS_WARN_ON(!test_bit(class_idx, lite_lock_classes_in_use)))
+                return 0;
+
+        /*
+         * If the hlock is a recursive reader or a nested lock, we don't
+         * propagate its reachability.
+         */
+        if (check_reachability && hlock->read != 2 && !nest_lock && !subclass &&
+            !reacquire && check && !trylock && lite_graph_lock()) {
+                ret = check_prevs_reachability(curr, hlock);
+                lite_graph_unlock();
+        }
+
+        curr->lite_lockdep_depth++;
+
+        if (unlikely(curr->lite_lockdep_depth >= MAX_LITE_LOCK_DEPTH)) {
+                debug_locks_off();
+                printk(KERN_DEBUG "BUG: MAX_LITE_LOCK_DEPTH too low!\n");
+                printk(KERN_DEBUG "depth: %i max: %lu!\n",
+                       curr->lite_lockdep_depth, MAX_LITE_LOCK_DEPTH);
+                lite_lockdep_print_held_locks(current);
+                lite_debug_show_all_locks();
+                dump_stack();
+                return 0;
+        }
+
+        return ret;
+}
+
+static noinstr int match_lite_held_lock(const struct lite_held_lock *hlock,
+                                        const struct lite_lockdep_map *lock)
+{
+        if (hlock->instance == lock)
+                return 1;
+        return 0;
+}
+
+static struct lite_held_lock *find_lite_held_lock(struct task_struct *curr,
+                                                  struct lite_lockdep_map *lock,
+                                                  unsigned int depth, int *idx)
+{
+        struct lite_held_lock *ret, *hlock, *prev_hlock;
+        int i;
+
+        i = depth - 1;
+        hlock = curr->held_locks + i;
+        ret = hlock;
+        if (match_lite_held_lock(hlock, lock))
+                goto out;
+
+        ret = NULL;
+        for (i--, prev_hlock = hlock--;
+             i >= 0;
+             i--, prev_hlock = hlock--) {
+                if (match_lite_held_lock(hlock, lock)) {
+                        ret = hlock;
+                        break;
+                }
+        }
+
+out:
+        *idx = i;
+        return ret;
+}
+
+static int
+lite_reacquire_held_locks(struct task_struct *curr, unsigned int depth, int idx)
+{
+        struct lite_held_lock *hlock;
+
+        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+                return 0;
+
+        for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
+                switch (__lite_lock_acquire(hlock->instance,
+                                            hlock->subclass,
+                                            hlock->trylock,
+                                            hlock->read,
+                                            hlock->check,
+                                            hlock->nest_lock,
+                                            hlock->acquire_ip,
+                                            1)) {
+                case 0:
+                        return 1;
+                case 1:
+                        break;
+                default:
+                        WARN_ON(1);
+                        return 0;
+                }
+        }
+        return 0;
+}
+
+static void print_lite_lockdep_cache(struct lite_lockdep_map *lock)
+{
+        const char *name;
+        char str[KSYM_NAME_LEN];
+
+        name = lock->name;
+        if (!name)
+                name = __get_key_name(lock->key->sub_key, str);
+
+        printk(KERN_CONT "%s", name);
+}
+
+static inline void print_lite_ip_sym(const char *loglvl, unsigned long ip)
+{
+        printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip);
+}
+
+static void print_lite_unlock_imbalance_bug(struct task_struct *curr,
+                                            struct lite_lockdep_map *lock,
+                                            unsigned long ip)
+{
+        pr_warn("\n");
+        pr_warn("=====================================\n");
+        pr_warn("WARNING: bad unlock balance detected!\n");
+        print_lite_kernel_ident();
+        pr_warn("-------------------------------------\n");
+        pr_warn("%s/%d is trying to release lock (",
+                curr->comm, task_pid_nr(curr));
+        print_lite_lockdep_cache(lock);
+        pr_cont(") at:\n");
+        print_lite_ip_sym(KERN_WARNING, ip);
+        pr_warn("but there are no more locks to release!\n");
+        pr_warn("\nother info that might help us debug this:\n");
+        lite_lockdep_print_held_locks(curr);
+
+        pr_warn("\nstack backtrace:\n");
+        dump_stack();
+}
+
+static int
+__lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip)
+{
+        struct task_struct *curr = current;
+        unsigned int depth;
+        struct lite_held_lock *hlock;
+        int i;
+
+        if (unlikely(!debug_locks))
+                return 0;
+
+        if (!lite_lockdep)
+                return 0;
+
+        depth = curr->lite_lockdep_depth;
+
+        if (depth <= 0) {
+                print_lite_unlock_imbalance_bug(curr, lock, ip);
+                return 0;
+        }
+
+        hlock = find_lite_held_lock(curr, lock, depth, &i);
+
+        if (!hlock) {
+                print_lite_unlock_imbalance_bug(curr, lock, ip);
+                return 0;
+        }
+
+        curr->lite_lockdep_depth = i;
+
+        if (i == depth - 1)
+                return 1;
+
+        if (lite_reacquire_held_locks(curr, depth, i + 1))
+                return 0;
+
+        return 0;
+}
+
+void lite_lock_acquire(struct lite_lockdep_map *lock, unsigned int subclass,
+                       int trylock, int read, int check,
+                       struct lite_lockdep_map *nest_lock, unsigned long ip)
+{
+        unsigned long flags;
+
+        if (!debug_locks)
+                return;
+
+        raw_local_irq_save(flags);
+
+        trace_lock_acquire_lite(lock, subclass, trylock, read, check, nest_lock, ip);
+
+        __lite_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip, 0);
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lite_lock_acquire);
+
+void lite_lock_release(struct lite_lockdep_map *lock, unsigned long ip)
+{
+        unsigned long flags;
+
+        trace_lock_release_lite(lock, ip);
+
+        raw_local_irq_save(flags);
+
+        __lite_lock_release(lock, ip);
+        raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lite_lock_release);
+
+void lite_lockdep_init_map_type(struct lite_lockdep_map *lock, const char *name,
+                                struct lite_lock_class_key *key, int subclass)
+{
+        lock->class = NULL;
+
+        if (DEBUG_LOCKS_WARN_ON(!name)) {
+                lock->name = "NULL";
+                return;
+        }
+
+        lock->name = name;
+
+        if (DEBUG_LOCKS_WARN_ON(!key))
+                return;
+
+        if (!static_obj(key) && !is_dynamic_key(key)) {
+                if (debug_locks)
+                        printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
+                DEBUG_LOCKS_WARN_ON(1);
+                return;
+        }
+        lock->key = key;
+
+        if (unlikely(!debug_locks))
+                return;
+}
+EXPORT_SYMBOL_GPL(lite_lockdep_init_map_type);
+
+int detect_cycles_handler(struct ctl_table *table, int write,
+                          void *buffer, size_t *lenp, loff_t *ppos)
+{
+        int old_value = detect_deadlocks;
+        int ret;
+
+        ret = proc_dointvec(table, write, buffer, lenp, ppos);
+        if (ret == 0 && write && old_value != detect_deadlocks &&
+            detect_deadlocks == 1) {
+                detect_deadlocks = 0;
+                detect_cycles();
+        }
+        return ret;
+}
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index db9301591e3fcca7bfba67c11942a07ae2cf3de6..fb0b90ca7dcd11eaae99529452d761f74abee6ba 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -86,6 +86,9 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
+#endif
+#ifdef CONFIG_LITE_LOCKDEP
+	lite_lockdep_init_map(&lock->lite_dep_map, name, key, 0);
 #endif
 	lock->magic = lock;
 }
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 86061901636cc76e2fb27d8627f373f1ac6ff86c..2693ffa6469499951d2cda52b9570aba8885b960 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -152,7 +152,7 @@ static inline bool __mutex_trylock(struct mutex *lock)
 	return !__mutex_trylock_or_owner(lock);
 }
 
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
 /*
  * Lockdep annotations are contained to the slow paths for simplicity.
  * There is nothing that would stop spreading the lockdep annotations outwards
@@ -256,7 +256,7 @@ static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
 	}
 }
 
-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP)
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
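Back in lite_lockdep.c above, detect_cycles_handler() arms a one-shot scan: writing 1 to the sysctl runs detect_cycles() and the flag is reset to 0 straight away. Assuming the kernel.detect_deadlocks entry registered in kernel/sysctl.c below, triggering a scan from user space is just a write to the corresponding /proc file; a minimal sketch:

    #include <stdio.h>

    /* Path assumes the sysctl entries added in kernel/sysctl.c below. */
    #define DETECT_PATH "/proc/sys/kernel/detect_deadlocks"

    int main(void)
    {
        FILE *f = fopen(DETECT_PATH, "w");

        if (!f) {
            perror(DETECT_PATH);
            return 1;
        }
        /* Writing "1" runs detect_cycles(); any report goes to dmesg. */
        fputs("1\n", f);
        return fclose(f) ? 1 : 0;
    }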
@@ -743,7 +743,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne */ void __sched mutex_unlock(struct mutex *lock) { -#ifndef CONFIG_DEBUG_LOCK_ALLOC +#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP) if (__mutex_unlock_fast(lock)) return; #endif @@ -965,7 +965,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_mutex_acquire_nest(&lock->lite_dep_map, subclass, 0, nest_lock, ip); +#else mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); +#endif if (__mutex_trylock(lock) || mutex_optimistic_spin(lock, ww_ctx, NULL)) { @@ -1097,7 +1101,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, err_early_kill: spin_unlock(&lock->wait_lock); debug_mutex_free_waiter(&waiter); +#ifdef CONFIG_LITE_LOCKDEP + lite_mutex_release(&lock->lite_dep_map, ip); +#else mutex_release(&lock->dep_map, ip); +#endif preempt_enable(); return ret; } @@ -1117,7 +1125,8 @@ __ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass, return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true); } -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) + void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass) { @@ -1231,7 +1240,11 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne DEFINE_WAKE_Q(wake_q); unsigned long owner; +#ifdef CONFIG_LITE_LOCKDEP + lite_mutex_release(&lock->lite_dep_map, ip); +#else mutex_release(&lock->dep_map, ip); +#endif /* * Release the lock before (potentially) taking the spinlock such that @@ -1286,7 +1299,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne wake_up_q(&wake_q); } -#ifndef CONFIG_DEBUG_LOCK_ALLOC +#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP) /* * Here come the less common (and hence less performance-critical) APIs: * mutex_lock_interruptible() and mutex_trylock(). 
@@ -1422,14 +1435,19 @@ int __sched mutex_trylock(struct mutex *lock) #endif locked = __mutex_trylock(lock); +#ifdef CONFIG_LITE_LOCKDEP + if (locked) + lite_mutex_acquire(&lock->lite_dep_map, 0, 1, _RET_IP_); +#else if (locked) mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); +#endif return locked; } EXPORT_SYMBOL(mutex_trylock); -#ifndef CONFIG_DEBUG_LOCK_ALLOC +#if !defined(CONFIG_DEBUG_LOCK_ALLOC) && !defined(CONFIG_LITE_LOCKDEP) int __sched ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index a163542d178ee11198080f6cad124c9e2c25d115..e96f67c7fbcb4fbcefbdb2abff165c9320d46b7c 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -330,6 +330,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name, debug_check_no_locks_freed((void *)sem, sizeof(*sem)); lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP); #endif +#ifdef CONFIG_LITE_LOCKDEP + lite_lockdep_init_map(&sem->lite_dep_map, name, key, 0); +#endif #ifdef CONFIG_DEBUG_RWSEMS sem->magic = sem; #endif @@ -1501,7 +1504,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem) void __sched down_read(struct rw_semaphore *sem) { might_sleep(); +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); +#endif LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } @@ -1510,10 +1517,18 @@ EXPORT_SYMBOL(down_read); int __sched down_read_interruptible(struct rw_semaphore *sem) { might_sleep(); +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); +#endif if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_interruptible)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif return -EINTR; } @@ -1524,10 +1539,18 @@ EXPORT_SYMBOL(down_read_interruptible); int __sched down_read_killable(struct rw_semaphore *sem) { might_sleep(); +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 0, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); +#endif if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif return -EINTR; } @@ -1543,7 +1566,11 @@ int down_read_trylock(struct rw_semaphore *sem) int ret = __down_read_trylock(sem); if (ret == 1) +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, 0, 1, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_); +#endif return ret; } EXPORT_SYMBOL(down_read_trylock); @@ -1554,7 +1581,11 @@ EXPORT_SYMBOL(down_read_trylock); void __sched down_write(struct rw_semaphore *sem) { might_sleep(); +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire(&sem->lite_dep_map, 0, 0, _RET_IP_); +#else rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); +#endif LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } EXPORT_SYMBOL(down_write); @@ -1565,11 +1596,19 @@ EXPORT_SYMBOL(down_write); int __sched down_write_killable(struct rw_semaphore *sem) { might_sleep(); +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire(&sem->lite_dep_map, 0, 0, _RET_IP_); +#else rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); +#endif if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) { +#ifdef 
CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif return -EINTR; } @@ -1585,7 +1624,11 @@ int down_write_trylock(struct rw_semaphore *sem) int ret = __down_write_trylock(sem); if (ret == 1) +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire(&sem->lite_dep_map, 0, 1, _RET_IP_); +#else rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_); +#endif return ret; } @@ -1596,7 +1639,11 @@ EXPORT_SYMBOL(down_write_trylock); */ void up_read(struct rw_semaphore *sem) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif __up_read(sem); } EXPORT_SYMBOL(up_read); @@ -1606,7 +1653,11 @@ EXPORT_SYMBOL(up_read); */ void up_write(struct rw_semaphore *sem) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif __up_write(sem); } EXPORT_SYMBOL(up_write); @@ -1621,12 +1672,17 @@ void downgrade_write(struct rw_semaphore *sem) } EXPORT_SYMBOL(downgrade_write); -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) void down_read_nested(struct rw_semaphore *sem, int subclass) { might_sleep(); + +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, subclass, 0, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); +#endif LOCK_CONTENDED(sem, __down_read_trylock, __down_read); } EXPORT_SYMBOL(down_read_nested); @@ -1634,10 +1690,19 @@ EXPORT_SYMBOL(down_read_nested); int down_read_killable_nested(struct rw_semaphore *sem, int subclass) { might_sleep(); + +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_read(&sem->lite_dep_map, subclass, 0, _RET_IP_); +#else rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_); +#endif if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif return -EINTR; } @@ -1648,7 +1713,12 @@ EXPORT_SYMBOL(down_read_killable_nested); void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest) { might_sleep(); + +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire_nest(&sem->lite_dep_map, 0, 0, nest, _RET_IP_); +#else rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_); +#endif LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } EXPORT_SYMBOL(_down_write_nest_lock); @@ -1664,7 +1734,12 @@ EXPORT_SYMBOL(down_read_non_owner); void down_write_nested(struct rw_semaphore *sem, int subclass) { might_sleep(); + +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire(&sem->lite_dep_map, subclass, 0, _RET_IP_); +#else rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); +#endif LOCK_CONTENDED(sem, __down_write_trylock, __down_write); } EXPORT_SYMBOL(down_write_nested); @@ -1672,11 +1747,20 @@ EXPORT_SYMBOL(down_write_nested); int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass) { might_sleep(); + +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_acquire(&sem->lite_dep_map, subclass, 0, _RET_IP_); +#else rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_); +#endif if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) { +#ifdef CONFIG_LITE_LOCKDEP + lite_rwsem_release(&sem->lite_dep_map, _RET_IP_); +#else rwsem_release(&sem->dep_map, _RET_IP_); +#endif return -EINTR; } diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 
0ff08380f5318ee6722589f6fc6329b8a0de5f74..71d3044492502fef4e770fe86d24fe8f7356148a 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -353,13 +353,18 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) EXPORT_SYMBOL(_raw_write_unlock_bh); #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC +#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_LITE_LOCKDEP) void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) { preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, subclass, 0, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#endif } EXPORT_SYMBOL(_raw_spin_lock_nested); @@ -370,9 +375,14 @@ unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, local_irq_save(flags); preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&lock->lite_dep_map, subclass, 0, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, do_raw_spin_lock_flags, &flags); +#endif return flags; } EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); @@ -381,8 +391,13 @@ void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *nest_lock) { preempt_disable(); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire_nest(&lock->lite_dep_map, 0, 0, nest_lock, _RET_IP_); + do_raw_spin_lock(lock); +#else spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); +#endif } EXPORT_SYMBOL(_raw_spin_lock_nest_lock); diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c index b9d93087ee669155b7b10bab8f31205a7ca6258c..bf0d5209e75eda8814fcfd9cb8f1d03cf2d25b9e 100644 --- a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -22,6 +22,9 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lockdep_init_map_wait(&lock->dep_map, name, key, 0, inner); +#endif +#ifdef CONFIG_LITE_LOCKDEP + lite_lockdep_init_map(&lock->lite_dep_map, name, key, 0); #endif lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; lock->magic = SPINLOCK_MAGIC; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 46d219b871093ef50f416008c50c99585f2ce2ca..2adbd83cfe0441fd1ffd0a49712169c9015f45ef 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -20,6 +20,10 @@ #include #include +#ifdef CONFIG_LITE_LOCKDEP +#include +#endif + #include "../workqueue_internal.h" #include "../../fs/io-wq.h" #include "../smpboot.h" @@ -3540,7 +3544,11 @@ prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf * do an early lockdep release here: */ rq_unpin_lock(rq, rf); +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_release(&rq->lock.lite_dep_map, _THIS_IP_); +#else spin_release(&rq->lock.dep_map, _THIS_IP_); +#endif #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ rq->lock.owner = next; @@ -3554,7 +3562,11 @@ static inline void finish_lock_switch(struct rq *rq) * fix up the runqueue lock - which gets 'carried over' from * prev into current: */ +#ifdef CONFIG_LITE_LOCKDEP + lite_spin_acquire(&rq->lock.lite_dep_map, 0, 0, _THIS_IP_); +#else spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); +#endif raw_spin_unlock_irq(&rq->lock); } diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 
e1c655f928c747f74757985489fde2540abd39d7..0f9908cb5121b13f76ff1a7ff415e9a68035cbb5 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -8,7 +8,11 @@ void __init_swait_queue_head(struct swait_queue_head *q, const char *name, struct lock_class_key *key) { raw_spin_lock_init(&q->lock); +#ifdef CONFIG_LITE_LOCKDEP + lite_lockdep_set_class_and_name(&q->lock, key, name); +#else lockdep_set_class_and_name(&q->lock, key, name); +#endif INIT_LIST_HEAD(&q->task_list); } EXPORT_SYMBOL(__init_swait_queue_head); diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index a55642aa3f68b98940140fca6610cbd6694bd754..eb266fc1221af45c87e2356bd7cc9179ad2f49da 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c @@ -9,7 +9,11 @@ void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key) { spin_lock_init(&wq_head->lock); +#ifdef CONFIG_LITE_LOCKDEP + lite_lockdep_set_class_and_name(&wq_head->lock, key, name); +#else lockdep_set_class_and_name(&wq_head->lock, key, name); +#endif INIT_LIST_HEAD(&wq_head->head); } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 89ef0c1a16429b3a0f8f30be0e50489c33bb73fb..99d1c57b0e82c467d49e621172f14a63842d5217 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -95,6 +95,9 @@ #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include #endif +#ifdef CONFIG_LITE_LOCKDEP +#include +#endif #ifdef CONFIG_CHR_DEV_SG #include #endif @@ -1901,6 +1904,29 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#endif +#ifdef CONFIG_LITE_LOCKDEP + { + .procname = "lite_lockdep", + .data = &lite_lockdep, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "check_reachability", + .data = &check_reachability, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "detect_deadlocks", + .data = &detect_deadlocks, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = detect_cycles_handler, + }, #endif { .procname = "panic", diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f906df9db2e21173f73aa9ed70b1136a0a57a8e9..28b932f9b938a3f50656a72281935905945c85dc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1342,6 +1342,23 @@ config LOCKDEP select KALLSYMS select KALLSYMS_ALL +config LITE_LOCKDEP + bool "Lightweight deadlock detection" + depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT && !LOCKDEP + select DEBUG_SPINLOCK + select DEBUG_MUTEXES + select DEBUG_RWSEMS + default n + +config LOCK_REACHABILITY + bool "Lock reachability records for cycle detections" + depends on LITE_LOCKDEP + default n + help + Say Y here if you want to record lock dependencies during + bootup. If you say N, you can record dependencies later + by turning on the sysctl variable "check_reachability". + config LOCKDEP_SMALL bool
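Putting the pieces together: with CONFIG_LITE_LOCKDEP=y and reachability recording enabled (CONFIG_LOCK_REACHABILITY or the check_reachability sysctl), a lock-order inversion only has to be recorded once, not actually hit, for a later detect_deadlocks scan to report it. The hypothetical test module below (all names invented, not part of the patch) plants such an inversion; loading it and then writing 1 to /proc/sys/kernel/detect_deadlocks should, as I read detect_cycles() above, produce the deadlock report in dmesg:

    #include <linux/module.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(lock_a);
    static DEFINE_MUTEX(lock_b);

    static void take_in_order(struct mutex *first, struct mutex *second)
    {
        mutex_lock(first);
        mutex_lock(second);
        mutex_unlock(second);
        mutex_unlock(first);
    }

    static int __init abba_demo_init(void)
    {
        /* Both orders complete fine at runtime ...                     */
        take_in_order(&lock_a, &lock_b);    /* records lock_a -> lock_b */
        take_in_order(&lock_b, &lock_a);    /* records lock_b -> lock_a */
        /* ... but the recorded graph now contains an A<->B cycle.      */
        return 0;
    }

    static void __exit abba_demo_exit(void)
    {
    }

    module_init(abba_demo_init);
    module_exit(abba_demo_exit);
    MODULE_LICENSE("GPL");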