diff --git a/drivers/of/base.c b/drivers/of/base.c
index f0dbb7ad88cf68019581399178c7c59d2f49080a..c59b30bab0e0d4c9edcf12883bc96920f0b27a05 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -130,31 +130,34 @@ static u32 phandle_cache_mask;
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+static struct device_node** __of_free_phandle_cache(void)
 {
 	u32 cache_entries = phandle_cache_mask + 1;
 	u32 k;
+	struct device_node **shadow;
 
 	if (!phandle_cache)
-		return;
+		return NULL;
 
 	for (k = 0; k < cache_entries; k++)
 		of_node_put(phandle_cache[k]);
 
-	kfree(phandle_cache);
+	shadow = phandle_cache;
 	phandle_cache = NULL;
+	return shadow;
 }
 
 int of_free_phandle_cache(void)
 {
 	unsigned long flags;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
+	kfree(shadow);
 	return 0;
 }
 #if !defined(CONFIG_MODULES)
@@ -189,10 +192,11 @@ void of_populate_phandle_cache(void)
 	u32 cache_entries;
 	struct device_node *np;
 	u32 phandles = 0;
+	struct device_node **shadow;
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	__of_free_phandle_cache();
+	shadow = __of_free_phandle_cache();
 
 	for_each_of_allnodes(np)
 		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
@@ -200,12 +204,14 @@ void of_populate_phandle_cache(void)
 
 	if (!phandles)
 		goto out;
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
 	cache_entries = roundup_pow_of_two(phandles);
 	phandle_cache_mask = cache_entries - 1;
 
 	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
 				GFP_ATOMIC);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	if (!phandle_cache)
 		goto out;
 
@@ -217,6 +223,7 @@ void of_populate_phandle_cache(void)
 
 out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	kfree(shadow);
 }
 
 void __init of_core_init(void)
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 72f9f2f1283498fc8ebf9681ac6e8eda30f78e8b..23c7b5037cf206973320b4b3b67c13301def738e 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -472,7 +472,6 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 				     smp_call_func_t func)
 {
-	int cpu;
 	struct rcu_node *rnp;
 
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-		preempt_disable();
-		cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
-		/* If all offline, queue the work on an unbound CPU. */
-		if (unlikely(cpu > rnp->grphi))
-			cpu = WORK_CPU_UNBOUND;
-		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
-		preempt_enable();
+		queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
 		rnp->exp_need_flush = true;
 	}
 
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8baf7dc3d52597bf0e53753c0cc17503cd..b209dbaefde822315201ab409dce65ca52b371eb 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -103,7 +103,7 @@ static int quarantine_head;
 static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
-static DEFINE_SPINLOCK(quarantine_lock);
+static DEFINE_RAW_SPINLOCK(quarantine_lock);
 DEFINE_STATIC_SRCU(remove_cache_srcu);
 
 /* Maximum size of the global queue. */
@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
 		qlist_move_all(q, &temp);
 
-		spin_lock(&quarantine_lock);
+		raw_spin_lock(&quarantine_lock);
 		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
 		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
 		if (global_quarantine[quarantine_tail].bytes >=
@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 			if (new_tail != quarantine_head)
 				quarantine_tail = new_tail;
 		}
-		spin_unlock(&quarantine_lock);
+		raw_spin_unlock(&quarantine_lock);
 	}
 
 	local_irq_restore(flags);
@@ -230,7 +230,7 @@ void quarantine_reduce(void)
 	 * expected case).
 	 */
 	srcu_idx = srcu_read_lock(&remove_cache_srcu);
-	spin_lock_irqsave(&quarantine_lock, flags);
+	raw_spin_lock_irqsave(&quarantine_lock, flags);
 
 	/*
 	 * Update quarantine size in case of hotplug. Allocate a fraction of
@@ -254,7 +254,7 @@ void quarantine_reduce(void)
 		quarantine_head = 0;
 	}
 
-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, NULL);
 	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
 	 */
 	on_each_cpu(per_cpu_remove_cache, cache, 1);
 
-	spin_lock_irqsave(&quarantine_lock, flags);
+	raw_spin_lock_irqsave(&quarantine_lock, flags);
 	for (i = 0; i < QUARANTINE_BATCHES; i++) {
 		if (qlist_empty(&global_quarantine[i]))
 			continue;
 		qlist_move_cache(&global_quarantine[i], &to_free, cache);
 		/* Scanning whole quarantine can take a while. */
-		spin_unlock_irqrestore(&quarantine_lock, flags);
+		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 		cond_resched();
-		spin_lock_irqsave(&quarantine_lock, flags);
+		raw_spin_lock_irqsave(&quarantine_lock, flags);
 	}
-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 
 	qlist_free_all(&to_free, cache);
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5eeabece0c17899e66850157062c8d925c9425ce..92ce99b15f2bae3581729a3cc1276d593e488914 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -26,7 +26,7 @@
  *
  * The following locks and mutexes are used by kmemleak:
  *
- * - kmemleak_lock (rwlock): protects the object_list modifications and
+ * - kmemleak_lock (raw spinlock): protects the object_list modifications and
  *   accesses to the object_tree_root. The object_list is the main list
  *   holding the metadata (struct kmemleak_object) for the allocated memory
  *   blocks. The object_tree_root is a red black tree used to look-up
@@ -197,7 +197,7 @@ static LIST_HEAD(gray_list);
 /* search tree for object boundaries */
 static struct rb_root object_tree_root = RB_ROOT;
 /* rw_lock protecting the access to object_list and object_tree_root */
-static DEFINE_RWLOCK(kmemleak_lock);
+static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 
 /* allocation caches for kmemleak internal data */
 static struct kmem_cache *object_cache;
@@ -491,9 +491,9 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 	struct kmemleak_object *object;
 
 	rcu_read_lock();
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	/* check whether the object is still available */
 	if (object && !get_object(object))
@@ -513,13 +513,13 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
 	unsigned long flags;
 	struct kmemleak_object *object;
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	object = lookup_object(ptr, alias);
 	if (object) {
 		rb_erase(&object->rb_node, &object_tree_root);
 		list_del_rcu(&object->object_list);
 	}
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 
 	return object;
 }
@@ -593,7 +593,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 	/* kernel backtrace */
 	object->trace_len = __save_stack_trace(object->trace);
 
-	write_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 
 	min_addr = min(min_addr, ptr);
 	max_addr = max(max_addr, ptr + size);
@@ -624,7 +624,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 
 	list_add_tail_rcu(&object->object_list, &object_list);
 out:
-	write_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 	return object;
 }
 
@@ -1310,7 +1310,7 @@ static void scan_block(void *_start, void *_end,
 	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 	unsigned long flags;
 
-	read_lock_irqsave(&kmemleak_lock, flags);
+	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 	for (ptr = start; ptr < end; ptr++) {
 		struct kmemleak_object *object;
 		unsigned long pointer;
@@ -1367,7 +1367,7 @@ static void scan_block(void *_start, void *_end,
 			spin_unlock(&object->lock);
 		}
 	}
-	read_unlock_irqrestore(&kmemleak_lock, flags);
+	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 }
 
 /*
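Note on the drivers/of/base.c hunks above: they follow the usual PREEMPT_RT pattern of detaching memory while the raw spinlock is held and deferring the actual kfree() until after the lock is dropped, since the allocator's free path can take sleeping locks on RT. Below is a minimal sketch of that pattern only; the my_* identifiers are illustrative and not part of the patch.

/* Sketch of the "detach under the raw lock, free after dropping it" pattern.
 * my_lock, my_cache and the my_* helpers are hypothetical examples. */
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_lock);
static int *my_cache;

/* Caller must hold my_lock; only detaches the buffer, never frees it. */
static int *__my_detach_cache(void)
{
	int *shadow = my_cache;

	my_cache = NULL;
	return shadow;
}

static void my_free_cache(void)
{
	unsigned long flags;
	int *shadow;

	raw_spin_lock_irqsave(&my_lock, flags);
	shadow = __my_detach_cache();
	raw_spin_unlock_irqrestore(&my_lock, flags);
	/* Free only after the raw spinlock is released; kfree(NULL) is a no-op. */
	kfree(shadow);
}

The kasan and kmemleak hunks take the complementary route: the sections guarded by quarantine_lock and kmemleak_lock only manipulate the quarantine batches and the object tree/list, with allocation and freeing kept outside the lock, so those locks can simply become raw_spinlock_t and remain usable in atomic context on PREEMPT_RT.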