diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ee61f4b37062c97a3660d2f3d7bb50740ce6adf..a022d03e286b8f3bbdf390d9e82a93f99ed8d726 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -73,6 +73,7 @@
 #include "slab.h"
 #include
 #include
+#include <linux/locallock.h>
 #include
 
@@ -104,6 +105,8 @@ bool cgroup_memory_noswap __read_mostly;
 
 static struct workqueue_struct *memcg_wmark_wq;
 
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);
+
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
 {
@@ -2377,7 +2380,7 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
-	curcpu = get_cpu();
+	curcpu = get_cpu_light();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2397,7 +2400,7 @@ void drain_all_stock(struct mem_cgroup *root_memcg)
 		}
 		css_put(&memcg->css);
 	}
-	put_cpu();
+	put_cpu_light();
 	mutex_unlock(&percpu_charge_mutex);
 }
@@ -7060,12 +7063,12 @@
 	kidled_mem_cgroup_move_stats(from, to, page,
 				     nr_pages << PAGE_SHIFT);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 out_unlock:
 	unlock_page(page);
 out:
@@ -8059,10 +8062,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 
 	commit_charge(page, memcg);
 
-	local_irq_disable();
+	local_lock_irq(event_lock);
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 
 	if (PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
@@ -8106,11 +8109,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 
 	if (!mem_cgroup_is_root(ug->memcg))
 		css_put_many(&ug->memcg->css, ug->nr_pages);
@@ -8266,10 +8269,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
 	commit_charge(newpage, memcg);
 
-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -8546,6 +8549,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -8595,12 +8599,16 @@
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
+	local_lock_irqsave(event_lock, flags);
+#ifndef CONFIG_PREEMPT_RT_BASE
 	VM_BUG_ON(!irqs_disabled());
+#endif
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
 
 	if (!mem_cgroup_is_root(memcg))
 		css_put_many(&memcg->css, nr_entries);
+	local_unlock_irqrestore(event_lock, flags);
 }
 
 /**
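Note: DEFINE_LOCAL_IRQ_LOCK() and the local_lock_irq()/local_lock_irqsave() calls above are the out-of-tree PREEMPT_RT locallock primitives from <linux/locallock.h> (taken by lock name, unlike the later mainline <linux/local_lock.h> API, which takes a pointer). A minimal sketch of the pattern this patch applies, assuming that RT API; the lock and function names below are illustrative, not code from memcontrol.c:

#include <linux/locallock.h>

/* Illustrative lock; the patch itself defines event_lock. */
static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

static void demo_update_percpu_stats(void)
{
	unsigned long flags;

	/*
	 * On !PREEMPT_RT this maps to local_irq_save(), so per-CPU
	 * state is still protected by disabled interrupts.  On
	 * PREEMPT_RT it instead takes a per-CPU sleeping spinlock and
	 * the section stays preemptible, so irqs_disabled() no longer
	 * holds -- which is why the patch wraps
	 * VM_BUG_ON(!irqs_disabled()) in #ifndef CONFIG_PREEMPT_RT_BASE
	 * in mem_cgroup_swapout().
	 */
	local_lock_irqsave(demo_lock, flags);
	/* ... update per-CPU counters here ... */
	local_unlock_irqrestore(demo_lock, flags);
}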