diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 705a485a2689b4928ff3ecd4cfc8eb29b2c36368..6f7ed4e9340786db46a4dda6b65cf1cbd6919e9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -60,6 +60,7 @@
 #include
 #include
 #include
+#include <linux/locallock.h>
 #include
 #include
 #include
@@ -313,6 +314,18 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define cpu_lock_irqsave(cpu, flags)		\
+	local_lock_irqsave_on(pa_lock, flags, cpu)
+# define cpu_unlock_irqrestore(cpu, flags)	\
+	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+#else
+# define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+# define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
+#endif
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1457,10 +1470,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 void __free_pages_core(struct page *page, unsigned int order)
@@ -2833,13 +2846,13 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	int to_drain, batch;
 	LIST_HEAD(dst);
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
 		isolate_pcp_pages(to_drain, pcp, &dst);
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 	if (to_drain > 0)
 		free_pcppages_bulk(zone, &dst, false);
@@ -2861,7 +2874,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	LIST_HEAD(dst);
 	int count;
 
-	local_irq_save(flags);
+	cpu_lock_irqsave(cpu, flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
@@ -2869,7 +2882,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	if (count)
 		isolate_pcp_pages(count, pcp, &dst);
 
-	local_irq_restore(flags);
+	cpu_unlock_irqrestore(cpu, flags);
 
 	if (count)
 		free_pcppages_bulk(zone, &dst, false);
@@ -2907,6 +2920,7 @@ void drain_local_pages(struct zone *zone)
 		drain_pages(cpu);
 }
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 static void drain_local_pages_wq(struct work_struct *work)
 {
 	/*
@@ -2920,6 +2934,7 @@ static void drain_local_pages_wq(struct work_struct *work)
 	drain_local_pages(NULL);
 	preempt_enable();
 }
+#endif
 
 /*
  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
@@ -2986,7 +3001,14 @@ void drain_all_pages(struct zone *zone)
 		else
 			cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
-
+#ifdef CONFIG_PREEMPT_RT_BASE
+	for_each_cpu(cpu, &cpus_with_pcps) {
+		if (zone)
+			drain_pages_zone(cpu, zone);
+		else
+			drain_pages(cpu);
+	}
+#else
 	for_each_cpu(cpu, &cpus_with_pcps) {
 		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
 		INIT_WORK(work, drain_local_pages_wq);
@@ -2994,6 +3016,7 @@ void drain_all_pages(struct zone *zone)
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
 		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+#endif
 
 	mutex_unlock(&pcpu_drain_mutex);
 }
@@ -3113,9 +3136,9 @@ void free_unref_page(struct page *page)
 	if (!free_unref_page_prepare(page, pfn))
 		return;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	free_unref_page_commit(page, pfn, &dst);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	if (!list_empty(&dst))
 		free_pcppages_bulk(zone, &dst, false);
 }
@@ -3142,7 +3165,7 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, pfn);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	list_for_each_entry_safe(page, next, list, lru) {
 		unsigned long pfn = page_private(page);
 		enum zone_type type;
@@ -3157,12 +3180,12 @@ void free_unref_page_list(struct list_head *list)
 		 * a large list of pages to free.
 		 */
 		if (++batch_count == SWAP_CLUSTER_MAX) {
-			local_irq_restore(flags);
+			local_unlock_irqrestore(pa_lock, flags);
 			batch_count = 0;
-			local_irq_save(flags);
+			local_lock_irqsave(pa_lock, flags);
 		}
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 	for (i = 0; i < __MAX_NR_ZONES; ) {
 		struct page *page;
@@ -3331,7 +3354,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct page *page;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[migratetype];
 	page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
@@ -3339,7 +3362,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	return page;
 }
 
@@ -3366,7 +3389,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
 	 */
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-	spin_lock_irqsave(&zone->lock, flags);
+	local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
 
 	do {
 		page = NULL;
@@ -3386,7 +3409,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 	zone_statistics(preferred_zone, zone);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 
 out:
 	/* Separate test+clear to avoid unnecessary atomics */
@@ -3399,7 +3422,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 	return NULL;
 }
 
@@ -8801,7 +8824,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages() */
-	local_irq_save(flags);
+	local_lock_irqsave(pa_lock, flags);
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
@@ -8810,7 +8833,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(pa_lock, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE