From 79ae56a80ff68ae0a26dbf36f278359f398cdd2e Mon Sep 17 00:00:00 2001
From: meganz009
Date: Wed, 7 Jun 2023 13:52:19 +0800
Subject: [PATCH 1/2] mm/page_alloc: Use migrate_disable() in drain_local_pages_wq()

commit f0c3639168b4b337fde527d4e89cb965a84a492b upstream.

drain_local_pages_wq() disables preemption to avoid CPU migration during
CPU hotplug.

Using migrate_disable() makes the function preemptible on PREEMPT_RT but
still avoids CPU migrations during CPU-hotplug. On !PREEMPT_RT it
behaves like preempt_disable().

Use migrate_disable() in drain_local_pages_wq().

Signed-off-by: Sebastian Andrzej Siewior
---
 mm/page_alloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 521426dfbe50..ba9e0fc45d49 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3213,9 +3213,9 @@ static void drain_local_pages_wq(struct work_struct *work)
 	 * cpu which is allright but we also have to make sure to not move to
 	 * a different one.
 	 */
-	preempt_disable();
+	migrate_disable();
 	drain_local_pages(drain->zone);
-	preempt_enable();
+	migrate_enable();
 }
 
 /*
--
Gitee

From f3ed24119922a286511668b198358b6428d52fbe Mon Sep 17 00:00:00 2001
From: meganz009
Date: Wed, 7 Jun 2023 14:10:50 +0800
Subject: [PATCH 2/2] mm: page_alloc: rt-friendly per-cpu pages

commit eacabe461ab2bdaf8063ab7c3a2fb4ac23858ff1 upstream.

rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
method into a preemptible, explicit-per-cpu-locks method.

Contains fixes from:

	 Peter Zijlstra
	 Thomas Gleixner

Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
Signed-off-by: Wenya Zhang
Reviewed-by: Huang Jian
---
 mm/page_alloc.c | 47 ++++++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 19 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ba9e0fc45d49..c48a261b2be2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -61,6 +61,7 @@
 #include
 #include
 #include
+#include <linux/local_lock.h>
 #include
 #include
 #include
@@ -391,6 +392,13 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+struct pa_lock {
+	local_lock_t l;
+};
+static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
+	.l = INIT_LOCAL_LOCK(l),
+};
+
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1672,11 +1680,11 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, pfn, order, migratetype,
 		      fpi_flags);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 
 void __free_pages_core(struct page *page, unsigned int order)
@@ -3137,12 +3145,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	unsigned long flags;
 	int to_drain, batch;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
 		free_pcppages_bulk(zone, to_drain, pcp, 0);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 #endif
 
@@ -3159,13 +3167,13 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 	struct per_cpu_pageset *pset;
 	struct per_cpu_pages *pcp;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	pset = per_cpu_ptr(zone->pageset, cpu);
 
 	pcp = &pset->pcp;
 	if (pcp->count)
 		free_pcppages_bulk(zone, pcp->count, pcp, 0);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 
 /*
@@ -3416,9 +3424,9 @@ void free_unref_page(struct page *page, unsigned int order)
 		migratetype = MIGRATE_MOVABLE;
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	free_unref_page_commit(page, pfn, migratetype, order);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 
 /*
@@ -3460,7 +3468,7 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, pfn);
 	}
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	list_for_each_entry_safe(page, next, list, lru) {
 		pfn = page_private(page);
 		set_page_private(page, 0);
@@ -3473,12 +3481,12 @@
 		 * a large list of pages to free.
 		 */
 		if (++batch_count == SWAP_CLUSTER_MAX) {
-			local_irq_restore(flags);
+			local_unlock_irqrestore(&pa_lock.l, flags);
 			batch_count = 0;
-			local_irq_save(flags);
+			local_lock_irqsave(&pa_lock.l, flags);
 		}
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 
 /*
@@ -3650,7 +3658,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 	struct page *page;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
 	list = &pcp->lists[order_to_pindex(migratetype, order)];
 	page = __rmqueue_pcplist(zone, gfp_flags, order, migratetype,
@@ -3659,7 +3667,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 		zone_statistics(preferred_zone, zone);
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 	return page;
 }
 
@@ -3693,7 +3701,8 @@ struct page *rmqueue(struct zone *preferred_zone,
 	 * allocate greater than order-1 page units with __GFP_NOFAIL.
 	 */
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-	spin_lock_irqsave(&zone->lock, flags);
+	local_lock_irqsave(&pa_lock.l, flags);
+	spin_lock(&zone->lock);
 
 #ifdef CONFIG_PAGE_PREZERO
 	zone->alloc_zero = prezero_buddy_enabled() && (gfp_flags & __GFP_ZERO);
@@ -3728,7 +3737,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 
 	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
 	zone_statistics(preferred_zone, zone);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 
 out:
 	/* Separate test+clear to avoid unnecessary atomics */
@@ -3741,7 +3750,7 @@ struct page *rmqueue(struct zone *preferred_zone,
 	return page;
 
 failed:
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 	return NULL;
 }
 
@@ -9157,7 +9166,7 @@ void zone_pcp_reset(struct zone *zone)
 	struct per_cpu_pageset *pset;
 
 	/* avoid races with drain_pages() */
-	local_irq_save(flags);
+	local_lock_irqsave(&pa_lock.l, flags);
 	if (zone->pageset != &boot_pageset) {
 		for_each_online_cpu(cpu) {
 			pset = per_cpu_ptr(zone->pageset, cpu);
@@ -9166,7 +9175,7 @@ void zone_pcp_reset(struct zone *zone)
 		free_percpu(zone->pageset);
 		zone->pageset = &boot_pageset;
 	}
-	local_irq_restore(flags);
+	local_unlock_irqrestore(&pa_lock.l, flags);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
--
Gitee
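
Note (illustration, not part of either patch): the second patch replaces raw local_irq_save()/local_irq_restore() sections with a named per-CPU local_lock, so PREEMPT_RT can turn the protection into a per-CPU sleeping lock while !PREEMPT_RT keeps the plain irq-off behaviour. Below is a minimal sketch of that pattern, assuming a kernel that provides <linux/local_lock.h> (v5.8+); the identifiers my_pa_lock, my_count and my_stat_inc() are illustrative only and do not exist in mm/page_alloc.c.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Per-CPU lock object: maps to irq/preempt toggling on !PREEMPT_RT and
 * to a per-CPU sleeping lock plus migration disabling on PREEMPT_RT. */
struct my_pa_lock {
	local_lock_t l;
};
static DEFINE_PER_CPU(struct my_pa_lock, my_pa_lock) = {
	.l = INIT_LOCAL_LOCK(l),
};

static DEFINE_PER_CPU(unsigned long, my_count);

static void my_stat_inc(void)
{
	unsigned long flags;

	/* Drop-in replacement for local_irq_save(flags): serializes all
	 * users of my_pa_lock on this CPU and keeps the task on this CPU,
	 * but stays preemptible on PREEMPT_RT. */
	local_lock_irqsave(&my_pa_lock.l, flags);
	this_cpu_inc(my_count);
	local_unlock_irqrestore(&my_pa_lock.l, flags);
}

The first patch applies the same idea at a coarser level: migrate_disable()/migrate_enable() only pins the worker to its current CPU, which is all drain_local_pages_wq() needs, without disabling preemption on PREEMPT_RT.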