From 245e814cfeafe4c8f8c31ff723925a2c08b29a99 Mon Sep 17 00:00:00 2001
From: 朱佳鑫
Date: Thu, 25 Jul 2024 11:55:39 +0000
Subject: [PATCH] Revert 'Pull Request !34 : memory subsystem feature adaptation - CMA reuse'
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 arch/arm64/mm/fault.c     |  2 +-
 include/linux/gfp.h       | 14 ++++----------
 include/linux/gfp_types.h |  6 ++----
 include/linux/highmem.h   |  2 +-
 include/linux/mmzone.h    | 11 +----------
 mm/Kconfig                | 10 ----------
 mm/compaction.c           |  2 +-
 mm/page_alloc.c           | 34 ++--------------------------------
 8 files changed, 12 insertions(+), 69 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index bfb5065fb99e..2e5d1e238af9 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -940,7 +940,7 @@ NOKPROBE_SYMBOL(do_debug_exception);
 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 						unsigned long vaddr)
 {
-	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO | __GFP_CMA;
+	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
 
 	/*
 	 * If the page is mapped with PROT_MTE, initialise the tags at the
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ae6479d04887..665f06675c83 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,24 +15,18 @@ struct vm_area_struct;
 
 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
-	unsigned int ret_mt = 0;
-
 	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
 	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
 
 	if (unlikely(page_group_by_mobility_disabled))
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	ret_mt = (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
-
-#ifdef CONFIG_CMA_REUSE
-	if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
-		return MIGRATE_CMA;
-#endif
-
-	return ret_mt;
+	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 4bc68a46b97b..6583a58670c5 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -53,9 +53,8 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_SKIP_ZERO	0
 #define ___GFP_SKIP_KASAN	0
 #endif
-#define ___GFP_CMA		0x4000000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x8000000u
+#define ___GFP_NOLOCKDEP	0x4000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -72,7 +71,6 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
-#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -251,7 +249,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ccef475c0d44..75607d4ba26c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -226,7 +226,7 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 {
 	struct folio *folio;
 
-	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0, vma, vaddr, false);
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false);
 	if (folio)
 		clear_user_highpage(&folio->page, vaddr);
 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 58be39d6b951..0f62786269d0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,11 +47,8 @@ enum migratetype {
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
 	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-#ifdef CONFIG_CMA_REUSE
-	MIGRATE_CMA,
-#endif
 	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
+#ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
 	 * ZONE_MOVABLE works. Only movable pages can be allocated
@@ -81,12 +78,6 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #  define is_migrate_cma_page(_page) false
 #endif
 
-#ifdef CONFIG_CMA_REUSE
-#  define get_cma_migratetype() MIGRATE_CMA
-#else
-#  define get_cma_migratetype() MIGRATE_MOVABLE
-#endif
-
 static inline bool is_migrate_movable(int mt)
 {
 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
diff --git a/mm/Kconfig b/mm/Kconfig
index 414d96ee780c..264a2df5ecf5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -941,16 +941,6 @@ config CMA_AREAS
 
 	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
 
-config CMA_REUSE
-	bool "CMA reuse feature"
-	depends on CMA
-	help
-	  If enabled, it will add MIGRATE_CMA to pcp lists and movable
-	  allocations with __GFP_CMA flag will use cma areas prior to
-	  movable areas.
-
-	  It improves the utilization ratio of cma areas.
-
 config MEM_SOFT_DIRTY
 	bool "Track memory changes"
 	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/compaction.c b/mm/compaction.c
index 080aaf0f80a7..38c8d216c6a3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2235,7 +2235,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-		if (migratetype == get_cma_migratetype() &&
+		if (migratetype == MIGRATE_MOVABLE &&
 		    !free_area_empty(area, MIGRATE_CMA))
 			return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 667de7ed5c3f..afed33fd8761 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -275,11 +275,8 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
-#ifdef CONFIG_CMA_REUSE
-	"CMA",
-#endif
 	"HighAtomic",
-#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
+#ifdef CONFIG_CMA
 	"CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2081,27 +2078,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 }
 
-static __always_inline struct page *
-__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
-			 int migratetype, unsigned int alloc_flags)
-{
-	struct page *page = NULL;
-retry:
-	page = __rmqueue_smallest(zone, order, migratetype);
-
-	if (unlikely(!page) && is_migrate_cma(migratetype)) {
-		migratetype = MIGRATE_MOVABLE;
-		alloc_flags &= ~ALLOC_CMA;
-		page = __rmqueue_smallest(zone, order, migratetype);
-	}
-
-	if (unlikely(!page) &&
-	    __rmqueue_fallback(zone, order, migratetype, alloc_flags))
-		goto retry;
-
-	return page;
-}
-
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2112,12 +2088,6 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA_REUSE
-	page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
-	if (page)
-		return page;
-#endif
-
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
@@ -3064,7 +3034,7 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
 						  unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == get_cma_migratetype())
+	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
 #endif
 	return alloc_flags;
-- 
Gitee