diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237e006ceb5e7a6d6c8b06d3d4c52038566a..6336e792ecbca21457c7223ab84291829ac4e700 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,8 +39,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x100000u
 #define ___GFP_THISNODE		0x200000u
 #define ___GFP_ACCOUNT		0x400000u
+#define ___GFP_CMA		0x800000u
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x800000u
+#define ___GFP_NOLOCKDEP	0x1000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -57,6 +58,7 @@ struct vm_area_struct;
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -224,7 +226,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -324,7 +326,12 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	unsigned int ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	if (IS_ENABLED(CONFIG_CMA_REUSE) && ret_mt == MIGRATE_MOVABLE &&
+	    (gfp_flags & __GFP_CMA))
+		return MIGRATE_CMA;
+
+	return ret_mt;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 14e6202ce47f1395b61a45dc2d7455723dfdc0aa..97241457d97bc83fdea002e341c33d5b1c0c42c4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -274,7 +274,9 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+	return __alloc_zeroed_user_highpage(
+			__GFP_MOVABLE | __GFP_CMA, vma,
+			vaddr);
 }
 
 static inline void clear_highpage(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 63b550403317a44c9120bce08950ccc153f76bac..3ac2799dcb4aea873b35c429c3c1f33d8f51bfab 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -42,9 +42,12 @@ enum migratetype {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
+#ifdef CONFIG_CMA_REUSE
+	MIGRATE_CMA,
+#endif
 	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
 	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
 	 * ZONE_MOVABLE works.  Only movable pages can be allocated
@@ -77,6 +80,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #  define is_migrate_cma_page(_page) false
 #endif
 
+#ifdef CONFIG_CMA_REUSE
+# define get_cma_migratetype() MIGRATE_CMA
+#else
+# define get_cma_migratetype() MIGRATE_MOVABLE
+#endif
+
 static inline bool is_migrate_movable(int mt)
 {
 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
diff --git a/mm/Kconfig b/mm/Kconfig
index 9d606d258ab42cdf4ff00e6bc6831acba3e6cd78..b4d9eea471d2033a08292b0c934a87e82e0549cb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -528,6 +528,14 @@ config CMA_AREAS
 
 	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
 
+config CMA_REUSE
+	bool "CMA reuse feature"
+	depends on CMA
+	help
+	  If enabled, MIGRATE_CMA is added to the pcp lists, and movable
+	  allocations with the __GFP_CMA flag use the CMA areas before
+	  the movable areas, improving CMA area utilization.
+
 config MEM_SOFT_DIRTY
 	bool "Track memory changes"
 	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/compaction.c b/mm/compaction.c
index dba424447473d5f92c6eb7e77db6e8b58f66a7f1..22e6a6e21df86f9a1c926e7bb3b8f3f14e596f65 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2021,7 +2021,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if (migratetype == get_cma_migratetype() &&
 		    !free_area_empty(area, MIGRATE_CMA))
 			return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d189b69a9e10de1c22053a62ee12fea51496e5d..83c0146cb59e6ccbac90a9b7c3acd812cdeffd9d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -324,8 +324,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
+#ifdef CONFIG_CMA_REUSE
+	"CMA",
+#endif
 	"HighAtomic",
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	"CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2834,6 +2837,27 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 }
 
+static __always_inline struct page *
+__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
+			 int migratetype, unsigned int alloc_flags)
+{
+	struct page *page = NULL;
+retry:
+	page = __rmqueue_smallest(zone, order, migratetype);
+
+	if (unlikely(!page) && is_migrate_cma(migratetype)) {
+		migratetype = MIGRATE_MOVABLE;
+		alloc_flags &= ~ALLOC_CMA;
+		page = __rmqueue_smallest(zone, order, migratetype);
+	}
+
+	if (unlikely(!page) &&
+	    __rmqueue_fallback(zone, order, migratetype, alloc_flags))
+		goto retry;
+
+	return page;
+}
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2844,6 +2868,11 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
+#ifdef CONFIG_CMA_REUSE
+	page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+	goto out;
+#endif
+
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
@@ -3435,7 +3464,8 @@ struct page *rmqueue(struct zone *preferred_zone,
 	 * we need to skip it when CMA area isn't allowed.
 	 */
 	if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-	    migratetype != MIGRATE_MOVABLE) {
+	    migratetype != MIGRATE_MOVABLE ||
+	    IS_ENABLED(CONFIG_CMA_REUSE)) {
 		page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
 				       migratetype, alloc_flags);
 		goto out;
@@ -3776,7 +3806,7 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
 	unsigned int pflags = current->flags;
 
 	if (!(pflags & PF_MEMALLOC_NOCMA) &&
-	    gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+	    gfp_migratetype(gfp_mask) == get_cma_migratetype())
 		alloc_flags |= ALLOC_CMA;
 #endif
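
A minimal usage sketch, not part of the patch: with CONFIG_CMA_REUSE=y, a caller opts a movable allocation into CMA reuse by ORing __GFP_CMA into its gfp mask. The helper example_alloc_movable_page() below is hypothetical and exists only for illustration; alloc_page() and GFP_HIGHUSER_MOVABLE are stock kernel APIs.

#include <linux/gfp.h>
#include <linux/mm_types.h>

/* Hypothetical caller, shown only to illustrate the new flag. */
static struct page *example_alloc_movable_page(void)
{
	/*
	 * GFP_HIGHUSER_MOVABLE already carries __GFP_MOVABLE; adding
	 * __GFP_CMA makes gfp_migratetype() return MIGRATE_CMA, so
	 * __rmqueue_with_cma_reuse() tries the CMA free lists first
	 * and falls back to MIGRATE_MOVABLE only when they are empty.
	 */
	return alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_CMA);
}

Callers that must not touch CMA memory are unaffected: without __GFP_CMA the allocation keeps its MIGRATE_MOVABLE type, and PF_MEMALLOC_NOCMA still prevents ALLOC_CMA from being set in current_alloc_flags().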