diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 49bd6ccee23aec0384ffaa694c068cc0333ecf7e..d34874291b12ee208aa5df9a5e57ee3372fcd55e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -119,6 +119,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
 				loff_t write_end, bool shmem_huge_force);
 bool shmem_hpage_pmd_enabled(void);
+int shmem_allowable_huge_highest_order(void);
 #else
 static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
@@ -131,6 +132,11 @@ static inline bool shmem_hpage_pmd_enabled(void)
 {
 	return false;
 }
+
+static inline int shmem_allowable_huge_highest_order(void)
+{
+	return 0;
+}
 #endif
 
 #ifdef CONFIG_SHMEM
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 92556dfe845166765b1d5d0efed85d6cca3ed060..e889f292d310b32d2200b1d37e8e30753377c9b3 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -578,6 +578,14 @@ static ssize_t file_enabled_store(struct kobject *kobj,
 	}
 	spin_unlock(&huge_file_orders_lock);
+
+	if (ret > 0) {
+		int err;
+
+		err = start_stop_khugepaged();
+		if (err)
+			ret = err;
+	}
 
 	return ret;
 }
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3a2d250ce3d7f0a409a94284a17b5d97bc6e45f4..b34a0ca13caf122ee286dd7b945c7dc1778a1119 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2607,11 +2607,32 @@ static int khugepaged(void *none)
 	return 0;
 }
 
+static int anon_allowable_huge_highest_order(void)
+{
+	unsigned long orders = READ_ONCE(huge_anon_orders_always) |
+			       READ_ONCE(huge_anon_orders_madvise);
+
+	if (hugepage_global_enabled())
+		orders |= READ_ONCE(huge_anon_orders_inherit);
+
+	return orders == 0 ? 0 : fls(orders) - 1;
+}
+
+static unsigned long mthp_max_allowable_nr_pages(void)
+{
+	int anon_highest_order = anon_allowable_huge_highest_order();
+	int shmem_highest_order = shmem_allowable_huge_highest_order();
+	int file_highest_order = file_orders_always() ? fls(file_orders_always()) - 1 : 0;
+
+	return 1UL << max3(anon_highest_order, shmem_highest_order, file_highest_order);
+}
+
 static void set_recommended_min_free_kbytes(void)
 {
 	struct zone *zone;
 	int nr_zones = 0;
 	unsigned long recommended_min;
+	unsigned long recommended_nr_pages;
 
 	if (!hugepage_pmd_enabled()) {
 		calculate_min_free_kbytes();
@@ -2629,8 +2650,12 @@ static void set_recommended_min_free_kbytes(void)
 		nr_zones++;
 	}
 
-	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
-	recommended_min = pageblock_nr_pages * nr_zones * 2;
+	/* Restrict min_free_kbytes reserve to mthp maximum */
+	recommended_nr_pages = min(mthp_max_allowable_nr_pages(),
+				   (unsigned long)pageblock_nr_pages);
+
+	/* Ensure 2 * recommended_nr_pages are free to assist fragmentation avoidance */
+	recommended_min = recommended_nr_pages * nr_zones * 2;
 
 	/*
 	 * Make sure that on average at least two pageblocks are almost free
 	 * of another type, one for a migratetype to fall back to and a
 	 * second to avoid subsequent fallbacks of other types There are 3
 	 * MIGRATE_TYPES we care about.
 	 */
-	recommended_min += pageblock_nr_pages * nr_zones *
+	recommended_min += recommended_nr_pages * nr_zones *
 			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 
 	/* don't ever allow to reserve more than 5% of the lowmem */
diff --git a/mm/shmem.c b/mm/shmem.c
index e2a7889fbbc3ae497f2b42c816ac08f611e6ab55..a7a45e7ec26fe5c010713211e9b42653b497ae08 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1763,6 +1763,22 @@ bool shmem_hpage_pmd_enabled(void)
 	return false;
 }
 
+int shmem_allowable_huge_highest_order(void)
+{
+	unsigned long orders;
+
+	if (shmem_huge == SHMEM_HUGE_DENY)
+		return 0;
+
+	orders = READ_ONCE(huge_shmem_orders_always) | READ_ONCE(huge_shmem_orders_madvise)
+		 | READ_ONCE(huge_shmem_orders_within_size);
+
+	if (shmem_huge != SHMEM_HUGE_NEVER)
+		orders |= READ_ONCE(huge_shmem_orders_inherit);
+
+	return orders == 0 ? 0 : fls(orders) - 1;
+}
+
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
 				loff_t write_end, bool shmem_huge_force)
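
For illustration only, not part of the patch: a standalone userspace sketch of the revised reservation arithmetic. The inputs below (pageblock size, zone count, highest enabled mTHP order) are hypothetical, and the sketch ignores the hugepage_pmd_enabled() gate and the 5%-of-lowmem clamp that the kernel code applies; it only shows how the recommendation now scales with the largest allowable mTHP size rather than always with a full pageblock.

/*
 * Illustrative sketch only, not kernel code: compares the old and new
 * min_free_kbytes recommendation units. Inputs are hypothetical; the
 * patch derives the highest order from the anon/shmem/file order masks.
 */
#include <stdio.h>

#define MIGRATE_PCPTYPES 3	/* the 3 MIGRATE_TYPES the kernel cares about here */

int main(void)
{
	unsigned long pageblock_nr_pages = 512;	/* 2MB pageblocks with 4KB pages */
	unsigned long nr_zones = 2;		/* e.g. DMA32 + Normal */
	int highest_order = 4;			/* e.g. only 64KB mTHP enabled */

	/* Old behaviour: reserve in units of a full pageblock */
	unsigned long old_nr = pageblock_nr_pages;

	/* New behaviour: cap the unit at the largest allowable mTHP size */
	unsigned long new_nr = 1UL << highest_order;
	if (new_nr > pageblock_nr_pages)
		new_nr = pageblock_nr_pages;

	unsigned long old_min = old_nr * nr_zones * 2 +
				old_nr * nr_zones * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
	unsigned long new_min = new_nr * nr_zones * 2 +
				new_nr * nr_zones * MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	printf("old recommendation: %lu pages, new recommendation: %lu pages\n",
	       old_min, new_min);
	return 0;
}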