diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 0092ac2519ae2da13040ed92df37c64a7d5cf4fe..2eb2f5c88c2c503d5ab34ae49f6ef7ec14d47838 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -88,7 +88,13 @@ DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);
 static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled);
 DEFINE_STATIC_KEY_TRUE(kfence_order0_page);
 
-#define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1)
+#ifdef CONFIG_ARM64_64K_PAGES
+/* For 64K page kernel, PUD is too large. Just split PMD table. (512M) */
+#define KFENCE_POOL_SIZE PMD_SIZE
+#else
+#define KFENCE_POOL_SIZE PUD_SIZE
+#endif
+#define KFENCE_MAX_OBJECTS_PER_AREA (KFENCE_POOL_SIZE / PAGE_SIZE / 2 - 1)
 
 static void kfence_enable_late(void);
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
@@ -1179,7 +1185,8 @@ static void __init kfence_alloc_pool_node(int node)
 	while (nr_need) {
 		unsigned long kfence_pool_size = (nr_request + 1) * 2 * PAGE_SIZE;
 
-		__kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size, PUD_SIZE, node);
+		__kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size,
+								KFENCE_POOL_SIZE, node);
 		if (!__kfence_pool_area[index]) {
 			pr_err("kfence alloc pool on node %d failed\n", node);
 			break;
@@ -1464,7 +1471,7 @@ static bool kfence_can_recover_tlb(struct kfence_pool_area *kpa)
 {
 #ifdef CONFIG_X86_64
 	/* only recover 1GiB aligned tlb */
-	return kpa->pool_size == PUD_SIZE;
+	return kpa->pool_size == KFENCE_POOL_SIZE;
 #else
 	/*
 	 * On arm64, the direct mapping area is already splited to page granularity
@@ -1480,12 +1487,12 @@ static inline void __kfence_recover_tlb(unsigned long addr)
 {
 	if (!arch_kfence_free_pool(addr))
 		pr_warn("fail to recover tlb to 1G at 0x%p-0x%p\n",
-			(void *)addr, (void *)(addr + PUD_SIZE));
+			(void *)addr, (void *)(addr + KFENCE_POOL_SIZE));
 }
 
 static inline void kfence_recover_tlb(struct kfence_pool_area *kpa)
 {
-	unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE);
+	unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, KFENCE_POOL_SIZE);
 
 	if (kfence_can_recover_tlb(kpa))
 		__kfence_recover_tlb(base);
@@ -1972,7 +1979,7 @@ int __init update_kfence_booting_max(void)
 {
 	static bool done __initdata;
 
-	unsigned long long parse_mem = PUD_SIZE;
+	unsigned long long parse_mem = KFENCE_POOL_SIZE;
 	unsigned long nr_pages, nr_obj_max;
 	char *cmdline;
 	int ret;
@@ -1995,7 +2002,7 @@ int __init update_kfence_booting_max(void)
 	if (ret)
 		goto nokfence;
 
-	nr_pages = min_t(unsigned long, parse_mem, PUD_SIZE) / PAGE_SIZE;
+	nr_pages = min_t(unsigned long, parse_mem, KFENCE_POOL_SIZE) / PAGE_SIZE;
 	/* We need at least 4 pages to enable KFENCE. */
 	if (nr_pages < 4)
 		goto nokfence;
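
To illustrate the size arithmetic behind the new KFENCE_POOL_SIZE: on a 64K-page arm64 kernel a single PMD entry maps 512 MiB (8192 PTEs of 64K each), whereas on 4K-page kernels a PUD entry maps 1 GiB, so the per-area object count derived from pool_size / PAGE_SIZE / 2 - 1 shrinks accordingly. The snippet below is a standalone userspace sketch of that calculation, not kernel code; the 1 GiB / 512 MiB geometry values are assumptions about the usual page-table layouts, not taken from the kernel headers.

/* Standalone sketch: per-area object counts for the two pool geometries. */
#include <stdio.h>

int main(void)
{
	/* 4K pages (x86_64 or arm64 4K): PUD-sized pool of 1 GiB */
	unsigned long long page_4k = 4ULL << 10;
	unsigned long long pool_4k = 1ULL << 30;

	/* arm64 64K pages: PMD-sized pool of 512 MiB (8192 * 64K) */
	unsigned long long page_64k = 64ULL << 10;
	unsigned long long pool_64k = 512ULL << 20;

	/* Mirrors KFENCE_MAX_OBJECTS_PER_AREA = pool / PAGE_SIZE / 2 - 1 */
	printf("4K pages,  1G pool:   %llu objects per area\n",
	       pool_4k / page_4k / 2 - 1);	/* 131071 */
	printf("64K pages, 512M pool: %llu objects per area\n",
	       pool_64k / page_64k / 2 - 1);	/* 4095 */
	return 0;
}

With 4K pages the per-area maximum stays at 131071 objects, while a 64K-page kernel gets 4095 objects per 512 MiB area, which is consistent with the "(512M)" comment added in the first hunk.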