From 26bdefaaaa50f25940663172fcb91d3e95629c93 Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Thu, 25 Nov 2021 10:03:37 +0800
Subject: [PATCH 1/2] anolis: kfence: improve performance on is_kfence_address()

ANBZ: #27

Since is_kfence_address() is a hot path, add a static branch to skip the
subsequent steps (looking up the page and its nid) while KFENCE has not
been initialized.

This static branch is also used to speed up kfence_ksize() and to fix an
error that occurs when kfence_ksize() is called by kmemleak before
KFENCE has been initialized.

Signed-off-by: Tianchen Ding
Reviewed-by: Xunlei Pang
---
 include/linux/kfence.h |  3 ++-
 mm/kfence/core.c       | 11 ++++++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 1591b242ebd4..76bf6198bef0 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -31,6 +31,7 @@ DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
 extern atomic_t kfence_allocation_gate;
 #endif
 DECLARE_STATIC_KEY_FALSE(kfence_skip_interval);
+DECLARE_STATIC_KEY_FALSE(kfence_once_inited);
 #define GFP_KFENCE_NOT_ALLOC ((GFP_ZONEMASK & ~__GFP_HIGHMEM) | __GFP_NOKFENCE | __GFP_THISNODE)

 /**
@@ -69,7 +70,7 @@ static __always_inline bool is_kfence_address_node(const void *addr, const int n
  */
 static __always_inline bool is_kfence_address(const void *addr)
 {
-        if (unlikely(!virt_addr_valid(addr)))
+        if (!static_branch_unlikely(&kfence_once_inited) || unlikely(!virt_addr_valid(addr)))
                 return false;

         return unlikely(is_kfence_address_node(addr, page_to_nid(virt_to_page(addr))));
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index a328c9084171..184a7f3a70c9 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -59,6 +59,8 @@ EXPORT_SYMBOL(kfence_pool_size);
 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
 #endif
 DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);
+DEFINE_STATIC_KEY_FALSE(kfence_once_inited);
+EXPORT_SYMBOL(kfence_once_inited);

 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
@@ -1093,6 +1095,8 @@ void __init kfence_init(void)
                 pr_cont("\n");
         }

+        static_branch_enable(&kfence_once_inited);
+
         return;

 fail:
@@ -1273,7 +1277,12 @@ struct page *__kfence_alloc_page(int node, gfp_t flags)

 size_t kfence_ksize(const void *addr)
 {
-        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
+        struct kfence_metadata *meta;
+
+        if (!static_branch_unlikely(&kfence_once_inited))
+                return 0;
+
+        meta = addr_to_metadata((unsigned long)addr);

         /*
          * Read locklessly -- if there is a race with __kfence_alloc(), this is
--
Gitee

From b3aa831b9d039c953473231bd81c669dd5983e34 Mon Sep 17 00:00:00 2001
From: Tianchen Ding
Date: Wed, 1 Dec 2021 17:53:23 +0800
Subject: [PATCH 2/2] anolis: kfence: adjust KFENCE page alloc to fit debug kernel

ANBZ: #28

The debug kernel checks the page refcount in prep_new_page(). Avoid
triggering this BUG by setting the refcount to 0 before calling
prep_new_page().

Signed-off-by: Tianchen Ding
Reviewed-by: Xunlei Pang
---
 mm/kfence/core.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 184a7f3a70c9..77ca870957a4 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -562,6 +562,9 @@ static struct page *kfence_guarded_alloc_page(int node)

         page = virt_to_page(addr);
         __ClearPageSlab(page);
+#ifdef CONFIG_DEBUG_VM
+        atomic_set(&page->_refcount, 0);
+#endif

         raw_spin_unlock_irqrestore(&meta->lock, flags);

--
Gitee
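
Editor's note: the sketch below is illustrative only and is not part of the
series. It models the static-key pattern that patch 1 relies on, using the
hypothetical names example_inited and example_is_tracked in place of
kfence_once_inited and is_kfence_address(); the "pool membership" check is a
placeholder. The point it demonstrates: until static_branch_enable() flips
the key, static_branch_unlikely() compiles down to a patched no-op branch,
so the virt_to_page()/page_to_nid() work on the hot path is never executed
while KFENCE is uninitialized.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative module sketch of the static-key early-out pattern. */
#include <linux/jump_label.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>

/* Hypothetical stand-in for kfence_once_inited; starts disabled ("false"). */
static DEFINE_STATIC_KEY_FALSE(example_inited);

static __always_inline bool example_is_tracked(const void *addr)
{
        /*
         * Fast path: while the key is disabled, this function reduces to a
         * single patched branch and returns false immediately.
         */
        if (!static_branch_unlikely(&example_inited) || unlikely(!virt_addr_valid(addr)))
                return false;

        /* Slow path placeholder; in KFENCE this is the per-node pool check. */
        return page_to_nid(virt_to_page(addr)) == 0;
}

static int __init example_init(void)
{
        /* Mirrors kfence_init(): enable the key once, after setup is done. */
        static_branch_enable(&example_inited);
        pr_info("tracked(NULL) = %d\n", example_is_tracked(NULL));
        return 0;
}

static void __exit example_exit(void)
{
        static_branch_disable(&example_inited);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");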