From 3c1a47e6207f67df8749c5915356de8d02a04658 Mon Sep 17 00:00:00 2001
From: CY Fan
Date: Mon, 9 May 2022 10:08:27 +0800
Subject: [PATCH] Add new page tracing in meminfo

ohos inclusion
category: feature
issue: #I56CWZ
CVE: NA

--------------------------------

Add ZspageUsed, Skb and GLTrack page tracing in meminfo

Signed-off-by: CY Fan
---
 fs/proc/meminfo.c          |  9 +++++++++
 include/linux/mmzone.h     |  3 +++
 include/linux/page-flags.h |  9 +++++++++
 mm/Kconfig                 |  6 ++++++
 mm/debug.c                 |  4 ++++
 mm/page_alloc.c            | 37 +++++++++++++++++++++++++++++++++++--
 mm/vmstat.c                |  3 +++
 mm/zsmalloc.c              |  3 +++
 8 files changed, 72 insertions(+), 2 deletions(-)

diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 887a5532e449..248e0afeac94 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -122,6 +122,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "VmallocChunk: ", 0ul);
 	show_val_kb(m, "Percpu: ", pcpu_nr_pages());
 
+#ifdef CONFIG_PAGE_TRACING
+	show_val_kb(m, "Skb: ", global_zone_page_state(NR_SKB_PAGES));
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 	seq_printf(m, "HardwareCorrupted: %5lu kB\n",
 		   atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10));
@@ -146,6 +150,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		   global_zone_page_state(NR_FREE_CMA_PAGES));
 #endif
 
+#ifdef CONFIG_PAGE_TRACING
+	seq_puts(m, "GLTrack: - kB\n");
+	show_val_kb(m, "ZspageUsed: ", global_zone_page_state(NR_ZSPAGES));
+#endif
+
 	hugetlb_report_meminfo(m);
 
 	arch_report_meminfo(m);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 855a598ff674..d66cecefa84f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -168,6 +168,9 @@ enum zone_stat_item {
 	NR_ZSPAGES,	/* allocated in zsmalloc */
 #endif
 	NR_FREE_CMA_PAGES,
+#ifdef CONFIG_PAGE_TRACING
+	NR_SKB_PAGES,
+#endif
 	NR_VM_ZONE_STAT_ITEMS };
 
 enum node_stat_item {
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4f6ba9379112..a6446a50c39f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -138,6 +138,10 @@ enum pageflags {
 #endif
 #ifdef CONFIG_64BIT
 	PG_arch_2,
+#endif
+#ifdef CONFIG_PAGE_TRACING
+	PG_skb,
+	PG_zspage,
 #endif
 	__NR_PAGEFLAGS,
 
@@ -444,6 +448,11 @@ TESTCLEARFLAG(Young, young, PF_ANY)
 PAGEFLAG(Idle, idle, PF_ANY)
 #endif
 
+#ifdef CONFIG_PAGE_TRACING
+	PAGEFLAG(SKB, skb, PF_ANY)
+	PAGEFLAG(Zspage, zspage, PF_ANY)
+#endif
+
 /*
  * PageReported() is used to track reported free pages within the Buddy
  * allocator. We can use the non-atomic version of the test and set
diff --git a/mm/Kconfig b/mm/Kconfig
index df9bf9f4ade7..db33064744bc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -90,6 +90,12 @@ config HYPERHOLD_ZSWAPD
 	  and the refault of anonymous pages is high, the content of zram
 	  will exchanged to eswap by a certain percentage.
 
+config PAGE_TRACING
+	bool "Enable Page Tracing"
+	default n
+	help
+	  This option enables tracing of skb and zspage pages in /proc/meminfo.
+
 config DISCONTIGMEM
 	def_bool y
 	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
diff --git a/mm/debug.c b/mm/debug.c
index ccca576b2899..e97a23eba0d3 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -29,6 +29,10 @@ const char *migrate_reason_names[MR_TYPES] = {
 
 const struct trace_print_flags pageflag_names[] = {
 	__def_pageflag_names,
+#ifdef CONFIG_PAGE_TRACING
+	{1UL << PG_skb, "skb"},
+	{1UL << PG_zspage, "zspage"},
+#endif
 	{0, NULL}
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 15d25006cfa0..972a70d8fe85 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5095,6 +5095,23 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 
 	nc->va = page ? page_address(page) : NULL;
 
+#ifdef CONFIG_PAGE_TRACING
+	if (likely(page)) {
+		int order = get_order(nc->size);
+		int i;
+		struct page *newpage = page;
+		unsigned int deta = 1U << (unsigned int)order;
+
+		for (i = 0; i < (1 << order); i++) {
+			if (!newpage)
+				break;
+			SetPageSKB(newpage);
+			newpage++;
+		}
+		mod_zone_page_state(page_zone(page), NR_SKB_PAGES, (long)deta);
+	}
+#endif
+
 	return page;
 }
 
@@ -5102,8 +5119,16 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
-	if (page_ref_sub_and_test(page, count))
+	if (page_ref_sub_and_test(page, count)) {
+#ifdef CONFIG_PAGE_TRACING
+		if (likely(page)) {
+			unsigned int deta = 1U << compound_order(page);
+
+			mod_zone_page_state(page_zone(page), NR_SKB_PAGES, -(long)deta);
+		}
+#endif
 		free_the_page(page, compound_order(page));
+	}
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -5173,8 +5198,16 @@ void page_frag_free(void *addr)
 {
 	struct page *page = virt_to_head_page(addr);
 
-	if (unlikely(put_page_testzero(page)))
+	if (unlikely(put_page_testzero(page))) {
+#ifdef CONFIG_PAGE_TRACING
+		if (likely(page)) {
+			unsigned int deta = 1U << compound_order(page);
+
+			mod_zone_page_state(page_zone(page), NR_SKB_PAGES, -(long)deta);
+		}
+#endif
 		free_the_page(page, compound_order(page));
+	}
 }
 EXPORT_SYMBOL(page_frag_free);
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ec58ac28b4f7..5b9b46f42f40 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1164,6 +1164,9 @@ const char * const vmstat_text[] = {
 #endif
 	"nr_free_cma",
+#ifdef CONFIG_PAGE_TRACING
+	"nr_skb_pages",
+#endif
 
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA
 	"numa_hit",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7a0b79b0a689..daa9703b615a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1085,6 +1085,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 		}
 
 		inc_zone_page_state(page, NR_ZSPAGES);
+#ifdef CONFIG_PAGE_TRACING
+		SetPageZspage(page);
+#endif
 		pages[i] = page;
 	}
 
--
Gitee
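
For context: with CONFIG_PAGE_TRACING enabled, the counters introduced above
appear as ordinary /proc/meminfo fields. The minimal userspace sketch below
shows one way they could be read; the field names ("Skb:", "ZspageUsed:",
"GLTrack:") come from the hunks above, while the program itself is purely
illustrative and not part of the patch.

/*
 * Sketch: print only the meminfo entries added by CONFIG_PAGE_TRACING.
 * Assumes the kernel is built with this patch applied.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/meminfo", "r");
	char line[256];

	if (!fp) {
		perror("fopen /proc/meminfo");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		/* Match the prefixes of the new counters only. */
		if (!strncmp(line, "Skb:", 4) ||
		    !strncmp(line, "ZspageUsed:", 11) ||
		    !strncmp(line, "GLTrack:", 8))
			fputs(line, stdout);
	}

	fclose(fp);
	return 0;
}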