diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 887a5532e449958f28686cce88432703b78c53e7..248e0afeac945bf2d6c51e67af2a3cbe0a05e869 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -122,6 +122,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 	show_val_kb(m, "VmallocChunk: ", 0ul);
 	show_val_kb(m, "Percpu: ", pcpu_nr_pages());
 
+#ifdef CONFIG_PAGE_TRACING
+	show_val_kb(m, "Skb: ", global_zone_page_state(NR_SKB_PAGES));
+#endif
+
 #ifdef CONFIG_MEMORY_FAILURE
 	seq_printf(m, "HardwareCorrupted: %5lu kB\n",
 		   atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10));
@@ -146,6 +150,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 		    global_zone_page_state(NR_FREE_CMA_PAGES));
 #endif
 
+#ifdef CONFIG_PAGE_TRACING
+	seq_puts(m, "GLTrack: - kB\n");
+	show_val_kb(m, "ZspageUsed: ", global_zone_page_state(NR_ZSPAGES));
+#endif
+
 	hugetlb_report_meminfo(m);
 
 	arch_report_meminfo(m);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 855a598ff674d69ab0f7d3abcf135868f555e156..d66cecefa84f4d33892230fd59104d0a70211548 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -168,6 +168,9 @@ enum zone_stat_item {
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
 	NR_FREE_CMA_PAGES,
+#ifdef CONFIG_PAGE_TRACING
+	NR_SKB_PAGES,		/* pages held by the SKB page-frag cache */
+#endif
 	NR_VM_ZONE_STAT_ITEMS };
 
 enum node_stat_item {
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4f6ba937911218a757827477ccdd000a00cde253..a6446a50c39fcfbc368023eefb8490b2b502bb83 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -138,6 +138,10 @@ enum pageflags {
 #endif
 #ifdef CONFIG_64BIT
 	PG_arch_2,
+#endif
+#ifdef CONFIG_PAGE_TRACING
+	PG_skb,			/* page backs an SKB page-frag allocation */
+	PG_zspage,		/* page belongs to a zsmalloc zspage */
 #endif
 	__NR_PAGEFLAGS,
 
@@ -444,6 +448,11 @@ TESTCLEARFLAG(Young, young, PF_ANY)
 PAGEFLAG(Idle, idle, PF_ANY)
 #endif
 
+#ifdef CONFIG_PAGE_TRACING
+PAGEFLAG(SKB, skb, PF_ANY)
+PAGEFLAG(Zspage, zspage, PF_ANY)
+#endif
+
 /*
  * PageReported() is used to track reported free pages within the Buddy
  * allocator. We can use the non-atomic version of the test and set
diff --git a/mm/Kconfig b/mm/Kconfig
index df9bf9f4ade724db9a6e24b02cc44f59858fe30e..db33064744bc0c840ff6e8daa2c014a78f22b7a1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -90,6 +90,13 @@ config HYPERHOLD_ZSWAPD
 	  and the refault of anonymous pages is high, the content of zram
 	  will exchanged to eswap by a certain percentage.
 
+config PAGE_TRACING
+	bool "Enable Page Tracing"
+	help
+	  Tag pages owned by the SKB page-frag cache (PG_skb) and by
+	  zsmalloc (PG_zspage), and report their counts in /proc/meminfo
+	  as "Skb:" and "ZspageUsed:".
+
 config DISCONTIGMEM
 	def_bool y
 	depends on (!SELECT_MEMORY_MODEL && ARCH_DISCONTIGMEM_ENABLE) || DISCONTIGMEM_MANUAL
diff --git a/mm/debug.c b/mm/debug.c
index ccca576b28990476dfdc1f73d44042a34d23bf3f..e97a23eba0d39e4ba232c219d8badca1ce030161 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -29,6 +29,10 @@ const char *migrate_reason_names[MR_TYPES] = {
 
 const struct trace_print_flags pageflag_names[] = {
 	__def_pageflag_names,
+#ifdef CONFIG_PAGE_TRACING
+	{1UL << PG_skb, "skb"},
+	{1UL << PG_zspage, "zspage"},
+#endif
 	{0, NULL}
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 15d25006cfa0656f0f742fae18f9292cf2b11928..972a70d8fe8524a194af6e5062507d13633f5138 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5095,6 +5095,18 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
 
 	nc->va = page ? page_address(page) : NULL;
 
+#ifdef CONFIG_PAGE_TRACING
+	if (likely(page)) {
+		/* nc->size matches the order actually allocated above */
+		unsigned int nr = 1U << get_order(nc->size);
+		unsigned int i;
+
+		for (i = 0; i < nr; i++)
+			SetPageSKB(page + i);
+		mod_zone_page_state(page_zone(page), NR_SKB_PAGES, (long)nr);
+	}
+#endif
+
 	return page;
 }
 
@@ -5102,8 +5114,13 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
-	if (page_ref_sub_and_test(page, count))
+	if (page_ref_sub_and_test(page, count)) {
+#ifdef CONFIG_PAGE_TRACING
+		mod_zone_page_state(page_zone(page), NR_SKB_PAGES,
+				    -(long)(1U << compound_order(page)));
+#endif
 		free_the_page(page, compound_order(page));
+	}
 }
 
 EXPORT_SYMBOL(__page_frag_cache_drain);
@@ -5173,8 +5190,13 @@ void page_frag_free(void *addr)
 {
 	struct page *page = virt_to_head_page(addr);
 
-	if (unlikely(put_page_testzero(page)))
+	if (unlikely(put_page_testzero(page))) {
+#ifdef CONFIG_PAGE_TRACING
+		mod_zone_page_state(page_zone(page), NR_SKB_PAGES,
+				    -(long)(1U << compound_order(page)));
+#endif
 		free_the_page(page, compound_order(page));
+	}
 }
 
 EXPORT_SYMBOL(page_frag_free);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index ec58ac28b4f7cab032872693819ac6a759742fa7..5b9b46f42f40d908057d7f687e0a6989c0293704 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1164,6 +1164,9 @@ const char * const vmstat_text[] = {
 #endif
 	"nr_free_cma",
 
+#ifdef CONFIG_PAGE_TRACING
+	"nr_skb_pages",
+#endif
 	/* enum numa_stat_item counters */
 #ifdef CONFIG_NUMA
 	"numa_hit",
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 7a0b79b0a689967102663cdd9759b970e22bc24e..daa9703b615a1a32a56905f7272612f7d000fcb2 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1085,6 +1085,9 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
 		}
 
 		inc_zone_page_state(page, NR_ZSPAGES);
+#ifdef CONFIG_PAGE_TRACING
+		SetPageZspage(page);
+#endif
 		pages[i] = page;
 	}
 